[med-svn] [python-mne] 04/13: New version. Update packaging.

Yaroslav Halchenko debian at onerussian.com
Wed Nov 25 16:20:59 UTC 2015


This is an automated email from the git hooks/post-receive script.

yoh pushed a commit to branch master
in repository python-mne.

commit f2369192d1881e2d171a2bae2d3e7ad1dbd7d1a3
Author: jaeilepp <jaeilepp at student.jyu.fi>
Date:   Mon Nov 9 06:07:13 2015 -0500

    New version.
    Update packaging.
---
 debian/changelog                                   |    9 +
 debian/control                                     |    3 +-
 debian/files                                       |    1 +
 debian/patches/debian_paths                        |   56 -
 debian/patches/paths                               |   70 +
 debian/patches/series                              |    3 +-
 debian/patches/test_fix                            |   23 +
 debian/python-mne.postinst.debhelper               |    7 +
 debian/python-mne.prerm.debhelper                  |   12 +
 debian/python-mne.substvars                        |    4 +
 debian/python-mne/DEBIAN/control                   |   16 +
 debian/python-mne/DEBIAN/md5sums                   |  420 +++
 debian/python-mne/DEBIAN/postinst                  |    9 +
 debian/python-mne/DEBIAN/prerm                     |   14 +
 debian/python-mne/usr/bin/mne                      |   39 +
 .../lib/python2.7/dist-packages/mne/__init__.py    |  106 +
 .../lib/python2.7/dist-packages/mne/baseline.py    |   97 +
 .../dist-packages/mne/beamformer/__init__.py       |    6 +
 .../dist-packages/mne/beamformer/_dics.py          |  611 +++++
 .../dist-packages/mne/beamformer/_lcmv.py          |  821 ++++++
 .../dist-packages/mne/beamformer/_rap_music.py     |  274 ++
 .../dist-packages/mne/beamformer/tests/__init__.py |    0
 .../mne/beamformer/tests/test_dics.py              |  312 +++
 .../mne/beamformer/tests/test_lcmv.py              |  378 +++
 .../mne/beamformer/tests/test_rap_music.py         |  152 ++
 .../usr/lib/python2.7/dist-packages/mne/bem.py     | 1660 ++++++++++++
 .../dist-packages/mne/channels/__init__.py         |   11 +
 .../dist-packages/mne/channels/channels.py         |  783 ++++++
 .../mne/channels/data/layouts/CTF-275.lout         |  276 ++
 .../mne/channels/data/layouts/CTF151.lay           |  153 ++
 .../mne/channels/data/layouts/CTF275.lay           |  275 ++
 .../mne/channels/data/layouts/EEG1005.lay          |  337 +++
 .../mne/channels/data/layouts/EGI256.lout          |  259 ++
 .../mne/channels/data/layouts/KIT-157.lout         |  158 ++
 .../mne/channels/data/layouts/KIT-AD.lout          |  209 ++
 .../mne/channels/data/layouts/Vectorview-all.lout  |  307 +++
 .../mne/channels/data/layouts/Vectorview-grad.lout |  205 ++
 .../mne/channels/data/layouts/Vectorview-mag.lout  |  103 +
 .../mne/channels/data/layouts/biosemi.lay          |   64 +
 .../mne/channels/data/layouts/magnesWH3600.lout    |  249 ++
 .../channels/data/montages/GSN-HydroCel-128.sfp    |  131 +
 .../channels/data/montages/GSN-HydroCel-129.sfp    |  132 +
 .../channels/data/montages/GSN-HydroCel-256.sfp    |  259 ++
 .../channels/data/montages/GSN-HydroCel-257.sfp    |  260 ++
 .../mne/channels/data/montages/GSN-HydroCel-32.sfp |   36 +
 .../channels/data/montages/GSN-HydroCel-64_1.0.sfp |   67 +
 .../channels/data/montages/GSN-HydroCel-65_1.0.sfp |   68 +
 .../mne/channels/data/montages/biosemi128.txt      |  132 +
 .../mne/channels/data/montages/biosemi16.txt       |   20 +
 .../mne/channels/data/montages/biosemi160.txt      |  164 ++
 .../mne/channels/data/montages/biosemi256.txt      |  260 ++
 .../mne/channels/data/montages/biosemi32.txt       |   36 +
 .../mne/channels/data/montages/biosemi64.txt       |   68 +
 .../mne/channels/data/montages/easycap-M1.txt      |   75 +
 .../mne/channels/data/montages/easycap-M10.txt     |   62 +
 .../mne/channels/data/montages/standard_1005.elc   |  698 +++++
 .../mne/channels/data/montages/standard_1020.elc   |  200 ++
 .../channels/data/montages/standard_alphabetic.elc |  142 +
 .../channels/data/montages/standard_postfixed.elc  |  212 ++
 .../channels/data/montages/standard_prefixed.elc   |  160 ++
 .../mne/channels/data/montages/standard_primed.elc |  212 ++
 .../mne/channels/data/neighbors/KIT-157_neighb.mat |  Bin 0 -> 4939 bytes
 .../mne/channels/data/neighbors/KIT-208_neighb.mat |  Bin 0 -> 6636 bytes
 .../channels/data/neighbors/biosemi16_neighb.mat   |  Bin 0 -> 511 bytes
 .../channels/data/neighbors/biosemi32_neighb.mat   |  Bin 0 -> 942 bytes
 .../channels/data/neighbors/biosemi64_neighb.mat   |  Bin 0 -> 1812 bytes
 .../mne/channels/data/neighbors/bti148_neighb.mat  |  Bin 0 -> 3920 bytes
 .../mne/channels/data/neighbors/bti248_neighb.mat  |  Bin 0 -> 6577 bytes
 .../channels/data/neighbors/bti248grad_neighb.mat  |  Bin 0 -> 8337 bytes
 .../mne/channels/data/neighbors/ctf151_neighb.mat  |  Bin 0 -> 4380 bytes
 .../mne/channels/data/neighbors/ctf275_neighb.mat  |  Bin 0 -> 7831 bytes
 .../mne/channels/data/neighbors/ctf64_neighb.mat   |  Bin 0 -> 2397 bytes
 .../data/neighbors/easycap128ch-avg_neighb.mat     |  Bin 0 -> 3870 bytes
 .../data/neighbors/easycap32ch-avg_neighb.mat      |  Bin 0 -> 1127 bytes
 .../data/neighbors/easycap64ch-avg_neighb.mat      |  Bin 0 -> 1861 bytes
 .../channels/data/neighbors/easycapM11_neighb.mat  |  Bin 0 -> 1792 bytes
 .../channels/data/neighbors/easycapM14_neighb.mat  |  Bin 0 -> 3529 bytes
 .../channels/data/neighbors/easycapM15_neighb.mat  |  Bin 0 -> 3906 bytes
 .../channels/data/neighbors/easycapM1_neighb.mat   |  Bin 0 -> 2145 bytes
 .../channels/data/neighbors/neuromag122_neighb.mat |  Bin 0 -> 3400 bytes
 .../data/neighbors/neuromag306mag_neighb.mat       |  Bin 0 -> 2753 bytes
 .../data/neighbors/neuromag306planar_neighb.mat    |  Bin 0 -> 5580 bytes
 .../dist-packages/mne/channels/interpolation.py    |  207 ++
 .../python2.7/dist-packages/mne/channels/layout.py |  825 ++++++
 .../dist-packages/mne/channels/montage.py          |  533 ++++
 .../dist-packages/mne/channels/tests/__init__.py   |    0
 .../mne/channels/tests/test_channels.py            |  152 ++
 .../mne/channels/tests/test_interpolation.py       |  120 +
 .../mne/channels/tests/test_layout.py              |  380 +++
 .../mne/channels/tests/test_montage.py             |  209 ++
 .../usr/lib/python2.7/dist-packages/mne/chpi.py    |  440 +++
 .../dist-packages/mne/commands/__init__.py         |    1 +
 .../dist-packages/mne/commands/mne_browse_raw.py   |  107 +
 .../dist-packages/mne/commands/mne_bti2fiff.py     |   91 +
 .../mne/commands/mne_clean_eog_ecg.py              |  137 +
 .../dist-packages/mne/commands/mne_compare_fiff.py |   27 +
 .../mne/commands/mne_compute_proj_ecg.py           |  213 ++
 .../mne/commands/mne_compute_proj_eog.py           |  198 ++
 .../dist-packages/mne/commands/mne_coreg.py        |   29 +
 .../dist-packages/mne/commands/mne_flash_bem.py    |   90 +
 .../mne/commands/mne_flash_bem_model.py            |  145 +
 .../mne/commands/mne_freeview_bem_surfaces.py      |   92 +
 .../dist-packages/mne/commands/mne_kit2fiff.py     |   72 +
 .../mne/commands/mne_make_scalp_surfaces.py        |  144 +
 .../dist-packages/mne/commands/mne_maxfilter.py    |  148 +
 .../dist-packages/mne/commands/mne_report.py       |   93 +
 .../dist-packages/mne/commands/mne_surf2bem.py     |   48 +
 .../mne/commands/mne_watershed_bem.py              |   62 +
 .../dist-packages/mne/commands/tests/__init__.py   |    0
 .../mne/commands/tests/test_commands.py            |  244 ++
 .../python2.7/dist-packages/mne/commands/utils.py  |   45 +
 .../dist-packages/mne/connectivity/__init__.py     |    6 +
 .../dist-packages/mne/connectivity/effective.py    |  162 ++
 .../dist-packages/mne/connectivity/spectral.py     | 1062 ++++++++
 .../mne/connectivity/tests/__init__.py             |    0
 .../mne/connectivity/tests/test_effective.py       |   40 +
 .../mne/connectivity/tests/test_spectral.py        |  227 ++
 .../mne/connectivity/tests/test_utils.py           |   23 +
 .../dist-packages/mne/connectivity/utils.py        |   45 +
 .../usr/lib/python2.7/dist-packages/mne/coreg.py   | 1088 ++++++++
 .../usr/lib/python2.7/dist-packages/mne/cov.py     | 1915 +++++++++++++
 .../usr/lib/python2.7/dist-packages/mne/cuda.py    |  384 +++
 .../dist-packages/mne/data/FreeSurferColorLUT.txt  | 1397 ++++++++++
 .../python2.7/dist-packages/mne/data/__init__.py   |    0
 .../python2.7/dist-packages/mne/data/coil_def.dat  |  461 ++++
 .../dist-packages/mne/data/coil_def_Elekta.dat     |   70 +
 .../dist-packages/mne/data/mne_analyze.sel         |   13 +
 .../dist-packages/mne/datasets/__init__.py         |   11 +
 .../dist-packages/mne/datasets/_fake/__init__.py   |    4 +
 .../dist-packages/mne/datasets/_fake/_fake.py      |   25 +
 .../mne/datasets/brainstorm/__init__.py            |    4 +
 .../mne/datasets/brainstorm/bst_auditory.py        |   60 +
 .../mne/datasets/brainstorm/bst_raw.py             |   59 +
 .../mne/datasets/brainstorm/bst_resting.py         |   51 +
 .../dist-packages/mne/datasets/eegbci/__init__.py  |    4 +
 .../dist-packages/mne/datasets/eegbci/eegbci.py    |  163 ++
 .../dist-packages/mne/datasets/megsim/__init__.py  |    4 +
 .../dist-packages/mne/datasets/megsim/megsim.py    |  166 ++
 .../dist-packages/mne/datasets/megsim/urls.py      |  172 ++
 .../dist-packages/mne/datasets/sample/__init__.py  |    5 +
 .../dist-packages/mne/datasets/sample/sample.py    |   42 +
 .../dist-packages/mne/datasets/somato/__init__.py  |    4 +
 .../dist-packages/mne/datasets/somato/somato.py    |   29 +
 .../mne/datasets/spm_face/__init__.py              |    4 +
 .../mne/datasets/spm_face/spm_data.py              |   28 +
 .../dist-packages/mne/datasets/testing/__init__.py |    4 +
 .../dist-packages/mne/datasets/testing/_testing.py |   47 +
 .../dist-packages/mne/datasets/tests/__init__.py   |    0
 .../mne/datasets/tests/test_datasets.py            |   46 +
 .../python2.7/dist-packages/mne/datasets/utils.py  |  329 +++
 .../dist-packages/mne/decoding/__init__.py         |    7 +
 .../python2.7/dist-packages/mne/decoding/base.py   |  622 +++++
 .../python2.7/dist-packages/mne/decoding/csp.py    |  467 ++++
 .../python2.7/dist-packages/mne/decoding/ems.py    |  117 +
 .../python2.7/dist-packages/mne/decoding/mixin.py  |   30 +
 .../dist-packages/mne/decoding/tests/__init__.py   |    0
 .../dist-packages/mne/decoding/tests/test_csp.py   |  108 +
 .../dist-packages/mne/decoding/tests/test_ems.py   |   56 +
 .../mne/decoding/tests/test_time_gen.py            |  309 +++
 .../mne/decoding/tests/test_transformer.py         |  162 ++
 .../dist-packages/mne/decoding/time_gen.py         | 1287 +++++++++
 .../dist-packages/mne/decoding/transformer.py      |  536 ++++
 .../lib/python2.7/dist-packages/mne/defaults.py    |   54 +
 .../usr/lib/python2.7/dist-packages/mne/dipole.py  |  720 +++++
 .../usr/lib/python2.7/dist-packages/mne/epochs.py  | 2602 ++++++++++++++++++
 .../usr/lib/python2.7/dist-packages/mne/event.py   |  775 ++++++
 .../usr/lib/python2.7/dist-packages/mne/evoked.py  | 1284 +++++++++
 .../dist-packages/mne/externals/FieldTrip.py       |  508 ++++
 .../dist-packages/mne/externals/__init__.py        |    5 +
 .../dist-packages/mne/externals/decorator.py       |  253 ++
 .../dist-packages/mne/externals/h5io/__init__.py   |    6 +
 .../dist-packages/mne/externals/h5io/_h5io.py      |  297 ++
 .../python2.7/dist-packages/mne/externals/jdcal.py |  116 +
 .../python2.7/dist-packages/mne/externals/six.py   |  577 ++++
 .../mne/externals/tempita/__init__.py              | 1303 +++++++++
 .../dist-packages/mne/externals/tempita/_looper.py |  163 ++
 .../dist-packages/mne/externals/tempita/compat3.py |   45 +
 .../usr/lib/python2.7/dist-packages/mne/filter.py  | 1571 +++++++++++
 .../usr/lib/python2.7/dist-packages/mne/fixes.py   |  888 ++++++
 .../dist-packages/mne/forward/__init__.py          |   19 +
 .../dist-packages/mne/forward/_compute_forward.py  |  863 ++++++
 .../mne/forward/_field_interpolation.py            |  413 +++
 .../dist-packages/mne/forward/_lead_dots.py        |  521 ++++
 .../dist-packages/mne/forward/_make_forward.py     |  584 ++++
 .../python2.7/dist-packages/mne/forward/forward.py | 1670 ++++++++++++
 .../dist-packages/mne/forward/tests/__init__.py    |    0
 .../mne/forward/tests/test_field_interpolation.py  |  223 ++
 .../mne/forward/tests/test_forward.py              |  331 +++
 .../mne/forward/tests/test_make_forward.py         |  356 +++
 .../python2.7/dist-packages/mne/gui/__init__.py    |  101 +
 .../python2.7/dist-packages/mne/gui/_coreg_gui.py  | 1383 ++++++++++
 .../dist-packages/mne/gui/_fiducials_gui.py        |  453 ++++
 .../dist-packages/mne/gui/_file_traits.py          |  509 ++++
 .../dist-packages/mne/gui/_kit2fiff_gui.py         |  508 ++++
 .../python2.7/dist-packages/mne/gui/_marker_gui.py |  435 +++
 .../lib/python2.7/dist-packages/mne/gui/_viewer.py |  331 +++
 .../dist-packages/mne/gui/tests/__init__.py        |    0
 .../dist-packages/mne/gui/tests/test_coreg_gui.py  |  187 ++
 .../mne/gui/tests/test_fiducials_gui.py            |   67 +
 .../mne/gui/tests/test_file_traits.py              |  104 +
 .../mne/gui/tests/test_kit2fiff_gui.py             |  106 +
 .../dist-packages/mne/gui/tests/test_marker_gui.py |   83 +
 .../dist-packages/mne/html/bootstrap.min.css       |    7 +
 .../dist-packages/mne/html/bootstrap.min.js        |    7 +
 .../python2.7/dist-packages/mne/html/d3.v3.min.js  |    1 +
 .../dist-packages/mne/html/jquery-1.10.2.min.js    |    1 +
 .../dist-packages/mne/html/jquery-ui.min.css       |    6 +
 .../dist-packages/mne/html/jquery-ui.min.js        |    1 +
 .../dist-packages/mne/inverse_sparse/__init__.py   |    8 +
 .../dist-packages/mne/inverse_sparse/_gamma_map.py |  301 +++
 .../mne/inverse_sparse/mxne_debiasing.py           |  135 +
 .../mne/inverse_sparse/mxne_inverse.py             |  531 ++++
 .../dist-packages/mne/inverse_sparse/mxne_optim.py | 1046 +++++++
 .../mne/inverse_sparse/tests/__init__.py           |    0
 .../mne/inverse_sparse/tests/test_gamma_map.py     |   64 +
 .../inverse_sparse/tests/test_mxne_debiasing.py    |   22 +
 .../mne/inverse_sparse/tests/test_mxne_inverse.py  |  114 +
 .../mne/inverse_sparse/tests/test_mxne_optim.py    |  196 ++
 .../lib/python2.7/dist-packages/mne/io/__init__.py |   85 +
 .../dist-packages/mne/io/array/__init__.py         |    5 +
 .../python2.7/dist-packages/mne/io/array/array.py  |   50 +
 .../dist-packages/mne/io/array/tests/__init__.py   |    0
 .../dist-packages/mne/io/array/tests/test_array.py |  114 +
 .../usr/lib/python2.7/dist-packages/mne/io/base.py | 2218 +++++++++++++++
 .../dist-packages/mne/io/brainvision/__init__.py   |    7 +
 .../mne/io/brainvision/brainvision.py              |  512 ++++
 .../mne/io/brainvision/tests/__init__.py           |    1 +
 .../mne/io/brainvision/tests/test_brainvision.py   |  207 ++
 .../python2.7/dist-packages/mne/io/bti/__init__.py |    5 +
 .../lib/python2.7/dist-packages/mne/io/bti/bti.py  | 1365 ++++++++++
 .../dist-packages/mne/io/bti/constants.py          |   99 +
 .../lib/python2.7/dist-packages/mne/io/bti/read.py |  120 +
 .../dist-packages/mne/io/bti/tests/__init__.py     |    0
 .../dist-packages/mne/io/bti/tests/test_bti.py     |  258 ++
 .../python2.7/dist-packages/mne/io/compensator.py  |  160 ++
 .../python2.7/dist-packages/mne/io/constants.py    |  797 ++++++
 .../usr/lib/python2.7/dist-packages/mne/io/ctf.py  |  256 ++
 .../usr/lib/python2.7/dist-packages/mne/io/diff.py |   39 +
 .../python2.7/dist-packages/mne/io/edf/__init__.py |    7 +
 .../lib/python2.7/dist-packages/mne/io/edf/edf.py  |  628 +++++
 .../dist-packages/mne/io/edf/tests/__init__.py     |    0
 .../dist-packages/mne/io/edf/tests/test_edf.py     |  275 ++
 .../python2.7/dist-packages/mne/io/egi/__init__.py |    5 +
 .../lib/python2.7/dist-packages/mne/io/egi/egi.py  |  330 +++
 .../dist-packages/mne/io/egi/tests/__init__.py     |    0
 .../dist-packages/mne/io/egi/tests/test_egi.py     |   82 +
 .../dist-packages/mne/io/fiff/__init__.py          |    2 +
 .../lib/python2.7/dist-packages/mne/io/fiff/raw.py |  487 ++++
 .../dist-packages/mne/io/fiff/tests/__init__.py    |    0
 .../dist-packages/mne/io/fiff/tests/test_raw.py    | 1188 ++++++++
 .../python2.7/dist-packages/mne/io/kit/__init__.py |    8 +
 .../dist-packages/mne/io/kit/constants.py          |   99 +
 .../python2.7/dist-packages/mne/io/kit/coreg.py    |   87 +
 .../lib/python2.7/dist-packages/mne/io/kit/kit.py  |  827 ++++++
 .../dist-packages/mne/io/kit/tests/__init__.py     |    3 +
 .../dist-packages/mne/io/kit/tests/test_coreg.py   |   30 +
 .../dist-packages/mne/io/kit/tests/test_kit.py     |  163 ++
 .../lib/python2.7/dist-packages/mne/io/matrix.py   |  130 +
 .../python2.7/dist-packages/mne/io/meas_info.py    | 1408 ++++++++++
 .../usr/lib/python2.7/dist-packages/mne/io/open.py |  251 ++
 .../usr/lib/python2.7/dist-packages/mne/io/pick.py |  623 +++++
 .../python2.7/dist-packages/mne/io/proc_history.py |  290 ++
 .../usr/lib/python2.7/dist-packages/mne/io/proj.py |  723 +++++
 .../python2.7/dist-packages/mne/io/reference.py    |  387 +++
 .../usr/lib/python2.7/dist-packages/mne/io/tag.py  |  518 ++++
 .../dist-packages/mne/io/tests/__init__.py         |    3 +
 .../mne/io/tests/test_apply_function.py            |   58 +
 .../dist-packages/mne/io/tests/test_compensator.py |   72 +
 .../dist-packages/mne/io/tests/test_meas_info.py   |  211 ++
 .../dist-packages/mne/io/tests/test_pick.py        |  186 ++
 .../mne/io/tests/test_proc_history.py              |   47 +
 .../dist-packages/mne/io/tests/test_raw.py         |   51 +
 .../dist-packages/mne/io/tests/test_reference.py   |  307 +++
 .../usr/lib/python2.7/dist-packages/mne/io/tree.py |  158 ++
 .../lib/python2.7/dist-packages/mne/io/write.py    |  395 +++
 .../usr/lib/python2.7/dist-packages/mne/label.py   | 1961 ++++++++++++++
 .../dist-packages/mne/minimum_norm/__init__.py     |   10 +
 .../dist-packages/mne/minimum_norm/inverse.py      | 1576 +++++++++++
 .../dist-packages/mne/minimum_norm/psf_ctf.py      |  436 +++
 .../mne/minimum_norm/tests/__init__.py             |    0
 .../mne/minimum_norm/tests/test_inverse.py         |  621 +++++
 .../mne/minimum_norm/tests/test_psf_ctf.py         |   81 +
 .../mne/minimum_norm/tests/test_snr.py             |   42 +
 .../mne/minimum_norm/tests/test_time_frequency.py  |  200 ++
 .../mne/minimum_norm/time_frequency.py             |  688 +++++
 .../usr/lib/python2.7/dist-packages/mne/misc.py    |  108 +
 .../lib/python2.7/dist-packages/mne/parallel.py    |  148 +
 .../dist-packages/mne/preprocessing/__init__.py    |   19 +
 .../dist-packages/mne/preprocessing/bads.py        |   40 +
 .../dist-packages/mne/preprocessing/ctps_.py       |  169 ++
 .../dist-packages/mne/preprocessing/ecg.py         |  317 +++
 .../dist-packages/mne/preprocessing/eog.py         |  208 ++
 .../dist-packages/mne/preprocessing/ica.py         | 2453 +++++++++++++++++
 .../dist-packages/mne/preprocessing/infomax_.py    |  315 +++
 .../dist-packages/mne/preprocessing/maxfilter.py   |  227 ++
 .../dist-packages/mne/preprocessing/maxwell.py     |  644 +++++
 .../dist-packages/mne/preprocessing/peak_finder.py |  168 ++
 .../dist-packages/mne/preprocessing/ssp.py         |  396 +++
 .../dist-packages/mne/preprocessing/stim.py        |  130 +
 .../mne/preprocessing/tests/__init__.py            |    0
 .../mne/preprocessing/tests/test_ctps.py           |   84 +
 .../mne/preprocessing/tests/test_ecg.py            |   24 +
 .../mne/preprocessing/tests/test_eeglab_infomax.py |  204 ++
 .../mne/preprocessing/tests/test_eog.py            |   18 +
 .../mne/preprocessing/tests/test_ica.py            |  592 ++++
 .../mne/preprocessing/tests/test_infomax.py        |  179 ++
 .../mne/preprocessing/tests/test_maxwell.py        |  256 ++
 .../mne/preprocessing/tests/test_peak_finder.py    |   10 +
 .../mne/preprocessing/tests/test_ssp.py            |  103 +
 .../mne/preprocessing/tests/test_stim.py           |   96 +
 .../mne/preprocessing/tests/test_xdawn.py          |  145 +
 .../dist-packages/mne/preprocessing/xdawn.py       |  484 ++++
 .../usr/lib/python2.7/dist-packages/mne/proj.py    |  396 +++
 .../dist-packages/mne/realtime/__init__.py         |   14 +
 .../python2.7/dist-packages/mne/realtime/client.py |  375 +++
 .../python2.7/dist-packages/mne/realtime/epochs.py |  420 +++
 .../dist-packages/mne/realtime/fieldtrip_client.py |  351 +++
 .../dist-packages/mne/realtime/mockclient.py       |  190 ++
 .../mne/realtime/stim_server_client.py             |  314 +++
 .../dist-packages/mne/realtime/tests/__init__.py   |    0
 .../mne/realtime/tests/test_fieldtrip_client.py    |   89 +
 .../mne/realtime/tests/test_mockclient.py          |  144 +
 .../mne/realtime/tests/test_stim_client_server.py  |   84 +
 .../usr/lib/python2.7/dist-packages/mne/report.py  | 1824 +++++++++++++
 .../lib/python2.7/dist-packages/mne/selection.py   |  111 +
 .../dist-packages/mne/simulation/__init__.py       |    9 +
 .../dist-packages/mne/simulation/evoked.py         |  214 ++
 .../dist-packages/mne/simulation/metrics.py        |   68 +
 .../python2.7/dist-packages/mne/simulation/raw.py  |  569 ++++
 .../dist-packages/mne/simulation/source.py         |  329 +++
 .../dist-packages/mne/simulation/tests/__init__.py |    0
 .../mne/simulation/tests/test_evoked.py            |   75 +
 .../mne/simulation/tests/test_metrics.py           |   52 +
 .../dist-packages/mne/simulation/tests/test_raw.py |  248 ++
 .../mne/simulation/tests/test_source.py            |  201 ++
 .../python2.7/dist-packages/mne/source_estimate.py | 2856 ++++++++++++++++++++
 .../python2.7/dist-packages/mne/source_space.py    | 2584 ++++++++++++++++++
 .../python2.7/dist-packages/mne/stats/__init__.py  |   14 +
 .../dist-packages/mne/stats/cluster_level.py       | 1555 +++++++++++
 .../dist-packages/mne/stats/multi_comp.py          |  102 +
 .../dist-packages/mne/stats/parametric.py          |  357 +++
 .../dist-packages/mne/stats/permutations.py        |  152 ++
 .../dist-packages/mne/stats/regression.py          |  337 +++
 .../dist-packages/mne/stats/tests/__init__.py      |    0
 .../mne/stats/tests/test_cluster_level.py          |  479 ++++
 .../mne/stats/tests/test_multi_comp.py             |   47 +
 .../mne/stats/tests/test_parametric.py             |  111 +
 .../mne/stats/tests/test_permutations.py           |   33 +
 .../mne/stats/tests/test_regression.py             |  110 +
 .../usr/lib/python2.7/dist-packages/mne/surface.py | 1113 ++++++++
 .../python2.7/dist-packages/mne/tests/__init__.py  |    0
 .../python2.7/dist-packages/mne/tests/test_bem.py  |  264 ++
 .../python2.7/dist-packages/mne/tests/test_chpi.py |  168 ++
 .../dist-packages/mne/tests/test_coreg.py          |  174 ++
 .../python2.7/dist-packages/mne/tests/test_cov.py  |  464 ++++
 .../dist-packages/mne/tests/test_defaults.py       |   22 +
 .../dist-packages/mne/tests/test_dipole.py         |  256 ++
 .../mne/tests/test_docstring_parameters.py         |  160 ++
 .../dist-packages/mne/tests/test_epochs.py         | 1793 ++++++++++++
 .../dist-packages/mne/tests/test_event.py          |  339 +++
 .../dist-packages/mne/tests/test_evoked.py         |  483 ++++
 .../dist-packages/mne/tests/test_filter.py         |  379 +++
 .../dist-packages/mne/tests/test_fixes.py          |  194 ++
 .../dist-packages/mne/tests/test_import_nesting.py |   53 +
 .../dist-packages/mne/tests/test_label.py          |  749 +++++
 .../python2.7/dist-packages/mne/tests/test_misc.py |   14 +
 .../python2.7/dist-packages/mne/tests/test_proj.py |  278 ++
 .../dist-packages/mne/tests/test_report.py         |  266 ++
 .../dist-packages/mne/tests/test_selection.py      |   27 +
 .../mne/tests/test_source_estimate.py              |  700 +++++
 .../dist-packages/mne/tests/test_source_space.py   |  687 +++++
 .../dist-packages/mne/tests/test_surface.py        |  165 ++
 .../dist-packages/mne/tests/test_transforms.py     |  198 ++
 .../dist-packages/mne/tests/test_utils.py          |  516 ++++
 .../dist-packages/mne/time_frequency/__init__.py   |   11 +
 .../dist-packages/mne/time_frequency/_stockwell.py |  255 ++
 .../dist-packages/mne/time_frequency/ar.py         |  165 ++
 .../dist-packages/mne/time_frequency/csd.py        |  258 ++
 .../dist-packages/mne/time_frequency/multitaper.py |  554 ++++
 .../dist-packages/mne/time_frequency/psd.py        |  199 ++
 .../dist-packages/mne/time_frequency/stft.py       |  237 ++
 .../mne/time_frequency/tests/__init__.py           |    0
 .../mne/time_frequency/tests/test_ar.py            |   38 +
 .../mne/time_frequency/tests/test_csd.py           |  163 ++
 .../mne/time_frequency/tests/test_multitaper.py    |   55 +
 .../mne/time_frequency/tests/test_psd.py           |  157 ++
 .../mne/time_frequency/tests/test_stft.py          |   60 +
 .../mne/time_frequency/tests/test_stockwell.py     |   96 +
 .../mne/time_frequency/tests/test_tfr.py           |  324 +++
 .../dist-packages/mne/time_frequency/tfr.py        | 1376 ++++++++++
 .../lib/python2.7/dist-packages/mne/transforms.py  |  689 +++++
 .../usr/lib/python2.7/dist-packages/mne/utils.py   | 1892 +++++++++++++
 .../usr/lib/python2.7/dist-packages/mne/viz/_3d.py |  925 +++++++
 .../python2.7/dist-packages/mne/viz/__init__.py    |   24 +
 .../lib/python2.7/dist-packages/mne/viz/circle.py  |  414 +++
 .../python2.7/dist-packages/mne/viz/decoding.py    |  236 ++
 .../lib/python2.7/dist-packages/mne/viz/epochs.py  | 1517 +++++++++++
 .../lib/python2.7/dist-packages/mne/viz/evoked.py  |  809 ++++++
 .../usr/lib/python2.7/dist-packages/mne/viz/ica.py |  761 ++++++
 .../lib/python2.7/dist-packages/mne/viz/misc.py    |  580 ++++
 .../lib/python2.7/dist-packages/mne/viz/montage.py |   58 +
 .../usr/lib/python2.7/dist-packages/mne/viz/raw.py |  672 +++++
 .../dist-packages/mne/viz/tests/__init__.py        |    0
 .../dist-packages/mne/viz/tests/test_3d.py         |  194 ++
 .../dist-packages/mne/viz/tests/test_circle.py     |   94 +
 .../dist-packages/mne/viz/tests/test_decoding.py   |  124 +
 .../dist-packages/mne/viz/tests/test_epochs.py     |  171 ++
 .../dist-packages/mne/viz/tests/test_evoked.py     |  137 +
 .../dist-packages/mne/viz/tests/test_ica.py        |  200 ++
 .../dist-packages/mne/viz/tests/test_misc.py       |  135 +
 .../dist-packages/mne/viz/tests/test_montage.py    |   30 +
 .../dist-packages/mne/viz/tests/test_raw.py        |  125 +
 .../dist-packages/mne/viz/tests/test_topo.py       |  137 +
 .../dist-packages/mne/viz/tests/test_topomap.py    |  258 ++
 .../dist-packages/mne/viz/tests/test_utils.py      |   87 +
 .../lib/python2.7/dist-packages/mne/viz/topo.py    |  622 +++++
 .../lib/python2.7/dist-packages/mne/viz/topomap.py | 1622 +++++++++++
 .../lib/python2.7/dist-packages/mne/viz/utils.py   |  844 ++++++
 .../usr/share/doc/python-mne/AUTHORS.rst           |   19 +
 .../usr/share/doc/python-mne/README.rst.gz         |  Bin 0 -> 2409 bytes
 .../usr/share/doc/python-mne/changelog.Debian.gz   |  Bin 0 -> 797 bytes
 .../python-mne/usr/share/doc/python-mne/copyright  |  150 +
 debian/python-mne/usr/share/man/man1/mne.1.gz      |  Bin 0 -> 328 bytes
 debian/rules                                       |    1 +
 424 files changed, 122628 insertions(+), 59 deletions(-)

diff --git a/debian/changelog b/debian/changelog
index 1d6255e..c1e917d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,12 @@
+python-mne (0.10+dfsg-2) UNRELEASED; urgency=medium
+
+  * NMU
+  * New upstream version
+  * Removed dependency on mayavi2
+  * Added patch
+
+ -- Jaakko Leppakangas <jaeilepp at student.jyu.fi>  Mon, 23 Nov 2015 04:17:53 -0500
+
 python-mne (0.8.6+dfsg-1) unstable; urgency=low
 
   * New upstream version
diff --git a/debian/control b/debian/control
index 19c0170..0746e7b 100644
--- a/debian/control
+++ b/debian/control
@@ -23,8 +23,7 @@ Build-Depends: debhelper (>= 9),
                libjs-jquery,
                libjs-jquery-ui,
                yui-compressor,
-               mayavi2,
-Standards-Version: 3.9.5
+Standards-Version: 3.9.6
 Vcs-Browser: http://anonscm.debian.org/gitweb/?p=debian-med/python-mne.git
 Vcs-Git: git://anonscm.debian.org/debian-med/python-mne.git
 Homepage: http://martinos.org/mne
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..f9625d1
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+python-mne_0.10+dfsg-1.1_all.deb python optional
diff --git a/debian/patches/debian_paths b/debian/patches/debian_paths
deleted file mode 100644
index 1f73199..0000000
--- a/debian/patches/debian_paths
+++ /dev/null
@@ -1,56 +0,0 @@
-From: Michael Hanke <michael.hanke at gmail.com>
-Subject: Debian path config for ext. dependencies
-
-This should deal with the issue of minified javascript libs, by using system
-libs. This does not deal with the missing bootstrap v3 library (the Debian
-package seems stuck on v2).
-
-The idea is to patch upstream to use system libs, whenever they are given by
-their absolute path.
-
-Additional overhead comes from UTF-8 encoding handling that differs between
-Python 2.x and 3.x.
---- a/mne/report.py
-+++ b/mne/report.py
-@@ -29,7 +29,7 @@
- from .externals.decorator import decorator
- from .externals.tempita import HTMLTemplate, Template
- from .externals.six import BytesIO
--from .externals.six import moves
-+from .externals.six import moves, PY3
- 
- tempdir = _TempDir()
- temp_fname = op.join(tempdir, 'test')
-@@ -712,21 +712,27 @@
-         """Initialize the renderer.
-         """
- 
--        inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
-+        inc_fnames = ['/usr/share/javascript/jquery/jquery.min.js',
-+                      '/usr/share/javascript/jquery-ui/jquery-ui.min.js',
-                       'bootstrap.min.js', 'jquery-ui.min.css',
-                       'bootstrap.min.css']
- 
-         include = list()
-         for inc_fname in inc_fnames:
-             logger.info('Embedding : %s' % inc_fname)
--            f = open(op.join(op.dirname(__file__), 'html', inc_fname),
--                     'r')
-+            if not os.path.isabs(inc_fname):
-+                inc_fname = op.join(op.dirname(__file__), 'html', inc_fname)
-+            f = open(inc_fname, 'r')
-+            if PY3:
-+                f_contents = f.read()
-+            else:
-+                f_contents = f.read().decode('UTF-8')
-             if inc_fname.endswith('.js'):
-                 include.append(u'<script type="text/javascript">'
--                               + f.read() + u'</script>')
-+                               + f_contents + u'</script>')
-             elif inc_fname.endswith('.css'):
-                 include.append(u'<style type="text/css">'
--                               + f.read() + u'</style>')
-+                               + f_contents + u'</style>')
-             f.close()
- 
-         self.include = ''.join(include)
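
Both the deleted debian_paths patch above and its replacement below hinge
on a Python 2/3 text-decoding difference: under Python 2, open(fname, 'r')
returns raw bytes that must be decoded explicitly, while under Python 3 it
already yields decoded text. A minimal standalone sketch of that idiom,
assuming UTF-8 encoded files; read_text is a hypothetical helper, and the
patch itself takes its PY3 flag from the bundled six module:

    import sys

    PY3 = sys.version_info[0] == 3

    def read_text(fname):
        # Return the file contents as unicode text on Python 2 and 3 alike.
        f = open(fname, 'r')
        try:
            if PY3:
                return f.read()  # Python 3: already decoded text
            return f.read().decode('UTF-8')  # Python 2: bytes -> unicode
        finally:
            f.close()
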
diff --git a/debian/patches/paths b/debian/patches/paths
new file mode 100644
index 0000000..5fd41d6
--- /dev/null
+++ b/debian/patches/paths
@@ -0,0 +1,70 @@
+Description: Use system JavaScript libraries in mne/report.py
+ Patches upstream to load jQuery and jQuery-UI from their Debian system
+ paths (given as absolute paths) instead of the bundled minified copies,
+ and reads the embedded files as UTF-8 on both Python 2 and 3.
+ .
+ python-mne (0.10+dfsg-1.1) UNRELEASED; urgency=medium
+ .
+   * New upstream version
+   * Removed dependency on mayavi2
+Author: Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+
+--- python-mne-0.10+dfsg.orig/mne/report.py
++++ python-mne-0.10+dfsg/mne/report.py
+@@ -32,7 +32,7 @@ from .parallel import parallel_func, che
+ 
+ from .externals.tempita import HTMLTemplate, Template
+ from .externals.six import BytesIO
+-from .externals.six import moves
++from .externals.six import moves, PY3
+ 
+ VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
+                     '-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
+@@ -1160,21 +1160,27 @@ class Report(object):
+         """Initialize the renderer.
+         """
+ 
+-        inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
++        inc_fnames = ['/usr/share/javascript/jquery/jquery.min.js',
++                      '/usr/share/javascript/jquery-ui/jquery-ui.min.js',
+                       'bootstrap.min.js', 'jquery-ui.min.css',
+                       'bootstrap.min.css']
+ 
+         include = list()
+         for inc_fname in inc_fnames:
+             logger.info('Embedding : %s' % inc_fname)
+-            f = open(op.join(op.dirname(__file__), 'html', inc_fname),
+-                     'r')
++            if not os.path.isabs(inc_fname):
++                inc_fname = op.join(op.dirname(__file__), 'html', inc_fname)
++            f = open(inc_fname, 'r')
++            if PY3:
++                f_contents = f.read()
++            else:
++                f_contents = f.read().decode('UTF-8')
+             if inc_fname.endswith('.js'):
+                 include.append(u'<script type="text/javascript">' +
+-                               f.read() + u'</script>')
++                               f_contents + u'</script>')
+             elif inc_fname.endswith('.css'):
+                 include.append(u'<style type="text/css">' +
+-                               f.read() + u'</style>')
++                               f_contents + u'</style>')
+             f.close()
+ 
+         self.include = ''.join(include)
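
The refreshed patch keeps upstream's embedding loop but lets entries that
are absolute paths (the Debian system copies of jQuery and jQuery-UI)
bypass the package's bundled mne/html directory. A standalone sketch of
that resolve-and-wrap logic, assuming Python 3, where open() returns
decoded text (the patch adds a six.PY3 branch for Python 2, sketched
above); build_include and HTML_DIR are hypothetical names for illustration:

    import os.path as op

    # Hypothetical stand-in for op.join(op.dirname(__file__), 'html')
    # used in mne/report.py.
    HTML_DIR = '/usr/lib/python2.7/dist-packages/mne/html'

    def build_include(inc_fnames):
        include = []
        for inc_fname in inc_fnames:
            if not op.isabs(inc_fname):
                # Relative names refer to files bundled with the package;
                # absolute names (the Debian system libraries) pass through.
                inc_fname = op.join(HTML_DIR, inc_fname)
            with open(inc_fname, 'r') as f:
                contents = f.read()
            # Inline each file into the self-contained HTML report.
            if inc_fname.endswith('.js'):
                include.append('<script type="text/javascript">' +
                               contents + '</script>')
            elif inc_fname.endswith('.css'):
                include.append('<style type="text/css">' +
                               contents + '</style>')
        return ''.join(include)

With this layout, 'bootstrap.min.css' still resolves to the bundled copy,
while '/usr/share/javascript/jquery/jquery.min.js' is read straight from
the libjs-jquery package listed in the binary package's Depends.
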
diff --git a/debian/patches/series b/debian/patches/series
index 87a6838..5f4c7d5 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1,3 @@
 changeset_9c7ef6d10f1f767b742525ea31e42a65a0469327.diff
-debian_paths
+paths
+test_fix
diff --git a/debian/patches/test_fix b/debian/patches/test_fix
new file mode 100644
index 0000000..e7179c3
--- /dev/null
+++ b/debian/patches/test_fix
@@ -0,0 +1,23 @@
+Description: If the environment variable MNE_FORCE_SERIAL is set, the log
+ output has 3 lines, causing the test to fail.
+ .
+ python-mne (0.10+dfsg-1.1) UNRELEASED; urgency=medium
+ .
+   * New upstream version
+   * Removed dependency on mayavi2
+Author: Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+
+--- python-mne-0.10+dfsg.orig/mne/tests/test_filter.py
++++ python-mne-0.10+dfsg/mne/tests/test_filter.py
+@@ -132,9 +132,9 @@ def test_notch_filters():
+             set_log_file()
+             with open(log_file) as fid:
+                 out = fid.readlines()
+-            if len(out) != 2:
++            if len(out) != 2 and len(out) != 3:  # force_serial: len(out) == 3
+                 raise ValueError('Detected frequencies not logged properly')
+-            out = np.fromstring(out[1], sep=', ')
++            out = np.fromstring(out[-1], sep=', ')
+             assert_array_almost_equal(out, freqs)
+         new_power = np.sqrt(sum_squared(b) / b.size)
+         assert_almost_equal(new_power, orig_power, tol)
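
The fix accepts either line count (MNE_FORCE_SERIAL adds a third log line)
and reads the detected frequencies from the last line instead of a fixed
index. A minimal sketch of the parsing step, assuming the final log line is
a comma-separated frequency list; parse_detected_freqs is a hypothetical
name, while np.fromstring with sep=', ' is what the test itself uses:

    import numpy as np

    def parse_detected_freqs(log_lines):
        # Two lines normally; three when MNE_FORCE_SERIAL forces the
        # serial code path and an extra line gets logged.
        if len(log_lines) not in (2, 3):
            raise ValueError('Detected frequencies not logged properly')
        # The frequencies are always on the last line, e.g. '50, 100, 150'.
        return np.fromstring(log_lines[-1], sep=', ')

    print(parse_detected_freqs(['filter header', '50, 100, 150']))
    # prints the parsed array, e.g. [  50.  100.  150.]
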
diff --git a/debian/python-mne.postinst.debhelper b/debian/python-mne.postinst.debhelper
new file mode 100644
index 0000000..751e905
--- /dev/null
+++ b/debian/python-mne.postinst.debhelper
@@ -0,0 +1,7 @@
+
+# Automatically added by dh_python2:
+if which pycompile >/dev/null 2>&1; then
+	pycompile -p python-mne 
+fi
+
+# End automatically added section
diff --git a/debian/python-mne.prerm.debhelper b/debian/python-mne.prerm.debhelper
new file mode 100644
index 0000000..daa5455
--- /dev/null
+++ b/debian/python-mne.prerm.debhelper
@@ -0,0 +1,12 @@
+
+# Automatically added by dh_python2:
+if which pyclean >/dev/null 2>&1; then
+	pyclean -p python-mne 
+else
+	dpkg -L python-mne | grep \.py$ | while read file
+	do
+		rm -f "${file}"[co] >/dev/null
+  	done
+fi
+
+# End automatically added section
diff --git a/debian/python-mne.substvars b/debian/python-mne.substvars
new file mode 100644
index 0000000..51b4534
--- /dev/null
+++ b/debian/python-mne.substvars
@@ -0,0 +1,4 @@
+python:Versions=2.7
+python:Provides=python2.7-mne
+python:Depends=python (>= 2.7), python (<< 2.8), python:any (>= 2.6.6-7~), python:any
+misc:Depends=
diff --git a/debian/python-mne/DEBIAN/control b/debian/python-mne/DEBIAN/control
new file mode 100644
index 0000000..f4bc434
--- /dev/null
+++ b/debian/python-mne/DEBIAN/control
@@ -0,0 +1,16 @@
+Package: python-mne
+Version: 0.10+dfsg-1.1
+Architecture: all
+Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
+Installed-Size: 8876
+Depends: python (>= 2.7), python (<< 2.8), python-numpy, python-scipy, python-sklearn, python-matplotlib, python-joblib (>= 0.4.5), xvfb, xauth, libgl1-mesa-dri, help2man, libjs-jquery, libjs-jquery-ui
+Recommends: python-nose, mayavi2
+Suggests: python-dap, ipython
+Provides: python2.7-mne
+Section: python
+Priority: optional
+Homepage: http://martinos.org/mne
+Description: Python modules for MEG and EEG data analysis
+ This package is designed for sensor- and source-space analysis of MEG
+ and EEG data, including frequency-domain and time-frequency analyses
+ and non-parametric statistics.
diff --git a/debian/python-mne/DEBIAN/md5sums b/debian/python-mne/DEBIAN/md5sums
new file mode 100644
index 0000000..9d0d01a
--- /dev/null
+++ b/debian/python-mne/DEBIAN/md5sums
@@ -0,0 +1,420 @@
+9746cdbdfb33978b3ff84834233e5d54  usr/bin/mne
+aebf2c764f2799e40c78ee38b078b76d  usr/lib/python2.7/dist-packages/mne-0.10.dev0.egg-info/PKG-INFO
+a092c136fe2fc1c10c5d1b33e046949f  usr/lib/python2.7/dist-packages/mne-0.10.dev0.egg-info/SOURCES.txt
+68b329da9893e34099c7d8ad5cb9c940  usr/lib/python2.7/dist-packages/mne-0.10.dev0.egg-info/dependency_links.txt
+68b329da9893e34099c7d8ad5cb9c940  usr/lib/python2.7/dist-packages/mne-0.10.dev0.egg-info/not-zip-safe
+7d43895d9727af393f9b8148144d84e1  usr/lib/python2.7/dist-packages/mne-0.10.dev0.egg-info/top_level.txt
+4e5affc3591891a793a7b1796b0a9d7b  usr/lib/python2.7/dist-packages/mne/__init__.py
+5c817df96c6ea2e4259eed3c94f8ab33  usr/lib/python2.7/dist-packages/mne/baseline.py
+db6a30a28cc8cd8a2b7e29119832049e  usr/lib/python2.7/dist-packages/mne/beamformer/__init__.py
+2bcccf90e246b50ac9cd0aefceeb78bc  usr/lib/python2.7/dist-packages/mne/beamformer/_dics.py
+7297291fd44db856372a4a889b88a0ec  usr/lib/python2.7/dist-packages/mne/beamformer/_lcmv.py
+e89a371b2e8fcc9a1c546807eb9cdf03  usr/lib/python2.7/dist-packages/mne/beamformer/_rap_music.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/beamformer/tests/__init__.py
+0eb6270a49c44d228509678458dbafad  usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_dics.py
+01e9603ee21efeb939e875c926c33506  usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_lcmv.py
+c1932785d2eeb49969316967cd91338e  usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_rap_music.py
+e073eb7cfbb804576ce32de2e0aa918e  usr/lib/python2.7/dist-packages/mne/bem.py
+dbd759fd09f8e2c9f9fe9600358d03f3  usr/lib/python2.7/dist-packages/mne/channels/__init__.py
+2959b8dedb550479257faebb409cac80  usr/lib/python2.7/dist-packages/mne/channels/channels.py
+455dc1a6e492c05a12145eb9de1c0bbd  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF-275.lout
+331de625f1a44aa35a31619e23f1dbad  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF151.lay
+ab7b69358891bb304336c2b6d427e430  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF275.lay
+dd09a84651dbe0f83bfda6dd5ab902dc  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EEG1005.lay
+d8fcb537486d111666b150299687f095  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EGI256.lout
+889924acb1e3c11cced2f5710ebc69fa  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-157.lout
+1a1500b5da8c1edf3d32563ebeb1aead  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-AD.lout
+3413b7060da1a3eccd22d9c71e6ec652  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-all.lout
+b69243b9a7c3cee10dcda9f97bd03167  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-grad.lout
+f5cd8e5b74bc26d32b82e6c74f40e4f7  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-mag.lout
+4f9e2f4cf147570ec2cf89e4e30660c0  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/biosemi.lay
+d173d92a72cbfe6119bf1ac1c81ad4db  usr/lib/python2.7/dist-packages/mne/channels/data/layouts/magnesWH3600.lout
+51c0ebc722985ee48c4c950897c69a99  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-128.sfp
+0108ad1ccbbca619db821ddaa33158cb  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-129.sfp
+b970efdca9ae25bcaf6c87bc212f7c87  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-256.sfp
+1efc231ec4901d23dc0c04ab01e2b384  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-257.sfp
+a86f5ac35958a66db7da123545c92956  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-32.sfp
+8b32e50ebca17223c7f4c807cc82664a  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp
+923610bd3d3764b934e8f7143165a153  usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp
+ae8f1ef390401bfd79bca9808c9121d8  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi128.txt
+471bb43b06665a9cc5a6d7ad48432d15  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi16.txt
+808d9a68097fef32a50eeafb217a491f  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi160.txt
+1db6750e83797856a1f761e220597b39  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi256.txt
+15e7d28c9bce784fb30c06446506ff5c  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi32.txt
+c744c16aca3a039b16a18638e95de9f8  usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi64.txt
+8eb2ea592c2f94e19fde2c9bf97cff99  usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M1.txt
+e68a8ec4feff2e9192b90c13a9a7c6e5  usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M10.txt
+c76c30b155e952029f64c83f8836bf05  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1005.elc
+cc6837af23f426d7d0c1d250a7f9fe52  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1020.elc
+fc54b431996a44cc2818e8d4a7633c50  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_alphabetic.elc
+e7da1a865dc9b9742c895db003c5a775  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_postfixed.elc
+a1b958e624c632a7a091ca2783ca85b2  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_prefixed.elc
+b33e8e0d26ff12cb81358d9e23ab9b51  usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_primed.elc
+4c7d88381911ede6fbb54c6b40851232  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-157_neighb.mat
+49408af71bdaa6555f6510194a963b46  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-208_neighb.mat
+fc000aea7e65dd6211c289a7dcdab2fe  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi16_neighb.mat
+138143925863c05f287d3f9dd2c96eff  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi32_neighb.mat
+2903c332c12cf83a0a039a66849c5e6c  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi64_neighb.mat
+c23c9e32f703bff290c663778b4e7a53  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti148_neighb.mat
+a8e9cf300dec7edb41c723fe7eda09e5  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248_neighb.mat
+d267c6f214a31793acbbd47de559621d  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248grad_neighb.mat
+8a6a2d2a3dbe7ffd77555fa99a6301e5  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf151_neighb.mat
+e79beb3325cc18d89997de5598b37958  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf275_neighb.mat
+ff1e6060a2d0967cbd2696b52ba5198f  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf64_neighb.mat
+cdf338387927076955388e18e5e1d63e  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat
+9459d1653f5d5f47814c6259a50a1daa  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat
+620dc591710c7bc882c4c234ad1c7ac2  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat
+03416c3e71666d16627a9baa1ac65410  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM11_neighb.mat
+a7fc7cb76c581c0a1194cfc5b40bae13  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM14_neighb.mat
+ee83d9f6f6fa9f51e0b4ca0bf7c902d4  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM15_neighb.mat
+83cf15a44b1c6544a81e0cde4da545e8  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM1_neighb.mat
+6b5e81fc470892f0be3c2399f1ba6765  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag122_neighb.mat
+c0fa93b2f22ab999e6dc11aced2c3ba4  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306mag_neighb.mat
+774455db0c4dee25cea97ace87b86124  usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306planar_neighb.mat
+bf447d95de73d0412954b77eb5b805e5  usr/lib/python2.7/dist-packages/mne/channels/interpolation.py
+3f70921d8a7e970af33ba8d4e8ab0de9  usr/lib/python2.7/dist-packages/mne/channels/layout.py
+8f33c1a60771d2f024f8f88e0dc33737  usr/lib/python2.7/dist-packages/mne/channels/montage.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/channels/tests/__init__.py
+e972d09ec7ac44acdc656cbb43158920  usr/lib/python2.7/dist-packages/mne/channels/tests/test_channels.py
+557911656c0ca06935b0da35036469d3  usr/lib/python2.7/dist-packages/mne/channels/tests/test_interpolation.py
+0797ff319d2c2d7672234c928f50d3e0  usr/lib/python2.7/dist-packages/mne/channels/tests/test_layout.py
+7e5200857cbb52620a9246e5fc52c022  usr/lib/python2.7/dist-packages/mne/channels/tests/test_montage.py
+aa5db542e6e330bb5fa2a5e1b6df427b  usr/lib/python2.7/dist-packages/mne/chpi.py
+1cdee5c73237acc92ec632e8856e371c  usr/lib/python2.7/dist-packages/mne/commands/__init__.py
+5c18d7272e63466416c859b51105f10f  usr/lib/python2.7/dist-packages/mne/commands/mne_browse_raw.py
+ed3254df4aaa10863ffdabd006d90d2f  usr/lib/python2.7/dist-packages/mne/commands/mne_bti2fiff.py
+1cf3fb0878a2788768aaef5348cfd49e  usr/lib/python2.7/dist-packages/mne/commands/mne_clean_eog_ecg.py
+9cba7b8f9186fb54009f4ceca49cb188  usr/lib/python2.7/dist-packages/mne/commands/mne_compare_fiff.py
+9596a78c4ed68dd7c7251827335f47c3  usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_ecg.py
+b3c03864b4f71c8ccd4526bb7d9e093f  usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_eog.py
+50a03a4ce865debf0bc31fa0f70b6359  usr/lib/python2.7/dist-packages/mne/commands/mne_coreg.py
+539f122ccebc85f375f2834c8f362baa  usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem.py
+959cbf6ce36fd1bce52bd4380c252c1f  usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem_model.py
+4ac377005633dfb1f7bfcaf0048aae1a  usr/lib/python2.7/dist-packages/mne/commands/mne_freeview_bem_surfaces.py
+68ed5395f9658ab495e40a677b410090  usr/lib/python2.7/dist-packages/mne/commands/mne_kit2fiff.py
+676ec050453355c8a2407eac94872783  usr/lib/python2.7/dist-packages/mne/commands/mne_make_scalp_surfaces.py
+88f521964b6f97859c17bd254688364a  usr/lib/python2.7/dist-packages/mne/commands/mne_maxfilter.py
+955fe0e93566bc7c13ecb76a5efb25cf  usr/lib/python2.7/dist-packages/mne/commands/mne_report.py
+4c14340fdd38c9aba8037e2c4208ef9c  usr/lib/python2.7/dist-packages/mne/commands/mne_surf2bem.py
+89ed2c73947d10dd31fa7e79d6085b31  usr/lib/python2.7/dist-packages/mne/commands/mne_watershed_bem.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/commands/tests/__init__.py
+0427ee1f859d8571b129d3bc894d1b90  usr/lib/python2.7/dist-packages/mne/commands/tests/test_commands.py
+84526ffbd229eef55cf86c864a4ab6ef  usr/lib/python2.7/dist-packages/mne/commands/utils.py
+b48fe652685cfcd06c5b4a79f79bb8c8  usr/lib/python2.7/dist-packages/mne/connectivity/__init__.py
+4996e81155926ff851b5c5bf3c409582  usr/lib/python2.7/dist-packages/mne/connectivity/effective.py
+6514b2ab2564b0f9f9ea17acb394017b  usr/lib/python2.7/dist-packages/mne/connectivity/spectral.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/connectivity/tests/__init__.py
+3fdbbe5e01858be2b4d8a49111d46dfc  usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_effective.py
+c9f23a6eb19d7b4754d04671e5da3411  usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_spectral.py
+20f65c707404535acbcd64e43cb629b5  usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_utils.py
+f52b9403897a62b84da88add1eac31ab  usr/lib/python2.7/dist-packages/mne/connectivity/utils.py
+8f15743baedf30d4430a6edf95d07ad8  usr/lib/python2.7/dist-packages/mne/coreg.py
+5847a6ab16086c343c7dfe00e9d400ee  usr/lib/python2.7/dist-packages/mne/cov.py
+e216548257048e4636ab26b18c6486b9  usr/lib/python2.7/dist-packages/mne/cuda.py
+106a0ad4551e0d5894c3906680702113  usr/lib/python2.7/dist-packages/mne/data/FreeSurferColorLUT.txt
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/data/__init__.py
+13845622a8f0ccda9dc31a43cd2d551f  usr/lib/python2.7/dist-packages/mne/data/coil_def.dat
+126ea97a6b597fde32d29242e75aeea7  usr/lib/python2.7/dist-packages/mne/data/coil_def_Elekta.dat
+f25e4bd0604a4406404b5de237476767  usr/lib/python2.7/dist-packages/mne/data/helmets/122m.fif.gz
+0d03d2b181475983e5ee6c34e3b9f95c  usr/lib/python2.7/dist-packages/mne/data/helmets/306m.fif.gz
+fd124d09480607ebaa497d8118f30b8a  usr/lib/python2.7/dist-packages/mne/data/helmets/306m_rt.fif.gz
+d40b14d945042a3f03c70c4f1015c463  usr/lib/python2.7/dist-packages/mne/data/helmets/BabySQUID.fif.gz
+4e72069edc10cc7a94d2085c1b4953c2  usr/lib/python2.7/dist-packages/mne/data/helmets/CTF_275.fif.gz
+ac5ee17923dd76dca209351a36c52c97  usr/lib/python2.7/dist-packages/mne/data/helmets/KIT.fif.gz
+78e15c4d5e21c2b983cb6dc2040e80e0  usr/lib/python2.7/dist-packages/mne/data/helmets/Magnes_2500wh.fif.gz
+5574096fe34e957ef4fa03cee72e3ead  usr/lib/python2.7/dist-packages/mne/data/helmets/Magnes_3600wh.fif.gz
+1fcb2f95b353e3dca123d627034c5d6d  usr/lib/python2.7/dist-packages/mne/data/icos.fif.gz
+337b52490094cf740505954bdaa345f5  usr/lib/python2.7/dist-packages/mne/data/mne_analyze.sel
+da14ccefcfad827fbc384999d91ca5fb  usr/lib/python2.7/dist-packages/mne/datasets/__init__.py
+aa7001c12fe4ed77769acaa7da9ef7dd  usr/lib/python2.7/dist-packages/mne/datasets/_fake/__init__.py
+e50bfa8ad1d02864b589ed55815a94cc  usr/lib/python2.7/dist-packages/mne/datasets/_fake/_fake.py
+8fc71dce703e6c56b96a28da3e7812e4  usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/__init__.py
+23f9d6d5c3787e22653295cde7166ec0  usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_auditory.py
+f08f9de168b4f79cdfd18929dcd852bf  usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_raw.py
+581f9a340f5d4fffa46f3f1d2a4c1daf  usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_resting.py
+6a38966a4508332d5cb0f78a6e5a5385  usr/lib/python2.7/dist-packages/mne/datasets/eegbci/__init__.py
+a70da3847fb62895d8c503abf5d176e4  usr/lib/python2.7/dist-packages/mne/datasets/eegbci/eegbci.py
+1473506e279c002b2cd0a53f2e869c08  usr/lib/python2.7/dist-packages/mne/datasets/megsim/__init__.py
+8cf2cfab4035a0e345b0841ff0f61304  usr/lib/python2.7/dist-packages/mne/datasets/megsim/megsim.py
+1396d9d7bb848fac94a466d713be4ea2  usr/lib/python2.7/dist-packages/mne/datasets/megsim/urls.py
+66d3b852d4280a86e3a21e2e887a8069  usr/lib/python2.7/dist-packages/mne/datasets/sample/__init__.py
+4e6b7545c2f24b0a4c698e0726bbbeee  usr/lib/python2.7/dist-packages/mne/datasets/sample/sample.py
+870859525ad65157f05f0030d4c47607  usr/lib/python2.7/dist-packages/mne/datasets/somato/__init__.py
+fbd3463f3c25a3b619b30575a7aa40a2  usr/lib/python2.7/dist-packages/mne/datasets/somato/somato.py
+7ad58ce5f069b2c22dd7c02489d4cd2f  usr/lib/python2.7/dist-packages/mne/datasets/spm_face/__init__.py
+bc3e40b45edf84aed8229a8e7a20596d  usr/lib/python2.7/dist-packages/mne/datasets/spm_face/spm_data.py
+f037cd4153683f64671c37ccc6e9b569  usr/lib/python2.7/dist-packages/mne/datasets/testing/__init__.py
+1ec51383285f09a41f03c6f3adff057f  usr/lib/python2.7/dist-packages/mne/datasets/testing/_testing.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/datasets/tests/__init__.py
+14ec77ad5ee0ac03ef84cd53821936e4  usr/lib/python2.7/dist-packages/mne/datasets/tests/test_datasets.py
+17e9c0a68bb60e2351459308ffa7471f  usr/lib/python2.7/dist-packages/mne/datasets/utils.py
+ec00154ea56629844128205cf28abcd3  usr/lib/python2.7/dist-packages/mne/decoding/__init__.py
+a8501da1c293feeefa3268af33294020  usr/lib/python2.7/dist-packages/mne/decoding/base.py
+0816c9579de1c658a54601cf7238b3e8  usr/lib/python2.7/dist-packages/mne/decoding/csp.py
+db83b6fcd4f140163199dfd5d955c6ab  usr/lib/python2.7/dist-packages/mne/decoding/ems.py
+f9b6725f8174ada46f6f0443a7ad2dac  usr/lib/python2.7/dist-packages/mne/decoding/mixin.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/decoding/tests/__init__.py
+a2cf9cc57852e768425ba1129714e55d  usr/lib/python2.7/dist-packages/mne/decoding/tests/test_csp.py
+fb12b90e50a99cf51704b8fd61080108  usr/lib/python2.7/dist-packages/mne/decoding/tests/test_ems.py
+16c3637f27a3fa36b2fe87e312da2463  usr/lib/python2.7/dist-packages/mne/decoding/tests/test_time_gen.py
+fe266ae0a98ca362eb0935ed64e51f72  usr/lib/python2.7/dist-packages/mne/decoding/tests/test_transformer.py
+94bf6ac8cfd85173703a615847ab6bd7  usr/lib/python2.7/dist-packages/mne/decoding/time_gen.py
+1347b71ab3c54dd60d7a2f5aaa39d4c5  usr/lib/python2.7/dist-packages/mne/decoding/transformer.py
+2668e46de9dcb790b9702d946809e2fb  usr/lib/python2.7/dist-packages/mne/defaults.py
+30a8d53d03cee7909e3db19c1c8efa90  usr/lib/python2.7/dist-packages/mne/dipole.py
+2340b1a4411fc432bcb66e1b09f79a49  usr/lib/python2.7/dist-packages/mne/epochs.py
+ecc9c493d804e8538ef658d67a82b893  usr/lib/python2.7/dist-packages/mne/event.py
+51e3bd117fc37d60fbc36382d1a47fa0  usr/lib/python2.7/dist-packages/mne/evoked.py
+af7bd1994161817f644ce041dca5b990  usr/lib/python2.7/dist-packages/mne/externals/FieldTrip.py
+ed24185366b69a201237064440c68437  usr/lib/python2.7/dist-packages/mne/externals/__init__.py
+2442d5dfb83ab784af212bf3ab355a30  usr/lib/python2.7/dist-packages/mne/externals/decorator.py
+48dcfbccff6c04efc9099645e3905bf7  usr/lib/python2.7/dist-packages/mne/externals/h5io/__init__.py
+457727c39c241b8cd6727da2244c31e1  usr/lib/python2.7/dist-packages/mne/externals/h5io/_h5io.py
+8be60bb7b2e66e4323d5fbc3e51e5a67  usr/lib/python2.7/dist-packages/mne/externals/jdcal.py
+e9ce70dae819e124a71cc1669fbe2cde  usr/lib/python2.7/dist-packages/mne/externals/six.py
+d6911f1935832fdb446b807d32f49a30  usr/lib/python2.7/dist-packages/mne/externals/tempita/__init__.py
+f4c7d264c6501bc987f270a77d52b849  usr/lib/python2.7/dist-packages/mne/externals/tempita/_looper.py
+4b87cc3de07a4f196b071a03052a2fd7  usr/lib/python2.7/dist-packages/mne/externals/tempita/compat3.py
+15e24b1bc3f6daf5280e3f9f019f44ae  usr/lib/python2.7/dist-packages/mne/filter.py
+d2e3304deaf8d1256404cddff7c32e48  usr/lib/python2.7/dist-packages/mne/fixes.py
+f873d9e313971e235d36513cfab7bf7f  usr/lib/python2.7/dist-packages/mne/forward/__init__.py
+84bee3df63fd5b331623f95f162e410e  usr/lib/python2.7/dist-packages/mne/forward/_compute_forward.py
+2fd5b22792203c8ee789e0692ecfe141  usr/lib/python2.7/dist-packages/mne/forward/_field_interpolation.py
+4842cfaeb1061361d03feb4521c1a697  usr/lib/python2.7/dist-packages/mne/forward/_lead_dots.py
+f1766c99053c2512f5a2cd144e3ee4ca  usr/lib/python2.7/dist-packages/mne/forward/_make_forward.py
+46ff6e60096804f31c8ff7b210cb2d2e  usr/lib/python2.7/dist-packages/mne/forward/forward.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/forward/tests/__init__.py
+4713d66ad6fcb88a78cab6417a308671  usr/lib/python2.7/dist-packages/mne/forward/tests/test_field_interpolation.py
+25aea6fc72e99c494c89e523ea58c775  usr/lib/python2.7/dist-packages/mne/forward/tests/test_forward.py
+1f595e03d73d936d037ba4ff9e74053f  usr/lib/python2.7/dist-packages/mne/forward/tests/test_make_forward.py
+f58612f4a9c9229d9c0d69699c841bf5  usr/lib/python2.7/dist-packages/mne/gui/__init__.py
+a463156d353c846a41f972b88030bf8d  usr/lib/python2.7/dist-packages/mne/gui/_coreg_gui.py
+8d9cd6efa4c4670962b3523703dd7e4b  usr/lib/python2.7/dist-packages/mne/gui/_fiducials_gui.py
+95c503b5c35b8f0aac063828c557b777  usr/lib/python2.7/dist-packages/mne/gui/_file_traits.py
+bfb824f8e39d14b01b7f0f636a7aeebc  usr/lib/python2.7/dist-packages/mne/gui/_kit2fiff_gui.py
+31fdb8e194f97c48d680a7270c2dfdcf  usr/lib/python2.7/dist-packages/mne/gui/_marker_gui.py
+ff4c4de9d49b3c670473c977daee51a9  usr/lib/python2.7/dist-packages/mne/gui/_viewer.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/gui/tests/__init__.py
+3bf36d350cfc9031ccc6652c61c7f432  usr/lib/python2.7/dist-packages/mne/gui/tests/test_coreg_gui.py
+267fa1c34910894ad4e85895be807e4a  usr/lib/python2.7/dist-packages/mne/gui/tests/test_fiducials_gui.py
+f98bc04d10f4fa79869d72bfe1e86634  usr/lib/python2.7/dist-packages/mne/gui/tests/test_file_traits.py
+549e3ad69ec75270dad38c339936cde4  usr/lib/python2.7/dist-packages/mne/gui/tests/test_kit2fiff_gui.py
+0c99cb1ce3ceb58dcb3d58c340fe9ac8  usr/lib/python2.7/dist-packages/mne/gui/tests/test_marker_gui.py
+35fc838ce584c1eb81b3bebe245442d6  usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.css
+9f63f8fc1efa67daa90d5726ef82c0eb  usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.js
+c325b90c891b43337b02cae3821ca54e  usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.css
+8f3a4e2a1ca8313f02016935fa95afea  usr/lib/python2.7/dist-packages/mne/inverse_sparse/__init__.py
+04a93bf7ec080160368285a7aab4314e  usr/lib/python2.7/dist-packages/mne/inverse_sparse/_gamma_map.py
+b9b9dc0da3e55bee2e24879ef1fec414  usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_debiasing.py
+ac8f8b9ea29f2932d3987e578ccc387e  usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_inverse.py
+951563689b81f2824781fe03bf3b93da  usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_optim.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/__init__.py
+355cca4c16b9bbc3d3de2f02c2db3661  usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_gamma_map.py
+ec618de4df09ab6e679e2d42262da1f9  usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_debiasing.py
+3a9973bd54c5737146ef8a2b0d3ae0d1  usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_inverse.py
+525b6ebddcb323d2497e148d6afe8607  usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_optim.py
+229278a2d35b53b81b77df60eaddb52d  usr/lib/python2.7/dist-packages/mne/io/__init__.py
+d585bb7c7eeb385a1a64683093c3dc06  usr/lib/python2.7/dist-packages/mne/io/array/__init__.py
+d4a56083f7697f37780a941190fd0aaa  usr/lib/python2.7/dist-packages/mne/io/array/array.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/io/array/tests/__init__.py
+a10f585cf9973a67bf6208c068ca69f6  usr/lib/python2.7/dist-packages/mne/io/array/tests/test_array.py
+9202b61596636f8fd81e84eb17c97710  usr/lib/python2.7/dist-packages/mne/io/base.py
+249cb24a4a4e6167718513a6e55ae598  usr/lib/python2.7/dist-packages/mne/io/brainvision/__init__.py
+c36d014e3e662895fd2f594af0a43734  usr/lib/python2.7/dist-packages/mne/io/brainvision/brainvision.py
+68b329da9893e34099c7d8ad5cb9c940  usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/__init__.py
+6f59d60725706f5d17dd565a1acdd8bc  usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/test_brainvision.py
+17189985fa036d7056b5c005430b7702  usr/lib/python2.7/dist-packages/mne/io/bti/__init__.py
+fd2602492913bdd3991e8bcbbb89a287  usr/lib/python2.7/dist-packages/mne/io/bti/bti.py
+611c75df711ef20f776f00d01f2153a2  usr/lib/python2.7/dist-packages/mne/io/bti/constants.py
+8319c87d949adaf0a3eebcfc4134961e  usr/lib/python2.7/dist-packages/mne/io/bti/read.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/io/bti/tests/__init__.py
+f008f6d3289da2f9081473fe612a8922  usr/lib/python2.7/dist-packages/mne/io/bti/tests/test_bti.py
+be2aa4cbfcaf00f04fc2cb5151090785  usr/lib/python2.7/dist-packages/mne/io/compensator.py
+195f5f56f88eb431d796b272e75f202c  usr/lib/python2.7/dist-packages/mne/io/constants.py
+66b9bd9cdbaf4371650bcc664a9c3599  usr/lib/python2.7/dist-packages/mne/io/ctf.py
+1a50cdde138a1be7e12e94695796cc7d  usr/lib/python2.7/dist-packages/mne/io/diff.py
+bdae9d1c018cc3607f14b3dd9248cc67  usr/lib/python2.7/dist-packages/mne/io/edf/__init__.py
+541c10c01fe386af71e2e5acf3d89a7c  usr/lib/python2.7/dist-packages/mne/io/edf/edf.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/io/edf/tests/__init__.py
+c04184cf8b3376d7a82f7be04cdc9f61  usr/lib/python2.7/dist-packages/mne/io/edf/tests/test_edf.py
+39ff14e2d5d2090e66c210baed70b2c1  usr/lib/python2.7/dist-packages/mne/io/egi/__init__.py
+4a71e3da4558012c42c32b6de3f034e7  usr/lib/python2.7/dist-packages/mne/io/egi/egi.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/io/egi/tests/__init__.py
+3a9bbb0a8a8c3dcc9ac4ebe51476e62e  usr/lib/python2.7/dist-packages/mne/io/egi/tests/test_egi.py
+827b5934c0fd701923be32c86f14e5d1  usr/lib/python2.7/dist-packages/mne/io/fiff/__init__.py
+3302b5c04a0d006b4d509121c5f55794  usr/lib/python2.7/dist-packages/mne/io/fiff/raw.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/io/fiff/tests/__init__.py
+34a06a53c22cdd818486f9fea11a6fe9  usr/lib/python2.7/dist-packages/mne/io/fiff/tests/test_raw.py
+dfc126d9d85836c897c3a8a4729f8c04  usr/lib/python2.7/dist-packages/mne/io/kit/__init__.py
+3f12d8ae98fb0e1dee46dea67bae3272  usr/lib/python2.7/dist-packages/mne/io/kit/constants.py
+3cc3513e8b587584cf15814676a7cf6e  usr/lib/python2.7/dist-packages/mne/io/kit/coreg.py
+392a6b80f3cca71eba1e9827dbc3eb00  usr/lib/python2.7/dist-packages/mne/io/kit/kit.py
+2ed5166dfce59f6d77f7c2583d078ab7  usr/lib/python2.7/dist-packages/mne/io/kit/tests/__init__.py
+60954d9f69870e1ed980fa4034bb4b93  usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_coreg.py
+93a17f2929b3728689f5fdb46ed20fca  usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_kit.py
+7babfb770547f35d60c7257573be1fed  usr/lib/python2.7/dist-packages/mne/io/matrix.py
+535d0b988be15cc468ca4ce5ac5dce40  usr/lib/python2.7/dist-packages/mne/io/meas_info.py
+55c12876d96201fb2385635f2e99378b  usr/lib/python2.7/dist-packages/mne/io/open.py
+ebd0b5108ee804e30b5a9cd66d8a84e0  usr/lib/python2.7/dist-packages/mne/io/pick.py
+4c4583c029b6dd403612391ef6df6a64  usr/lib/python2.7/dist-packages/mne/io/proc_history.py
+4fe54d3553783b9cf833c3f3c2dea898  usr/lib/python2.7/dist-packages/mne/io/proj.py
+be85ab4a48fb551e1f042c96e2080601  usr/lib/python2.7/dist-packages/mne/io/reference.py
+d3d980aed3d3b6c5418006fd28fbf672  usr/lib/python2.7/dist-packages/mne/io/tag.py
+2ed5166dfce59f6d77f7c2583d078ab7  usr/lib/python2.7/dist-packages/mne/io/tests/__init__.py
+42973e7e04781e0b5ace8d7d813e79dc  usr/lib/python2.7/dist-packages/mne/io/tests/test_apply_function.py
+a863188ce1175918c38d47ae3de61b21  usr/lib/python2.7/dist-packages/mne/io/tests/test_compensator.py
+18cf219ea8447f83589369cbf04f3053  usr/lib/python2.7/dist-packages/mne/io/tests/test_meas_info.py
+8e32dfbe2800b560b8478a8487b8bbbe  usr/lib/python2.7/dist-packages/mne/io/tests/test_pick.py
+ab6f42f6592f1668a10384ca774310f3  usr/lib/python2.7/dist-packages/mne/io/tests/test_proc_history.py
+c120ae6875fc51d71f6de2ab68145cdd  usr/lib/python2.7/dist-packages/mne/io/tests/test_raw.py
+92bd15b5c9f9230a2ee77bb85e0b7a18  usr/lib/python2.7/dist-packages/mne/io/tests/test_reference.py
+2823b4d3ff36d2332239e9ea2d911dcb  usr/lib/python2.7/dist-packages/mne/io/tree.py
+171ba35d75ac290a478409158149d771  usr/lib/python2.7/dist-packages/mne/io/write.py
+d6af924fdd80fc1aff473dee94f2af8e  usr/lib/python2.7/dist-packages/mne/label.py
+fd2bd2a679c751092c68fbf6c49b9ae5  usr/lib/python2.7/dist-packages/mne/minimum_norm/__init__.py
+522f55247bc4819819afec5d86bedc41  usr/lib/python2.7/dist-packages/mne/minimum_norm/inverse.py
+4f80ad1d88ab5579f2ed48901109cf65  usr/lib/python2.7/dist-packages/mne/minimum_norm/psf_ctf.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/__init__.py
+0c371b6a5399c9a88a3d517c93e0d4ad  usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_inverse.py
+83ab417f2f30a955b1ceda064b34bf50  usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_psf_ctf.py
+95bd213a0175da60d43d547a81bb43e3  usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_snr.py
+00a9cfe5137dec151f0bc3da33a5543d  usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_time_frequency.py
+959225c84073d3139b5dcd2f8860a68a  usr/lib/python2.7/dist-packages/mne/minimum_norm/time_frequency.py
+96b52371c7d59a4640ebfd2849a3b2ba  usr/lib/python2.7/dist-packages/mne/misc.py
+f46f5ba1791c79695f83ffdb42c9e469  usr/lib/python2.7/dist-packages/mne/parallel.py
+7315665e0844ca19be555d1fc320deb4  usr/lib/python2.7/dist-packages/mne/preprocessing/__init__.py
+ad9a16361541897d071eb78e4fa76959  usr/lib/python2.7/dist-packages/mne/preprocessing/bads.py
+d0ca27a6be1e2d60ec27458a5f11baea  usr/lib/python2.7/dist-packages/mne/preprocessing/ctps_.py
+86bcade9b573365ac560bc6ba0b447e3  usr/lib/python2.7/dist-packages/mne/preprocessing/ecg.py
+4648b9a539096f3833573964bed7f193  usr/lib/python2.7/dist-packages/mne/preprocessing/eog.py
+fe043f3a58790c59506cca03fd93552d  usr/lib/python2.7/dist-packages/mne/preprocessing/ica.py
+14b92c74525512918ba5b982578a0632  usr/lib/python2.7/dist-packages/mne/preprocessing/infomax_.py
+1ca9cb48fb74d3e990a6025083081ab9  usr/lib/python2.7/dist-packages/mne/preprocessing/maxfilter.py
+90615e9a982a967c2cd83f5cdb0d8927  usr/lib/python2.7/dist-packages/mne/preprocessing/maxwell.py
+69a1fef06880ef953fc5b6428b49dedb  usr/lib/python2.7/dist-packages/mne/preprocessing/peak_finder.py
+e3fa2cf57e2b0c752313b52ff8234b62  usr/lib/python2.7/dist-packages/mne/preprocessing/ssp.py
+c0310aa257f39524b099c5485ef6e500  usr/lib/python2.7/dist-packages/mne/preprocessing/stim.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/__init__.py
+e1486765b50e78f3cf672977775dcb24  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ctps.py
+38e964e639d9d16f67336153c97e75b5  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ecg.py
+ed4b31f25a678a72dadeba457fcce211  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eeglab_infomax.py
+2e13fce38174c4c4583d5cb9ce70868d  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eog.py
+56b056efae5e9e409f1618e6de3cc602  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ica.py
+32aa126a8c83af3fcd9860f899184571  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_infomax.py
+1b5917b0556b2abfcd7c670fb6f481f4  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_maxwell.py
+2ba0a4001ad2dfcbceda96e52e089841  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_peak_finder.py
+5b0e6a376d6d1bb5a7675c2fc58fae35  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ssp.py
+2433df0915fd1f1b9f86c6a86766415d  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_stim.py
+5b53e5641c4ba3e794eaeb05b1954f3d  usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_xdawn.py
+669e860006097b9afce22cbba3fdb5ed  usr/lib/python2.7/dist-packages/mne/preprocessing/xdawn.py
+65946ec455ebf85d6d63446ec6ed171d  usr/lib/python2.7/dist-packages/mne/proj.py
+faf5dbc90fcb909d70bf5bc0f3f8ec6b  usr/lib/python2.7/dist-packages/mne/realtime/__init__.py
+c6c846d02a2ac0fab16e8b3bc3133748  usr/lib/python2.7/dist-packages/mne/realtime/client.py
+1273445c25cb7af8573b1f121038ffbb  usr/lib/python2.7/dist-packages/mne/realtime/epochs.py
+d10d11c229d308abe627cffc2d78e73d  usr/lib/python2.7/dist-packages/mne/realtime/fieldtrip_client.py
+cbc19ff4a7cb8310d751c749b0492773  usr/lib/python2.7/dist-packages/mne/realtime/mockclient.py
+baa38ba511b4bd8987792251f79c8c58  usr/lib/python2.7/dist-packages/mne/realtime/stim_server_client.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/realtime/tests/__init__.py
+e62b8535e31264a77bd28ffb4fe588ce  usr/lib/python2.7/dist-packages/mne/realtime/tests/test_fieldtrip_client.py
+7e94d3bb24792861bb0fce44bb26dfb0  usr/lib/python2.7/dist-packages/mne/realtime/tests/test_mockclient.py
+ac07e2682197a3325b0953e70e3e731e  usr/lib/python2.7/dist-packages/mne/realtime/tests/test_stim_client_server.py
+f1064fb441717ad0524509a72e9669b5  usr/lib/python2.7/dist-packages/mne/report.py
+4753a4271fef05cd11e9a22b3e3a57f4  usr/lib/python2.7/dist-packages/mne/selection.py
+6d399ddfacc4c187743f47dcfd5a5b21  usr/lib/python2.7/dist-packages/mne/simulation/__init__.py
+6a14df65b3a7c7f59c3a6b96cf365587  usr/lib/python2.7/dist-packages/mne/simulation/evoked.py
+d0c4f1cb0545dc68e73a4da3e0dc3722  usr/lib/python2.7/dist-packages/mne/simulation/metrics.py
+6f9619101b5a9213d304122ffd04122e  usr/lib/python2.7/dist-packages/mne/simulation/raw.py
+3cc4ac6e2a40b361085ab8e4885cd1f4  usr/lib/python2.7/dist-packages/mne/simulation/source.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/simulation/tests/__init__.py
+c577de70161f5e81625de61d9aaebc11  usr/lib/python2.7/dist-packages/mne/simulation/tests/test_evoked.py
+d20092b2e5b898b788f11961ad88a6bf  usr/lib/python2.7/dist-packages/mne/simulation/tests/test_metrics.py
+b20191bc60515c4086b5a1e221fd5fef  usr/lib/python2.7/dist-packages/mne/simulation/tests/test_raw.py
+5014fbae162f9cd573a41d449ba13409  usr/lib/python2.7/dist-packages/mne/simulation/tests/test_source.py
+bcbe69b9283b2ab6104123b4441a599d  usr/lib/python2.7/dist-packages/mne/source_estimate.py
+c13a982bcb92e4e9a5bb4388888e31f5  usr/lib/python2.7/dist-packages/mne/source_space.py
+8759c4cff02ebc0d01065e26f092c1cb  usr/lib/python2.7/dist-packages/mne/stats/__init__.py
+6e4a180d816d2f365d2810e29fa811f7  usr/lib/python2.7/dist-packages/mne/stats/cluster_level.py
+b5a8234848352583e705ed0feed252fb  usr/lib/python2.7/dist-packages/mne/stats/multi_comp.py
+7dfa1008999aea2e0d137399b9a9a2c3  usr/lib/python2.7/dist-packages/mne/stats/parametric.py
+f973f1455d131dae18ddc8307102b072  usr/lib/python2.7/dist-packages/mne/stats/permutations.py
+46bf9553e5ace8ce72c16c7a6b5ab47f  usr/lib/python2.7/dist-packages/mne/stats/regression.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/stats/tests/__init__.py
+847bd45c6ccdfc13f48cbfb2c45f5329  usr/lib/python2.7/dist-packages/mne/stats/tests/test_cluster_level.py
+ba500debd553032649d12b51e177d1d8  usr/lib/python2.7/dist-packages/mne/stats/tests/test_multi_comp.py
+054c5907250e4e5af85b40bfb78eb757  usr/lib/python2.7/dist-packages/mne/stats/tests/test_parametric.py
+f1b80c80e26b8453e1d985233d6b4f91  usr/lib/python2.7/dist-packages/mne/stats/tests/test_permutations.py
+c9cbb6779d7c8c4574d2aee371547db8  usr/lib/python2.7/dist-packages/mne/stats/tests/test_regression.py
+4866c28fd6cf1657c49e4dab9f221007  usr/lib/python2.7/dist-packages/mne/surface.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/tests/__init__.py
+b0505956f4c2e9ca5bd0ec253e74060d  usr/lib/python2.7/dist-packages/mne/tests/test_bem.py
+cf223e44e4d1525a445fee8dd2e33681  usr/lib/python2.7/dist-packages/mne/tests/test_chpi.py
+23f87419092d04092b75b3eaa5c9a6a7  usr/lib/python2.7/dist-packages/mne/tests/test_coreg.py
+3c9d61112202d7029da7246d19886c3a  usr/lib/python2.7/dist-packages/mne/tests/test_cov.py
+408f9d03547f64585881fde61446adbf  usr/lib/python2.7/dist-packages/mne/tests/test_defaults.py
+63c2590f7a0bfea6f942a19a049600a0  usr/lib/python2.7/dist-packages/mne/tests/test_dipole.py
+b70a80aca3c4bf4a19992cb34b1d314f  usr/lib/python2.7/dist-packages/mne/tests/test_docstring_parameters.py
+8a800a32fadc4d74361c26213509248e  usr/lib/python2.7/dist-packages/mne/tests/test_epochs.py
+5c050563c56e38a4c68e7ef15abbfc22  usr/lib/python2.7/dist-packages/mne/tests/test_event.py
+06c4c1fcb08efdf3651826c4bf1e98d7  usr/lib/python2.7/dist-packages/mne/tests/test_evoked.py
+a1993bfbd1c0d089054a3256725a6bb0  usr/lib/python2.7/dist-packages/mne/tests/test_filter.py
+3478b8da62652292031d856f3961f768  usr/lib/python2.7/dist-packages/mne/tests/test_fixes.py
+b405a7aa1d8373c25549496cd6453400  usr/lib/python2.7/dist-packages/mne/tests/test_import_nesting.py
+0809b08b795e2cd804b5ca0f628a71e2  usr/lib/python2.7/dist-packages/mne/tests/test_label.py
+bb404764c120142460b55ed158201542  usr/lib/python2.7/dist-packages/mne/tests/test_misc.py
+d88bbe2bf5d400c566ca7d654f5c8f43  usr/lib/python2.7/dist-packages/mne/tests/test_proj.py
+46c728ff857e8ec99b64e3864508cecf  usr/lib/python2.7/dist-packages/mne/tests/test_report.py
+8b9f678612c5a217b4dfc5f5f34a407a  usr/lib/python2.7/dist-packages/mne/tests/test_selection.py
+7683dce3b4af89f19eb67d64922bffa0  usr/lib/python2.7/dist-packages/mne/tests/test_source_estimate.py
+81ba31a754eaae9007d6e9085adfbadf  usr/lib/python2.7/dist-packages/mne/tests/test_source_space.py
+c492161dfd44a71a8b13f8d5a192ffa0  usr/lib/python2.7/dist-packages/mne/tests/test_surface.py
+f965640fec9a98754b7f6731b08615ee  usr/lib/python2.7/dist-packages/mne/tests/test_transforms.py
+0735c7b5ee2c42d2636347112c5b9965  usr/lib/python2.7/dist-packages/mne/tests/test_utils.py
+b94879b9b89f5c9f6e5c633ecd931cc9  usr/lib/python2.7/dist-packages/mne/time_frequency/__init__.py
+8f4e331c1fc785ebfb69d7c61de6bfec  usr/lib/python2.7/dist-packages/mne/time_frequency/_stockwell.py
+a6a1a23d53b37a4d22792c06ab9f1f12  usr/lib/python2.7/dist-packages/mne/time_frequency/ar.py
+fd39f69b067a07bc2071886d990e24c7  usr/lib/python2.7/dist-packages/mne/time_frequency/csd.py
+d4350ec85a6d7e5c38bdf80bc67c4a01  usr/lib/python2.7/dist-packages/mne/time_frequency/multitaper.py
+f98ec1ba433a2861d874e1989436cd19  usr/lib/python2.7/dist-packages/mne/time_frequency/psd.py
+35b6b46954064e992c89972648af35a4  usr/lib/python2.7/dist-packages/mne/time_frequency/stft.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/__init__.py
+4655cbf3a599532f0ec99ee57117f828  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_ar.py
+848f5a817cdd42addc790a4f914508c9  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_csd.py
+9879635b429af7f367c63a6ff7fb1351  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_multitaper.py
+90b3c1f9e87ae1272b96e815115c6f75  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_psd.py
+a459c8bbeab19d90696c090fa6b32ad8  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stft.py
+79a480b90a6d460a170ead1c29f2f248  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stockwell.py
+f41351544c7a8718adeda88139677e9e  usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_tfr.py
+7a682833d8b099736f257273f828d73f  usr/lib/python2.7/dist-packages/mne/time_frequency/tfr.py
+4f859cf50ab8b0ef8886becfdb0dea12  usr/lib/python2.7/dist-packages/mne/transforms.py
+045c31b61aeb3ad3314ca4a8d951db09  usr/lib/python2.7/dist-packages/mne/utils.py
+a68134a7fe30cb7dde68deb0fd414cb1  usr/lib/python2.7/dist-packages/mne/viz/_3d.py
+d7eac0be7a7f42e0d92a05e849c42c47  usr/lib/python2.7/dist-packages/mne/viz/__init__.py
+7c6af820cf059520ee8b40d3b08865d4  usr/lib/python2.7/dist-packages/mne/viz/circle.py
+1e9221f2eb52ef3412afe525491c6c5b  usr/lib/python2.7/dist-packages/mne/viz/decoding.py
+05560d8e02a3d19edbd03f8f6409b29a  usr/lib/python2.7/dist-packages/mne/viz/epochs.py
+8c6b74b1d0658459cd7dd9c11df57539  usr/lib/python2.7/dist-packages/mne/viz/evoked.py
+069d4ad5e8021f8672deb9c4f852459b  usr/lib/python2.7/dist-packages/mne/viz/ica.py
+19c712c0a81c0fe58f08f2d0affa31b1  usr/lib/python2.7/dist-packages/mne/viz/misc.py
+588618b2da3abe81277bee2fc3698526  usr/lib/python2.7/dist-packages/mne/viz/montage.py
+946be80f35ecddd9ce742ef6bb669b10  usr/lib/python2.7/dist-packages/mne/viz/raw.py
+d41d8cd98f00b204e9800998ecf8427e  usr/lib/python2.7/dist-packages/mne/viz/tests/__init__.py
+13a9f3dcd5b122196dc473509b82d7e0  usr/lib/python2.7/dist-packages/mne/viz/tests/test_3d.py
+04163fedd640f9ae60d594cd9ac61660  usr/lib/python2.7/dist-packages/mne/viz/tests/test_circle.py
+4c2134d983c8e55b4597051a1dec2861  usr/lib/python2.7/dist-packages/mne/viz/tests/test_decoding.py
+6befa19793dd6128dab1d496a912cada  usr/lib/python2.7/dist-packages/mne/viz/tests/test_epochs.py
+07c7eaea3b3362b2857efed89c15699f  usr/lib/python2.7/dist-packages/mne/viz/tests/test_evoked.py
+b0c5fdcabbc540941ec7118b202d706c  usr/lib/python2.7/dist-packages/mne/viz/tests/test_ica.py
+8645d0002fbb3b378d813fa539434ec1  usr/lib/python2.7/dist-packages/mne/viz/tests/test_misc.py
+717f14b21cb631612b69f08836ca1c3d  usr/lib/python2.7/dist-packages/mne/viz/tests/test_montage.py
+713977f2b75f16eeca805a4f3f735500  usr/lib/python2.7/dist-packages/mne/viz/tests/test_raw.py
+c2cb617c33ab62f2a33cb32a5b6b69ec  usr/lib/python2.7/dist-packages/mne/viz/tests/test_topo.py
+9a8aad32721bfc3ca7a253f2eb5567c9  usr/lib/python2.7/dist-packages/mne/viz/tests/test_topomap.py
+7f610861e4d7501613d8f2ef5ae0e1fd  usr/lib/python2.7/dist-packages/mne/viz/tests/test_utils.py
+94a197464f43bdcf7e8ad78f4b373e1f  usr/lib/python2.7/dist-packages/mne/viz/topo.py
+cdd316478f1597bc346d7c67fb723069  usr/lib/python2.7/dist-packages/mne/viz/topomap.py
+0bee4842dff0f790bdf828683f37975a  usr/lib/python2.7/dist-packages/mne/viz/utils.py
+a83da5b8a63fd95eaa90fefd1326ee9f  usr/share/doc/python-mne/AUTHORS.rst
+d29a3b1fb499f7d727eaed547989667e  usr/share/doc/python-mne/README.rst.gz
+081e826bbc2ca3a30653321623554bcd  usr/share/doc/python-mne/changelog.Debian.gz
+a3dce987d3a7afde1d7d54410d3999c6  usr/share/doc/python-mne/copyright
+d454fa4a23061ccfdc90116005bbeeb7  usr/share/man/man1/mne.1.gz
diff --git a/debian/python-mne/DEBIAN/postinst b/debian/python-mne/DEBIAN/postinst
new file mode 100755
index 0000000..f4c0e77
--- /dev/null
+++ b/debian/python-mne/DEBIAN/postinst
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -e
+
+# Automatically added by dh_python2:
+if which pycompile >/dev/null 2>&1; then
+	pycompile -p python-mne 
+fi
+
+# End automatically added section
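[Editor's note: the dh_python2 hook above byte-compiles the package's modules at
install time. For readers unfamiliar with pycompile, here is a rough Python
sketch of the effect; the dist-packages path is the one used throughout this
diff, and the snippet is illustrative only, not part of the commit.

    # Approximate what `pycompile -p python-mne` does: byte-compile every
    # module below the package's install directory.
    import compileall

    compileall.compile_dir('/usr/lib/python2.7/dist-packages/mne', quiet=1)
]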
diff --git a/debian/python-mne/DEBIAN/prerm b/debian/python-mne/DEBIAN/prerm
new file mode 100755
index 0000000..e96ea1c
--- /dev/null
+++ b/debian/python-mne/DEBIAN/prerm
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -e
+
+# Automatically added by dh_python2:
+if which pyclean >/dev/null 2>&1; then
+	pyclean -p python-mne 
+else
+	dpkg -L python-mne | grep \.py$ | while read file
+	do
+		rm -f "${file}"[co] >/dev/null
+  	done
+fi
+
+# End automatically added section
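[Editor's note: the prerm counterpart removes the byte-compiled files again
before the package is removed; when pyclean is unavailable it falls back to
the shell loop above. A hedged Python sketch of that fallback (path as in this
package; illustrative only):

    # Remove the .pyc/.pyo companions of every installed .py module,
    # mirroring the `dpkg -L ... | rm -f "${file}"[co]` fallback above.
    import os

    root = '/usr/lib/python2.7/dist-packages/mne'
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if name.endswith('.py'):
                for ext in ('c', 'o'):  # .pyc and .pyo
                    path = os.path.join(dirpath, name + ext)
                    if os.path.exists(path):
                        os.remove(path)
]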
diff --git a/debian/python-mne/usr/bin/mne b/debian/python-mne/usr/bin/mne
new file mode 100755
index 0000000..9154bba
--- /dev/null
+++ b/debian/python-mne/usr/bin/mne
@@ -0,0 +1,39 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import sys
+import glob
+import subprocess
+import os.path as op
+
+import mne
+
+mne_bin_dir = op.dirname(mne.__file__)
+valid_commands = sorted(glob.glob(op.join(mne_bin_dir,
+                                          'commands', 'mne_*.py')))
+valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands]
+
+
+def print_help():
+    print("Usage : mne command options\n")
+    print("Accepted commands :\n")
+    for c in valid_commands:
+        print("\t- %s" % c)
+    print("\nExample : mne browse_raw --raw sample_audvis_raw.fif")
+    print("\nGetting help example : mne compute_proj_eog -h")
+    sys.exit(0)
+
+if len(sys.argv) == 1:
+    print_help()
+elif ("help" in sys.argv[1] or "-h" in sys.argv[1]):
+    print_help()
+elif sys.argv[1] == "--version":
+    print("MNE %s" % mne.__version__)
+elif sys.argv[1] not in valid_commands:
+    print('Invalid command: "%s"\n' % sys.argv[1])
+    print_help()
+    sys.exit(0)
+else:
+    cmd = sys.argv[1]
+    cmd_path = op.join(mne_bin_dir, 'commands', 'mne_%s.py' % cmd)
+    sys.exit(subprocess.call([sys.executable, cmd_path] + sys.argv[2:]))
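[Editor's note: the wrapper discovers subcommands purely by file name: every
mne/commands/mne_*.py becomes a subcommand, with the 'mne_' prefix and '.py'
suffix stripped by the [4:-3] slice. A small sketch of that mapping, using the
browse_raw command that the script's own help text mentions:

    # How the dispatcher derives a subcommand name from a file name.
    import os.path as op

    fname = op.join('mne', 'commands', 'mne_browse_raw.py')
    command = fname.split(op.sep)[-1][4:-3]  # strip 'mne_' and '.py'
    print(command)  # -> 'browse_raw'

Invoking `mne browse_raw ...` then re-executes that file with the remaining
arguments via subprocess.]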
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/__init__.py
new file mode 100644
index 0000000..dfecae9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/__init__.py
@@ -0,0 +1,106 @@
+"""MNE for MEG and EEG data analysis
+"""
+
+# PEP0440 compatible formatted version, see:
+# https://www.python.org/dev/peps/pep-0440/
+#
+# Generic release markers:
+#   X.Y
+#   X.Y.Z   # For bugfix releases
+#
+# Admissible pre-release markers:
+#   X.YaN   # Alpha release
+#   X.YbN   # Beta release
+#   X.YrcN  # Release Candidate
+#   X.Y     # Final release
+#
+# Dev branch marker is: 'X.Y.devN' where N is an integer.
+#
+
+__version__ = '0.10.dev0'
+
+# have to import verbose first since it's needed by many things
+from .utils import (set_log_level, set_log_file, verbose, set_config,
+                    get_config, get_config_path, set_cache_dir,
+                    set_memmap_min_size)
+from .io.pick import (pick_types, pick_channels,
+                      pick_channels_regexp, pick_channels_forward,
+                      pick_types_forward, pick_channels_cov,
+                      pick_channels_evoked, pick_info)
+from .io.base import concatenate_raws
+from .chpi import get_chpi_positions
+from .io.meas_info import create_info
+from .io.kit import read_epochs_kit
+from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
+                  read_bem_surfaces, write_bem_surface, write_bem_surfaces,
+                  read_bem_solution, write_bem_solution)
+from .cov import (read_cov, write_cov, Covariance,
+                  compute_covariance, compute_raw_data_covariance,
+                  compute_raw_covariance, whiten_evoked, make_ad_hoc_cov)
+from .event import (read_events, write_events, find_events, merge_events,
+                    pick_events, make_fixed_length_events, concatenate_events,
+                    find_stim_steps)
+from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
+                      do_forward_solution, average_forward_solutions,
+                      write_forward_solution, make_forward_solution,
+                      convert_forward_solution, make_field_map)
+from .source_estimate import (read_source_estimate, MixedSourceEstimate,
+                              SourceEstimate, VolSourceEstimate, morph_data,
+                              morph_data_precomputed, compute_morph_matrix,
+                              grade_to_tris, grade_to_vertices,
+                              spatial_src_connectivity,
+                              spatial_tris_connectivity,
+                              spatial_dist_connectivity,
+                              spatio_temporal_src_connectivity,
+                              spatio_temporal_tris_connectivity,
+                              spatio_temporal_dist_connectivity,
+                              save_stc_as_volume, extract_label_time_course)
+from .surface import (read_surface, write_surface, decimate_surface,
+                      read_morph_map, get_head_surf, get_meg_helmet_surf)
+from .source_space import (read_source_spaces, vertex_to_mni,
+                           write_source_spaces, setup_source_space,
+                           setup_volume_source_space, SourceSpaces,
+                           add_source_space_distances, morph_source_spaces,
+                           get_volume_labels_from_aseg)
+from .epochs import Epochs, EpochsArray, read_epochs
+from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds,
+                     grand_average, combine_evoked)
+from .label import (read_label, label_sign_flip,
+                    write_label, stc_to_label, grow_labels, Label, split_label,
+                    BiHemiLabel, read_labels_from_annot, write_labels_to_annot)
+from .misc import parse_config, read_reject_parameters
+from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
+                    scale_source_space)
+from .transforms import (read_trans, write_trans,
+                         transform_surface_to, Transform)
+from .proj import (read_proj, write_proj, compute_proj_epochs,
+                   compute_proj_evoked, compute_proj_raw, sensitivity_map)
+from .selection import read_selection
+from .dipole import read_dipole, Dipole, fit_dipole
+from .channels import equalize_channels, rename_channels, find_layout
+
+from . import beamformer
+from . import channels
+from . import chpi
+from . import commands
+from . import connectivity
+from . import coreg
+from . import cuda
+from . import datasets
+from . import epochs
+from . import externals
+from . import io
+from . import filter
+from . import gui
+from . import minimum_norm
+from . import preprocessing
+from . import simulation
+from . import stats
+from . import time_frequency
+from . import viz
+from . import decoding
+from . import realtime
+
+# initialize logging
+set_log_level(None, False)
+set_log_file()
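[Editor's note: importing the package therefore exposes most of the public API
as flat top-level names and configures logging once. A minimal usage sketch,
assuming a working installation; the commented line only illustrates one of
the re-exports:

    import mne

    print(mne.__version__)        # '0.10.dev0' in this tree
    mne.set_log_level('WARNING')  # re-exported from mne.utils
    # mne.read_events is mne.event.read_events, and so on.
]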
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/baseline.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/baseline.py
new file mode 100644
index 0000000..7436587
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/baseline.py
@@ -0,0 +1,97 @@
+"""Util function to baseline correct data
+"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .utils import logger, verbose
+
+
+@verbose
+def rescale(data, times, baseline, mode, verbose=None, copy=True):
+    """Rescale aka baseline correct data
+
+    Parameters
+    ----------
+    data : array
+        It can be of any shape. The only constraint is that the last
+        dimension should be time.
+    times : 1D array
+        Time instants in seconds.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None, no correction is applied. If baseline is (a, b),
+        the interval is between "a (s)" and "b (s)".
+        If a is None, the beginning of the data is used;
+        if b is None, b is set to the end of the interval.
+        If baseline is equal to (None, None), the whole time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | 'zlogratio'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or zscore (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+        logratio is the same as mean but in log-scale; zlogratio is the
+        same as zscore but the data is rendered in log-scale first.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    copy : bool
+        Operate on a copy of the data, or in place.
+
+    Returns
+    -------
+    data_scaled : array
+        Array of same shape as data after rescaling.
+    """
+    if copy:
+        data = data.copy()
+
+    valid_modes = ('logratio', 'ratio', 'zscore', 'mean', 'percent',
+                   'zlogratio')
+    if mode not in valid_modes:
+        raise Exception('mode should be one of: %s' % (valid_modes, ))
+
+    if baseline is not None:
+        logger.info("Applying baseline correction ... (mode: %s)" % mode)
+        bmin, bmax = baseline
+        if bmin is None:
+            imin = 0
+        else:
+            imin = int(np.where(times >= bmin)[0][0])
+        if bmax is None:
+            imax = len(times)
+        else:
+            imax = int(np.where(times <= bmax)[0][-1]) + 1
+
+        # avoid potential "empty slice" warning
+        if data.shape[-1] > 0:
+            mean = np.mean(data[..., imin:imax], axis=-1)[..., None]
+        else:
+            mean = 0  # otherwise we get an ugly nan
+        if mode == 'mean':
+            data -= mean
+        elif mode == 'logratio':
+            data /= mean
+            data = np.log10(data)  # a value of 1 means 10 times bigger
+        elif mode == 'ratio':
+            data /= mean
+        elif mode == 'zscore':
+            std = np.std(data[..., imin:imax], axis=-1)[..., None]
+            data -= mean
+            data /= std
+        elif mode == 'percent':
+            data -= mean
+            data /= mean
+        elif mode == 'zlogratio':
+            data /= mean
+            data = np.log10(data)
+            std = np.std(data[..., imin:imax], axis=-1)[..., None]
+            data /= std
+
+    else:
+        logger.info("No baseline correction applied...")
+
+    return data
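[Editor's note: a worked example of rescale() in 'zscore' mode, where each
channel is shifted and scaled by the mean and standard deviation of its own
baseline interval. Synthetic data; a sketch, not part of the commit:

    import numpy as np
    from mne.baseline import rescale

    times = np.linspace(-0.2, 0.5, 701)  # 1 kHz sampling, 0.7 s epoch
    data = np.random.RandomState(0).randn(5, 701)
    data_z = rescale(data, times, baseline=(None, 0), mode='zscore')
    # Baseline samples now have roughly zero mean and unit variance
    # per channel:
    print(data_z[:, times <= 0].mean(axis=-1))
    print(data_z[:, times <= 0].std(axis=-1))
]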
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/__init__.py
new file mode 100644
index 0000000..75ea807
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/__init__.py
@@ -0,0 +1,6 @@
+"""Beamformers for source localization
+"""
+
+from ._lcmv import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
+from ._dics import dics, dics_epochs, dics_source_power, tf_dics
+from ._rap_music import rap_music
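[Editor's note: these re-exports define the subpackage's public surface; the
same names are reachable through the top-level package as well. A trivial
sketch, assuming the package is installed:

    from mne.beamformer import dics, lcmv, rap_music, tf_dics, tf_lcmv
    import mne
    assert mne.beamformer.lcmv is lcmv
]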
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_dics.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_dics.py
new file mode 100644
index 0000000..3f50e32
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_dics.py
@@ -0,0 +1,611 @@
+"""Dynamic Imaging of Coherent Sources (DICS).
+"""
+
+# Authors: Roman Goj <roman.goj at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg
+
+from ..utils import logger, verbose
+from ..forward import _subject_from_forward
+from ..minimum_norm.inverse import combine_xyz, _check_reference
+from ..source_estimate import _make_stc
+from ..time_frequency import CrossSpectralDensity, compute_epochs_csd
+from ._lcmv import _prepare_beamformer_input, _setup_picks
+from ..externals import six
+
+
+@verbose
+def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
+                label=None, picks=None, pick_ori=None, verbose=None):
+    """Dynamic Imaging of Coherent Sources (DICS).
+
+    Calculate the DICS spatial filter from a given cross-spectral density
+    object and return estimates of source activity for the given data.
+
+    Parameters
+    ----------
+    data : array or list / iterable
+        Sensor space data. If data.ndim == 2 a single observation is assumed
+        and a single stc is returned. If data.ndim == 3 or if data is
+        a list / iterable, a list of stc's is returned.
+    info : dict
+        Measurement info.
+    tmin : float
+        Time of first sample.
+    forward : dict
+        Forward operator.
+    noise_csd : instance of CrossSpectralDensity
+        The noise cross-spectral density.
+    data_csd : instance of CrossSpectralDensity
+        The data cross-spectral density.
+    reg : float
+        The regularization for the cross-spectral density.
+    label : Label | None
+        Restricts the solution to a given label.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses
+    """
+
+    is_free_ori, _, proj, vertno, G =\
+        _prepare_beamformer_input(info, forward, label, picks, pick_ori)
+
+    Cm = data_csd.data
+
+    # Calculating regularized inverse, equivalent to an inverse operation after
+    # regularization: Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
+    Cm_inv = linalg.pinv(Cm, reg)
+
+    # Compute spatial filters
+    W = np.dot(G.T, Cm_inv)
+    n_orient = 3 if is_free_ori else 1
+    n_sources = G.shape[1] // n_orient
+
+    for k in range(n_sources):
+        Wk = W[n_orient * k: n_orient * k + n_orient]
+        Gk = G[:, n_orient * k: n_orient * k + n_orient]
+        Ck = np.dot(Wk, Gk)
+
+        # TODO: max-power is not implemented yet; however, DICS does employ
+        # orientation picking when one eigenvalue is much larger than the
+        # other
+
+        if is_free_ori:
+            # Free source orientation
+            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
+        else:
+            # Fixed source orientation
+            Wk /= Ck
+
+        # Noise normalization
+        noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
+        noise_norm = np.abs(noise_norm).trace()
+        Wk /= np.sqrt(noise_norm)
+
+    # Pick source orientation normal to cortical surface
+    if pick_ori == 'normal':
+        W = W[2::3]
+        is_free_ori = False
+
+    if isinstance(data, np.ndarray) and data.ndim == 2:
+        data = [data]
+        return_single = True
+    else:
+        return_single = False
+
+    subject = _subject_from_forward(forward)
+    for i, M in enumerate(data):
+        if len(M) != len(picks):
+            raise ValueError('data and picks must have the same length')
+
+        if not return_single:
+            logger.info("Processing epoch : %d" % (i + 1))
+
+        # Apply SSPs
+        if info['projs']:
+            M = np.dot(proj, M)
+
+        # project to source space using beamformer weights
+        if is_free_ori:
+            sol = np.dot(W, M)
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+        else:
+            # Linear inverse: do not delay computation due to non-linear abs
+            sol = np.dot(W, M)
+
+        tstep = 1.0 / info['sfreq']
+        if np.iscomplexobj(sol):
+            sol = np.abs(sol)  # XXX : STC cannot contain (yet?) complex values
+        yield _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                        subject=subject)
+
+    logger.info('[done]')
+
+
+@verbose
+def dics(evoked, forward, noise_csd, data_csd, reg=0.01, label=None,
+         pick_ori=None, verbose=None):
+    """Dynamic Imaging of Coherent Sources (DICS).
+
+    Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer
+    on evoked data and return estimates of source time courses.
+
+    NOTE : Fixed orientation forward operators will result in complex time
+    courses, in which case absolute values will be returned. Therefore the
+    orientation will no longer be fixed.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    evoked : Evoked
+        Evoked data.
+    forward : dict
+        Forward operator.
+    noise_csd : instance of CrossSpectralDensity
+        The noise cross-spectral density.
+    data_csd : instance of CrossSpectralDensity
+        The data cross-spectral density.
+    reg : float
+        The regularization for the cross-spectral density.
+    label : Label | None
+        Restricts the solution to a given label.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses
+
+    See Also
+    --------
+    dics_epochs
+
+    Notes
+    -----
+    The original reference is:
+    Gross et al. Dynamic imaging of coherent sources: Studying neural
+    interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
+    """
+    _check_reference(evoked)
+    info = evoked.info
+    data = evoked.data
+    tmin = evoked.times[0]
+
+    picks = _setup_picks(picks=None, info=info, forward=forward)
+    data = data[picks]
+
+    stc = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
+                      label=label, pick_ori=pick_ori, picks=picks)
+    return six.advance_iterator(stc)
+
+
+@verbose
+def dics_epochs(epochs, forward, noise_csd, data_csd, reg=0.01, label=None,
+                pick_ori=None, return_generator=False, verbose=None):
+    """Dynamic Imaging of Coherent Sources (DICS).
+
+    Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer
+    on single trial data and return estimates of source time courses.
+
+    NOTE : Fixed orientation forward operators will result in complex time
+    courses, in which case absolute values will be returned. Therefore the
+    orientation will no longer be fixed.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    forward : dict
+        Forward operator.
+    noise_csd : instance of CrossSpectralDensity
+        The noise cross-spectral density.
+    data_csd : instance of CrossSpectralDensity
+        The data cross-spectral density.
+    reg : float
+        The regularization for the cross-spectral density.
+    label : Label | None
+        Restricts the solution to a given label.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : list | generator of SourceEstimate | VolSourceEstimate
+        The source estimates for all epochs.
+
+    See Also
+    --------
+    dics
+
+    Notes
+    -----
+    The original reference is:
+    Gross et al. Dynamic imaging of coherent sources: Studying neural
+    interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
+    """
+    _check_reference(epochs)
+
+    info = epochs.info
+    tmin = epochs.times[0]
+
+    picks = _setup_picks(picks=None, info=info, forward=forward)
+    data = epochs.get_data()[:, picks, :]
+
+    stcs = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
+                       label=label, pick_ori=pick_ori, picks=picks)
+
+    if not return_generator:
+        stcs = list(stcs)
+
+    return stcs
+
+
+@verbose
+def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
+                      label=None, pick_ori=None, verbose=None):
+    """Dynamic Imaging of Coherent Sources (DICS).
+
+    Calculate source power in time and frequency windows specified in the
+    calculation of the data cross-spectral density matrix or matrices. Source
+    power is normalized by noise power.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info, e.g. epochs.info.
+    forward : dict
+        Forward operator.
+    noise_csds : instance or list of instances of CrossSpectralDensity
+        The noise cross-spectral density matrix for a single frequency or a
+        list of matrices for multiple frequencies.
+    data_csds : instance or list of instances of CrossSpectralDensity
+        The data cross-spectral density matrix for a single frequency or a list
+        of matrices for multiple frequencies.
+    reg : float
+        The regularization for the cross-spectral density.
+    label : Label | None
+        Restricts the solution to a given label.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source power with frequency instead of time.
+
+    Notes
+    -----
+    The original reference is:
+    Gross et al. Dynamic imaging of coherent sources: Studying neural
+    interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
+    """
+
+    if isinstance(data_csds, CrossSpectralDensity):
+        data_csds = [data_csds]
+
+    if isinstance(noise_csds, CrossSpectralDensity):
+        noise_csds = [noise_csds]
+
+    def csd_shapes(x):
+        return tuple(c.data.shape for c in x)
+
+    if (csd_shapes(data_csds) != csd_shapes(noise_csds) or
+       any(len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds])):
+        raise ValueError('One noise CSD matrix should be provided for each '
+                         'data CSD matrix and vice versa. All CSD matrices '
+                         'should have identical shape.')
+
+    frequencies = []
+    for data_csd, noise_csd in zip(data_csds, noise_csds):
+        if not np.allclose(data_csd.frequencies, noise_csd.frequencies):
+            raise ValueError('Data and noise CSDs should be calculated at '
+                             'identical frequencies')
+
+        # If CSD is summed over multiple frequencies, take the average
+        # frequency
+        if len(data_csd.frequencies) > 1:
+            frequencies.append(np.mean(data_csd.frequencies))
+        else:
+            frequencies.append(data_csd.frequencies[0])
+    fmin = frequencies[0]
+
+    if len(frequencies) > 2:
+        fstep = []
+        for i in range(len(frequencies) - 1):
+            fstep.append(frequencies[i + 1] - frequencies[i])
+        if not np.allclose(fstep, np.mean(fstep), 1e-5):
+            warnings.warn('Uneven frequency spacing in CSD object, '
+                          'frequencies in the resulting stc file will be '
+                          'inaccurate.')
+        fstep = fstep[0]
+    elif len(frequencies) > 1:
+        fstep = frequencies[1] - frequencies[0]
+    else:
+        fstep = 1  # dummy value
+
+    picks = _setup_picks(picks=None, info=info, forward=forward)
+
+    is_free_ori, _, proj, vertno, G =\
+        _prepare_beamformer_input(info, forward, label, picks=picks,
+                                  pick_ori=pick_ori)
+
+    n_orient = 3 if is_free_ori else 1
+    n_sources = G.shape[1] // n_orient
+    source_power = np.zeros((n_sources, len(data_csds)))
+    n_csds = len(data_csds)
+
+    logger.info('Computing DICS source power...')
+    for i, (data_csd, noise_csd) in enumerate(zip(data_csds, noise_csds)):
+        if n_csds > 1:
+            logger.info('    computing DICS spatial filter %d out of %d' %
+                        (i + 1, n_csds))
+
+        Cm = data_csd.data
+
+        # Calculating regularized inverse, equivalent to an inverse operation
+        # after the following regularization:
+        # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
+        Cm_inv = linalg.pinv(Cm, reg)
+
+        # Compute spatial filters
+        W = np.dot(G.T, Cm_inv)
+        for k in range(n_sources):
+            Wk = W[n_orient * k: n_orient * k + n_orient]
+            Gk = G[:, n_orient * k: n_orient * k + n_orient]
+            Ck = np.dot(Wk, Gk)
+
+            if is_free_ori:
+                # Free source orientation
+                Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
+            else:
+                # Fixed source orientation
+                Wk /= Ck
+
+            # Noise normalization
+            noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
+            noise_norm = np.abs(noise_norm).trace()
+
+            # Calculating source power
+            sp_temp = np.dot(np.dot(Wk.conj(), data_csd.data), Wk.T)
+            sp_temp /= max(noise_norm, 1e-40)  # Avoid division by 0
+
+            if pick_ori == 'normal':
+                source_power[k, i] = np.abs(sp_temp)[2, 2]
+            else:
+                source_power[k, i] = np.abs(sp_temp).trace()
+
+    logger.info('[done]')
+
+    subject = _subject_from_forward(forward)
+    return _make_stc(source_power, vertices=vertno, tmin=fmin / 1000.,
+                     tstep=fstep / 1000., subject=subject)
+
+
+@verbose
+def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
+            freq_bins, subtract_evoked=False, mode='fourier', n_ffts=None,
+            mt_bandwidths=None, mt_adaptive=False, mt_low_bias=True, reg=0.01,
+            label=None, pick_ori=None, verbose=None):
+    """5D time-frequency beamforming based on DICS.
+
+    Calculate source power in time-frequency windows using a spatial filter
+    based on the Dynamic Imaging of Coherent Sources (DICS) beamforming
+    approach. For each time window and frequency bin combination, the
+    cross-spectral density (CSD) is computed and used to create a beamformer
+    spatial filter, with the noise CSD used for normalization.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    forward : dict
+        Forward operator.
+    noise_csds : list of instances of CrossSpectralDensity
+        Noise cross-spectral density for each frequency bin.
+    tmin : float
+        Minimum time instant to consider.
+    tmax : float
+        Maximum time instant to consider.
+    tstep : float
+        Spacing between consecutive time windows; should be smaller than or
+        equal to the shortest time window length.
+    win_lengths : list of float
+        Time window lengths in seconds. One time window length should be
+        provided for each frequency bin.
+    freq_bins : list of tuples of float
+        Start and end point of frequency bins of interest.
+    subtract_evoked : bool
+        If True, subtract the averaged evoked response prior to computing the
+        tf source grid.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
+    n_ffts : list | None
+        FFT lengths to use for each frequency bin.
+    mt_bandwidths : list of float
+        The bandwidths of the multitaper windowing function in Hz. Only used in
+        'multitaper' mode. One value should be provided for each frequency bin.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into CSD. Only used
+        in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    reg : float
+        The regularization for the cross-spectral density.
+    label : Label | None
+        Restricts the solution to a given label.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stcs : list of SourceEstimate | VolSourceEstimate
+        Source power at each time window. One SourceEstimate object is returned
+        for each frequency bin.
+
+    Notes
+    -----
+    The original reference is:
+    Dalal et al. Five-dimensional neuroimaging: Localization of the
+    time-frequency dynamics of cortical activity.
+    NeuroImage (2008) vol. 40 (4) pp. 1686-1700
+
+    NOTE : Dalal et al. used a synthetic aperture magnetometry (SAM)
+    beamformer in each time-frequency window instead of DICS.
+    """
+    _check_reference(epochs)
+
+    if pick_ori not in [None, 'normal']:
+        raise ValueError('Unrecognized orientation option in pick_ori, '
+                         'available choices are None and normal')
+    if len(noise_csds) != len(freq_bins):
+        raise ValueError('One noise CSD object expected per frequency bin')
+    if len(win_lengths) != len(freq_bins):
+        raise ValueError('One time window length expected per frequency bin')
+    if any(win_length < tstep for win_length in win_lengths):
+        raise ValueError('Time step should not be larger than any of the '
+                         'window lengths')
+    if n_ffts is not None and len(n_ffts) != len(freq_bins):
+        raise ValueError('When specifying number of FFT samples, one value '
+                         'must be provided per frequency bin')
+    if mt_bandwidths is not None and len(mt_bandwidths) != len(freq_bins):
+        raise ValueError('When using multitaper mode and specifying '
+                         'multitaper transform bandwidth, one value must be '
+                         'provided per frequency bin')
+
+    if n_ffts is None:
+        n_ffts = [None] * len(freq_bins)
+    if mt_bandwidths is None:
+        mt_bandwidths = [None] * len(freq_bins)
+
+    # Multiplying by 1e3 to avoid numerical issues, e.g. 0.3 // 0.05 == 5
+    n_time_steps = int(((tmax - tmin) * 1e3) // (tstep * 1e3))
+
+    # Subtract evoked response
+    if subtract_evoked:
+        epochs.subtract_evoked()
+
+    sol_final = []
+    for freq_bin, win_length, noise_csd, n_fft, mt_bandwidth in\
+            zip(freq_bins, win_lengths, noise_csds, n_ffts, mt_bandwidths):
+        n_overlap = int((win_length * 1e3) // (tstep * 1e3))
+
+        # Scale noise CSD to allow data and noise CSDs to have different
+        # lengths
+        noise_csd = deepcopy(noise_csd)
+        noise_csd.data /= noise_csd.n_fft
+
+        sol_single = []
+        sol_overlap = []
+        for i_time in range(n_time_steps):
+            win_tmin = tmin + i_time * tstep
+            win_tmax = win_tmin + win_length
+
+            # If in the last step the last time point was not covered in
+            # previous steps and will not be covered now, a solution needs to
+            # be calculated for an additional time window
+            if i_time == n_time_steps - 1 and win_tmax - tstep < tmax and\
+               win_tmax >= tmax + (epochs.times[-1] - epochs.times[-2]):
+                warnings.warn('Adding a time window to cover last time points')
+                win_tmin = tmax - win_length
+                win_tmax = tmax
+
+            if win_tmax < tmax + (epochs.times[-1] - epochs.times[-2]):
+                logger.info('Computing time-frequency DICS beamformer for '
+                            'time window %d to %d ms, in frequency range '
+                            '%d to %d Hz' % (win_tmin * 1e3, win_tmax * 1e3,
+                                             freq_bin[0], freq_bin[1]))
+
+                # Counteract unsafe floating point arithmetic, ensuring all
+                # relevant samples are taken into account when selecting
+                # data in time windows
+                win_tmin = win_tmin - 1e-10
+                win_tmax = win_tmax + 1e-10
+
+                # Calculating data CSD in current time window
+                data_csd = compute_epochs_csd(epochs, mode=mode,
+                                              fmin=freq_bin[0],
+                                              fmax=freq_bin[1], fsum=True,
+                                              tmin=win_tmin, tmax=win_tmax,
+                                              n_fft=n_fft,
+                                              mt_bandwidth=mt_bandwidth,
+                                              mt_low_bias=mt_low_bias)
+
+                # Scale data CSD to allow data and noise CSDs to have different
+                # lengths
+                data_csd.data /= data_csd.n_fft
+
+                stc = dics_source_power(epochs.info, forward, noise_csd,
+                                        data_csd, reg=reg, label=label,
+                                        pick_ori=pick_ori)
+                sol_single.append(stc.data[:, 0])
+
+            # Average over all time windows that contain the current time
+            # point, which is the current time window along with
+            # n_overlap - 1 previous ones
+            if i_time - n_overlap < 0:
+                curr_sol = np.mean(sol_single[0:i_time + 1], axis=0)
+            else:
+                curr_sol = np.mean(sol_single[i_time - n_overlap + 1:
+                                              i_time + 1], axis=0)
+
+            # The final result for the current time point in the current
+            # frequency bin
+            sol_overlap.append(curr_sol)
+
+        # Gathering solutions for all time points for current frequency bin
+        sol_final.append(sol_overlap)
+
+    sol_final = np.array(sol_final)
+
+    # Creating stc objects containing all time points for each frequency bin,
+    # reusing the vertices and subject of the last stc computed in the loop
+    # above
+    stcs = []
+    for i_freq, _ in enumerate(freq_bins):
+        stc = _make_stc(sol_final[i_freq, :, :].T, vertices=stc.vertices,
+                        tmin=tmin, tstep=tstep, subject=stc.subject)
+        stcs.append(stc)
+
+    return stcs
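+
+# A minimal usage sketch (illustrative only, not part of the module):
+# ``epochs``, ``forward`` and ``noise_csds`` are assumed to be prepared as
+# described in the tf_dics docstring above.
+#
+#     freq_bins = [(4, 12), (30, 55)]  # one (fmin, fmax) tuple per bin
+#     win_lengths = [0.3, 0.2]         # one window length per bin
+#     stcs = tf_dics(epochs, forward, noise_csds, tmin=-0.1, tmax=0.5,
+#                    tstep=0.05, win_lengths=win_lengths,
+#                    freq_bins=freq_bins)
+#     # one SourceEstimate per frequency bin, sampled every tstep seconds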
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_lcmv.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_lcmv.py
new file mode 100644
index 0000000..4e2b2fe
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_lcmv.py
@@ -0,0 +1,821 @@
+"""Compute Linearly constrained minimum variance (LCMV) beamformer.
+"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Roman Goj <roman.goj at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+
+import numpy as np
+from scipy import linalg
+
+from ..io.constants import FIFF
+from ..io.proj import make_projector
+from ..io.pick import (
+    pick_types, pick_channels_forward, pick_channels_cov, pick_info)
+from ..forward import _subject_from_forward
+from ..minimum_norm.inverse import _get_vertno, combine_xyz, _check_reference
+from ..cov import compute_whitener, compute_covariance
+from ..source_estimate import _make_stc, SourceEstimate
+from ..source_space import label_src_vertno_sel
+from ..utils import logger, verbose
+from .. import Epochs
+from ..externals import six
+
+
+def _setup_picks(picks, info, forward, noise_cov=None):
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    ok_ch_names = set([c['ch_name'] for c in forward['info']['chs']])
+    if noise_cov is not None:
+        # assign the result back: set.union returns a new set
+        ok_ch_names = ok_ch_names.union(noise_cov.ch_names)
+
+    if noise_cov is not None and set(info['bads']) != set(noise_cov['bads']):
+        logger.info('info["bads"] and noise_cov["bads"] do not match, '
+                    'excluding bad channels from both')
+
+    bads = set(info['bads'])
+    if noise_cov is not None:
+        # assign the result back: set.union returns a new set
+        bads = bads.union(noise_cov['bads'])
+
+    ok_ch_names -= bads
+
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
+    ch_names = [c for c in ch_names if c in ok_ch_names]
+
+    picks = [info['ch_names'].index(k) for k in ch_names if k in
+             info['ch_names']]
+    return picks
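+
+# Illustrative sketch (editor's note, not library code): _setup_picks
+# resolves the channel indices shared by info, forward and noise_cov, e.g.
+#
+#     picks = _setup_picks(None, epochs.info, forward, noise_cov)
+#     ch_names = [epochs.info['ch_names'][p] for p in picks]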
+
+
+@verbose
+def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
+                label=None, picks=None, pick_ori=None, rank=None,
+                verbose=None):
+    """ LCMV beamformer for evoked data, single epochs, and raw data
+
+    Parameters
+    ----------
+    data : array or list / iterable
+        Sensor space data. If data.ndim == 2 a single observation is assumed
+        and a single stc is returned. If data.ndim == 3 or if data is
+        a list / iterable, a list of stc's is returned.
+    info : dict
+        Measurement info.
+    tmin : float
+        Time of first sample.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    pick_ori : None | 'normal' | 'max-power'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept. If 'max-power', the source
+        orientation that maximizes output source power is chosen.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate (or a list thereof)
+        Source time courses.
+    """
+    is_free_ori, ch_names, proj, vertno, G = (
+        _prepare_beamformer_input(
+            info, forward, label, picks, pick_ori))
+
+    # Handle whitening + data covariance
+    whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
+
+    # whiten the leadfield
+    G = np.dot(whitener, G)
+
+    # Apply SSPs + whitener to data covariance
+    data_cov = pick_channels_cov(data_cov, include=ch_names)
+    Cm = data_cov['data']
+    if info['projs']:
+        Cm = np.dot(proj, np.dot(Cm, proj.T))
+    Cm = np.dot(whitener, np.dot(Cm, whitener.T))
+
+    # Calculating regularized inverse. Truncating small singular values via
+    # the cond argument of pinv acts approximately like the following
+    # regularization:
+    # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
+    Cm_inv = linalg.pinv(Cm, reg)
+
+    # Compute spatial filters
+    W = np.dot(G.T, Cm_inv)
+    n_orient = 3 if is_free_ori else 1
+    n_sources = G.shape[1] // n_orient
+    for k in range(n_sources):
+        Wk = W[n_orient * k: n_orient * k + n_orient]
+        Gk = G[:, n_orient * k: n_orient * k + n_orient]
+        Ck = np.dot(Wk, Gk)
+
+        # Find source orientation maximizing output source power
+        if pick_ori == 'max-power':
+            eig_vals, eig_vecs = linalg.eigh(Ck)
+
+            # Choosing the eigenvector associated with the middle eigenvalue.
+            # The middle and not the minimal eigenvalue is used because MEG is
+            # insensitive to one (radial) of the three dipole orientations and
+            # therefore the smallest eigenvalue reflects mostly noise.
+            for i in range(3):
+                if i != eig_vals.argmax() and i != eig_vals.argmin():
+                    idx_middle = i
+
+            # TODO: The eigenvector associated with the smallest eigenvalue
+            # should probably be used when using combined EEG and MEG data
+            max_ori = eig_vecs[:, idx_middle]
+
+            Wk[:] = np.dot(max_ori, Wk)
+            Ck = np.dot(max_ori, np.dot(Ck, max_ori))
+            is_free_ori = False
+
+        if is_free_ori:
+            # Free source orientation
+            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
+        else:
+            # Fixed source orientation
+            Wk /= Ck
+
+    # Pick source orientation maximizing output source power
+    if pick_ori == 'max-power':
+        W = W[0::3]
+
+    # Preparing noise normalization
+    noise_norm = np.sum(W ** 2, axis=1)
+    if is_free_ori:
+        noise_norm = np.sum(np.reshape(noise_norm, (-1, 3)), axis=1)
+    noise_norm = np.sqrt(noise_norm)
+
+    # Pick source orientation normal to cortical surface
+    if pick_ori == 'normal':
+        W = W[2::3]
+        is_free_ori = False
+
+    # Applying noise normalization
+    if not is_free_ori:
+        W /= noise_norm[:, None]
+
+    if isinstance(data, np.ndarray) and data.ndim == 2:
+        data = [data]
+        return_single = True
+    else:
+        return_single = False
+
+    subject = _subject_from_forward(forward)
+    for i, M in enumerate(data):
+        if len(M) != len(picks):
+            raise ValueError('data and picks must have the same length')
+
+        if not return_single:
+            logger.info("Processing epoch : %d" % (i + 1))
+
+        # SSP and whitening
+        if info['projs']:
+            M = np.dot(proj, M)
+        M = np.dot(whitener, M)
+
+        # project to source space using beamformer weights
+
+        if is_free_ori:
+            sol = np.dot(W, M)
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+            sol /= noise_norm[:, None]
+        else:
+            # Linear inverse: do computation here or delayed
+            if M.shape[0] < W.shape[0] and pick_ori != 'max-power':
+                sol = (W, M)
+            else:
+                sol = np.dot(W, M)
+            if pick_ori == 'max-power':
+                sol = np.abs(sol)
+
+        tstep = 1.0 / info['sfreq']
+        yield _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                        subject=subject)
+
+    logger.info('[done]')
+
+
+def _prepare_beamformer_input(info, forward, label, picks, pick_ori):
+    """Input preparation common for all beamformer functions.
+
+    Check input values, prepare channel list and gain matrix. For documentation
+    of parameters, please refer to _apply_lcmv.
+    """
+
+    is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    if pick_ori in ['normal', 'max-power'] and not is_free_ori:
+        raise ValueError('Normal or max-power orientation can only be picked '
+                         'when a forward operator with free orientation is '
+                         'used.')
+    if pick_ori == 'normal' and not forward['surf_ori']:
+        raise ValueError('Normal orientation can only be picked when a '
+                         'forward operator oriented in surface coordinates is '
+                         'used.')
+    if pick_ori == 'normal' and not forward['src'][0]['type'] == 'surf':
+        raise ValueError('Normal orientation can only be picked when a '
+                         'forward operator with a surface-based source space '
+                         'is used.')
+
+    # Restrict forward solution to selected channels
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
+    forward = pick_channels_forward(forward, include=ch_names)
+
+    # Get gain matrix (forward operator)
+    if label is not None:
+        vertno, src_sel = label_src_vertno_sel(label, forward['src'])
+
+        if is_free_ori:
+            src_sel = 3 * src_sel
+            src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
+            src_sel = src_sel.ravel()
+
+        G = forward['sol']['data'][:, src_sel]
+    else:
+        vertno = _get_vertno(forward['src'])
+        G = forward['sol']['data']
+
+    # Apply SSPs
+    proj, ncomp, _ = make_projector(info['projs'], ch_names)
+    if info['projs']:
+        G = np.dot(proj, G)
+
+    return is_free_ori, ch_names, proj, vertno, G
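+
+# Illustrative sketch (not library code): callers unpack the return value
+# as done in _apply_lcmv above, e.g.
+#
+#     is_free_ori, ch_names, proj, vertno, G = _prepare_beamformer_input(
+#         info, forward, label=None, picks=picks, pick_ori=None)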
+
+
+@verbose
+def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
+         pick_ori=None, picks=None, rank=None, verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on evoked data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    evoked : Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    pick_ori : None | 'normal' | 'max-power'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept. If 'max-power', the source
+        orientation that maximizes output source power is chosen.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses.
+
+    See Also
+    --------
+    lcmv_raw, lcmv_epochs
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+    _check_reference(evoked)
+
+    info = evoked.info
+    data = evoked.data
+    tmin = evoked.times[0]
+
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data = data[picks]
+
+    stc = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
+
+    return six.advance_iterator(stc)
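+
+# A minimal usage sketch (illustrative only): assumes ``evoked``,
+# ``forward``, ``noise_cov`` and ``data_cov`` were computed beforehand,
+# e.g. with mne.compute_covariance.
+#
+#     stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
+#                pick_ori='max-power')
+#     stc.crop(0.0, None)  # keep only the post-stimulus interval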
+
+
+@verbose
+def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
+                pick_ori=None, return_generator=False, picks=None, rank=None,
+                verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on single trial data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    pick_ori : None | 'normal' | 'max-power'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept. If 'max-power', the source
+        orientation that maximizes output source power is chosen.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : list | generator of (SourceEstimate | VolSourceEstimate)
+        The source estimates for all epochs.
+
+    See Also
+    --------
+    lcmv_raw, lcmv
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+    _check_reference(epochs)
+
+    info = epochs.info
+    tmin = epochs.times[0]
+
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data = epochs.get_data()[:, picks, :]
+
+    stcs = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
+
+    if not return_generator:
+        stcs = [s for s in stcs]
+
+    return stcs
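+
+# A minimal usage sketch (illustrative only): with return_generator=True
+# the per-epoch source estimates are produced lazily, saving memory.
+#
+#     stcs = lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01,
+#                        return_generator=True)
+#     for stc in stcs:
+#         pass  # replace with per-epoch processing of your choice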
+
+
+@verbose
+def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
+             start=None, stop=None, picks=None, pick_ori=None, rank=None,
+             verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Compute Linearly Constrained Minimum Variance (LCMV) beamformer
+    on raw data.
+
+    NOTE : This implementation has not been heavily tested, so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    raw : mne.io.Raw
+        Raw data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label
+        Restricts the LCMV solution to a given label.
+    start : int
+        Index of the first time sample (a sample index, not a time in
+        seconds).
+    stop : int
+        Index of the first time sample not to include (a sample index, not a
+        time in seconds).
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    pick_ori : None | 'normal' | 'max-power'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept. If 'max-power', the source
+        orientation that maximizes output source power is chosen.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses.
+
+    See Also
+    --------
+    lcmv, lcmv_epochs
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+
+    The reference for finding the max-power orientation is:
+    Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
+    beamformers for neuromagnetic source reconstruction.
+    Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
+    """
+    _check_reference(raw)
+
+    info = raw.info
+
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data, times = raw[picks, start:stop]
+    tmin = times[0]
+
+    stc = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
+
+    return six.advance_iterator(stc)
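+
+# A minimal usage sketch (illustrative only): ``start`` and ``stop`` are
+# sample indices into the raw data, not times in seconds.
+#
+#     sfreq = raw.info['sfreq']
+#     stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01,
+#                    start=0, stop=int(10 * sfreq))  # first 10 seconds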
+
+
+@verbose
+def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
+                       label=None, picks=None, pick_ori=None,
+                       rank=None, verbose=None):
+    """Linearly Constrained Minimum Variance (LCMV) beamformer.
+
+    Calculate source power in a time window based on the provided data
+    covariance. The noise covariance is used to whiten the data covariance,
+    making the output equivalent to the neural activity index as defined by
+    Van Veen et al. 1997.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info, e.g. epochs.info.
+    forward : dict
+        Forward operator.
+    noise_cov : Covariance
+        The noise covariance.
+    data_cov : Covariance
+        The data covariance.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label | None
+        Restricts the solution to a given label.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate
+        Source power with a single time point representing the entire time
+        window for which data covariance was calculated.
+
+    Notes
+    -----
+    The original reference is:
+    Van Veen et al. Localization of brain electrical activity via linearly
+    constrained minimum variance spatial filtering.
+    Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
+    """
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    is_free_ori, ch_names, proj, vertno, G =\
+        _prepare_beamformer_input(
+            info, forward, label, picks, pick_ori)
+
+    # Handle whitening
+    info = pick_info(
+        info, [info['ch_names'].index(k) for k in ch_names
+               if k in info['ch_names']])
+    whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
+
+    # whiten the leadfield
+    G = np.dot(whitener, G)
+
+    # Apply SSPs + whitener to data covariance
+    data_cov = pick_channels_cov(data_cov, include=ch_names)
+    Cm = data_cov['data']
+    if info['projs']:
+        Cm = np.dot(proj, np.dot(Cm, proj.T))
+    Cm = np.dot(whitener, np.dot(Cm, whitener.T))
+
+    # Calculating regularized inverse. Truncating small singular values via
+    # the cond argument of pinv acts approximately like the following
+    # regularization:
+    # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
+    Cm_inv = linalg.pinv(Cm, reg)
+
+    # Compute spatial filters
+    W = np.dot(G.T, Cm_inv)
+    n_orient = 3 if is_free_ori else 1
+    n_sources = G.shape[1] // n_orient
+    source_power = np.zeros((n_sources, 1))
+    for k in range(n_sources):
+        Wk = W[n_orient * k: n_orient * k + n_orient]
+        Gk = G[:, n_orient * k: n_orient * k + n_orient]
+        Ck = np.dot(Wk, Gk)
+
+        if is_free_ori:
+            # Free source orientation
+            Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
+        else:
+            # Fixed source orientation
+            Wk /= Ck
+
+        # Noise normalization
+        noise_norm = np.dot(Wk, Wk.T)
+        noise_norm = noise_norm.trace()
+
+        # Calculating source power
+        sp_temp = np.dot(np.dot(Wk, Cm), Wk.T)
+        sp_temp /= max(noise_norm, 1e-40)  # Avoid division by 0
+
+        if pick_ori == 'normal':
+            source_power[k, 0] = sp_temp[2, 2]
+        else:
+            source_power[k, 0] = sp_temp.trace()
+
+    logger.info('[done]')
+
+    subject = _subject_from_forward(forward)
+    return SourceEstimate(source_power, vertices=vertno, tmin=1,
+                          tstep=1, subject=subject)
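+
+# Illustrative sketch (not library code): a single-window source power map,
+# assuming ``data_cov`` was computed over the time window of interest.
+#
+#     stc_pow = _lcmv_source_power(epochs.info, forward, noise_cov,
+#                                  data_cov, reg=0.01)
+#     # stc_pow contains one time point covering the whole window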
+
+
+@verbose
+def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
+            freq_bins, subtract_evoked=False, reg=0.01, label=None,
+            pick_ori=None, n_jobs=1, picks=None, rank=None, verbose=None):
+    """5D time-frequency beamforming based on LCMV.
+
+    Calculate source power in time-frequency windows using a spatial filter
+    based on the Linearly Constrained Minimum Variance (LCMV) beamforming
+    approach. Band-pass filtered epochs are divided into time windows from
+    which covariance is computed and used to create a beamformer spatial
+    filter.
+
+    NOTE : This implementation has not been heavily tested so please
+    report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    forward : dict
+        Forward operator.
+    noise_covs : list of instances of Covariance
+        Noise covariance for each frequency bin.
+    tmin : float
+        Minimum time instant to consider.
+    tmax : float
+        Maximum time instant to consider.
+    tstep : float
+        Spacing between consecutive time windows; should be smaller than or
+        equal to the shortest time window length.
+    win_lengths : list of float
+        Time window lengths in seconds. One time window length should be
+        provided for each frequency bin.
+    freq_bins : list of tuples of float
+        Start and end point of frequency bins of interest.
+    subtract_evoked : bool
+        If True, subtract the averaged evoked response prior to computing the
+        tf source grid.
+    reg : float
+        The regularization for the whitened data covariance.
+    label : Label | None
+        Restricts the solution to a given label.
+    pick_ori : None | 'normal'
+        If 'normal', rather than pooling the orientations by taking the norm,
+        only the radial component is kept.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stcs : list of SourceEstimate
+        Source power at each time window. One SourceEstimate object is returned
+        for each frequency bin.
+
+    Notes
+    -----
+    The original reference is:
+    Dalal et al. Five-dimensional neuroimaging: Localization of the
+    time-frequency dynamics of cortical activity.
+    NeuroImage (2008) vol. 40 (4) pp. 1686-1700
+    """
+    _check_reference(epochs)
+
+    if pick_ori not in [None, 'normal']:
+        raise ValueError('Unrecognized orientation option in pick_ori; '
+                         'available choices are None and normal')
+    if len(noise_covs) != len(freq_bins):
+        raise ValueError('One noise covariance object expected per frequency '
+                         'bin')
+    if len(win_lengths) != len(freq_bins):
+        raise ValueError('One time window length expected per frequency bin')
+    if any(win_length < tstep for win_length in win_lengths):
+        raise ValueError('Time step should not be larger than any of the '
+                         'window lengths')
+
+    # Extract raw object from the epochs object
+    raw = epochs._raw
+    if raw is None:
+        raise ValueError('The provided epochs object does not contain the '
+                         'underlying raw object. Please use preload=False '
+                         'when constructing the epochs object')
+
+    picks = _setup_picks(picks, epochs.info, forward, noise_covs[0])
+    ch_names = [epochs.ch_names[k] for k in picks]
+
+    # Use picks from epochs for picking channels in the raw object
+    raw_picks = [raw.ch_names.index(c) for c in ch_names]
+
+    # Make sure epochs.events contains only good events:
+    epochs.drop_bad_epochs()
+
+    # Multiplying by 1e3 to avoid numerical issues with floor division of
+    # floats, e.g. 0.3 // 0.05 evaluates to 5.0 instead of the expected 6
+    n_time_steps = int(((tmax - tmin) * 1e3) // (tstep * 1e3))
+
+    sol_final = []
+    for (l_freq, h_freq), win_length, noise_cov in \
+            zip(freq_bins, win_lengths, noise_covs):
+        n_overlap = int((win_length * 1e3) // (tstep * 1e3))
+
+        raw_band = raw.copy()
+        raw_band.filter(l_freq, h_freq, picks=raw_picks, method='iir',
+                        n_jobs=n_jobs)
+        raw_band.info['highpass'] = l_freq
+        raw_band.info['lowpass'] = h_freq
+        epochs_band = Epochs(raw_band, epochs.events, epochs.event_id,
+                             tmin=epochs.tmin, tmax=epochs.tmax, baseline=None,
+                             picks=raw_picks, proj=epochs.proj, preload=True)
+        del raw_band
+
+        if subtract_evoked:
+            epochs_band.subtract_evoked()
+
+        sol_single = []
+        sol_overlap = []
+        for i_time in range(n_time_steps):
+            win_tmin = tmin + i_time * tstep
+            win_tmax = win_tmin + win_length
+
+            # If in the last step the last time point was not covered in
+            # previous steps and will not be covered now, a solution needs to
+            # be calculated for an additional time window
+            if i_time == n_time_steps - 1 and win_tmax - tstep < tmax and\
+               win_tmax >= tmax + (epochs.times[-1] - epochs.times[-2]):
+                warnings.warn('Adding a time window to cover last time points')
+                win_tmin = tmax - win_length
+                win_tmax = tmax
+
+            if win_tmax < tmax + (epochs.times[-1] - epochs.times[-2]):
+                logger.info('Computing time-frequency LCMV beamformer for '
+                            'time window %d to %d ms, in frequency range '
+                            '%d to %d Hz' % (win_tmin * 1e3, win_tmax * 1e3,
+                                             l_freq, h_freq))
+
+                # Counteract unsafe floating point arithmetic, ensuring that
+                # all relevant samples are taken into account when selecting
+                # data in time windows
+                win_tmin = win_tmin - 1e-10
+                win_tmax = win_tmax + 1e-10
+
+                # Calculating data covariance from filtered epochs in current
+                # time window
+                data_cov = compute_covariance(epochs_band, tmin=win_tmin,
+                                              tmax=win_tmax)
+
+                stc = _lcmv_source_power(epochs_band.info, forward, noise_cov,
+                                         data_cov, reg=reg, label=label,
+                                         pick_ori=pick_ori, verbose=verbose)
+                sol_single.append(stc.data[:, 0])
+
+            # Average over all time windows that contain the current time
+            # point, which is the current time window along with
+            # n_overlap - 1 previous ones
+            if i_time - n_overlap < 0:
+                curr_sol = np.mean(sol_single[0:i_time + 1], axis=0)
+            else:
+                curr_sol = np.mean(sol_single[i_time - n_overlap + 1:
+                                              i_time + 1], axis=0)
+
+            # The final result for the current time point in the current
+            # frequency bin
+            sol_overlap.append(curr_sol)
+
+        # Gathering solutions for all time points for current frequency bin
+        sol_final.append(sol_overlap)
+
+    sol_final = np.array(sol_final)
+
+    # Creating stc objects containing all time points for each frequency bin,
+    # reusing the vertices and subject of the last stc computed in the loop
+    # above
+    stcs = []
+    for i_freq, _ in enumerate(freq_bins):
+        stc = SourceEstimate(sol_final[i_freq, :, :].T, vertices=stc.vertices,
+                             tmin=tmin, tstep=tstep, subject=stc.subject)
+        stcs.append(stc)
+
+    return stcs
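+
+# A minimal usage sketch (illustrative only): tf_lcmv requires epochs built
+# with preload=False so that the underlying raw object is available for
+# band-pass filtering; one noise covariance is needed per frequency bin.
+#
+#     freq_bins = [(4, 12), (30, 55)]
+#     win_lengths = [0.3, 0.2]
+#     stcs = tf_lcmv(epochs, forward, noise_covs, tmin=-0.1, tmax=0.5,
+#                    tstep=0.05, win_lengths=win_lengths,
+#                    freq_bins=freq_bins)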
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_rap_music.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_rap_music.py
new file mode 100644
index 0000000..5e96da7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/_rap_music.py
@@ -0,0 +1,274 @@
+"""Compute a Recursively Applied and Projected MUltiple
+Signal Classification (RAP-MUSIC).
+"""
+
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import linalg
+
+from ..io.pick import pick_channels_evoked
+from ..cov import compute_whitener
+from ..utils import logger, verbose
+from ..dipole import Dipole
+from ._lcmv import _prepare_beamformer_input, _setup_picks
+
+
+def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2,
+                     picks=None, return_explained_data=False):
+    """RAP-MUSIC for evoked data
+
+    Parameters
+    ----------
+    data : array, shape (n_channels, n_times)
+        Evoked data.
+    info : dict
+        Measurement info.
+    times : array
+        Times.
+    forward : instance of Forward
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    n_dipoles : int
+        The number of dipoles to estimate. The default value is 2.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    return_explained_data : bool
+        If True, the explained data is returned as an array.
+
+    Returns
+    -------
+    dipoles : list of instances of Dipole
+        The dipole fits.
+    explained_data : array | None
+        Data explained by the dipoles using a least-squares fit with the
+        selected active dipoles and their estimated orientation.
+        Computed only if return_explained_data is True.
+    """
+
+    is_free_ori, ch_names, proj, vertno, G = _prepare_beamformer_input(
+        info, forward, label=None, picks=picks, pick_ori=None)
+
+    gain = G.copy()
+
+    # Handle whitening + data covariance
+    whitener, _ = compute_whitener(noise_cov, info, picks)
+    if info['projs']:
+        whitener = np.dot(whitener, proj)
+
+    # whiten the leadfield and the data
+    G = np.dot(whitener, G)
+    data = np.dot(whitener, data)
+
+    eig_values, eig_vectors = linalg.eigh(np.dot(data, data.T))
+    phi_sig = eig_vectors[:, -n_dipoles:]
+
+    n_orient = 3 if is_free_ori else 1
+    n_channels = G.shape[0]
+    A = np.empty((n_channels, n_dipoles))
+    gain_dip = np.empty((n_channels, n_dipoles))
+    oris = np.empty((n_dipoles, 3))
+    poss = np.empty((n_dipoles, 3))
+
+    G_proj = G.copy()
+    phi_sig_proj = phi_sig.copy()
+
+    for k in range(n_dipoles):
+        subcorr_max = -1.
+        for i_source in range(G.shape[1] // n_orient):
+            idx_k = slice(n_orient * i_source, n_orient * (i_source + 1))
+            Gk = G_proj[:, idx_k]
+            if n_orient == 3:
+                Gk = np.dot(Gk, forward['source_nn'][idx_k])
+
+            subcorr, ori = _compute_subcorr(Gk, phi_sig_proj)
+            if subcorr > subcorr_max:
+                subcorr_max = subcorr
+                source_idx = i_source
+                source_ori = ori
+                if n_orient == 3 and source_ori[-1] < 0:
+                    # make sure ori is relative to surface ori
+                    source_ori *= -1  # XXX
+
+                source_pos = forward['source_rr'][i_source]
+                if n_orient == 1:
+                    source_ori = forward['source_nn'][i_source]
+
+        idx_k = slice(n_orient * source_idx, n_orient * (source_idx + 1))
+        Ak = G[:, idx_k]
+        if n_orient == 3:
+            Ak = np.dot(Ak, np.dot(forward['source_nn'][idx_k], source_ori))
+
+        A[:, k] = Ak.ravel()
+
+        if return_explained_data:
+            gain_k = gain[:, idx_k]
+            if n_orient == 3:
+                gain_k = np.dot(gain_k,
+                                np.dot(forward['source_nn'][idx_k],
+                                       source_ori))
+            gain_dip[:, k] = gain_k.ravel()
+
+        oris[k] = source_ori
+        poss[k] = source_pos
+
+        logger.info("source %s found: p = %s" % (k + 1, source_idx))
+        if n_orient == 3:
+            logger.info("ori = %s %s %s" % tuple(oris[k]))
+
+        projection = _compute_proj(A[:, :k + 1])
+        G_proj = np.dot(projection, G)
+        phi_sig_proj = np.dot(projection, phi_sig)
+
+    sol = linalg.lstsq(A, data)[0]
+
+    gof, explained_data = [], None
+    if return_explained_data:
+        explained_data = np.dot(gain_dip, sol)
+        gof = (linalg.norm(np.dot(whitener, explained_data)) /
+               linalg.norm(data))
+
+    return _make_dipoles(times, poss,
+                         oris, sol, gof), explained_data
+
+
+def _make_dipoles(times, poss, oris, sol, gof):
+    """Instanciates a list of Dipoles
+
+    Parameters
+    ----------
+    times : array, shape (n_times,)
+        The time instants.
+    poss : array, shape (n_dipoles, 3)
+        The dipoles' positions.
+    oris : array, shape (n_dipoles, 3)
+        The dipoles' orientations.
+    sol : array, shape (n_dipoles, n_times)
+        The dipoles' amplitudes over time.
+    gof : array, shape (n_times,)
+        The goodness of fit of the dipoles.
+        Shared between all dipoles.
+
+    Returns
+    -------
+    dipoles : list
+        The list of Dipole instances.
+    """
+    amplitude = sol * 1e9
+    oris = np.array(oris)
+
+    dipoles = []
+    for i_dip in range(poss.shape[0]):
+        i_pos = poss[i_dip][np.newaxis, :].repeat(len(times), axis=0)
+        i_ori = oris[i_dip][np.newaxis, :].repeat(len(times), axis=0)
+        dipoles.append(Dipole(times, i_pos, amplitude[i_dip],
+                              i_ori, gof))
+
+    return dipoles
+
+
+def _compute_subcorr(G, phi_sig):
+    """ Compute the subspace correlation
+    """
+    Ug, Sg, Vg = linalg.svd(G, full_matrices=False)
+    tmp = np.dot(Ug.T.conjugate(), phi_sig)
+    Uc, Sc, Vc = linalg.svd(tmp, full_matrices=False)
+    X = np.dot(np.dot(Vg.T, np.diag(1. / Sg)), Uc)  # subcorr
+    return Sc[0], X[:, 0] / linalg.norm(X[:, 0])
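+
+# Illustrative sanity check (editor's sketch, not library code): when the
+# gain columns exactly span the signal subspace, the subspace correlation
+# equals 1.
+#
+#     rng = np.random.RandomState(0)
+#     phi = linalg.svd(rng.randn(10, 2), full_matrices=False)[0]
+#     subcorr, ori = _compute_subcorr(phi, phi)
+#     assert np.allclose(subcorr, 1.0)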
+
+
+def _compute_proj(A):
+    """ Compute the orthogonal projection operation for
+    a manifold vector A.
+    """
+    U, _, _ = linalg.svd(A, full_matrices=False)
+    return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate())
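+
+# Illustrative sanity check (editor's sketch, not library code): the
+# projector annihilates the columns of A, so np.dot(P, A) is numerically
+# zero.
+#
+#     A = np.random.RandomState(0).randn(8, 3)
+#     P = _compute_proj(A)
+#     assert np.allclose(np.dot(P, A), 0)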
+
+
+@verbose
+def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False,
+              picks=None, verbose=None):
+    """RAP-MUSIC source localization method.
+
+    Compute Recursively Applied and Projected MUltiple SIgnal Classification
+    (RAP-MUSIC) on evoked data.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to localize.
+    forward : instance of Forward
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    n_dipoles : int
+        The number of dipoles to look for. The default value is 5.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    dipoles : list of instances of Dipole
+        The dipole fits.
+    residual : instance of Evoked
+        The residual, i.e. the data not explained by the dipoles.
+        Only returned if return_residual is True.
+
+    See Also
+    --------
+    mne.fit_dipole
+
+    Notes
+    -----
+    The references are:
+
+        J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively
+        applied and projected (RAP) MUSIC. Signal Processing, IEEE Trans. 47, 2
+        (February 1999), 332-340.
+        DOI=10.1109/78.740118 http://dx.doi.org/10.1109/78.740118
+
+        Mosher, J.C.; Leahy, R.M., EEG and MEG source localization using
+        recursively applied (RAP) MUSIC, Signals, Systems and Computers, 1996.
+        pp.1201,1207 vol.2, 3-6 Nov. 1996
+        doi: 10.1109/ACSSC.1996.599135
+
+    .. versionadded:: 0.9.0
+    """
+
+    info = evoked.info
+    data = evoked.data
+    times = evoked.times
+
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data = data[picks]
+
+    dipoles, explained_data = _apply_rap_music(data, info, times, forward,
+                                               noise_cov, n_dipoles,
+                                               picks, return_residual)
+
+    if return_residual:
+        residual = evoked.copy()
+        selection = [info['ch_names'][p] for p in picks]
+
+        residual = pick_channels_evoked(residual,
+                                        include=selection)
+        residual.data -= explained_data
+        active_projs = [p for p in residual.info['projs'] if p['active']]
+        for p in active_projs:
+            p['active'] = False
+        residual.add_proj(active_projs, remove_existing=True)
+        residual.apply_proj()
+        return dipoles, residual
+    else:
+        return dipoles
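+
+# A minimal usage sketch (illustrative only): assumes ``evoked``,
+# ``forward`` and ``noise_cov`` are available.
+#
+#     dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
+#                                   return_residual=True)
+#     print(dipoles[0].pos[0], dipoles[0].ori[0])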
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_dics.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_dics.py
new file mode 100644
index 0000000..b5f48d7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_dics.py
@@ -0,0 +1,312 @@
+from __future__ import print_function
+import warnings
+import os.path as op
+import copy as cp
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+
+import mne
+from mne.datasets import testing
+from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
+from mne.time_frequency import compute_epochs_csd
+from mne.externals.six import advance_iterator
+from mne.utils import run_tests_if_main, clean_warning_registry
+
+# Note that this is the first test file; this setting will apply to all
+# subsequent tests in a full nosetest:
+warnings.simplefilter("always")  # ensure we can verify expected warnings
+
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
+                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
+fname_event = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc_raw-eve.fif')
+label = 'Aud-lh'
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
+# bit of a hack to deal with old scipy/numpy throwing warnings in tests
+clean_warning_registry()
+
+
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = mne.read_forward_solution(*args, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False)
+
+
+def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
+    """Read in data used in tests
+    """
+    label = mne.read_label(fname_label)
+    events = mne.read_events(fname_event)[:10]
+    raw = mne.io.Raw(fname_raw, preload=False)
+    forward = mne.read_forward_solution(fname_fwd)
+    if read_all_forward:
+        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
+        forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
+                                                  surf_ori=True)
+        forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
+    else:
+        forward_surf_ori = None
+        forward_fixed = None
+        forward_vol = None
+
+    event_id = 1  # tmin and tmax keep their argument values
+
+    # Setup for reading the raw data
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    # Set up pick list: MEG - bad channels
+    left_temporal_channels = mne.read_selection('Left-temporal')
+    picks = mne.pick_types(raw.info, meg=True, eeg=False,
+                           stim=True, eog=True, exclude='bads',
+                           selection=left_temporal_channels)
+
+    # Read epochs
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        picks=picks, baseline=(None, 0), preload=True,
+                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+    epochs.resample(200, npad=0, n_jobs=2)
+    evoked = epochs.average()
+
+    # Computing the data and noise cross-spectral density matrices
+    if compute_csds:
+        data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.045,
+                                      tmax=None, fmin=8, fmax=12,
+                                      mt_bandwidth=72.72)
+        noise_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=None,
+                                       tmax=0.0, fmin=8, fmax=12,
+                                       mt_bandwidth=72.72)
+    else:
+        data_csd, noise_csd = None, None
+
+    return raw, epochs, evoked, data_csd, noise_csd, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol
+
+
+@testing.requires_testing_data
+def test_dics():
+    """Test DICS with evoked data and single trials
+    """
+    raw, epochs, evoked, data_csd, noise_csd, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol = _get_data()
+
+    stc = dics(evoked, forward, noise_csd=noise_csd, data_csd=data_csd,
+               label=label)
+
+    stc.crop(0, None)
+    stc_pow = np.sum(stc.data, axis=1)
+    idx = np.argmax(stc_pow)
+    max_stc = stc.data[idx]
+    tmax = stc.times[np.argmax(max_stc)]
+
+    # These values are imprecise due to the limited number of epochs
+    assert_true(0.04 < tmax < 0.05)
+    assert_true(10 < np.max(max_stc) < 13)
+
+    # Test picking normal orientation
+    stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
+                      pick_ori="normal", label=label)
+    stc_normal.crop(0, None)
+
+    # The amplitude of the normal orientation results should always be
+    # smaller than that of the free orientation results
+    assert_true((np.abs(stc_normal.data) <= stc.data).all())
+
+    # Test if fixed forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, dics_epochs, epochs, forward_fixed, noise_csd,
+                  data_csd, pick_ori="normal")
+
+    # Test if non-surface oriented forward operator is detected when picking
+    # normal orientation
+    assert_raises(ValueError, dics_epochs, epochs, forward, noise_csd,
+                  data_csd, pick_ori="normal")
+
+    # Test if volume forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, dics_epochs, epochs, forward_vol, noise_csd,
+                  data_csd, pick_ori="normal")
+
+    # Now test single trial using fixed orientation forward solution
+    # so we can compare it to the evoked solution
+    stcs = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, reg=0.01,
+                       label=label)
+
+    # Testing returning of generator
+    stcs_ = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, reg=0.01,
+                        return_generator=True, label=label)
+    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
+
+    # Test whether correct number of trials was returned
+    epochs.drop_bad_epochs()
+    assert_true(len(epochs.events) == len(stcs))
+
+    # Average the single trial estimates
+    stc_avg = np.zeros_like(stc.data)
+    for this_stc in stcs:
+        stc_avg += this_stc.crop(0, None).data
+    stc_avg /= len(stcs)
+
+    idx = np.argmax(np.max(stc_avg, axis=1))
+    max_stc = stc_avg[idx]
+    tmax = stc.times[np.argmax(max_stc)]
+
+    assert_true(0.045 < tmax < 0.06)  # imprecise due to limited # of epochs
+    assert_true(12 < np.max(max_stc) < 18.5)
+
+
+@testing.requires_testing_data
+def test_dics_source_power():
+    """Test DICS source power computation
+    """
+    raw, epochs, evoked, data_csd, noise_csd, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol = _get_data()
+
+    stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
+                                         data_csd, label=label)
+
+    max_source_idx = np.argmax(stc_source_power.data)
+    max_source_power = np.max(stc_source_power.data)
+
+    # TODO: Maybe these could be more directly compared to dics() results?
+    assert_true(max_source_idx == 0)
+    assert_true(0.5 < max_source_power < 1.15)
+
+    # Test picking normal orientation and using a list of CSD matrices
+    stc_normal = dics_source_power(epochs.info, forward_surf_ori,
+                                   [noise_csd] * 2, [data_csd] * 2,
+                                   pick_ori="normal", label=label)
+
+    assert_true(stc_normal.data.shape == (stc_source_power.data.shape[0], 2))
+
+    # The normal orientation results should always be smaller than the free
+    # orientation results
+    assert_true((np.abs(stc_normal.data[:, 0]) <=
+                 stc_source_power.data[:, 0]).all())
+
+    # Test if fixed forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, dics_source_power, raw.info, forward_fixed,
+                  noise_csd, data_csd, pick_ori="normal")
+
+    # Test if non-surface oriented forward operator is detected when picking
+    # normal orientation
+    assert_raises(ValueError, dics_source_power, raw.info, forward, noise_csd,
+                  data_csd, pick_ori="normal")
+
+    # Test if volume forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, dics_source_power, epochs.info, forward_vol,
+                  noise_csd, data_csd, pick_ori="normal")
+
+    # Test detection of different number of CSD matrices provided
+    assert_raises(ValueError, dics_source_power, epochs.info, forward,
+                  [noise_csd] * 2, [data_csd] * 3)
+
+    # Test detection of different frequencies in noise and data CSD objects
+    noise_csd.frequencies = [1, 2]
+    data_csd.frequencies = [1, 2, 3]
+    assert_raises(ValueError, dics_source_power, epochs.info, forward,
+                  noise_csd, data_csd)
+
+    # Test detection of uneven frequency spacing
+    data_csds = [cp.deepcopy(data_csd) for i in range(3)]
+    frequencies = [1, 3, 4]
+    for freq, data_csd in zip(frequencies, data_csds):
+        data_csd.frequencies = [freq]
+    noise_csds = data_csds
+    with warnings.catch_warnings(record=True) as w:
+        dics_source_power(epochs.info, forward, noise_csds, data_csds)
+    assert len(w) == 1
+
+
+@testing.requires_testing_data
+def test_tf_dics():
+    """Test TF beamforming based on DICS
+    """
+    tmin, tmax, tstep = -0.2, 0.2, 0.1
+    raw, epochs, _, _, _, label, forward, _, _, _ =\
+        _get_data(tmin, tmax, read_all_forward=False, compute_csds=False)
+
+    freq_bins = [(4, 20), (30, 55)]
+    win_lengths = [0.2, 0.2]
+    reg = 0.001
+
+    noise_csds = []
+    for freq_bin, win_length in zip(freq_bins, win_lengths):
+        noise_csd = compute_epochs_csd(epochs, mode='fourier',
+                                       fmin=freq_bin[0], fmax=freq_bin[1],
+                                       fsum=True, tmin=tmin,
+                                       tmax=tmin + win_length)
+        noise_csds.append(noise_csd)
+
+    stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
+                   freq_bins, reg=reg, label=label)
+
+    assert_true(len(stcs) == len(freq_bins))
+    assert_true(stcs[0].shape[1] == 4)
+
+    # Manually calculating source power in several time windows to compare
+    # results and test overlapping
+    source_power = []
+    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
+    for time_window in time_windows:
+        data_csd = compute_epochs_csd(epochs, mode='fourier',
+                                      fmin=freq_bins[0][0],
+                                      fmax=freq_bins[0][1], fsum=True,
+                                      tmin=time_window[0], tmax=time_window[1])
+        noise_csd = compute_epochs_csd(epochs, mode='fourier',
+                                       fmin=freq_bins[0][0],
+                                       fmax=freq_bins[0][1], fsum=True,
+                                       tmin=-0.2, tmax=0.0)
+        data_csd.data /= data_csd.n_fft
+        noise_csd.data /= noise_csd.n_fft
+        stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
+                                             data_csd, reg=reg, label=label)
+        source_power.append(stc_source_power.data)
+
+    # Averaging all time windows that overlap the time period 0 to 100 ms
+    source_power = np.mean(source_power, axis=0)
+
+    # Selecting the first frequency bin in tf_dics results
+    stc = stcs[0]
+
+    # Comparing tf_dics results with dics_source_power results
+    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])
+
+    # Test if using unsupported max-power orientation is detected
+    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
+                  tstep, win_lengths, freq_bins=freq_bins,
+                  pick_ori='max-power')
+
+    # Test if incorrect number of noise CSDs is detected
+    assert_raises(ValueError, tf_dics, epochs, forward, [noise_csds[0]], tmin,
+                  tmax, tstep, win_lengths, freq_bins=freq_bins)
+
+    # Test if freq_bins and win_lengths incompatibility is detected
+    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
+                  tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)
+
+    # Test if time step exceeding window lengths is detected
+    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
+                  tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)
+
+    # Test if incorrect number of mt_bandwidths is detected
+    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
+                  tstep, win_lengths, freq_bins, mode='multitaper',
+                  mt_bandwidths=[20])
+
+    # Pass only one epoch to test if subtracting evoked responses yields zeros
+    stcs = tf_dics(epochs[0], forward, noise_csds, tmin, tmax, tstep,
+                   win_lengths, freq_bins, subtract_evoked=True, reg=reg,
+                   label=label)
+
+    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_lcmv.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_lcmv.py
new file mode 100644
index 0000000..d92c60a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_lcmv.py
@@ -0,0 +1,378 @@
+import os.path as op
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+import warnings
+
+import mne
+from mne import compute_covariance
+from mne.datasets import testing
+from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
+from mne.beamformer._lcmv import _lcmv_source_power
+from mne.externals.six import advance_iterator
+from mne.utils import run_tests_if_main, slow_test
+
+
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
+                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
+fname_event = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc_raw-eve.fif')
+label = 'Aud-lh'
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = mne.read_forward_solution(*args, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False)
+
+
+def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
+              epochs_preload=True, data_cov=True):
+    """Read in data used in tests
+    """
+    label = mne.read_label(fname_label)
+    events = mne.read_events(fname_event)
+    raw = mne.io.Raw(fname_raw, preload=True)
+    forward = mne.read_forward_solution(fname_fwd)
+    if all_forward:
+        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
+        forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
+                                                  surf_ori=True)
+        forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)
+    else:
+        forward_surf_ori = None
+        forward_fixed = None
+        forward_vol = None
+
+    event_id = 1
+
+    # Setup for reading the raw data
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    if epochs:
+        # Set up pick list: MEG - bad channels
+        left_temporal_channels = mne.read_selection('Left-temporal')
+        picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                               eog=True, ref_meg=False, exclude='bads',
+                               selection=left_temporal_channels)
+
+        # Read epochs
+        epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                            picks=picks, baseline=(None, 0),
+                            preload=epochs_preload,
+                            reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+        if epochs_preload:
+            epochs.resample(200, npad=0, n_jobs=2)
+        evoked = epochs.average()
+        info = evoked.info
+    else:
+        epochs = None
+        evoked = None
+        info = raw.info
+
+    noise_cov = mne.read_cov(fname_cov)
+    noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
+                                   eeg=0.1, proj=True)
+    if data_cov:
+        with warnings.catch_warnings(record=True):
+            data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+    else:
+        data_cov = None
+
+    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol
+
+
+@slow_test
+@testing.requires_testing_data
+def test_lcmv():
+    """Test LCMV with evoked data and single trials
+    """
+    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol = _get_data()
+
+    for fwd in [forward, forward_vol]:
+        stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
+        stc.crop(0.02, None)
+
+        stc_pow = np.sum(stc.data, axis=1)
+        idx = np.argmax(stc_pow)
+        max_stc = stc.data[idx]
+        tmax = stc.times[np.argmax(max_stc)]
+
+        assert_true(0.09 < tmax < 0.105, tmax)
+        assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))
+
+        if fwd is forward:
+            # Test picking normal orientation (surface source space only)
+            stc_normal = lcmv(evoked, forward_surf_ori, noise_cov, data_cov,
+                              reg=0.01, pick_ori="normal")
+            stc_normal.crop(0.02, None)
+
+            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
+            idx = np.argmax(stc_pow)
+            max_stc = stc_normal.data[idx]
+            tmax = stc_normal.times[np.argmax(max_stc)]
+
+            assert_true(0.04 < tmax < 0.11, tmax)
+            assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))
+
+            # The amplitude of normal orientation results should always be
+            # smaller than free orientation results
+            assert_true((np.abs(stc_normal.data) <= stc.data).all())
+
+        # Test picking source orientation maximizing output source power
+        stc_max_power = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01,
+                             pick_ori="max-power")
+        stc_max_power.crop(0.02, None)
+        stc_pow = np.sum(stc_max_power.data, axis=1)
+        idx = np.argmax(stc_pow)
+        max_stc = stc_max_power.data[idx]
+        tmax = stc.times[np.argmax(max_stc)]
+
+        assert_true(0.09 < tmax < 0.11, tmax)
+        assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))
+
+        # Maximum output source power orientation results should be similar to
+        # free orientation results
+        assert_true((stc_max_power.data - stc.data < 1).all())
+
+    # Test if fixed forward operator is detected when picking normal or
+    # max-power orientation
+    assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
+                  reg=0.01, pick_ori="normal")
+    assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,
+                  reg=0.01, pick_ori="max-power")
+
+    # Test if non-surface oriented forward operator is detected when picking
+    # normal orientation
+    assert_raises(ValueError, lcmv, evoked, forward, noise_cov, data_cov,
+                  reg=0.01, pick_ori="normal")
+
+    # Test if volume forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, lcmv, evoked, forward_vol, noise_cov, data_cov,
+                  reg=0.01, pick_ori="normal")
+
+    # Now test single trial using fixed orientation forward solution
+    # so we can compare it to the evoked solution
+    stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01)
+    stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov, reg=0.01,
+                        return_generator=True)
+    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
+
+    epochs.drop_bad_epochs()
+    assert_true(len(epochs.events) == len(stcs))
+
+    # average the single trial estimates
+    stc_avg = np.zeros_like(stcs[0].data)
+    for this_stc in stcs:
+        stc_avg += this_stc.data
+    stc_avg /= len(stcs)
+
+    # compare it to the solution using evoked with fixed orientation
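+    # (the LCMV beamformer is a linear spatial filter, so averaging the
+    # filtered single trials must equal filtering the averaged, evoked data)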
+    stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)
+    assert_array_almost_equal(stc_avg, stc_fixed.data)
+
+    # use a label so we have few source vertices and delayed computation is
+    # not used
+    stcs_label = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,
+                             reg=0.01, label=label)
+
+    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
+
+
+@testing.requires_testing_data
+def test_lcmv_raw():
+    """Test LCMV with raw data
+    """
+    raw, _, _, _, noise_cov, label, forward, _, _, _ =\
+        _get_data(all_forward=False, epochs=False, data_cov=False)
+
+    tmin, tmax = 0, 20
+    start, stop = raw.time_as_index([tmin, tmax])
+
+    # use only the left-temporal MEG channels for LCMV
+    left_temporal_channels = mne.read_selection('Left-temporal')
+    picks = mne.pick_types(raw.info, meg=True, exclude='bads',
+                           selection=left_temporal_channels)
+
+    data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)
+
+    stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label,
+                   start=start, stop=stop, picks=picks)
+
+    assert_array_almost_equal(np.array([tmin, tmax]),
+                              np.array([stc.times[0], stc.times[-1]]),
+                              decimal=2)
+
+    # make sure we get an stc with vertices only in the lh
+    vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]
+    assert_true(len(stc.vertices[0]) == len(np.intersect1d(vertno[0],
+                                                           label.vertices)))
+    assert_true(len(stc.vertices[1]) == 0)
+
+
+@testing.requires_testing_data
+def test_lcmv_source_power():
+    """Test LCMV source power computation
+    """
+    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
+        forward_surf_ori, forward_fixed, forward_vol = _get_data()
+
+    stc_source_power = _lcmv_source_power(epochs.info, forward, noise_cov,
+                                          data_cov, label=label)
+
+    max_source_idx = np.argmax(stc_source_power.data)
+    max_source_power = np.max(stc_source_power.data)
+
+    assert_true(max_source_idx == 0, max_source_idx)
+    assert_true(0.4 < max_source_power < 2.4, max_source_power)
+
+    # Test picking normal orientation
+    stc_normal = _lcmv_source_power(epochs.info, forward_surf_ori, noise_cov,
+                                    data_cov, pick_ori="normal", label=label)
+
+    # The normal orientation results should always be smaller than free
+    # orientation results
+    assert_true((np.abs(stc_normal.data[:, 0]) <=
+                 stc_source_power.data[:, 0]).all())
+
+    # Test if fixed forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, _lcmv_source_power, raw.info, forward_fixed,
+                  noise_cov, data_cov, pick_ori="normal")
+
+    # Test if non-surface oriented forward operator is detected when picking
+    # normal orientation
+    assert_raises(ValueError, _lcmv_source_power, raw.info, forward, noise_cov,
+                  data_cov, pick_ori="normal")
+
+    # Test if volume forward operator is detected when picking normal
+    # orientation
+    assert_raises(ValueError, _lcmv_source_power, epochs.info, forward_vol,
+                  noise_cov, data_cov, pick_ori="normal")
+
+
+@testing.requires_testing_data
+def test_tf_lcmv():
+    """Test TF beamforming based on LCMV
+    """
+    label = mne.read_label(fname_label)
+    events = mne.read_events(fname_event)
+    raw = mne.io.Raw(fname_raw, preload=True)
+    forward = mne.read_forward_solution(fname_fwd)
+
+    event_id, tmin, tmax = 1, -0.2, 0.2
+
+    # Setup for reading the raw data
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    # Set up pick list: MEG - bad channels
+    left_temporal_channels = mne.read_selection('Left-temporal')
+    picks = mne.pick_types(raw.info, meg=True, eeg=False,
+                           stim=True, eog=True, exclude='bads',
+                           selection=left_temporal_channels)
+
+    # Read epochs
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        picks=picks, baseline=None, preload=False,
+                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+    epochs.drop_bad_epochs()
+
+    freq_bins = [(4, 12), (15, 40)]
+    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
+    win_lengths = [0.2, 0.2]
+    tstep = 0.1
+    reg = 0.05
+
+    source_power = []
+    noise_covs = []
+    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
+        raw_band = raw.copy()
+        raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
+        epochs_band = mne.Epochs(raw_band, epochs.events, epochs.event_id,
+                                 tmin=tmin, tmax=tmax, baseline=None,
+                                 proj=True, picks=picks)
+        with warnings.catch_warnings(record=True):  # not enough samples
+            noise_cov = compute_covariance(epochs_band, tmin=tmin, tmax=tmin +
+                                           win_length)
+        noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=reg,
+                                       grad=reg, eeg=reg, proj=True)
+        noise_covs.append(noise_cov)
+        del raw_band  # to save memory
+
+        # Manually calculate source power in one frequency band and several
+        # time windows to compare to tf_lcmv results and test overlapping
+        if (l_freq, h_freq) == freq_bins[0]:
+            for time_window in time_windows:
+                with warnings.catch_warnings(record=True):
+                    data_cov = compute_covariance(epochs_band,
+                                                  tmin=time_window[0],
+                                                  tmax=time_window[1])
+                stc_source_power = _lcmv_source_power(epochs.info, forward,
+                                                      noise_cov, data_cov,
+                                                      reg=reg, label=label)
+                source_power.append(stc_source_power.data)
+
+    with warnings.catch_warnings(record=True):
+        stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep,
+                       win_lengths, freq_bins, reg=reg, label=label)
+
+    assert_true(len(stcs) == len(freq_bins))
+    assert_true(stcs[0].shape[1] == 4)
+
+    # Averaging all time windows that overlap the time period 0 to 100 ms
+    source_power = np.mean(source_power, axis=0)
+
+    # Selecting the first frequency bin in tf_lcmv results
+    stc = stcs[0]
+
+    # Comparing tf_lcmv results with _lcmv_source_power results
+    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])
+
+    # Test if using unsupported max-power orientation is detected
+    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
+                  tstep, win_lengths, freq_bins=freq_bins,
+                  pick_ori='max-power')
+
+    # Test if incorrect number of noise covariances is detected
+    assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
+                  tmax, tstep, win_lengths, freq_bins)
+
+    # Test if freq_bins and win_lengths incompatibility is detected
+    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
+                  tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)
+
+    # Test if time step exceeding window lengths is detected
+    assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,
+                  tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)
+
+    # Test correct detection of preloaded epochs objects that do not contain
+    # the underlying raw object
+    epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                                  baseline=(None, 0), preload=True)
+    with warnings.catch_warnings(record=True):  # not enough samples
+        assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,
+                      noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)
+
+    with warnings.catch_warnings(record=True):  # not enough samples
+        # Pass only one epoch to test if subtracting evoked
+        # responses yields zeros
+        stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep,
+                       win_lengths, freq_bins, subtract_evoked=True, reg=reg,
+                       label=label)
+
+    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_rap_music.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_rap_music.py
new file mode 100644
index 0000000..ce73f0b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/beamformer/tests/test_rap_music.py
@@ -0,0 +1,152 @@
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import numpy as np
+from scipy import linalg
+
+import warnings
+from nose.tools import assert_true
+
+import mne
+from mne.datasets import testing
+from mne.beamformer import rap_music
+from mne.utils import run_tests_if_main
+
+
+data_path = testing.data_path(download=False)
+fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _read_forward_solution_meg(fname_fwd, **kwargs):
+    fwd = mne.read_forward_solution(fname_fwd, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False,
+                                  exclude=['MEG 2443'])
+
+
+def _get_data(event_id=1):
+    """Read in data used in tests
+    """
+    # Read evoked
+    evoked = mne.read_evokeds(fname_ave, event_id)
+    evoked.pick_types(meg=True, eeg=False)
+    evoked.crop(0, 0.3)
+
+    forward = mne.read_forward_solution(fname_fwd)
+
+    forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True)
+    forward_fixed = _read_forward_solution_meg(fname_fwd, force_fixed=True,
+                                               surf_ori=True)
+
+    noise_cov = mne.read_cov(fname_cov)
+
+    return evoked, noise_cov, forward, forward_surf_ori, forward_fixed
+
+
+def simu_data(evoked, forward, noise_cov, n_dipoles, times):
+    """Simulate an evoked dataset with 2 sources
+
+    One source is put in each hemisphere.
+    """
+    # Generate the two dipoles data
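+    # (two Gaussian pulses peaking at 100 ms and 75 ms, scaled by 1e-9 below)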
+    mu, sigma = 0.1, 0.005
+    s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
+                                                   (2 * sigma ** 2))
+
+    mu, sigma = 0.075, 0.008
+    s2 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
+                                                   (2 * sigma ** 2))
+    data = np.array([s1, s2]) * 1e-9
+
+    src = forward['src']
+    rng = np.random.RandomState(42)
+
+    rndi = rng.randint(len(src[0]['vertno']))
+    lh_vertno = src[0]['vertno'][[rndi]]
+
+    rndi = rng.randint(len(src[1]['vertno']))
+    rh_vertno = src[1]['vertno'][[rndi]]
+
+    vertices = [lh_vertno, rh_vertno]
+    tmin, tstep = times.min(), 1 / evoked.info['sfreq']
+    stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep)
+
+    sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info,
+                                                noise_cov, snr=20,
+                                                random_state=rng)
+
+    return sim_evoked, stc
+
+
+def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
+    src = fwd['src']
+    pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
+                                     stc.vertices[0])]
+    pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
+                                     stc.vertices[1])[0] +
+                            len(src[0]['vertno'])]
+
+    # Check the position of the two dipoles
+    assert_true(dipoles[0].pos[0] in np.array([pos1, pos2]))
+    assert_true(dipoles[1].pos[0] in np.array([pos1, pos2]))
+
+    ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
+                                     stc.vertices[0])[0]][0]
+    ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
+                                     stc.vertices[1])[0] +
+                            len(src[0]['vertno'])][0]
+
+    # Check the orientation of the dipoles
+    assert_true(np.max(np.abs(np.dot(dipoles[0].ori[0],
+                                     np.array([ori1, ori2]).T))) > 0.99)
+
+    assert_true(np.max(np.abs(np.dot(dipoles[1].ori[0],
+                                     np.array([ori1, ori2]).T))) > 0.99)
+
+    if residual is not None:
+        picks_grad = mne.pick_types(residual.info, meg='grad')
+        picks_mag = mne.pick_types(residual.info, meg='mag')
+        rel_tol = 0.02
+        for picks in [picks_grad, picks_mag]:
+            assert_true(linalg.norm(residual.data[picks], ord='fro') <
+                        rel_tol *
+                        linalg.norm(evoked.data[picks], ord='fro'))
+
+
+@testing.requires_testing_data
+def test_rap_music_simulated():
+    """Test RAP-MUSIC with simulated evoked
+    """
+    evoked, noise_cov, forward, forward_surf_ori, forward_fixed =\
+        _get_data()
+
+    n_dipoles = 2
+    sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
+                                n_dipoles, evoked.times)
+    # Check dipoles for fixed ori
+    dipoles = rap_music(sim_evoked, forward_fixed, noise_cov,
+                        n_dipoles=n_dipoles)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked)
+
+    dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+    # Check dipoles for free ori
+    dipoles, residual = rap_music(sim_evoked, forward, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+    # Check dipoles for free surface ori
+    dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/bem.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/bem.py
new file mode 100644
index 0000000..2e83e22
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/bem.py
@@ -0,0 +1,1660 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Lorenzo De Santis <lorenzo.de-santis at u-psud.fr>
+#
+# License: BSD (3-clause)
+
+import sys
+import os
+import os.path as op
+import shutil
+import glob
+import numpy as np
+from scipy import linalg
+
+from .fixes import partial
+from .utils import (verbose, logger, run_subprocess, deprecated,
+                    get_subjects_dir)
+from .transforms import _ensure_trans, apply_trans
+from .io.constants import FIFF
+from .io.write import (start_file, start_block, write_float, write_int,
+                       write_float_matrix, write_int_matrix, end_block,
+                       end_file)
+from .io.tag import find_tag
+from .io.tree import dir_tree_find
+from .io.open import fiff_open
+from .externals.six import string_types
+
+
+# ############################################################################
+# Compute BEM solution
+
+# The following approach is based on:
+#
+# de Munck JC: "A linear discretization of the volume conductor boundary
+# integral equation using analytically integrated elements",
+# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
+#
+
+
+class ConductorModel(dict):
+    """BEM or sphere model"""
+    def __repr__(self):
+        if self['is_sphere']:
+            center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])
+            pl = '' if len(self['layers']) == 1 else 's'
+            rad = self.radius
+            if rad is None:  # no radius / MEG only
+                extra = 'Sphere (no layers): r0=[%s] mm' % center
+            else:
+                extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'
+                         % (len(self['layers']) - 1, pl, center, rad * 1000.))
+        else:
+            pl = '' if len(self['surfs']) == 1 else 's'
+            extra = ('BEM (%s layer%s)' % (len(self['surfs']), pl))
+        return '<ConductorModel  |  %s>' % extra
+
+    @property
+    def radius(self):
+        if not self['is_sphere']:
+            raise RuntimeError('radius undefined for BEM')
+        return None if len(self['layers']) == 0 else self['layers'][-1]['rad']
+
+
+def _calc_beta(rk, rk_norm, rk1, rk1_norm):
+    """These coefficients are used to calculate the magic vector omega"""
+    rkk1 = rk1[0] - rk[0]
+    size = np.sqrt(np.dot(rkk1, rkk1))
+    rkk1 /= size
+    num = rk_norm + np.dot(rk, rkk1)
+    den = rk1_norm + np.dot(rk1, rkk1)
+    res = np.log(num / den) / size
+    return res
+
+
+def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
+    """The linear potential matrix element computations"""
+    from .source_space import _fast_cross_nd_sum
+    omega = np.zeros((len(fros), 3))
+
+    # we replicate a little bit of the _get_solids code here for speed
+    v1 = tri_rr[np.newaxis, 0, :] - fros
+    v2 = tri_rr[np.newaxis, 1, :] - fros
+    v3 = tri_rr[np.newaxis, 2, :] - fros
+    triples = _fast_cross_nd_sum(v1, v2, v3)
+    l1 = np.sqrt(np.sum(v1 * v1, axis=1))
+    l2 = np.sqrt(np.sum(v2 * v2, axis=1))
+    l3 = np.sqrt(np.sum(v3 * v3, axis=1))
+    ss = (l1 * l2 * l3 +
+          np.sum(v1 * v2, axis=1) * l3 +
+          np.sum(v1 * v3, axis=1) * l2 +
+          np.sum(v2 * v3, axis=1) * l1)
+    solids = np.arctan2(triples, ss)
+
+    # We *could* subselect the good points from v1, v2, v3, triples, solids,
+    # l1, l2, and l3, but there are *very* few bad points. So instead we do
+    # some unnecessary calculations, and then omit them from the final
+    # solution. These three lines ensure we don't get invalid values in
+    # _calc_beta.
+    bad_mask = np.abs(solids) < np.pi / 1e6
+    l1[bad_mask] = 1.
+    l2[bad_mask] = 1.
+    l3[bad_mask] = 1.
+
+    # Calculate the magic vector vec_omega
+    beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
+            _calc_beta(v2, l2, v3, l3)[:, np.newaxis],
+            _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
+    vec_omega = (beta[2] - beta[0]) * v1
+    vec_omega += (beta[0] - beta[1]) * v2
+    vec_omega += (beta[1] - beta[2]) * v3
+
+    area2 = 2.0 * tri_area
+    n2 = 1.0 / (area2 * area2)
+    # leave omega = 0 otherwise
+    # Put it all together...
+    yys = [v1, v2, v3]
+    idx = [0, 1, 2, 0, 2]
+    for k in range(3):
+        diff = yys[idx[k - 1]] - yys[idx[k + 1]]
+        zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
+        omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
+                             triples * (diff * vec_omega).sum(axis=-1))
+    # omit the bad points from the solution
+    omega[bad_mask] = 0.
+    return omega
+
+
+def _correct_auto_elements(surf, mat):
+    """Improve auto-element approximation..."""
+    pi2 = 2.0 * np.pi
+    tris_flat = surf['tris'].ravel()
+    misses = pi2 - mat.sum(axis=1)
+    for j, miss in enumerate(misses):
+        # How much is missing?
+        n_memb = len(surf['neighbor_tri'][j])
+        # The node itself receives one half
+        mat[j, j] = miss / 2.0
+        # The rest is divided evenly among the member nodes...
+        miss /= (4.0 * n_memb)
+        members = np.where(j == tris_flat)[0]
+        mods = members % 3
+        offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
+        tri_1 = members + offsets[mods, 0]
+        tri_2 = members + offsets[mods, 1]
+        for t1, t2 in zip(tri_1, tri_2):
+            mat[j, tris_flat[t1]] += miss
+            mat[j, tris_flat[t2]] += miss
+    return
+
+
+def _fwd_bem_lin_pot_coeff(surfs):
+    """Calculate the coefficients for linear collocation approach"""
+    # taken from fwd_bem_linear_collocation.c
+    nps = [surf['np'] for surf in surfs]
+    np_tot = sum(nps)
+    coeff = np.zeros((np_tot, np_tot))
+    offsets = np.cumsum(np.concatenate(([0], nps)))
+    for si_1, surf1 in enumerate(surfs):
+        rr_ord = np.arange(nps[si_1])
+        for si_2, surf2 in enumerate(surfs):
+            logger.info("        %s (%d) -> %s (%d) ..." %
+                        (_bem_explain_surface(surf1['id']), nps[si_1],
+                         _bem_explain_surface(surf2['id']), nps[si_2]))
+            tri_rr = surf2['rr'][surf2['tris']]
+            tri_nn = surf2['tri_nn']
+            tri_area = surf2['tri_area']
+            submat = coeff[offsets[si_1]:offsets[si_1 + 1],
+                           offsets[si_2]:offsets[si_2 + 1]]  # view
+            for k in range(surf2['ntri']):
+                tri = surf2['tris'][k]
+                if si_1 == si_2:
+                    skip_idx = ((rr_ord == tri[0]) |
+                                (rr_ord == tri[1]) |
+                                (rr_ord == tri[2]))
+                else:
+                    skip_idx = list()
+                # No contribution from a triangle that
+                # this vertex belongs to
+                # if sidx1 == sidx2 and (tri == j).any():
+                #     continue
+                # Otherwise do the hard job
+                coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
+                                        tri_area[k])
+                coeffs[skip_idx] = 0.
+                submat[:, tri] -= coeffs
+            if si_1 == si_2:
+                _correct_auto_elements(surf1, submat)
+    return coeff
+
+
+def _fwd_bem_multi_solution(solids, gamma, nps):
+    """Do multi surface solution
+
+      * Invert I - solids/(2*M_PI)
+      * Take deflation into account
+      * The matrix is destroyed after inversion
+      * This is the general multilayer case
+
+    """
+    pi2 = 1.0 / (2 * np.pi)
+    n_tot = np.sum(nps)
+    assert solids.shape == (n_tot, n_tot)
+    nsurf = len(nps)
+    defl = 1.0 / n_tot
+    # Modify the matrix
+    offsets = np.cumsum(np.concatenate(([0], nps)))
+    for si_1 in range(nsurf):
+        for si_2 in range(nsurf):
+            mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
+            slice_j = slice(offsets[si_1], offsets[si_1 + 1])
+            slice_k = slice(offsets[si_2], offsets[si_2 + 1])
+            solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
+    solids += np.eye(n_tot)
+    return linalg.inv(solids, overwrite_a=True)
+
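+# Hedged sketch of what the solver above computes: with potentials V on all
+# surfaces and Omega the collocation (solid angle) matrix, the discretized
+# boundary integral equation reads (I - Gamma * Omega / (2 * pi)) V = V_inf,
+# and the deflation term 1 / n_tot removes the singular constant mode before
+# inversion.  Toy illustration (hypothetical sizes, not a public API):
+#
+#     solids = np.random.RandomState(0).rand(6, 6)
+#     sol = _fwd_bem_multi_solution(solids, np.ones((2, 2)), nps=[3, 3])
+#     assert sol.shape == (6, 6)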
+
+def _fwd_bem_homog_solution(solids, nps):
+    """Helper to make a homogeneous solution"""
+    return _fwd_bem_multi_solution(solids, None, nps)
+
+
+def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
+    """Modify the solution according to the IP approach"""
+    n_last = n_tri[-1]
+    mult = (1.0 + ip_mult) / ip_mult
+
+    logger.info('        Combining...')
+    offsets = np.cumsum(np.concatenate(([0], n_tri)))
+    for si in range(len(n_tri)):
+        # Pick the correct submatrix (right column) and multiply
+        sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
+        # Multiply
+        sub -= 2 * np.dot(sub, ip_solution)
+
+    # The lower right corner is a special case
+    sub[-n_last:, -n_last:] += mult * ip_solution
+
+    # Final scaling
+    logger.info('        Scaling...')
+    solution *= ip_mult
+    return
+
+
+def _fwd_bem_linear_collocation_solution(m):
+    """Compute the linear collocation potential solution"""
+    # first, add surface geometries
+    from .surface import _complete_surface_info
+    for surf in m['surfs']:
+        _complete_surface_info(surf, verbose=False)
+
+    logger.info('Computing the linear collocation solution...')
+    logger.info('    Matrix coefficients...')
+    coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
+    m['nsol'] = len(coeff)
+    logger.info("    Inverting the coefficient matrix...")
+    nps = [surf['np'] for surf in m['surfs']]
+    m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
+    if len(m['surfs']) == 3:
+        ip_mult = m['sigma'][1] / m['sigma'][2]
+        if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
+            logger.info('IP approach required...')
+            logger.info('    Matrix coefficients (homog)...')
+            coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
+            logger.info('    Inverting the coefficient matrix (homog)...')
+            ip_solution = _fwd_bem_homog_solution(coeff,
+                                                  [m['surfs'][-1]['np']])
+            logger.info('    Modify the original solution to incorporate '
+                        'IP approach...')
+            _fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
+                                        nps)
+    m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
+    logger.info("Solution ready.")
+
+
+@verbose
+def make_bem_solution(surfs, verbose=None):
+    """Create a BEM solution using the linear collocation approach
+
+    Parameters
+    ----------
+    surfs : list of dict
+        The BEM surfaces to use (`from make_bem_model`)
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    bem : instance of ConductorModel
+        The BEM solution.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+
+    See Also
+    --------
+    make_bem_model
+    read_bem_surfaces
+    write_bem_surfaces
+    read_bem_solution
+    write_bem_solution
+    """
+    logger.info('Approximation method : Linear collocation\n')
+    if isinstance(surfs, string_types):
+        # Load the surfaces
+        logger.info('Loading surfaces...')
+        surfs = read_bem_surfaces(surfs)
+    bem = ConductorModel(is_sphere=False, surfs=surfs)
+    _add_gamma_multipliers(bem)
+    if len(bem['surfs']) == 3:
+        logger.info('Three-layer model surfaces loaded.')
+    elif len(bem['surfs']) == 1:
+        logger.info('Homogeneous model surface loaded.')
+    else:
+        raise RuntimeError('Only 1- or 3-layer BEM computations supported')
+    _fwd_bem_linear_collocation_solution(bem)
+    logger.info('BEM geometry computations complete.')
+    return bem
+
+
+# ############################################################################
+# Make BEM model
+
+def _ico_downsample(surf, dest_grade):
+    """Downsample the surface if isomorphic to a subdivided icosahedron"""
+    from .surface import _get_ico_surface
+    n_tri = surf['ntri']
+    found = -1
+    bad_msg = ("A surface with %d triangles cannot be isomorphic with a "
+               "subdivided icosahedron." % surf['ntri'])
+    if n_tri % 20 != 0:
+        raise RuntimeError(bad_msg)
+    n_tri = n_tri // 20
+    found = int(round(np.log(n_tri) / np.log(4)))
+    if n_tri != 4 ** found:
+        raise RuntimeError(bad_msg)
+    del n_tri
+
+    if dest_grade > found:
+        raise RuntimeError('For this surface, decimation grade should be %d '
+                           'or less, not %s.' % (found, dest_grade))
+
+    source = _get_ico_surface(found)
+    dest = _get_ico_surface(dest_grade, patch_stats=True)
+    del dest['tri_cent']
+    del dest['tri_nn']
+    del dest['neighbor_tri']
+    del dest['tri_area']
+    if not np.array_equal(source['tris'], surf['tris']):
+        raise RuntimeError('The source surface has a matching number of '
+                           'triangles but ordering is wrong')
+    logger.info('Going from %dth to %dth subdivision of an icosahedron '
+                '(n_tri: %d -> %d)' % (found, dest_grade, surf['ntri'],
+                                       dest['ntri']))
+    # Find the mapping
+    dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
+    return dest
+
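+# Note: a grade-g subdivided icosahedron has 20 * 4 ** g triangles, which is
+# the invariant checked above (e.g. 1280 for grade 3, 5120 for grade 4).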
+
+def _get_ico_map(fro, to):
+    """Helper to get a mapping between ico surfaces"""
+    from .surface import _compute_nearest
+    nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
+    n_bads = (dists > 5e-3).sum()
+    if n_bads > 0:
+        raise RuntimeError('No matching vertex for %d destination vertices'
+                           % (n_bads))
+    return nearest
+
+
+def _order_surfaces(surfs):
+    """Reorder the surfaces"""
+    if len(surfs) != 3:
+        return surfs
+    # we have three surfaces
+    surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                  FIFF.FIFFV_BEM_SURF_ID_SKULL,
+                  FIFF.FIFFV_BEM_SURF_ID_BRAIN]
+    ids = np.array([surf['id'] for surf in surfs])
+    if set(ids) != set(surf_order):
+        raise RuntimeError('bad surface ids: %s' % ids)
+    order = [np.where(ids == id_)[0][0] for id_ in surf_order]
+    surfs = [surfs[idx] for idx in order]
+    return surfs
+
+
+def _assert_complete_surface(surf):
+    """Check the sum of solid angles as seen from inside"""
+    # from surface_checks.c
+    from .source_space import _get_solids
+    tot_angle = 0.
+    # Center of mass....
+    cm = surf['rr'].mean(axis=0)
+    logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
+                (_surf_name[surf['id']],
+                 1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
+    tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
+    if np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5:
+        raise RuntimeError('Surface %s is not complete (sum of solid angles '
+                           '= %g * 4*PI instead).' %
+                           (_surf_name[surf['id']], tot_angle))
+
+
+_surf_name = {
+    FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
+    FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
+    FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
+    FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown    ',
+}
+
+
+def _assert_inside(fro, to):
+    """Helper to check one set of points is inside a surface"""
+    # this is "is_inside" in surface_checks.c
+    from .source_space import _get_solids
+    tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
+    if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
+        raise RuntimeError('Surface %s is not completely inside surface %s'
+                           % (_surf_name[fro['id']], _surf_name[to['id']]))
+
+
+def _check_surfaces(surfs):
+    """Check that the surfaces are complete and non-intersecting"""
+    for surf in surfs:
+        _assert_complete_surface(surf)
+    # Then check the topology
+    for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+        logger.info('Checking that %s surface is inside %s surface...' %
+                    (_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
+        _assert_inside(surf_2, surf_1)
+
+
+def _check_surface_size(surf):
+    """Check that the coordinate limits are reasonable"""
+    sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
+    if (sizes < 0.05).any():
+        raise RuntimeError('Dimensions of the surface %s seem too small '
+                           '(%9.5f mm). Maybe the unit of measure is '
+                           'meters instead of mm' %
+                           (_surf_name[surf['id']], 1000 * sizes.min()))
+
+
+def _check_thicknesses(surfs):
+    """How close are we?"""
+    from .surface import _compute_nearest
+    for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+        min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
+                                    return_dists=True)[0]
+        min_dist = min_dist.min()
+        logger.info('Checking distance between %s and %s surfaces...' %
+                    (_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
+        logger.info('Minimum distance between the %s and %s surfaces is '
+                    'approximately %6.1f mm' %
+                    (_surf_name[surf_1['id']], _surf_name[surf_2['id']],
+                     1000 * min_dist))
+
+
+def _surfaces_to_bem(fname_surfs, ids, sigmas, ico=None):
+    """Convert surfaces to a BEM
+    """
+    from .surface import _read_surface_geom
+    # equivalent of mne_surf2bem
+    surfs = list()
+    assert len(fname_surfs) in (1, 3)
+    for fname in fname_surfs:
+        surfs.append(_read_surface_geom(fname, patch_stats=False,
+                                        verbose=False))
+        surfs[-1]['rr'] /= 1000.
+    # Downsampling if the surface is isomorphic with a subdivided icosahedron
+    if ico is not None:
+        for si, surf in enumerate(surfs):
+            surfs[si] = _ico_downsample(surf, ico)
+    for surf, id_ in zip(surfs, ids):
+        surf['id'] = id_
+
+    # Shifting surfaces is not implemented here
+
+    # Order the surfaces for the benefit of the topology checks
+    for surf, sigma in zip(surfs, sigmas):
+        surf['sigma'] = sigma
+    surfs = _order_surfaces(surfs)
+
+    # Check topology as best we can
+    _check_surfaces(surfs)
+    for surf in surfs:
+        _check_surface_size(surf)
+    _check_thicknesses(surfs)
+    logger.info('Surfaces passed the basic topology checks.')
+    return surfs
+
+
+@verbose
+def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
+                   subjects_dir=None, verbose=None):
+    """Create a BEM model for a subject
+
+    .. note:: To get a single-layer BEM corresponding to the --homog flag in
+              the command line tool, set the ``conductivity`` accordingly.
+
+    Parameters
+    ----------
+    subject : str
+        The subject.
+    ico : int | None
+        The surface ico downsampling to use, e.g. 5=20480, 4=5120, 3=1280.
+        If None, no subsampling is applied.
+    conductivity : array of float, shape (3,) or (1,)
+        The conductivities to use for each shell. Should be a single element
+        for a one-layer model, or three elements for a three-layer model.
+        Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
+        single-layer model would be ``[0.3]``.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surfaces : list of dict
+        The BEM surfaces. Use `make_bem_solution` to turn these into a
+        `ConductorModel` suitable for forward calculation.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+
+    See Also
+    --------
+    make_bem_solution
+    make_sphere_model
+    read_bem_surfaces
+    write_bem_surfaces
+    """
+    conductivity = np.array(conductivity, float)
+    if conductivity.ndim != 1 or conductivity.size not in (1, 3):
+        raise ValueError('conductivity must be 1D array-like with 1 or 3 '
+                         'elements')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = op.join(subjects_dir, subject)
+    bem_dir = op.join(subject_dir, 'bem')
+    inner_skull = op.join(bem_dir, 'inner_skull.surf')
+    outer_skull = op.join(bem_dir, 'outer_skull.surf')
+    outer_skin = op.join(bem_dir, 'outer_skin.surf')
+    surfaces = [inner_skull, outer_skull, outer_skin]
+    ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+           FIFF.FIFFV_BEM_SURF_ID_SKULL,
+           FIFF.FIFFV_BEM_SURF_ID_HEAD]
+    logger.info('Creating the BEM geometry...')
+    if len(conductivity) == 1:
+        surfaces = surfaces[:1]
+        ids = ids[:1]
+    surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
+    logger.info('Complete.\n')
+    return surfaces
+
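+# Hedged usage sketch (assumes a FreeSurfer subject 'sample' whose
+# inner_skull.surf, outer_skull.surf and outer_skin.surf already exist
+# under $SUBJECTS_DIR/sample/bem):
+#
+#     surfs = make_bem_model('sample', ico=4,
+#                            conductivity=(0.3, 0.006, 0.3))
+#     bem = make_bem_solution(surfs)  # ready for forward computation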
+
+# ############################################################################
+# Compute EEG sphere model
+
+def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
+    """Get the model depended weighting factor for n"""
+    nlayer = len(m['layers'])
+    if nlayer in (0, 1):
+        return 1.
+
+    # Initialize the arrays
+    c1 = np.zeros(nlayer - 1)
+    c2 = np.zeros(nlayer - 1)
+    cr = np.zeros(nlayer - 1)
+    cr_mult = np.zeros(nlayer - 1)
+    for k in range(nlayer - 1):
+        c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']
+        c2[k] = c1[k] - 1.0
+        cr_mult[k] = m['layers'][k]['rel_rad']
+        cr[k] = cr_mult[k]
+        cr_mult[k] *= cr_mult[k]
+
+    coeffs = np.zeros(n_terms - 1)
+    for n in range(1, n_terms):
+        # Increment the radius coefficients
+        for k in range(nlayer - 1):
+            cr[k] *= cr_mult[k]
+
+        # Multiply the matrices
+        M = np.eye(2)
+        n1 = n + 1.0
+        for k in range(nlayer - 2, -1, -1):
+            M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],
+                        [n * c2[k] * cr[k], n1 + n * c1[k]]], M)
+        num = n * (2.0 * n + 1.0) ** (nlayer - 1)
+        coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])
+    return coeffs
+
+
+def _compose_linear_fitting_data(mu, u):
+    # y is the data to be fitted (nterms-1 x 1)
+    # M is the model matrix      (nterms-1 x nfit-1)
+    for k in range(u['nterms'] - 1):
+        k1 = k + 1
+        mu1n = np.power(mu[0], k1)
+        u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])
+        for p in range(u['nfit'] - 1):
+            u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)
+
+
+def _compute_linear_parameters(mu, u):
+    """Compute the best-fitting linear parameters"""
+    _compose_linear_fitting_data(mu, u)
+    uu, sing, vv = linalg.svd(u['M'], full_matrices=False)
+
+    # Compute the residuals
+    u['resi'] = u['y'].copy()
+
+    vec = np.empty(u['nfit'] - 1)
+    for p in range(u['nfit'] - 1):
+        vec[p] = np.dot(uu[:, p], u['y'])
+        for k in range(u['nterms'] - 1):
+            u['resi'][k] -= uu[k, p] * vec[p]
+        vec[p] = vec[p] / sing[p]
+
+    lambda_ = np.zeros(u['nfit'])
+    for p in range(u['nfit'] - 1):
+        sum_ = 0.
+        for q in range(u['nfit'] - 1):
+            sum_ += vv[q, p] * vec[q]
+        lambda_[p + 1] = sum_
+    lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
+    rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])
+    return rv, lambda_
+
+
+def _one_step(mu, u):
+    """Evaluate the residual sum of squares fit for one set of mu values"""
+    if np.abs(mu).max() > 1.0:
+        return 1.0
+
+    # Compose the data for the linear fitting, compute SVD, then residuals
+    _compose_linear_fitting_data(mu, u)
+    u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])
+    u['resi'][:] = u['y'][:]
+    for p in range(u['nfit'] - 1):
+        dot = np.dot(u['uu'][p], u['y'])
+        for k in range(u['nterms'] - 1):
+            u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot
+
+    # Return their sum of squares
+    return np.dot(u['resi'], u['resi'])
+
+
+def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
+    """Fit the Berg-Scherg equivalent spherical model dipole parameters"""
+    from scipy.optimize import fmin_cobyla
+    assert nfit >= 2
+    u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),
+             nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))
+
+    # (1) Calculate the coefficients of the true expansion
+    u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)
+
+    # (2) Calculate the weighting
+    f = (min([layer['rad'] for layer in m['layers']]) /
+         max([layer['rad'] for layer in m['layers']]))
+
+    # correct weighting
+    k = np.arange(1, nterms + 1)
+    u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /
+                     k) * np.power(f, (k - 1.0))
+    u['w'][-1] = 0
+
+    # Do the nonlinear minimization, constraining mu to the interval [-1, +1]
+    mu_0 = np.random.RandomState(0).rand(nfit) * f
+    fun = partial(_one_step, u=u)
+    max_ = 1. - 2e-4  # adjust for fmin_cobyla "catol", absent in older scipy
+    cons = [(lambda x: max_ - np.abs(x[ii])) for ii in range(nfit)]
+    mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)
+
+    # (6) Do the final step: calculation of the linear parameters
+    rv, lambda_ = _compute_linear_parameters(mu, u)
+    order = np.argsort(mu)[::-1]
+    mu, lambda_ = mu[order], lambda_[order]  # sort: largest mu first
+
+    m['mu'] = mu
+    # This division takes into account the actual conductivities
+    m['lambda'] = lambda_ / m['layers'][-1]['sigma']
+    m['nfit'] = nfit
+    return rv
+
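+# Background (hedged): this implements the Berg and Scherg (1994) equivalent-
+# source approximation, where the EEG kernel of the multilayer sphere is
+# fitted by `nfit` concentric dipole layers with eccentricities `mu` and
+# weights `lambda`; make_sphere_model below stores them as sphere['mu'] and
+# sphere['lambda'].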
+
+@verbose
+def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
+                      relative_radii=(0.90, 0.92, 0.97, 1.0),
+                      sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):
+    """Create a spherical model for forward solution calculation
+
+    Parameters
+    ----------
+    r0 : array-like | str
+        Head center to use (in head coordinates). If 'auto', the head
+        center will be calculated from the digitization points in info.
+    head_radius : float | str | None
+        If float, compute spherical shells for EEG using the given radius.
+        If 'auto', estimate an appropriate radius from the dig points in Info.
+        If None, exclude shells.
+    info : instance of mne.io.meas_info.Info | None
+        Measurement info. Only needed if ``r0`` or ``head_radius`` are
+        ``'auto'``.
+    relative_radii : array-like
+        Relative radii for the spherical shells.
+    sigmas : array-like
+        Sigma values for the spherical shells.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    sphere : instance of ConductorModel
+        The resulting spherical conductor model.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    make_bem_model
+    make_bem_solution
+    """
+    for name in ('r0', 'head_radius'):
+        param = locals()[name]
+        if isinstance(param, string_types):
+            if param != 'auto':
+                raise ValueError('%s, if str, must be "auto" not "%s"'
+                                 % (name, param))
+
+    if (isinstance(r0, string_types) and r0 == 'auto') or \
+       (isinstance(head_radius, string_types) and head_radius == 'auto'):
+        if info is None:
+            raise ValueError('Info must not be None for auto mode')
+        head_radius_fit, r0_fit = fit_sphere_to_headshape(info)[:2]
+        if isinstance(r0, string_types):
+            r0 = r0_fit / 1000.
+        if isinstance(head_radius, string_types):
+            head_radius = head_radius_fit / 1000.
+    sphere = ConductorModel(is_sphere=True, r0=np.array(r0),
+                            coord_frame=FIFF.FIFFV_COORD_HEAD)
+    sphere['layers'] = list()
+    if head_radius is not None:
+        # Eventually these could be configurable...
+        relative_radii = np.array(relative_radii, float)
+        sigmas = np.array(sigmas, float)
+        order = np.argsort(relative_radii)
+        relative_radii = relative_radii[order]
+        sigmas = sigmas[order]
+        for rel_rad, sig in zip(relative_radii, sigmas):
+            # sort layers by (relative) radius, and scale radii
+            layer = dict(rad=rel_rad, sigma=sig)
+            layer['rel_rad'] = layer['rad'] = rel_rad
+            sphere['layers'].append(layer)
+
+        # scale the radii
+        R = sphere['layers'][-1]['rad']
+        rR = sphere['layers'][-1]['rel_rad']
+        for layer in sphere['layers']:
+            layer['rad'] /= R
+            layer['rel_rad'] /= rR
+
+        #
+        # Setup the EEG sphere model calculations
+        #
+
+        # Scale the relative radii
+        for k in range(len(relative_radii)):
+            sphere['layers'][k]['rad'] = (head_radius *
+                                          sphere['layers'][k]['rel_rad'])
+        rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
+        logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
+        for k in range(3):
+            logger.info('mu%d = %g    lambda%d = %g'
+                        % (k + 1, sphere['mu'][k], k + 1,
+                           sphere['layers'][-1]['sigma'] *
+                           sphere['lambda'][k]))
+        logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
+                    % (1000 * head_radius,))
+    return ConductorModel(sphere)
+
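+# Hedged usage sketch: the defaults give a four-shell sphere for EEG, while
+# head_radius=None gives a single-sphere, MEG-only model; 'auto' fitting
+# additionally needs an Info with digitization points:
+#
+#     sphere = make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09)
+#     meg_sphere = make_sphere_model(head_radius=None)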
+
+# #############################################################################
+# Helpers
+
+@verbose
+def fit_sphere_to_headshape(info, dig_kinds=(FIFF.FIFFV_POINT_EXTRA,),
+                            verbose=None):
+    """Fit a sphere to the headshape points to determine head center
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    dig_kinds : tuple of int
+        Kind of digitization points to use in the fitting. These can be
+        any kind defined in io.constants.FIFF::
+
+            FIFFV_POINT_CARDINAL
+            FIFFV_POINT_HPI
+            FIFFV_POINT_EEG
+            FIFFV_POINT_EXTRA
+
+        Defaults to (FIFFV_POINT_EXTRA,).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    radius : float
+        Sphere radius in mm.
+    origin_head : ndarray, shape (3,)
+        Head center in head coordinates (mm).
+    origin_device : ndarray, shape (3,)
+        Head center in device coordinates (mm).
+    """
+    # get head digitization points of the specified kind
+    hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
+    if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):
+        raise RuntimeError('Digitization points not in head coordinates, '
+                           'contact mne-python developers')
+
+    # exclude some frontal points (nose etc.)
+    hsp = [p for p in hsp if not (p[2] < 0 and p[1] > 0)]
+
+    if len(hsp) == 0:
+        raise ValueError('No head digitization points of the specified '
+                         'kinds (%s) found.' % dig_kinds)
+
+    radius, origin_head = _fit_sphere(np.array(hsp), disp=False)
+    # compute origin in device coordinates
+    head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')
+    origin_device = apply_trans(head_to_dev, origin_head)
+    radius *= 1e3
+    origin_head *= 1e3
+    origin_device *= 1e3
+
+    logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm' % radius)
+    logger.info('Origin head coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(origin_head))
+    logger.info('Origin device coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(origin_device))
+
+    return radius, origin_head, origin_device
+
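+# Hedged usage sketch (assumes `raw` is a Raw instance whose info contains
+# digitized head shape points):
+#
+#     radius, origin_head, origin_device = fit_sphere_to_headshape(raw.info)
+#     # all three return values are in mm, per the docstring above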
+
+def _fit_sphere(points, disp='auto'):
+    """Aux function to fit a sphere to an arbitrary set of points"""
+    from scipy.optimize import fmin_cobyla
+    if isinstance(disp, string_types) and disp == 'auto':
+        disp = True if logger.level <= 20 else False
+    # initial guess for center and radius
+    radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.
+    radius_init = radii.mean()
+    center_init = np.median(points, axis=0)
+
+    # optimization
+    x0 = np.concatenate([center_init, [radius_init]])
+
+    def cost_fun(center_rad):
+        d = points - center_rad[:3]
+        d = (np.sqrt(np.sum(d * d, axis=1)) - center_rad[3])
+        return np.sum(d * d)
+
+    def constraint(center_rad):
+        return center_rad[3]  # radius must be >= 0
+
+    x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,
+                        rhoend=radius_init * 1e-6, disp=disp)
+
+    origin = x_opt[:3]
+    radius = x_opt[3]
+    return radius, origin
+
+
+# ############################################################################
+# Create BEM surfaces
+
+@verbose
+def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
+                       volume='T1', atlas=False, gcaatlas=False, preflood=None,
+                       verbose=None):
+    """
+    Create BEM surfaces using the watershed algorithm included with FreeSurfer
+
+    Parameters
+    ----------
+    subject : str
+        Subject name (required)
+    subjects_dir : str
+        Directory containing subjects data. If None use
+        the Freesurfer SUBJECTS_DIR environment variable.
+    overwrite : bool
+        Write over existing files
+    volume : str
+        Defaults to T1
+    atlas : bool
+        Specify the --atlas option for mri_watershed
+    gcaatlas : bool
+        Use the subcortical atlas
+    preflood : int
+        Change the preflood height
+    verbose : bool, str or None
+        If not None, override default verbose level
+
+    .. versionadded:: 0.10
+    """
+    from .surface import read_surface
+    env = os.environ.copy()
+
+    if not os.environ.get('FREESURFER_HOME'):
+        raise RuntimeError('FREESURFER_HOME environment variable not set')
+
+    env['SUBJECT'] = subject
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    subject_dir = op.join(subjects_dir, subject)
+    mri_dir = op.join(subject_dir, 'mri')
+    T1_dir = op.join(mri_dir, volume)
+    T1_mgz = op.join(mri_dir, volume + '.mgz')
+    bem_dir = op.join(subject_dir, 'bem')
+    ws_dir = op.join(subject_dir, 'bem', 'watershed')
+
+    if not op.isdir(subject_dir):
+        raise RuntimeError('Could not find the MRI data directory "%s"'
+                           % subject_dir)
+    if not op.isdir(bem_dir):
+        os.makedirs(bem_dir)
+    if not op.isdir(T1_dir) and not op.isfile(T1_mgz):
+        raise RuntimeError('Could not find the MRI data')
+    if op.isdir(ws_dir):
+        if not overwrite:
+            raise RuntimeError('%s already exists. Use the --overwrite option'
+                               ' to recreate it.' % ws_dir)
+        else:
+            shutil.rmtree(ws_dir)
+    # put together the command
+    cmd = ['mri_watershed']
+    if preflood:
+        cmd += ["-h",  "%s" % int(preflood)]
+
+    if gcaatlas:
+        cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +
+                '/average/RB_all_withskull_2007-08-08.gca',
+                subject_dir + '/mri/transforms/talairach_with_skull.lta']
+    elif atlas:
+        cmd += ['-atlas']
+    if op.exists(T1_mgz):
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,
+                op.join(ws_dir, 'ws')]
+    else:
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,
+                op.join(ws_dir, 'ws')]
+    # report and run
+    logger.info('\nRunning mri_watershed for BEM segmentation with the '
+                'following parameters:\n\n'
+                'SUBJECTS_DIR = %s\n'
+                'SUBJECT = %s\n'
+                'Results dir = %s\n' % (subjects_dir, subject, ws_dir))
+    os.makedirs(op.join(ws_dir, 'ws'))
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    #
+    os.chdir(ws_dir)
+    if op.isfile(T1_mgz):
+        # XXX : do this with python code
+        surfaces = [subject + '_brain_surface', subject +
+                    '_inner_skull_surface', subject + '_outer_skull_surface',
+                    subject + '_outer_skin_surface']
+        for s in surfaces:
+            cmd = ['mne_convert_surface', '--surf', s, '--mghmri', T1_mgz,
+                   '--surfout', s, "--replacegeom"]
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    os.chdir(bem_dir)
+    if op.isfile(subject + '-head.fif'):
+        os.remove(subject + '-head.fif')
+
+    # run the equivalent of mne_surf2bem
+    points, tris = read_surface(op.join(ws_dir,
+                                        subject + '_outer_skin_surface'))
+    points *= 1e-3
+    surf = dict(coord_frame=5, id=4, nn=None, np=len(points),
+                ntri=len(tris), rr=points, sigma=1, tris=tris)
+    write_bem_surfaces(subject + '-head.fif', surf)
+
+    logger.info('Created %s/%s-head.fif\n\nComplete.' % (bem_dir, subject))
+
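+# Hypothetical call (assumes a finished FreeSurfer reconstruction for a
+# subject named 'sample' and FREESURFER_HOME/SUBJECTS_DIR properly set):
+#
+#     make_watershed_bem('sample', overwrite=True)
+#     # -> runs mri_watershed, writes bem/watershed/* and then converts
+#     #    the outer skin surface to bem/sample-head.fif
+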
+
+# ############################################################################
+# Read
+
+@verbose
+def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
+    """Read the BEM surfaces from a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the file containing the surfaces.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
+    s_id : int | None
+        If int, only read and return the surface with the given s_id.
+        An error will be raised if it doesn't exist. If None, all
+        surfaces are read and returned.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surf : list | dict
+        A list of dictionaries that each contain a surface. If s_id
+        is not None, only the requested surface will be returned.
+
+    See Also
+    --------
+    write_bem_surfaces, write_bem_solution, make_bem_model
+    """
+    from .surface import _complete_surface_info
+    # Default coordinate frame
+    coord_frame = FIFF.FIFFV_COORD_MRI
+    # Open the file, create directory
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        # Find BEM
+        bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
+        if bem is None or len(bem) == 0:
+            raise ValueError('BEM data not found')
+
+        bem = bem[0]
+        # Locate all surfaces
+        bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
+        if bemsurf is None:
+            raise ValueError('BEM surface data not found')
+
+        logger.info('    %d BEM surfaces found' % len(bemsurf))
+        # Coordinate frame possibly at the top level
+        tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
+        if tag is not None:
+            coord_frame = tag.data
+        # Read all surfaces
+        if s_id is not None:
+            surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
+                    for bsurf in bemsurf]
+            surf = [s for s in surf if s is not None]
+            if not len(surf) == 1:
+                raise ValueError('surface with id %d not found' % s_id)
+        else:
+            surf = list()
+            for bsurf in bemsurf:
+                logger.info('    Reading a surface...')
+                this = _read_bem_surface(fid, bsurf, coord_frame)
+                surf.append(this)
+                logger.info('[done]')
+            logger.info('    %d BEM surfaces read' % len(surf))
+        if patch_stats:
+            for this in surf:
+                _complete_surface_info(this)
+    return surf[0] if s_id is not None else surf
+
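+# Usage sketch ('sample-5120-bem.fif' is a placeholder file name):
+#
+#     surfs = read_bem_surfaces('sample-5120-bem.fif')
+#     print([(s['id'], s['np'], s['ntri']) for s in surfs])
+#     inner = read_bem_surfaces('sample-5120-bem.fif',
+#                               s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
+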
+
+def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
+    """Read one bem surface
+    """
+    # fid should be open as a context manager here
+    res = dict()
+    # Read all the interesting stuff
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
+
+    if tag is None:
+        res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
+    else:
+        res['id'] = int(tag.data)
+
+    if s_id is not None and res['id'] != s_id:
+        return None
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
+    res['sigma'] = 1.0 if tag is None else float(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
+    if tag is None:
+        raise ValueError('Number of vertices not found')
+
+    res['np'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
+    if tag is None:
+        raise ValueError('Number of triangles not found')
+    res['ntri'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
+        if tag is None:
+            res['coord_frame'] = def_coord_frame
+        else:
+            res['coord_frame'] = tag.data
+    else:
+        res['coord_frame'] = tag.data
+
+    # Vertices, normals, and triangles
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
+    if tag is None:
+        raise ValueError('Vertex data not found')
+
+    res['rr'] = tag.data.astype(np.float)  # XXX : double because of mayavi bug
+    if res['rr'].shape[0] != res['np']:
+        raise ValueError('Vertex information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
+    if tag is None:
+        res['nn'] = list()
+    else:
+        res['nn'] = tag.data
+        if res['nn'].shape[0] != res['np']:
+            raise ValueError('Vertex normal information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
+    if tag is None:
+        raise ValueError('Triangulation not found')
+
+    res['tris'] = tag.data - 1  # index start at 0 in Python
+    if res['tris'].shape[0] != res['ntri']:
+        raise ValueError('Triangulation information is incorrect')
+
+    return res
+
+
+@verbose
+def read_bem_solution(fname, verbose=None):
+    """Read the BEM solution from a file
+
+    Parameters
+    ----------
+    fname : string
+        The file containing the BEM solution.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    bem : instance of ConductorModel
+        The BEM solution.
+
+    See Also
+    --------
+    write_bem_solution, read_bem_surfaces, write_bem_surfaces,
+    make_bem_solution
+    """
+    # mirrors fwd_bem_load_surfaces from fwd_bem_model.c
+    logger.info('Loading surfaces...')
+    bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
+    if len(bem_surfs) == 3:
+        logger.info('Three-layer model surfaces loaded.')
+        needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                           FIFF.FIFFV_BEM_SURF_ID_SKULL,
+                           FIFF.FIFFV_BEM_SURF_ID_BRAIN])
+        if not all(x['id'] in needed for x in bem_surfs):
+            raise RuntimeError('Could not find necessary BEM surfaces')
+        # reorder surfaces as necessary (shouldn't need to?)
+        reorder = [None] * 3
+        for x in bem_surfs:
+            reorder[np.where(x['id'] == needed)[0][0]] = x
+        bem_surfs = reorder
+    elif len(bem_surfs) == 1:
+        if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
+            raise RuntimeError('BEM surfaces not found')
+        logger.info('Homogeneous model surface loaded.')
+
+    # convert from surfaces to solution
+    bem = ConductorModel(is_sphere=False, surfs=bem_surfs)
+    logger.info('\nLoading the solution matrix...\n')
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        # Find the BEM data
+        nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
+        if len(nodes) == 0:
+            raise RuntimeError('No BEM data in %s' % fname)
+        bem_node = nodes[0]
+
+        # Approximation method
+        tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_APPROX)
+        if tag is None:
+            raise RuntimeError('No BEM solution found in %s' % fname)
+        method = tag.data[0]
+        if method not in (FIFF.FIFFV_BEM_APPROX_CONST,
+                          FIFF.FIFFV_BEM_APPROX_LINEAR):
+            raise RuntimeError('Cannot handle BEM approximation method : %d'
+                               % method)
+
+        tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
+        dims = tag.data.shape
+        if len(dims) != 2:
+            raise RuntimeError('Expected a two-dimensional solution matrix '
+                               'instead of a %d-dimensional one' % len(dims))
+
+        dim = 0
+        for surf in bem['surfs']:
+            if method == FIFF.FIFFV_BEM_APPROX_LINEAR:
+                dim += surf['np']
+            else:  # method == FIFF.FIFFV_BEM_APPROX_CONST
+                dim += surf['ntri']
+
+        if dims[0] != dim or dims[1] != dim:
+            raise RuntimeError('Expected a %d x %d solution matrix instead of '
+                               'a %d x %d one' % (dim, dim, dims[0], dims[1]))
+        sol = tag.data
+        nsol = dims[0]
+
+    bem['solution'] = sol
+    bem['nsol'] = nsol
+    bem['bem_method'] = method
+
+    # Gamma factors and multipliers
+    _add_gamma_multipliers(bem)
+    kind = {
+        FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',
+        FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear collocation',
+    }[bem['bem_method']]
+    logger.info('Loaded %s BEM solution from %s', kind, fname)
+    return bem
+
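+# Usage sketch (the file name is a placeholder for a solution file
+# produced by make_bem_solution or the MNE command-line tools):
+#
+#     bem = read_bem_solution('sample-5120-bem-sol.fif')
+#     print(bem['nsol'], bem['bem_method'])
+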
+
+def _add_gamma_multipliers(bem):
+    """Helper to add gamma and multipliers in-place"""
+    bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
+    # Dirty trick for the zero conductivity outside
+    sigma = np.r_[0.0, bem['sigma']]
+    bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
+    bem['field_mult'] = sigma[1:] - sigma[:-1]
+    # make sure subsequent "zip"s work correctly
+    assert len(bem['surfs']) == len(bem['field_mult'])
+    bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
+                    (sigma[1:] + sigma[:-1])[:, np.newaxis])
+
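+# Worked example: for a three-layer model with conductivities
+# (0.3, 0.006, 0.3) S/m the padded vector is sigma = [0, 0.3, 0.006, 0.3],
+# so that
+#     source_mult = 2.0 / (sigma[1:] + sigma[:-1])  # ~[6.67, 6.54, 6.54]
+#     field_mult = sigma[1:] - sigma[:-1]           # [0.3, -0.294, 0.294]
+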
+
+_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+              'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
+              'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
+
+
+def _bem_find_surface(bem, id_):
+    """Find surface from already-loaded BEM"""
+    if isinstance(id_, string_types):
+        name = id_
+        id_ = _surf_dict[id_]
+    else:
+        name = _bem_explain_surface(id_)
+    idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
+    if len(idx) != 1:
+        raise RuntimeError('BEM model does not have the %s triangulation'
+                           % name.replace('_', ' '))
+    return bem['surfs'][idx[0]]
+
+
+def _bem_explain_surface(id_):
+    """Return a string corresponding to the given surface ID"""
+    _rev_dict = dict((val, key) for key, val in _surf_dict.items())
+    return _rev_dict[id_]
+
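+# Example: because 'inner_skull' maps to FIFF.FIFFV_BEM_SURF_ID_BRAIN in
+# _surf_dict, the following two calls return the same surface dict:
+#
+#     _bem_find_surface(bem, 'inner_skull')
+#     _bem_find_surface(bem, FIFF.FIFFV_BEM_SURF_ID_BRAIN)
+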
+
+# ############################################################################
+# Write
+
+@deprecated('write_bem_surface is deprecated and will be removed in 0.11, '
+            'use write_bem_surfaces instead')
+def write_bem_surface(fname, surf):
+    """Write one bem surface
+
+    Parameters
+    ----------
+    fname : string
+        File to write
+    surf : dict
+        A surface structured as obtained with read_bem_surfaces
+
+    See Also
+    --------
+    read_bem_surfaces
+    """
+    write_bem_surfaces(fname, surf)
+
+
+def write_bem_surfaces(fname, surfs):
+    """Write BEM surfaces to a fiff file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to write.
+    surfs : dict | list of dict
+        The surfaces, or a single surface.
+    """
+    if isinstance(surfs, dict):
+        surfs = [surfs]
+    with start_file(fname) as fid:
+        start_block(fid, FIFF.FIFFB_BEM)
+        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])
+        _write_bem_surfaces_block(fid, surfs)
+        end_block(fid, FIFF.FIFFB_BEM)
+        end_file(fid)
+
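+# Usage sketch (placeholder file names): a read/write round-trip.
+#
+#     surfs = read_bem_surfaces('sample-5120-bem.fif')
+#     write_bem_surfaces('copy-bem.fif', surfs)  # a single dict works too
+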
+
+def _write_bem_surfaces_block(fid, surfs):
+    """Helper to actually write bem surfaces"""
+    for surf in surfs:
+        start_block(fid, FIFF.FIFFB_BEM_SURF)
+        write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
+        write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
+        # index start at 0 in Python
+        write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,
+                         surf['tris'] + 1)
+        if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
+            write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])
+        end_block(fid, FIFF.FIFFB_BEM_SURF)
+
+
+def write_bem_solution(fname, bem):
+    """Write a BEM model with solution
+
+    Parameters
+    ----------
+    fname : str
+        The filename to use.
+    bem : instance of ConductorModel
+        The BEM model with solution to save.
+
+    See Also
+    --------
+    read_bem_solution
+    """
+    with start_file(fname) as fid:
+        start_block(fid, FIFF.FIFFB_BEM)
+        # Coordinate frame (mainly for backward compatibility)
+        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,
+                  bem['surfs'][0]['coord_frame'])
+        # Surfaces
+        _write_bem_surfaces_block(fid, bem['surfs'])
+        # The potential solution
+        if 'solution' in bem:
+            if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
+                raise RuntimeError('Only linear collocation supported')
+            write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
+            write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
+                               bem['solution'])
+        end_block(fid, FIFF.FIFFB_BEM)
+        end_file(fid)
+
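+# Usage sketch (placeholder file names; note that only linear
+# collocation solutions can be written, as checked above):
+#
+#     bem = read_bem_solution('sample-5120-bem-sol.fif')
+#     write_bem_solution('copy-bem-sol.fif', bem)
+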
+
+# #############################################################################
+# Create 3-Layers BEM model from Flash MRI images
+
+def _prepare_env(subject, subjects_dir):
+    """Helper to prepare an env object for subprocess calls"""
+    env = os.environ.copy()
+    if not isinstance(subject, string_types):
+        raise TypeError('The subject argument must be set')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECT'] = subject
+    env['SUBJECTS_DIR'] = subjects_dir
+    mri_dir = op.join(subjects_dir, subject, 'mri')
+    bem_dir = op.join(subjects_dir, subject, 'bem')
+    return env, mri_dir, bem_dir
+
+
+@verbose
+def convert_flash_mris(subject, flash30=True, convert=True, unwarp=False,
+                       subjects_dir=None, verbose=None):
+    """Convert DICOM files for use with make_flash_bem
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    flash30 : bool
+        Use 30-degree flip angle data.
+    convert : bool
+        Convert the Flash MRI images to mgz format. If False, assume
+        the conversion has already been done.
+    unwarp : bool
+        Run grad_unwarp with -unwarp option on each of the converted
+        data sets. It requires FreeSurfer's MATLAB toolbox to be properly
+        installed.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Before running this function do the following
+    (unless ``convert=False`` is specified):
+
+        1. Copy all of your FLASH images in a single directory <source> and
+           create a directory <dest> to hold the output of mne_organize_dicom
+        2. cd to <dest> and run
+           $ mne_organize_dicom <source>
+           to create an appropriate directory structure
+        3. Create symbolic links to make flash05 and flash30 point to the
+           appropriate series:
+           $ ln -s <FLASH 5 series dir> flash05
+           $ ln -s <FLASH 30 series dir> flash30
+        4. cd to the directory where flash05 and flash30 links are
+        5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately
+        6. Run this function
+
+    This function assumes that the Freesurfer segmentation of the subject
+    has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+    should be, as usual, in the subject's mri directory.
+    """
+    env, mri_dir = _prepare_env(subject, subjects_dir)[:2]
+    # Step 1a : Data conversion to mgz format
+    if not op.exists(op.join(mri_dir, 'flash', 'parameter_maps')):
+        os.makedirs(op.join(mri_dir, 'flash', 'parameter_maps'))
+    echos_done = 0
+    if convert:
+        logger.info("\n---- Converting Flash images ----")
+        echos = ['001', '002', '003', '004', '005', '006', '007', '008']
+        if flash30:
+            flashes = ['05', '30']
+        else:
+            flashes = ['05']
+        #
+        missing = False
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    missing = True
+        if missing:
+            echos = ['002', '003', '004', '005', '006', '007', '008', '009']
+            for flash in flashes:
+                for echo in echos:
+                    if not op.isdir(op.join('flash' + flash, echo)):
+                        raise RuntimeError("Directory %s is missing."
+                                           % op.join('flash' + flash, echo))
+        #
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    raise RuntimeError("Directory %s is missing."
+                                       % op.join('flash' + flash, echo))
+                sample_file = glob.glob(op.join('flash' + flash, echo, '*'))[0]
+                dest_file = op.join(mri_dir, 'flash',
+                                    'mef' + flash + '_' + echo + '.mgz')
+                # do not redo if already present
+                if op.isfile(dest_file):
+                    logger.info("The file %s is already there")
+                else:
+                    cmd = ['mri_convert', sample_file, dest_file]
+                    run_subprocess(cmd, env=env, stdout=sys.stdout,
+                                   stderr=sys.stderr)
+                    echos_done += 1
+    # Step 1b : Run grad_unwarp on converted files
+    os.chdir(op.join(mri_dir, "flash"))
+    files = glob.glob("mef*.mgz")
+    if unwarp:
+        logger.info("\n---- Unwarp mgz data sets ----")
+        for infile in files:
+            outfile = infile.replace(".mgz", "u.mgz")
+            cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp',
+                   'true']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Clear parameter maps if some of the data were reconverted
+    if echos_done > 0 and op.exists("parameter_maps"):
+        shutil.rmtree("parameter_maps")
+        logger.info("\nParameter maps directory cleared")
+    if not op.exists("parameter_maps"):
+        os.makedirs("parameter_maps")
+    # Step 2 : Create the parameter maps
+    if flash30:
+        logger.info("\n---- Creating the parameter maps ----")
+        if unwarp:
+            files = glob.glob("mef05*u.mgz")
+        if len(os.listdir('parameter_maps')) == 0:
+            cmd = ['mri_ms_fitparms'] + files + ['parameter_maps']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+        else:
+            logger.info("Parameter maps were already computed")
+        # Step 3 : Synthesize the flash 5 images
+        logger.info("\n---- Synthesizing flash 5 images ----")
+        os.chdir('parameter_maps')
+        if not op.exists('flash5.mgz'):
+            cmd = ['mri_synthesize', '20 5 5', 'T1.mgz', 'PD.mgz',
+                   'flash5.mgz']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+            os.remove('flash5_reg.mgz')
+        else:
+            logger.info("Synthesized flash 5 volume is already there")
+    else:
+        logger.info("\n---- Averaging flash5 echoes ----")
+        os.chdir('parameter_maps')
+        if unwarp:
+            files = glob.glob("mef05*u.mgz")
+        else:
+            files = glob.glob("mef05*.mgz")
+        cmd = ['mri_average', '-noconform'] + files + ['flash5.mgz']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+        if op.exists('flash5_reg.mgz'):
+            os.remove('flash5_reg.mgz')
+
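+# Hypothetical call (assumes the DICOM series have been organized and
+# linked as described in the Notes above):
+#
+#     convert_flash_mris('sample', flash30=True, convert=True)
+#     # -> mgz echoes in mri/flash, parameter maps and flash5.mgz in
+#     #    mri/flash/parameter_maps
+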
+
+@verbose
+def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None,
+                   verbose=None):
+    """Create 3-Layer BEM model from prepared flash MRI images
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    overwrite : bool
+        Write over existing .surf files in bem folder.
+    show : bool
+        Show surfaces to visually inspect all three BEM surfaces (recommended).
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    This program assumes that FreeSurfer and MNE are installed and
+    sourced properly.
+
+    This function extracts the BEM surfaces (outer skull, inner skull, and
+    outer skin) from multiecho FLASH MRI data with flip angles of 5 and 30
+    degrees, in mgz format.
+
+    This function assumes that the flash images are available in the
+    folder mri/flash within the FreeSurfer subject reconstruction.
+
+    See Also
+    --------
+    convert_flash_mris
+    """
+    from .viz.misc import plot_bem
+    env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir)
+
+    logger.info('\nProcessing the flash MRI data to produce BEM meshes with '
+                'the following parameters:\n'
+                'SUBJECTS_DIR = %s\n'
+                'SUBJECT = %s\n'
+                'Result dir = %s\n' % (subjects_dir, subject,
+                                       op.join(bem_dir, 'flash')))
+    # Step 4 : Register with MPRAGE
+    logger.info("\n---- Registering flash 5 with MPRAGE ----")
+    if not op.exists('flash5_reg.mgz'):
+        if op.exists(op.join(mri_dir, 'T1.mgz')):
+            ref_volume = op.join(mri_dir, 'T1.mgz')
+        else:
+            ref_volume = op.join(mri_dir, 'T1')
+        cmd = ['fsl_rigid_register', '-r', ref_volume, '-i', 'flash5.mgz',
+               '-o', 'flash5_reg.mgz']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("Registered flash 5 image is already there")
+    # Step 5a : Convert flash5 into COR
+    logger.info("\n---- Converting flash5 volume into COR format ----")
+    shutil.rmtree(op.join(mri_dir, 'flash5'), ignore_errors=True)
+    os.makedirs(op.join(mri_dir, 'flash5'))
+    cmd = ['mri_convert', 'flash5_reg.mgz', op.join(mri_dir, 'flash5')]
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Step 5b and c : Convert the mgz volumes into COR
+    os.chdir(mri_dir)
+    convert_T1 = False
+    if not op.isdir('T1') or len(glob.glob(op.join('T1', 'COR*'))) == 0:
+        convert_T1 = True
+    convert_brain = False
+    if not op.isdir('brain') or len(glob.glob(op.join('brain', 'COR*'))) == 0:
+        convert_brain = True
+    logger.info("\n---- Converting T1 volume into COR format ----")
+    if convert_T1:
+        if not op.isfile('T1.mgz'):
+            raise RuntimeError("Both T1 mgz and T1 COR volumes missing.")
+        os.makedirs('T1')
+        cmd = ['mri_convert', 'T1.mgz', 'T1']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("T1 volume is already in COR format")
+    logger.info("\n---- Converting brain volume into COR format ----")
+    if convert_brain:
+        if not op.isfile('brain.mgz'):
+            raise RuntimeError("Both brain mgz and brain COR volumes missing.")
+        os.makedirs('brain')
+        cmd = ['mri_convert', 'brain.mgz', 'brain']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("Brain volume is already in COR format")
+    # Finally ready to go
+    logger.info("\n---- Creating the BEM surfaces ----")
+    cmd = ['mri_make_bem_surfaces', subject]
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    logger.info("\n---- Converting the tri files into surf files ----")
+    os.chdir(bem_dir)
+    if not op.exists('flash'):
+        os.makedirs('flash')
+    os.chdir('flash')
+    surfs = ['inner_skull', 'outer_skull', 'outer_skin']
+    for surf in surfs:
+        shutil.move(op.join(bem_dir, surf + '.tri'), surf + '.tri')
+        cmd = ['mne_convert_surface', '--tri', surf + '.tri', '--surfout',
+               surf + '.surf', '--swap', '--mghmri',
+               op.join(subjects_dir, subject, 'mri', 'flash', 'parameter_maps',
+                       'flash5_reg.mgz')]
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Cleanup section
+    logger.info("\n---- Cleaning up ----")
+    os.chdir(bem_dir)
+    os.remove('inner_skull_tmp.tri')
+    os.chdir(mri_dir)
+    if convert_T1:
+        shutil.rmtree('T1')
+        logger.info("Deleted the T1 COR volume")
+    if convert_brain:
+        shutil.rmtree('brain')
+        logger.info("Deleted the brain COR volume")
+    shutil.rmtree('flash5')
+    logger.info("Deleted the flash5 COR volume")
+    # Create symbolic links to the .surf files in the bem folder
+    logger.info("\n---- Creating symbolic links ----")
+    os.chdir(bem_dir)
+    for surf in surfs:
+        surf = surf + '.surf'
+        if not overwrite and op.exists(surf):
+            skip_symlink = True
+        else:
+            if op.exists(surf):
+                os.remove(surf)
+            os.symlink(op.join('flash', surf), op.join(surf))
+            skip_symlink = False
+    if skip_symlink:
+        logger.info("Unable to create all symbolic links to .surf files "
+                    "in bem folder. Use --overwrite option to recreate them.")
+        dest = op.join(bem_dir, 'flash')
+    else:
+        logger.info("Symbolic links to .surf files created in bem folder")
+        dest = bem_dir
+    logger.info("\nThank you for waiting.\nThe BEM triangulations for this "
+                "subject are now available at:\n%s.\nWe hope the BEM meshes "
+                "created will facilitate your MEG and EEG data analyses."
+                % dest)
+    # Show computed BEM surfaces
+    if show:
+        plot_bem(subject=subject, subjects_dir=subjects_dir,
+                 orientation='coronal', slices=None, show=True)
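+
+
+# Hypothetical call (assumes convert_flash_mris has been run for the
+# subject and that FreeSurfer and MNE are sourced):
+#
+#     make_flash_bem('sample', overwrite=True, show=False)
+#     # -> inner_skull.surf, outer_skull.surf and outer_skin.surf in the
+#     #    subject's bem folder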
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/__init__.py
new file mode 100644
index 0000000..025538f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/__init__.py
@@ -0,0 +1,11 @@
+"""
+Module dedicated to the manipulation of channels,
+setting of sensors locations used for processing and plotting.
+"""
+
+from .layout import (Layout, make_eeg_layout, make_grid_layout, read_layout,
+                     find_layout, generate_2d_layout)
+from .montage import read_montage, read_dig_montage, Montage, DigMontage
+
+from .channels import (equalize_channels, rename_channels, fix_mag_coil_types,
+                       read_ch_connectivity, _get_ch_type)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/channels.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/channels.py
new file mode 100644
index 0000000..514930d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/channels.py
@@ -0,0 +1,783 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import warnings
+
+import numpy as np
+from scipy import sparse
+
+from ..externals.six import string_types
+
+from ..utils import verbose, logger
+from ..io.pick import (channel_type, pick_info, pick_types,
+                       _check_excludes_includes)
+from ..io.constants import FIFF
+
+
+def _get_meg_system(info):
+    """Educated guess for the helmet type based on channels"""
+    system = '306m'
+    for ch in info['chs']:
+        if ch['kind'] == FIFF.FIFFV_MEG_CH:
+            coil_type = ch['coil_type'] & 0xFFFF
+            if coil_type == FIFF.FIFFV_COIL_NM_122:
+                system = '122m'
+                break
+            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
+                system = '306m'
+                break
+            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
+                  coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
+                nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
+                               for c in info['chs']])
+                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
+                system = 'CTF_275'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
+                system = 'KIT'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
+                system = 'BabySQUID'
+                break
+    return system
+
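+# Example ('raw' stands for any loaded recording): on an Elekta/Neuromag
+# acquisition the 30xx coil types make the guess resolve to '306m'.
+#
+#     system = _get_meg_system(raw.info)
+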
+
+def _contains_ch_type(info, ch_type):
+    """Check whether a certain channel type is in an info object
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement information.
+    ch_type : str
+        The channel type to be checked for.
+
+    Returns
+    -------
+    has_ch_type : bool
+        Whether the channel type is present or not.
+    """
+    if not isinstance(ch_type, string_types):
+        raise ValueError('`ch_type` is of class {actual_class}. It must be '
+                         '`str`'.format(actual_class=type(ch_type)))
+
+    valid_channel_types = ['grad', 'mag', 'planar1', 'planar2', 'eeg', 'stim',
+                           'eog', 'emg', 'ecg', 'ref_meg', 'resp', 'exci',
+                           'ias', 'syst', 'seeg', 'misc']
+
+    if ch_type not in valid_channel_types:
+        raise ValueError('ch_type must be one of %s, not "%s"'
+                         % (valid_channel_types, ch_type))
+    if info is None:
+        raise ValueError('Cannot check for channels of type "%s" because info '
+                         'is None' % (ch_type,))
+    return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
+
+
+def _get_ch_type(inst, ch_type):
+    """Helper to choose a single channel type (usually for plotting)
+
+    Usually used in plotting to plot a single datatype, e.g. look for mags,
+    then grads, then ... to plot.
+    """
+    if ch_type is None:
+        for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:
+            if type_ in inst:
+                ch_type = type_
+                break
+        else:
+            raise RuntimeError('No plottable channel types found')
+    return ch_type
+
+
+@verbose
+def equalize_channels(candidates, verbose=None):
+    """Equalize channel picks for a collection of MNE-Python objects
+
+    Parameters
+    ----------
+    candidates : list
+        A list of Raw, Epochs, Evoked, or AverageTFR instances.
+    verbose : None | bool
+        Whether to be verbose or not.
+
+    Notes
+    -----
+    This function operates in place.
+    """
+    from ..io.base import _BaseRaw
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+    from ..time_frequency import AverageTFR
+
+    if not all(isinstance(c, (_BaseRaw, _BaseEpochs, Evoked, AverageTFR))
+               for c in candidates):
+        valid = ['Raw', 'Epochs', 'Evoked', 'AverageTFR']
+        raise ValueError('candidates must be ' + ' or '.join(valid))
+
+    chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
+    chan_template = candidates[chan_max_idx].ch_names
+    logger.info('Identifying common channels ...')
+    channels = [set(c.ch_names) for c in candidates]
+    common_channels = set(chan_template).intersection(*channels)
+    dropped = list()
+    for c in candidates:
+        drop_them = list(set(c.ch_names) - common_channels)
+        if drop_them:
+            c.drop_channels(drop_them)
+            dropped.extend(drop_them)
+    if dropped:
+        dropped = list(set(dropped))
+        logger.info('Dropped the following channels:\n%s' % dropped)
+    else:
+        logger.info('All channels are common; nothing to do.')
+
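+# Usage sketch ('epochs' and 'evoked' stand for any preloaded instances
+# with overlapping channel sets):
+#
+#     equalize_channels([epochs, evoked])
+#     # afterwards both objects contain only their common channels
+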
+
+class ContainsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def __contains__(self, ch_type):
+        """Check channel type membership"""
+        if ch_type == 'meg':
+            has_ch_type = (_contains_ch_type(self.info, 'mag') or
+                           _contains_ch_type(self.info, 'grad'))
+        else:
+            has_ch_type = _contains_ch_type(self.info, ch_type)
+        return has_ch_type
+
+
+class SetChannelsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def _get_channel_positions(self, picks=None):
+        """Gets channel locations from info
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            Indices of channels to include. If None (default), all meg and eeg
+            channels that are available are returned (bad channels excluded).
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True)
+        chs = self.info['chs']
+        pos = np.array([chs[k]['loc'][:3] for k in picks])
+        n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
+        if n_zero > 1:  # XXX some systems have origin (0, 0, 0)
+            raise ValueError('Could not extract channel positions for '
+                             '{} channels'.format(n_zero))
+        return pos
+
+    def _set_channel_positions(self, pos, names):
+        """Update channel locations in info
+
+        Parameters
+        ----------
+        pos : array-like | np.ndarray, shape (n_points, 3)
+            The channel positions to be set.
+        names : list of str
+            The names of the channels to be set.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        if len(pos) != len(names):
+            raise ValueError('Number of channel positions not equal to '
+                             'the number of names given.')
+        pos = np.asarray(pos, dtype=np.float)
+        if pos.shape[-1] != 3 or pos.ndim != 2:
+            msg = ('Channel positions must have the shape (n_points, 3) '
+                   'not %s.' % (pos.shape,))
+            raise ValueError(msg)
+        for name, p in zip(names, pos):
+            if name in self.ch_names:
+                idx = self.ch_names.index(name)
+                self.info['chs'][idx]['loc'][:3] = p
+            else:
+                msg = ('%s was not found in the info. Cannot be updated.'
+                       % name)
+                raise ValueError(msg)
+
+    def set_channel_types(self, mapping):
+        """Define the sensor type of channels.
+
+        Note: The following sensor types are accepted:
+            ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst
+
+        Parameters
+        ----------
+        mapping : dict
+            A dictionary mapping channel names to sensor types, e.g.,
+            {'EEG061': 'eog'}.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
+                      'eeg': FIFF.FIFFV_EEG_CH,
+                      'emg': FIFF.FIFFV_EMG_CH,
+                      'eog': FIFF.FIFFV_EOG_CH,
+                      'exci': FIFF.FIFFV_EXCI_CH,
+                      'ias': FIFF.FIFFV_IAS_CH,
+                      'misc': FIFF.FIFFV_MISC_CH,
+                      'resp': FIFF.FIFFV_RESP_CH,
+                      'seeg': FIFF.FIFFV_SEEG_CH,
+                      'stim': FIFF.FIFFV_STIM_CH,
+                      'syst': FIFF.FIFFV_SYST_CH}
+
+        human2unit = {'ecg': FIFF.FIFF_UNIT_V,
+                      'eeg': FIFF.FIFF_UNIT_V,
+                      'emg': FIFF.FIFF_UNIT_V,
+                      'eog': FIFF.FIFF_UNIT_V,
+                      'exci': FIFF.FIFF_UNIT_NONE,
+                      'ias': FIFF.FIFF_UNIT_NONE,
+                      'misc': FIFF.FIFF_UNIT_V,
+                      'resp': FIFF.FIFF_UNIT_NONE,
+                      'seeg': FIFF.FIFF_UNIT_V,
+                      'stim': FIFF.FIFF_UNIT_NONE,
+                      'syst': FIFF.FIFF_UNIT_NONE}
+
+        unit2human = {FIFF.FIFF_UNIT_V: 'V',
+                      FIFF.FIFF_UNIT_NONE: 'NA'}
+        ch_names = self.info['ch_names']
+
+        # first check and assemble clean mappings of index and name
+        for ch_name, ch_type in mapping.items():
+            if ch_name not in ch_names:
+                raise ValueError("This channel name (%s) doesn't exist in "
+                                 "info." % ch_name)
+
+            c_ind = ch_names.index(ch_name)
+            if ch_type not in human2fiff:
+                raise ValueError('This function cannot change to this '
+                                 'channel type: %s. Accepted channel types '
+                                 'are %s.' % (ch_type,
+                                              ", ".join(human2unit.keys())))
+            # Set sensor type
+            self.info['chs'][c_ind]['kind'] = human2fiff[ch_type]
+            unit_old = self.info['chs'][c_ind]['unit']
+            unit_new = human2unit[ch_type]
+            if unit_old != human2unit[ch_type]:
+                warnings.warn("The unit for Channel %s has changed "
+                              "from %s to %s." % (ch_name,
+                                                  unit2human[unit_old],
+                                                  unit2human[unit_new]))
+            self.info['chs'][c_ind]['unit'] = human2unit[ch_type]
+            if ch_type in ['eeg', 'seeg']:
+                self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_EEG
+            else:
+                self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_NONE
+
+    def rename_channels(self, mapping):
+        """Rename channels.
+
+        Parameters
+        ----------
+        mapping : dict | callable
+            A dictionary mapping old channel names to new channel names,
+            e.g. {'EEG061': 'EEG161'}. Can also be a callable function
+            that takes and returns a string (new in version 0.10.0).
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        rename_channels(self.info, mapping)
+
+    def set_montage(self, montage):
+        """Set EEG sensor configuration
+
+        Parameters
+        ----------
+        montage : instance of Montage or DigMontage
+            The montage containing the sensor positions and channel names.
+
+        Notes
+        -----
+        Operates in place.
+
+        .. versionadded:: 0.9.0
+        """
+        from .montage import _set_montage
+        _set_montage(self.info, montage)
+
+
+class UpdateChannelsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs, AverageTFR
+    """
+    def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
+                   ecg=False, emg=False, ref_meg='auto', misc=False,
+                   resp=False, chpi=False, exci=False, ias=False, syst=False,
+                   seeg=False, include=[], exclude='bads', selection=None,
+                   copy=False):
+        """Pick some channels by type and names
+
+        Parameters
+        ----------
+        meg : bool | str
+            If True include all MEG channels. If False include no MEG
+            channels. If string, it can be 'mag', 'grad', 'planar1' or
+            'planar2' to select only magnetometers, all gradiometers, or a
+            specific type of gradiometer.
+        eeg : bool
+            If True include EEG channels.
+        stim : bool
+            If True include stimulus channels.
+        eog : bool
+            If True include EOG channels.
+        ecg : bool
+            If True include ECG channels.
+        emg : bool
+            If True include EMG channels.
+        ref_meg : bool | str
+            If True include CTF / 4D reference channels. If 'auto', the
+            reference channels are only included if compensations are present.
+        misc : bool
+            If True include miscellaneous analog channels.
+        resp : bool
+            If True include response-trigger channel. For some MEG systems this
+            is separate from the stim channel.
+        chpi : bool
+            If True include continuous HPI coil channels.
+        exci : bool
+            Flux excitation channel (used to be a stimulus channel).
+        ias : bool
+            Internal Active Shielding data (maybe on Triux only).
+        syst : bool
+            System status channel information (on Triux systems only).
+        seeg : bool
+            Stereotactic EEG channels.
+        include : list of string
+            List of additional channels to include. If empty do not include
+            any.
+        exclude : list of string | str
+            List of channels to exclude. If 'bads' (default), exclude channels
+            in ``info['bads']``.
+        selection : list of string
+            Restrict sensor channels (MEG, EEG) to this list of channel names.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        inst = self.copy() if copy else self
+        idx = pick_types(
+            self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
+            ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
+            ias=ias, syst=syst, seeg=seeg, include=include, exclude=exclude,
+            selection=selection)
+        inst._pick_drop_channels(idx)
+        return inst
+
+    def pick_channels(self, ch_names, copy=False):
+        """Pick some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to select.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        See Also
+        --------
+        drop_channels
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        inst = self.copy() if copy else self
+        _check_excludes_includes(ch_names)
+
+        idx = [inst.ch_names.index(c) for c in ch_names if c in inst.ch_names]
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def drop_channels(self, ch_names, copy=False):
+        """Drop some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to remove.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        See Also
+        --------
+        pick_channels
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        inst = self.copy() if copy else self
+
+        bad_idx = [inst.ch_names.index(c) for c in ch_names
+                   if c in inst.ch_names]
+        idx = np.setdiff1d(np.arange(len(inst.ch_names)), bad_idx)
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def _pick_drop_channels(self, idx):
+        # avoid circular imports
+        from ..io.base import _BaseRaw
+        from ..epochs import _BaseEpochs
+        from ..evoked import Evoked
+        from ..time_frequency import AverageTFR
+
+        if isinstance(self, (_BaseRaw, _BaseEpochs)):
+            if not self.preload:
+                raise RuntimeError('If Raw or Epochs, data must be preloaded '
+                                   'to drop or pick channels')
+
+        def inst_has(attr):
+            return getattr(self, attr, None) is not None
+
+        if inst_has('picks'):
+            self.picks = self.picks[idx]
+
+        if inst_has('_cals'):
+            self._cals = self._cals[idx]
+
+        self.info = pick_info(self.info, idx, copy=False)
+
+        if inst_has('_projector'):
+            self._projector = self._projector[idx][:, idx]
+
+        if isinstance(self, _BaseRaw) and inst_has('_data'):
+            self._data = self._data.take(idx, axis=0)
+        elif isinstance(self, _BaseEpochs) and inst_has('_data'):
+            self._data = self._data.take(idx, axis=1)
+        elif isinstance(self, AverageTFR) and inst_has('data'):
+            self.data = self.data.take(idx, axis=0)
+        elif isinstance(self, Evoked):
+            self.data = self.data.take(idx, axis=0)
+
+    def add_channels(self, add_list, copy=False):
+        """Append new channels to the instance.
+
+        Parameters
+        ----------
+        add_list : list
+            A list of objects to append to self. Must contain only objects
+            of the same type as the current object.
+        copy : bool
+            Whether to return a new instance or modify in place.
+
+        Returns
+        -------
+        out : MNE object of type(self)
+            An object with new channels appended (will be the same
+            object if copy==False)
+        """
+        # avoid circular imports
+        from ..io.base import _BaseRaw
+        from ..epochs import _BaseEpochs
+        from ..io.meas_info import _merge_info
+
+        if not isinstance(add_list, (list, tuple)):
+            raise AssertionError('Input must be a list or tuple of objs')
+
+        # Object-specific checks
+        if isinstance(self, (_BaseRaw, _BaseEpochs)):
+            if not all([inst.preload for inst in add_list] + [self.preload]):
+                raise AssertionError('All data must be preloaded')
+            data_name = '_data'
+            if isinstance(self, _BaseRaw):
+                con_axis = 0
+                comp_class = _BaseRaw
+            elif isinstance(self, _BaseEpochs):
+                con_axis = 1
+                comp_class = _BaseEpochs
+        else:
+            data_name = 'data'
+            con_axis = 0
+            comp_class = type(self)
+        if not all(isinstance(inst, comp_class) for inst in add_list):
+            raise AssertionError('All input data must be of same type')
+        data = [getattr(inst, data_name) for inst in [self] + add_list]
+
+        # Make sure that all dimensions other than channel axis are the same
+        compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
+        shapes = np.array([dat.shape for dat in data])[:, compare_axes]
+        if not ((shapes[0] - shapes) == 0).all():
+            raise AssertionError('All dimensions except channels must match')
+
+        # Create final data / info objects
+        data = np.concatenate(data, axis=con_axis)
+        infos = [self.info] + [inst.info for inst in add_list]
+        new_info = _merge_info(infos)
+
+        # Now update the attributes
+        if copy is True:
+            out = self.copy()
+        else:
+            out = self
+        setattr(out, data_name, data)
+        out.info = new_info
+        if isinstance(self, _BaseRaw):
+            out._cals = np.concatenate([getattr(inst, '_cals')
+                                        for inst in [self] + add_list])
+        return out
+
+
+class InterpolationMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+
+    def interpolate_bads(self, reset_bads=True, mode='accurate'):
+        """Interpolate bad MEG and EEG channels.
+
+        Operates in place.
+
+        Parameters
+        ----------
+        reset_bads : bool
+            If True, remove the bads from info.
+        mode : str
+            Either `'accurate'` or `'fast'`, determines the quality of the
+            Legendre polynomial expansion used for interpolation of MEG
+            channels.
+
+        Returns
+        -------
+        self : mne.io.Raw, mne.Epochs or mne.Evoked
+            The interpolated data.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
+
+        if getattr(self, 'preload', None) is False:
+            raise ValueError('Data must be preloaded.')
+
+        _interpolate_bads_eeg(self)
+        _interpolate_bads_meg(self, mode=mode)
+
+        if reset_bads is True:
+            self.info['bads'] = []
+
+        return self
+
+
+def rename_channels(info, mapping):
+    """Rename channels.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    mapping : dict | callable
+        A dictionary mapping old channel names to new channel names,
+        e.g. {'EEG061': 'EEG161'}. Can also be a callable function
+        that takes and returns a string (new in version 0.10.0).
+    """
+    info._check_consistency()
+    bads = list(info['bads'])  # make our own local copies
+    ch_names = list(info['ch_names'])
+
+    # first check and assemble clean mappings of index and name
+    if isinstance(mapping, dict):
+        orig_names = sorted(list(mapping.keys()))
+        missing = [orig_name not in ch_names for orig_name in orig_names]
+        if any(missing):
+            raise ValueError("Channel name(s) in mapping missing from info: "
+                             "%s" % np.array(orig_names)[np.array(missing)])
+        new_names = [(ch_names.index(ch_name), new_name)
+                     for ch_name, new_name in mapping.items()]
+    elif callable(mapping):
+        new_names = [(ci, mapping(ch_name))
+                     for ci, ch_name in enumerate(ch_names)]
+    else:
+        raise ValueError('mapping must be callable or dict, not %s'
+                         % (type(mapping),))
+
+    # check we got all strings out of the mapping
+    if any(not isinstance(new_name[1], string_types)
+           for new_name in new_names):
+        raise ValueError('New channel mapping must only be to strings')
+
+    # do the remapping locally
+    for c_ind, new_name in new_names:
+        for bi, bad in enumerate(bads):
+            if bad == ch_names[c_ind]:
+                bads[bi] = new_name
+        ch_names[c_ind] = new_name
+
+    # check that all the channel names are unique
+    if len(ch_names) != len(np.unique(ch_names)):
+        raise ValueError('New channel names are not unique, renaming failed')
+
+    # do the remapping in info
+    info['bads'] = bads
+    info['ch_names'] = ch_names
+    for ch, ch_name in zip(info['chs'], ch_names):
+        ch['ch_name'] = ch_name
+    info._check_consistency()
+
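+# Usage sketch (channel names are illustrative; 'raw' is any instance):
+#
+#     rename_channels(raw.info, {'EEG061': 'EOG061'})
+#     rename_channels(raw.info, lambda name: name.replace(' ', ''))
+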
+
+def _recursive_flatten(cell, dtype):
+    """Helper to unpack mat files in Python"""
+    while not isinstance(cell[0], dtype):
+        cell = [c for d in cell for c in d]
+    return cell
+
+
+def read_ch_connectivity(fname, picks=None):
+    """Parse FieldTrip neighbors .mat file
+
+    More information on these neighbor definitions can be found on the
+    related FieldTrip documentation pages:
+    http://fieldtrip.fcdonders.nl/template/neighbours
+
+    Parameters
+    ----------
+    fname : str
+        The file name. Example: 'neuromag306mag', 'neuromag306planar',
+        'ctf275', 'biosemi64', etc.
+    picks : array-like of int, shape (n_channels,)
+        The indices of the channels to include. Must match the template.
+        Defaults to None.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    ch_names : list
+        The list of channel names present in connectivity matrix.
+    """
+    from scipy.io import loadmat
+    if not op.isabs(fname):
+        templates_dir = op.realpath(op.join(op.dirname(__file__),
+                                            'data', 'neighbors'))
+        templates = os.listdir(templates_dir)
+        for f in templates:
+            if f == fname:
+                break
+            if f == fname + '_neighb.mat':
+                fname += '_neighb.mat'
+                break
+        else:
+            raise ValueError('I do not know about this neighbor '
+                             'template: "{}"'.format(fname))
+
+        fname = op.join(templates_dir, fname)
+
+    nb = loadmat(fname)['neighbours']
+    ch_names = _recursive_flatten(nb['label'], string_types)
+    neighbors = [_recursive_flatten(c, string_types) for c in
+                 nb['neighblabel'].flatten()]
+    assert len(ch_names) == len(neighbors)
+    if picks is not None:
+        if max(picks) >= len(ch_names):
+            raise ValueError('The picks must be compatible with '
+                             'channels. Found a pick ({}) which exceeds '
+                             'the channel range ({})'
+                             .format(max(picks), len(ch_names)))
+    connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
+    if picks is not None:
+        # picking before constructing matrix is buggy
+        connectivity = connectivity[picks][:, picks]
+        ch_names = [ch_names[p] for p in picks]
+    return connectivity, ch_names
+
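+# Usage sketch ('neuromag306mag' names a template shipped in
+# data/neighbors; the shape comment assumes that template):
+#
+#     connectivity, ch_names = read_ch_connectivity('neuromag306mag')
+#     print(connectivity.shape)  # (102, 102), one row per magnetometer
+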
+
+def _ch_neighbor_connectivity(ch_names, neighbors):
+    """Compute sensor connectivity matrix
+
+    Parameters
+    ----------
+    ch_names : list of str
+        The channel names.
+    neighbors : list of list
+        A list of lists of channel names giving, for each channel in
+        ch_names, the neighbors it is connected to.
+        Must be of the same length as ch_names.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    """
+    if len(ch_names) != len(neighbors):
+        raise ValueError('`ch_names` and `neighbors` must '
+                         'have the same length')
+    set_neighbors = set([c for d in neighbors for c in d])
+    rest = set_neighbors - set(ch_names)
+    if len(rest) > 0:
+        raise ValueError('Some of your neighbors are not present in the '
+                         'list of channel names')
+
+    for neigh in neighbors:
+        if (not isinstance(neigh, list) or
+           not all(isinstance(c, string_types) for c in neigh)):
+            raise ValueError('`neighbors` must be a list of lists of str')
+
+    ch_connectivity = np.eye(len(ch_names), dtype=bool)
+    for ii, neighs in enumerate(neighbors):
+        ch_connectivity[ii, [ch_names.index(i) for i in neighs]] = True
+
+    ch_connectivity = sparse.csr_matrix(ch_connectivity)
+    return ch_connectivity
+
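+# Worked example: three channels where 'b' neighbors both others give a
+# symmetric matrix with a full diagonal:
+#
+#     conn = _ch_neighbor_connectivity(['a', 'b', 'c'],
+#                                      [['b'], ['a', 'c'], ['b']])
+#     conn.toarray()  # [[1, 1, 0], [1, 1, 1], [0, 1, 1]] as booleans
+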
+
+def fix_mag_coil_types(info):
+    """Fix Elekta magnetometer coil types
+
+    Parameters
+    ----------
+    info : dict
+        The info dict to correct. Corrections are done in-place.
+
+    Notes
+    -----
+    This function changes magnetometer coil types 3022 (T1: SQ20483N) and
+    3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
+    records in the info structure.
+
+    Neuromag Vectorview systems can contain magnetometers with two
+    different coil sizes (3022 and 3023 vs. 3024). The systems
+    incorporating coils of type 3024 were introduced last and are used at
+    the majority of MEG sites. At some sites with 3024 magnetometers,
+    the data files still define the magnetometers as type 3022 to
+    remain compatible with older versions of the Neuromag software.
+    Coil type 3024 is fully supported by the MNE software as well as
+    by the present version of the Neuromag software, so it is now safe
+    to upgrade the data files to use the true coil type.
+
+    .. note:: The effect of the difference between the coil sizes on the
+              current estimates computed by the MNE software is very small.
+              Therefore the use of fix_mag_coil_types is not mandatory.
+    """
+    picks = pick_types(info, meg='mag')
+    for ii in picks:
+        ch = info['chs'][ii]
+        if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
+                               FIFF.FIFFV_COIL_VV_MAG_T2):
+            ch['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
+    info._check_consistency()
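+
+# Typical use (a sketch; `raw` stands in for any object carrying a
+# measurement info dict, e.g. an mne.io.Raw instance):
+#     >>> fix_mag_coil_types(raw.info)  # corrects the coil types in-place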
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF-275.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF-275.lout
new file mode 100644
index 0000000..53d924c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF-275.lout
@@ -0,0 +1,276 @@
+  -42.27    42.33   -39.99    31.80
+001    -4.09    10.91     4.00     3.00 MLC11-2622
+002    -7.25     8.87     4.00     3.00 MLC12-2622
+003   -10.79     7.43     4.00     3.00 MLC13-2622
+004   -14.40     5.31     4.00     3.00 MLC14-2622
+005   -17.45     2.88     4.00     3.00 MLC15-2622
+006   -19.94    -0.21     4.00     3.00 MLC16-2622
+007   -22.30    -3.88     4.00     3.00 MLC17-2622
+008    -7.70     5.16     4.00     3.00 MLC21-2622
+009   -11.18     3.69     4.00     3.00 MLC22-2622
+010   -14.17     1.40     4.00     3.00 MLC23-2622
+011   -16.42    -1.52     4.00     3.00 MLC24-2622
+012   -18.64    -4.88     4.00     3.00 MLC25-2622
+013   -12.55    -2.00     4.00     3.00 MLC31-2622
+014   -15.13    -5.41     4.00     3.00 MLC32-2622
+015    -9.57     0.28     4.00     3.00 MLC41-2622
+016   -11.51    -5.56     4.00     3.00 MLC42-2622
+017    -4.04     4.58     4.00     3.00 MLC51-2622
+018    -6.04     1.35     4.00     3.00 MLC52-2622
+019    -8.79    -3.34     4.00     3.00 MLC53-2622
+020    -8.32    -7.10     4.00     3.00 MLC54-2622
+021    -6.60   -10.22     4.00     3.00 MLC55-2622
+022    -4.01    -1.76     4.00     3.00 MLC61-2622
+023    -5.55    -4.97     4.00     3.00 MLC62-2622
+024    -3.74    -8.12     4.00     3.00 MLC63-2622
+025    -7.63    28.14     4.00     3.00 MLF11-2622
+026   -12.92    27.01     4.00     3.00 MLF12-2622
+027   -18.14    25.41     4.00     3.00 MLF13-2622
+028   -23.34    23.65     4.00     3.00 MLF14-2622
+029    -4.64    25.47     4.00     3.00 MLF21-2622
+030    -9.22    24.68     4.00     3.00 MLF22-2622
+031   -13.60    23.41     4.00     3.00 MLF23-2622
+032   -18.31    21.53     4.00     3.00 MLF24-2622
+033   -22.68    19.69     4.00     3.00 MLF25-2622
+034    -6.57    22.14     4.00     3.00 MLF31-2622
+035   -10.75    21.22     4.00     3.00 MLF32-2622
+036   -15.16    19.49     4.00     3.00 MLF33-2622
+037   -19.01    17.57     4.00     3.00 MLF34-2622
+038   -22.93    15.25     4.00     3.00 MLF35-2622
+039    -4.25    19.38     4.00     3.00 MLF41-2622
+040    -8.17    18.80     4.00     3.00 MLF42-2622
+041   -12.29    17.37     4.00     3.00 MLF43-2622
+042   -15.93    15.49     4.00     3.00 MLF44-2622
+043   -19.89    13.39     4.00     3.00 MLF45-2622
+044   -24.12    10.50     4.00     3.00 MLF46-2622
+045    -5.48    16.15     4.00     3.00 MLF51-2622
+046    -9.58    15.10     4.00     3.00 MLF52-2622
+047   -13.17    13.43     4.00     3.00 MLF53-2622
+048   -16.66    11.39     4.00     3.00 MLF54-2622
+049   -20.76     9.06     4.00     3.00 MLF55-2622
+050   -24.71     5.73     4.00     3.00 MLF56-2622
+051    -7.17    12.78     4.00     3.00 MLF61-2622
+052   -10.58    11.08     4.00     3.00 MLF62-2622
+053   -13.93     9.16     4.00     3.00 MLF63-2622
+054   -17.37     7.29     4.00     3.00 MLF64-2622
+055   -20.83     4.87     4.00     3.00 MLF65-2622
+056   -23.40     1.59     4.00     3.00 MLF66-2622
+057   -25.90    -2.51     4.00     3.00 MLF67-2622
+058    -6.96   -27.32     4.00     3.00 MLO11-2622
+059   -11.88   -25.97     4.00     3.00 MLO12-2622
+060   -16.48   -23.69     4.00     3.00 MLO13-2622
+061   -20.64   -20.44     4.00     3.00 MLO14-2622
+062    -4.82   -30.75     4.00     3.00 MLO21-2622
+063   -10.11   -29.77     4.00     3.00 MLO22-2622
+064   -15.52   -27.87     4.00     3.00 MLO23-2622
+065   -20.40   -24.85     4.00     3.00 MLO24-2622
+066    -7.92   -33.45     4.00     3.00 MLO31-2622
+067   -13.84   -31.94     4.00     3.00 MLO32-2622
+068   -19.61   -29.16     4.00     3.00 MLO33-2622
+069   -24.70   -25.44     4.00     3.00 MLO34-2622
+070    -5.16   -36.86     4.00     3.00 MLO41-2622
+071   -11.67   -35.84     4.00     3.00 MLO42-2622
+072   -17.98   -33.55     4.00     3.00 MLO43-2622
+073   -23.91   -30.00     4.00     3.00 MLO44-2622
+074    -8.79   -39.34     4.00     3.00 MLO51-2622
+075   -15.83   -37.54     4.00     3.00 MLO52-2622
+076   -22.47   -34.34     4.00     3.00 MLO53-2622
+077    -4.98   -13.36     4.00     3.00 MLP11-2622
+078   -10.20   -10.01     4.00     3.00 MLP12-2622
+079    -3.80   -16.69     4.00     3.00 MLP21-2622
+080    -8.73   -13.30     4.00     3.00 MLP22-2622
+081   -13.58    -8.80     4.00     3.00 MLP23-2622
+082    -5.66   -19.72     4.00     3.00 MLP31-2622
+083    -8.41   -16.83     4.00     3.00 MLP32-2622
+084   -12.08   -14.80     4.00     3.00 MLP33-2622
+085   -15.13   -11.95     4.00     3.00 MLP34-2622
+086   -17.18    -8.63     4.00     3.00 MLP35-2622
+087    -9.92   -20.16     4.00     3.00 MLP41-2622
+088   -13.37   -18.09     4.00     3.00 MLP42-2622
+089   -16.59   -15.58     4.00     3.00 MLP43-2622
+090   -19.06   -11.87     4.00     3.00 MLP44-2622
+091   -20.87    -8.06     4.00     3.00 MLP45-2622
+092    -4.02   -24.07     4.00     3.00 MLP51-2622
+093    -8.77   -23.79     4.00     3.00 MLP52-2622
+094   -12.92   -22.08     4.00     3.00 MLP53-2622
+095   -16.83   -19.50     4.00     3.00 MLP54-2622
+096   -20.23   -16.32     4.00     3.00 MLP55-2622
+097   -22.76   -11.97     4.00     3.00 MLP56-2622
+098   -24.58    -7.58     4.00     3.00 MLP57-2622
+099   -27.14    12.98     4.00     3.00 MLT11-2622
+100   -28.19     7.51     4.00     3.00 MLT12-2622
+101   -28.08     2.09     4.00     3.00 MLT13-2622
+102   -28.56    -5.98     4.00     3.00 MLT14-2622
+103   -26.96   -11.17     4.00     3.00 MLT15-2622
+104   -24.11   -16.46     4.00     3.00 MLT16-2622
+105   -27.30    17.85     4.00     3.00 MLT21-2622
+106   -31.47    10.04     4.00     3.00 MLT22-2622
+107   -31.85     3.70     4.00     3.00 MLT23-2622
+108   -32.08    -2.62     4.00     3.00 MLT24-2622
+109   -31.09    -9.80     4.00     3.00 MLT25-2622
+110   -28.71   -15.38     4.00     3.00 MLT26-2622
+111   -24.78   -20.78     4.00     3.00 MLT27-2622
+112   -28.61    21.64     4.00     3.00 MLT31-2622
+113   -32.09    15.32     4.00     3.00 MLT32-2622
+114   -35.40     5.79     4.00     3.00 MLT33-2622
+115   -35.85    -1.29     4.00     3.00 MLT34-2622
+116   -34.97    -7.76     4.00     3.00 MLT35-2622
+117   -32.89   -13.91     4.00     3.00 MLT36-2622
+118   -29.32   -20.20     4.00     3.00 MLT37-2622
+119   -33.87    18.93     4.00     3.00 MLT41-2622
+120   -36.68    11.37     4.00     3.00 MLT42-2622
+121   -38.92     2.11     4.00     3.00 MLT43-2622
+122   -38.70    -5.16     4.00     3.00 MLT44-2622
+123   -36.95   -12.13     4.00     3.00 MLT45-2622
+124   -33.72   -18.79     4.00     3.00 MLT46-2622
+125   -29.28   -25.28     4.00     3.00 MLT47-2622
+126   -38.78    14.74     4.00     3.00 MLT51-2622
+127   -41.29     6.62     4.00     3.00 MLT52-2622
+128   -41.87    -1.80     4.00     3.00 MLT53-2622
+129   -40.62    -9.63     4.00     3.00 MLT54-2622
+130   -37.78   -16.89     4.00     3.00 MLT55-2622
+131   -33.73   -24.02     4.00     3.00 MLT56-2622
+132   -28.51   -29.92     4.00     3.00 MLT57-2622
+133    -0.24    10.97     4.00     3.00 MRC11-2622
+134     2.99     8.95     4.00     3.00 MRC12-2622
+135     6.57     7.62     4.00     3.00 MRC13-2622
+136    10.22     5.56     4.00     3.00 MRC14-2622
+137    13.27     3.22     4.00     3.00 MRC15-2622
+138    15.86     0.21     4.00     3.00 MRC16-2622
+139    18.32    -3.45     4.00     3.00 MRC17-2622
+140     3.53     5.28     4.00     3.00 MRC21-2622
+141     7.00     3.85     4.00     3.00 MRC22-2622
+142    10.06     1.68     4.00     3.00 MRC23-2622
+143    12.33    -1.20     4.00     3.00 MRC24-2622
+144    14.73    -4.52     4.00     3.00 MRC25-2622
+145     8.51    -1.76     4.00     3.00 MRC31-2622
+146    11.17    -5.14     4.00     3.00 MRC32-2622
+147     5.51     0.46     4.00     3.00 MRC41-2622
+148     7.56    -5.33     4.00     3.00 MRC42-2622
+149    -0.17     4.62     4.00     3.00 MRC51-2622
+150     1.93     1.46     4.00     3.00 MRC52-2622
+151     4.78    -3.16     4.00     3.00 MRC53-2622
+152     4.39    -6.98     4.00     3.00 MRC54-2622
+153     2.73   -10.10     4.00     3.00 MRC55-2622
+154    -0.07    -1.75     4.00     3.00 MRC61-2622
+155     1.58    -4.86     4.00     3.00 MRC62-2622
+156    -0.15    -8.08     4.00     3.00 MRC63-2622
+157     2.97    28.24     4.00     3.00 MRF11-2622
+158     8.25    27.25     4.00     3.00 MRF12-2622
+159    13.54    25.74     4.00     3.00 MRF13-2622
+160    18.74    24.12     4.00     3.00 MRF14-2622
+161     0.03    25.52     4.00     3.00 MRF21-2622
+162     4.63    24.85     4.00     3.00 MRF22-2622
+163     9.03    23.67     4.00     3.00 MRF23-2622
+164    13.78    21.87     4.00     3.00 MRF24-2622
+165    18.19    20.13     4.00     3.00 MRF25-2622
+166     2.05    22.22     4.00     3.00 MRF31-2622
+167     6.27    21.38     4.00     3.00 MRF32-2622
+168    10.63    19.79     4.00     3.00 MRF33-2622
+169    14.57    17.90     4.00     3.00 MRF34-2622
+170    18.54    15.70     4.00     3.00 MRF35-2622
+171    -0.22    19.42     4.00     3.00 MRF41-2622
+172     3.75    18.84     4.00     3.00 MRF42-2622
+173     7.86    17.57     4.00     3.00 MRF43-2622
+174    11.53    15.78     4.00     3.00 MRF44-2622
+175    15.55    13.76     4.00     3.00 MRF45-2622
+176    19.83    10.96     4.00     3.00 MRF46-2622
+177     1.08    16.23     4.00     3.00 MRF51-2622
+178     5.20    15.33     4.00     3.00 MRF52-2622
+179     8.81    13.68     4.00     3.00 MRF53-2622
+180    12.37    11.71     4.00     3.00 MRF54-2622
+181    16.53     9.44     4.00     3.00 MRF55-2622
+182    20.54     6.21     4.00     3.00 MRF56-2622
+183     2.82    12.87     4.00     3.00 MRF61-2622
+184     6.27    11.29     4.00     3.00 MRF62-2622
+185     9.66     9.43     4.00     3.00 MRF63-2622
+186    13.14     7.59     4.00     3.00 MRF64-2622
+187    16.52     5.22     4.00     3.00 MRF65-2622
+188    19.31     2.05     4.00     3.00 MRF66-2622
+189    21.91    -1.92     4.00     3.00 MRF67-2622
+190     3.46   -27.20     4.00     3.00 MRO11-2622
+191     8.35   -25.76     4.00     3.00 MRO12-2622
+192    12.92   -23.40     4.00     3.00 MRO13-2622
+193    17.02   -20.06     4.00     3.00 MRO14-2622
+194     1.43   -30.69     4.00     3.00 MRO21-2622
+195     6.66   -29.60     4.00     3.00 MRO22-2622
+196    12.02   -27.57     4.00     3.00 MRO23-2622
+197    16.88   -24.46     4.00     3.00 MRO24-2622
+198     4.55   -33.35     4.00     3.00 MRO31-2622
+199    10.46   -31.70     4.00     3.00 MRO32-2622
+200    16.07   -28.88     4.00     3.00 MRO33-2622
+201    21.16   -24.93     4.00     3.00 MRO34-2622
+202     1.88   -36.78     4.00     3.00 MRO41-2622
+203     8.37   -35.64     4.00     3.00 MRO42-2622
+204    14.63   -33.19     4.00     3.00 MRO43-2622
+205    20.45   -29.57     4.00     3.00 MRO44-2622
+206     5.57   -39.20     4.00     3.00 MRO51-2622
+207    12.57   -37.26     4.00     3.00 MRO52-2622
+208    19.11   -33.96     4.00     3.00 MRO53-2622
+209     1.20   -13.27     4.00     3.00 MRP11-2622
+210     6.34    -9.81     4.00     3.00 MRP12-2622
+211     0.06   -16.65     4.00     3.00 MRP21-2622
+212     4.94   -13.15     4.00     3.00 MRP22-2622
+213     9.72    -8.56     4.00     3.00 MRP23-2622
+214     2.03   -19.64     4.00     3.00 MRP31-2622
+215     4.72   -16.72     4.00     3.00 MRP32-2622
+216     8.28   -14.64     4.00     3.00 MRP33-2622
+217    11.32   -11.68     4.00     3.00 MRP34-2622
+218    13.30    -8.29     4.00     3.00 MRP35-2622
+219     6.32   -19.99     4.00     3.00 MRP41-2622
+220     9.66   -17.86     4.00     3.00 MRP42-2622
+221    12.83   -15.29     4.00     3.00 MRP43-2622
+222    15.21   -11.53     4.00     3.00 MRP44-2622
+223    16.99    -7.64     4.00     3.00 MRP45-2622
+224     0.42   -24.03     4.00     3.00 MRP51-2622
+225     5.29   -23.71     4.00     3.00 MRP52-2622
+226     9.32   -21.86     4.00     3.00 MRP53-2622
+227    13.19   -19.21     4.00     3.00 MRP54-2622
+228    16.49   -15.99     4.00     3.00 MRP55-2622
+229    18.98   -11.54     4.00     3.00 MRP56-2622
+230    20.69    -7.11     4.00     3.00 MRP57-2622
+231    22.81    13.51     4.00     3.00 MRT11-2622
+232    23.97     8.09     4.00     3.00 MRT12-2622
+233    23.97     2.65     4.00     3.00 MRT13-2622
+234    24.63    -5.42     4.00     3.00 MRT14-2622
+235    23.16   -10.65     4.00     3.00 MRT15-2622
+236    20.37   -16.02     4.00     3.00 MRT16-2622
+237    22.88    18.38     4.00     3.00 MRT21-2622
+238    27.23    10.62     4.00     3.00 MRT22-2622
+239    27.73     4.35     4.00     3.00 MRT23-2622
+240    28.08    -1.95     4.00     3.00 MRT24-2622
+241    27.24    -9.21     4.00     3.00 MRT25-2622
+242    24.97   -14.84     4.00     3.00 MRT26-2622
+243    21.15   -20.30     4.00     3.00 MRT27-2622
+244    24.07    22.26     4.00     3.00 MRT31-2622
+245    27.72    15.94     4.00     3.00 MRT32-2622
+246    31.24     6.55     4.00     3.00 MRT33-2622
+247    31.84    -0.55     4.00     3.00 MRT34-2622
+248    31.09    -7.10     4.00     3.00 MRT35-2622
+249    29.13   -13.33     4.00     3.00 MRT36-2622
+250    25.63   -19.73     4.00     3.00 MRT37-2622
+251    29.40    19.66     4.00     3.00 MRT41-2622
+252    32.38    12.17     4.00     3.00 MRT42-2622
+253    34.86     2.97     4.00     3.00 MRT43-2622
+254    34.80    -4.39     4.00     3.00 MRT44-2622
+255    33.11   -11.36     4.00     3.00 MRT45-2622
+256    30.03   -18.16     4.00     3.00 MRT46-2622
+257    25.54   -24.88     4.00     3.00 MRT47-2622
+258    34.47    15.52     4.00     3.00 MRT51-2622
+259    37.12     7.54     4.00     3.00 MRT52-2622
+260    37.93    -0.94     4.00     3.00 MRT53-2622
+261    36.82    -8.89     4.00     3.00 MRT54-2622
+262    34.10   -16.25     4.00     3.00 MRT55-2622
+263    30.13   -23.45     4.00     3.00 MRT56-2622
+264    25.07   -29.43     4.00     3.00 MRT57-2622
+265    -2.13     7.84     4.00     3.00 MZC01-2622
+266    -2.05     1.38     4.00     3.00 MZC02-2622
+267    -1.99    -5.04     4.00     3.00 MZC03-2622
+268    -1.93   -11.44     4.00     3.00 MZC04-2622
+269    -2.33    28.50     4.00     3.00 MZF01-2622
+270    -2.28    22.54     4.00     3.00 MZF02-2622
+271    -2.20    14.52     4.00     3.00 MZF03-2622
+272    -1.77   -27.22     4.00     3.00 MZO01-2622
+273    -1.71   -34.04     4.00     3.00 MZO02-2622
+274    -1.66   -39.69     4.00     3.00 MZO03-2622
+275    -1.81   -21.05     4.00     3.00 MZP01-2622
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF151.lay b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF151.lay
new file mode 100644
index 0000000..c9d68f3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF151.lay
@@ -0,0 +1,153 @@
+1 -0.440000 -4.000000 0.551100 0.351100 MLC11
+2 -1.200000 -4.130000 0.551100 0.351100 MLC12
+3 -2.220000 -4.270000 0.551100 0.351100 MLC13
+4 -2.820000 -4.710000 0.551100 0.351100 MLC14
+5 -3.340000 -5.230000 0.551100 0.351100 MLC15
+6 -0.820000 -4.550000 0.551100 0.351100 MLC21
+7 -1.620000 -4.570000 0.551100 0.351100 MLC22
+8 -2.160000 -4.970000 0.551100 0.351100 MLC23
+9 -2.640000 -5.370000 0.551100 0.351100 MLC24
+10 -1.270000 -5.050000 0.551100 0.351100 MLC31
+11 -1.780000 -5.450000 0.551100 0.351100 MLC32
+12 -1.300000 -5.930000 0.551100 0.351100 MLC33
+13 -0.440000 -5.050000 0.551100 0.351100 MLC41
+14 -0.820000 -5.530000 0.551100 0.351100 MLC42
+15 -0.400000 -6.010000 0.551100 0.351100 MLC43
+16 -1.170000 -2.010000 0.551100 0.351100 MLF11
+17 -2.260000 -2.230000 0.551100 0.351100 MLF12
+18 -0.490000 -2.300000 0.551100 0.351100 MLF21
+19 -1.540000 -2.470000 0.551100 0.351100 MLF22
+20 -2.540000 -2.750000 0.551100 0.351100 MLF23
+21 -1.000000 -2.750000 0.551100 0.351100 MLF31
+22 -1.950000 -2.980000 0.551100 0.351100 MLF32
+23 -2.780000 -3.300000 0.551100 0.351100 MLF33
+24 -3.440000 -3.770000 0.551100 0.351100 MLF34
+25 -0.450000 -3.100000 0.551100 0.351100 MLF41
+26 -1.380000 -3.260000 0.551100 0.351100 MLF42
+27 -2.280000 -3.570000 0.551100 0.351100 MLF43
+28 -2.870000 -4.060000 0.551100 0.351100 MLF44
+29 -3.500000 -4.510000 0.551100 0.351100 MLF45
+30 -0.850000 -3.580000 0.551100 0.351100 MLF51
+31 -1.700000 -3.790000 0.551100 0.351100 MLF52
+32 -0.470000 -7.690000 0.551100 0.351100 MLO11
+33 -1.650000 -7.420000 0.551100 0.351100 MLO12
+34 -1.210000 -7.930000 0.551100 0.351100 MLO21
+35 -2.350000 -7.580000 0.551100 0.351100 MLO22
+36 -0.600000 -8.400000 0.551100 0.351100 MLO31
+37 -1.920000 -8.120000 0.551100 0.351100 MLO32
+38 -3.110000 -7.670000 0.551100 0.351100 MLO33
+39 -1.400000 -8.560000 0.551100 0.351100 MLO41
+40 -2.750000 -8.210000 0.551100 0.351100 MLO42
+41 -3.910000 -7.620000 0.551100 0.351100 MLO43
+42 -0.840000 -6.390000 0.551100 0.351100 MLP11
+43 -1.710000 -6.320000 0.551100 0.351100 MLP12
+44 -2.240000 -5.870000 0.551100 0.351100 MLP13
+45 -0.440000 -6.900000 0.551100 0.351100 MLP21
+46 -1.220000 -6.760000 0.551100 0.351100 MLP22
+47 -0.970000 -7.220000 0.551100 0.351100 MLP31
+48 -1.900000 -6.880000 0.551100 0.351100 MLP32
+49 -2.470000 -6.390000 0.551100 0.351100 MLP33
+50 -2.990000 -5.850000 0.551100 0.351100 MLP34
+51 -3.420000 -3.120000 0.551100 0.351100 MLT11
+52 -4.100000 -4.200000 0.551100 0.351100 MLT12
+53 -4.040000 -5.030000 0.551100 0.351100 MLT13
+54 -3.780000 -5.770000 0.551100 0.351100 MLT14
+55 -3.210000 -6.440000 0.551100 0.351100 MLT15
+56 -2.570000 -7.010000 0.551100 0.351100 MLT16
+57 -3.320000 -2.550000 0.551100 0.351100 MLT21
+58 -4.260000 -3.520000 0.551100 0.351100 MLT22
+59 -4.720000 -4.710000 0.551100 0.351100 MLT23
+60 -4.520000 -5.590000 0.551100 0.351100 MLT24
+61 -4.040000 -6.350000 0.551100 0.351100 MLT25
+62 -3.280000 -7.060000 0.551100 0.351100 MLT26
+63 -4.340000 -2.900000 0.551100 0.351100 MLT31
+64 -5.040000 -4.050000 0.551100 0.351100 MLT32
+65 -5.200000 -5.210000 0.551100 0.351100 MLT33
+66 -4.820000 -6.140000 0.551100 0.351100 MLT34
+67 -4.090000 -7.000000 0.551100 0.351100 MLT35
+68 -5.210000 -3.450000 0.551100 0.351100 MLT41
+69 -5.640000 -4.620000 0.551100 0.351100 MLT42
+70 -5.500000 -5.730000 0.551100 0.351100 MLT43
+71 -4.910000 -6.720000 0.551100 0.351100 MLT44
+72 0.410000 -4.000000 0.551100 0.351100 MRC11
+73 1.170000 -4.130000 0.551100 0.351100 MRC12
+74 2.200000 -4.270000 0.551100 0.351100 MRC13
+75 2.800000 -4.710000 0.551100 0.351100 MRC14
+76 3.320000 -5.230000 0.551100 0.351100 MRC15
+77 0.800000 -4.560000 0.551100 0.351100 MRC21
+78 1.600000 -4.570000 0.551100 0.351100 MRC22
+79 2.140000 -4.970000 0.551100 0.351100 MRC23
+80 2.620000 -5.370000 0.551100 0.351100 MRC24
+81 1.260000 -5.050000 0.551100 0.351100 MRC31
+82 1.760000 -5.450000 0.551100 0.351100 MRC32
+83 1.280000 -5.930000 0.551100 0.351100 MRC33
+84 0.420000 -5.050000 0.551100 0.351100 MRC41
+85 0.810000 -5.540000 0.551100 0.351100 MRC42
+86 0.380000 -6.010000 0.551100 0.351100 MRC43
+87 1.130000 -2.010000 0.551100 0.351100 MRF11
+88 2.240000 -2.230000 0.551100 0.351100 MRF12
+89 0.460000 -2.290000 0.551100 0.351100 MRF21
+90 1.510000 -2.470000 0.551100 0.351100 MRF22
+91 2.520000 -2.740000 0.551100 0.351100 MRF23
+92 0.970000 -2.740000 0.551100 0.351100 MRF31
+93 1.920000 -2.980000 0.551100 0.351100 MRF32
+94 2.760000 -3.300000 0.551100 0.351100 MRF33
+95 3.420000 -3.770000 0.551100 0.351100 MRF34
+96 0.420000 -3.100000 0.551100 0.351100 MRF41
+97 1.360000 -3.260000 0.551100 0.351100 MRF42
+98 2.260000 -3.570000 0.551100 0.351100 MRF43
+99 2.840000 -4.050000 0.551100 0.351100 MRF44
+100 3.480000 -4.510000 0.551100 0.351100 MRF45
+101 0.820000 -3.580000 0.551100 0.351100 MRF51
+102 1.670000 -3.790000 0.551100 0.351100 MRF52
+103 0.470000 -7.690000 0.551100 0.351100 MRO11
+104 1.640000 -7.420000 0.551100 0.351100 MRO12
+105 1.200000 -7.930000 0.551100 0.351100 MRO21
+106 2.350000 -7.580000 0.551100 0.351100 MRO22
+107 0.580000 -8.390000 0.551100 0.351100 MRO31
+108 1.910000 -8.110000 0.551100 0.351100 MRO32
+109 3.110000 -7.670000 0.551100 0.351100 MRO33
+110 1.380000 -8.570000 0.551100 0.351100 MRO41
+111 2.750000 -8.220000 0.551100 0.351100 MRO42
+112 3.900000 -7.610000 0.551100 0.351100 MRO43
+113 0.820000 -6.380000 0.551100 0.351100 MRP11
+114 1.700000 -6.320000 0.551100 0.351100 MRP12
+115 2.220000 -5.870000 0.551100 0.351100 MRP13
+116 0.420000 -6.900000 0.551100 0.351100 MRP21
+117 1.200000 -6.750000 0.551100 0.351100 MRP22
+118 0.960000 -7.220000 0.551100 0.351100 MRP31
+119 1.880000 -6.870000 0.551100 0.351100 MRP32
+120 2.470000 -6.390000 0.551100 0.351100 MRP33
+121 2.990000 -5.850000 0.551100 0.351100 MRP34
+122 3.390000 -3.120000 0.551100 0.351100 MRT11
+123 4.070000 -4.190000 0.551100 0.351100 MRT12
+124 4.020000 -5.030000 0.551100 0.351100 MRT13
+125 3.760000 -5.770000 0.551100 0.351100 MRT14
+126 3.200000 -6.430000 0.551100 0.351100 MRT15
+127 2.570000 -7.010000 0.551100 0.351100 MRT16
+128 3.300000 -2.540000 0.551100 0.351100 MRT21
+129 4.230000 -3.510000 0.551100 0.351100 MRT22
+130 4.700000 -4.710000 0.551100 0.351100 MRT23
+131 4.500000 -5.590000 0.551100 0.351100 MRT24
+132 4.020000 -6.360000 0.551100 0.351100 MRT25
+133 3.260000 -7.060000 0.551100 0.351100 MRT26
+134 4.310000 -2.900000 0.551100 0.351100 MRT31
+135 5.020000 -4.050000 0.551100 0.351100 MRT32
+136 5.180000 -5.210000 0.551100 0.351100 MRT33
+137 4.800000 -6.140000 0.551100 0.351100 MRT34
+138 4.080000 -7.000000 0.551100 0.351100 MRT35
+139 5.200000 -3.450000 0.551100 0.351100 MRT41
+140 5.620000 -4.610000 0.551100 0.351100 MRT42
+141 5.480000 -5.730000 0.551100 0.351100 MRT43
+142 4.900000 -6.710000 0.551100 0.351100 MRT44
+143 0.000000 -4.510000 0.551100 0.351100 MZC01
+144 0.000000 -5.550000 0.551100 0.351100 MZC02
+145 0.000000 -1.930000 0.551100 0.351100 MZF01
+146 0.000000 -2.660000 0.551100 0.351100 MZF02
+147 0.000000 -3.510000 0.551100 0.351100 MZF03
+148 0.000000 -8.050000 0.551100 0.351100 MZO01
+149 0.000000 -8.660000 0.551100 0.351100 MZO02
+150 0.000000 -6.470000 0.551100 0.351100 MZP01
+151 0.000000 -7.290000 0.551100 0.351100 MZP02
+152 5.000000 -2.000000 0.551100 0.351100 SCALE
+153 -5.50000 -1.500000 0.551100 0.351100 COMNT
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF275.lay b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF275.lay
new file mode 100644
index 0000000..2af28d3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/CTF275.lay
@@ -0,0 +1,275 @@
+1 -0.029414 0.428191 0.100000 0.040000 MLC11
+2 -0.105398 0.378716 0.100000 0.040000 MLC12
+3 -0.187924 0.341472 0.100000 0.040000 MLC13
+4 -0.268071 0.285079 0.100000 0.040000 MLC14
+5 -0.330692 0.221374 0.100000 0.040000 MLC15
+6 -0.378697 0.144627 0.100000 0.040000 MLC16
+7 -0.411309 0.049716 0.100000 0.040000 MLC17
+8 -0.112105 0.295427 0.100000 0.040000 MLC21
+9 -0.189457 0.259287 0.100000 0.040000 MLC22
+10 -0.254180 0.203140 0.100000 0.040000 MLC23
+11 -0.298355 0.137997 0.100000 0.040000 MLC24
+12 -0.337649 0.050767 0.100000 0.040000 MLC25
+13 -0.213750 0.138862 0.100000 0.040000 MLC31
+14 -0.266243 0.056433 0.100000 0.040000 MLC32
+15 -0.150010 0.191395 0.100000 0.040000 MLC41
+16 -0.188739 0.067511 0.100000 0.040000 MLC42
+17 -0.027405 0.285532 0.100000 0.040000 MLC51
+18 -0.072194 0.217381 0.100000 0.040000 MLC52
+19 -0.130467 0.119358 0.100000 0.040000 MLC53
+20 -0.119656 0.041473 0.100000 0.040000 MLC54
+21 -0.083927 -0.021961 0.100000 0.040000 MLC55
+22 -0.027810 0.155198 0.100000 0.040000 MLC61
+23 -0.062042 0.088583 0.100000 0.040000 MLC62
+24 -0.025587 0.023975 0.100000 0.040000 MLC63
+25 -0.154623 0.879985 0.100000 0.040000 MLF11
+26 -0.322264 0.823233 0.100000 0.040000 MLF12
+27 -0.478342 0.740223 0.100000 0.040000 MLF13
+28 -0.622338 0.633371 0.100000 0.040000 MLF14
+29 -0.052995 0.810917 0.100000 0.040000 MLF21
+30 -0.193258 0.778479 0.100000 0.040000 MLF22
+31 -0.319702 0.726613 0.100000 0.040000 MLF23
+32 -0.447065 0.639878 0.100000 0.040000 MLF24
+33 -0.551024 0.545805 0.100000 0.040000 MLF25
+34 -0.106993 0.717661 0.100000 0.040000 MLF31
+35 -0.227303 0.683510 0.100000 0.040000 MLF32
+36 -0.344973 0.613898 0.100000 0.040000 MLF33
+37 -0.437794 0.535071 0.100000 0.040000 MLF34
+38 -0.516944 0.440135 0.100000 0.040000 MLF35
+39 -0.037498 0.646457 0.100000 0.040000 MLF41
+40 -0.145663 0.629747 0.100000 0.040000 MLF42
+41 -0.257022 0.575998 0.100000 0.040000 MLF43
+42 -0.344741 0.511350 0.100000 0.040000 MLF44
+43 -0.434608 0.430669 0.100000 0.040000 MLF45
+44 -0.512928 0.325699 0.100000 0.040000 MLF46
+45 -0.065241 0.564676 0.100000 0.040000 MLF51
+46 -0.176866 0.530203 0.100000 0.040000 MLF52
+47 -0.264799 0.476609 0.100000 0.040000 MLF53
+48 -0.344149 0.409817 0.100000 0.040000 MLF54
+49 -0.432009 0.328939 0.100000 0.040000 MLF55
+50 -0.502082 0.225317 0.100000 0.040000 MLF56
+51 -0.108196 0.473300 0.100000 0.040000 MLF61
+52 -0.191454 0.428184 0.100000 0.040000 MLF62
+53 -0.268505 0.371569 0.100000 0.040000 MLF63
+54 -0.343162 0.314227 0.100000 0.040000 MLF64
+55 -0.415355 0.241209 0.100000 0.040000 MLF65
+56 -0.459435 0.157639 0.100000 0.040000 MLF66
+57 -0.484998 0.050963 0.100000 0.040000 MLF67
+58 -0.086701 -0.382545 0.100000 0.040000 MLO11
+59 -0.173621 -0.361571 0.100000 0.040000 MLO12
+60 -0.257557 -0.329066 0.100000 0.040000 MLO13
+61 -0.337129 -0.278810 0.100000 0.040000 MLO14
+62 -0.050176 -0.456757 0.100000 0.040000 MLO21
+63 -0.138937 -0.440153 0.100000 0.040000 MLO22
+64 -0.234625 -0.414329 0.100000 0.040000 MLO23
+65 -0.323700 -0.370345 0.100000 0.040000 MLO24
+66 -0.099528 -0.519048 0.100000 0.040000 MLO31
+67 -0.201576 -0.499713 0.100000 0.040000 MLO32
+68 -0.300736 -0.464088 0.100000 0.040000 MLO33
+69 -0.395767 -0.412426 0.100000 0.040000 MLO34
+70 -0.054171 -0.598130 0.100000 0.040000 MLO41
+71 -0.162924 -0.587463 0.100000 0.040000 MLO42
+72 -0.270457 -0.559057 0.100000 0.040000 MLO43
+73 -0.375045 -0.514503 0.100000 0.040000 MLO44
+74 -0.114841 -0.674066 0.100000 0.040000 MLO51
+75 -0.232779 -0.654920 0.100000 0.040000 MLO52
+76 -0.347032 -0.617457 0.100000 0.040000 MLO53
+77 -0.050706 -0.086860 0.100000 0.040000 MLP11
+78 -0.157880 -0.022819 0.100000 0.040000 MLP12
+79 -0.027384 -0.156541 0.100000 0.040000 MLP21
+80 -0.125969 -0.090281 0.100000 0.040000 MLP22
+81 -0.229468 -0.007021 0.100000 0.040000 MLP23
+82 -0.063851 -0.221282 0.100000 0.040000 MLP31
+83 -0.117483 -0.164444 0.100000 0.040000 MLP32
+84 -0.191075 -0.130343 0.100000 0.040000 MLP33
+85 -0.256310 -0.076997 0.100000 0.040000 MLP34
+86 -0.301408 -0.017428 0.100000 0.040000 MLP35
+87 -0.145628 -0.236552 0.100000 0.040000 MLP41
+88 -0.211609 -0.201084 0.100000 0.040000 MLP42
+89 -0.277557 -0.161143 0.100000 0.040000 MLP43
+90 -0.330491 -0.093163 0.100000 0.040000 MLP44
+91 -0.372987 -0.024823 0.100000 0.040000 MLP45
+92 -0.032003 -0.311166 0.100000 0.040000 MLP51
+93 -0.120201 -0.309697 0.100000 0.040000 MLP52
+94 -0.197411 -0.282930 0.100000 0.040000 MLP53
+95 -0.273221 -0.242434 0.100000 0.040000 MLP54
+96 -0.341326 -0.192353 0.100000 0.040000 MLP55
+97 -0.397869 -0.117824 0.100000 0.040000 MLP56
+98 -0.439023 -0.040798 0.100000 0.040000 MLP57
+99 -0.600517 0.341742 0.100000 0.040000 MLT11
+100 -0.583854 0.221014 0.100000 0.040000 MLT12
+101 -0.546672 0.118228 0.100000 0.040000 MLT13
+102 -0.525679 -0.043954 0.100000 0.040000 MLT14
+103 -0.482366 -0.132402 0.100000 0.040000 MLT15
+104 -0.408785 -0.217740 0.100000 0.040000 MLT16
+105 -0.657080 0.441193 0.100000 0.040000 MLT21
+106 -0.681569 0.225254 0.100000 0.040000 MLT22
+107 -0.647357 0.101107 0.100000 0.040000 MLT23
+108 -0.618158 -0.017119 0.100000 0.040000 MLT24
+109 -0.570925 -0.147553 0.100000 0.040000 MLT25
+110 -0.505869 -0.237678 0.100000 0.040000 MLT26
+111 -0.406336 -0.310886 0.100000 0.040000 MLT27
+112 -0.758025 0.508412 0.100000 0.040000 MLT31
+113 -0.761740 0.316423 0.100000 0.040000 MLT32
+114 -0.751268 0.088675 0.100000 0.040000 MLT33
+115 -0.712573 -0.047448 0.100000 0.040000 MLT34
+116 -0.658112 -0.159355 0.100000 0.040000 MLT35
+117 -0.592395 -0.256839 0.100000 0.040000 MLT36
+118 -0.495312 -0.345113 0.100000 0.040000 MLT37
+119 -0.885393 0.353401 0.100000 0.040000 MLT41
+120 -0.847844 0.160648 0.100000 0.040000 MLT42
+121 -0.823787 -0.043736 0.100000 0.040000 MLT43
+122 -0.758805 -0.175411 0.100000 0.040000 MLT44
+123 -0.684634 -0.280647 0.100000 0.040000 MLT45
+124 -0.591783 -0.373867 0.100000 0.040000 MLT46
+125 -0.476572 -0.454666 0.100000 0.040000 MLT47
+126 -0.983285 0.161080 0.100000 0.040000 MLT51
+127 -0.944753 -0.028756 0.100000 0.040000 MLT52
+128 -0.872989 -0.188195 0.100000 0.040000 MLT53
+129 -0.785517 -0.310620 0.100000 0.040000 MLT54
+130 -0.688014 -0.407791 0.100000 0.040000 MLT55
+131 -0.571347 -0.497554 0.100000 0.040000 MLT56
+132 -0.457303 -0.565438 0.100000 0.040000 MLT57
+133 0.063389 0.426606 0.100000 0.040000 MRC11
+134 0.137902 0.375428 0.100000 0.040000 MRC12
+135 0.219516 0.336386 0.100000 0.040000 MRC13
+136 0.297688 0.277771 0.100000 0.040000 MRC14
+137 0.355955 0.213304 0.100000 0.040000 MRC15
+138 0.404150 0.135598 0.100000 0.040000 MRC16
+139 0.434870 0.040656 0.100000 0.040000 MRC17
+140 0.142678 0.292126 0.100000 0.040000 MRC21
+141 0.219470 0.254066 0.100000 0.040000 MRC22
+142 0.281922 0.196472 0.100000 0.040000 MRC23
+143 0.325059 0.128269 0.100000 0.040000 MRC24
+144 0.361805 0.044213 0.100000 0.040000 MRC25
+145 0.240157 0.132538 0.100000 0.040000 MRC31
+146 0.290750 0.048681 0.100000 0.040000 MRC32
+147 0.178346 0.187415 0.100000 0.040000 MRC41
+148 0.213493 0.062545 0.100000 0.040000 MRC42
+149 0.058440 0.284194 0.100000 0.040000 MRC51
+150 0.101359 0.215083 0.100000 0.040000 MRC52
+151 0.156968 0.115486 0.100000 0.040000 MRC53
+152 0.144211 0.038238 0.100000 0.040000 MRC54
+153 0.106635 -0.024115 0.100000 0.040000 MRC55
+154 0.055338 0.153928 0.100000 0.040000 MRC61
+155 0.088138 0.086634 0.100000 0.040000 MRC62
+156 0.049557 0.022680 0.100000 0.040000 MRC63
+157 0.197726 0.874477 0.100000 0.040000 MRF11
+158 0.364689 0.811426 0.100000 0.040000 MRF12
+159 0.518245 0.722181 0.100000 0.040000 MRF13
+160 0.658136 0.611411 0.100000 0.040000 MRF14
+161 0.095713 0.807816 0.100000 0.040000 MRF21
+162 0.233999 0.772267 0.100000 0.040000 MRF22
+163 0.358821 0.715911 0.100000 0.040000 MRF23
+164 0.484765 0.623142 0.100000 0.040000 MRF24
+165 0.585405 0.526324 0.100000 0.040000 MRF25
+166 0.147633 0.713396 0.100000 0.040000 MRF31
+167 0.265823 0.676341 0.100000 0.040000 MRF32
+168 0.382256 0.601823 0.100000 0.040000 MRF33
+169 0.473850 0.521768 0.100000 0.040000 MRF34
+170 0.548726 0.424836 0.100000 0.040000 MRF35
+171 0.075451 0.644959 0.100000 0.040000 MRF41
+172 0.182924 0.624842 0.100000 0.040000 MRF42
+173 0.292900 0.568899 0.100000 0.040000 MRF43
+174 0.379529 0.501620 0.100000 0.040000 MRF44
+175 0.465778 0.418231 0.100000 0.040000 MRF45
+176 0.541913 0.311405 0.100000 0.040000 MRF46
+177 0.102375 0.561860 0.100000 0.040000 MRF51
+178 0.212879 0.524802 0.100000 0.040000 MRF52
+179 0.299077 0.468924 0.100000 0.040000 MRF53
+180 0.376186 0.400507 0.100000 0.040000 MRF54
+181 0.461150 0.316311 0.100000 0.040000 MRF55
+182 0.527532 0.213125 0.100000 0.040000 MRF56
+183 0.143360 0.469857 0.100000 0.040000 MRF61
+184 0.224730 0.422291 0.100000 0.040000 MRF62
+185 0.301012 0.364856 0.100000 0.040000 MRF63
+186 0.373056 0.305526 0.100000 0.040000 MRF64
+187 0.443172 0.230008 0.100000 0.040000 MRF65
+188 0.482916 0.144546 0.100000 0.040000 MRF66
+189 0.509363 0.039864 0.100000 0.040000 MRF67
+190 0.101312 -0.384464 0.100000 0.040000 MRO11
+191 0.188777 -0.365285 0.100000 0.040000 MRO12
+192 0.274286 -0.333994 0.100000 0.040000 MRO13
+193 0.354824 -0.285987 0.100000 0.040000 MRO14
+194 0.062633 -0.457476 0.100000 0.040000 MRO21
+195 0.152570 -0.440791 0.100000 0.040000 MRO22
+196 0.248565 -0.418432 0.100000 0.040000 MRO23
+197 0.338845 -0.376241 0.100000 0.040000 MRO24
+198 0.111160 -0.521375 0.100000 0.040000 MRO31
+199 0.212466 -0.502957 0.100000 0.040000 MRO32
+200 0.313063 -0.468465 0.100000 0.040000 MRO33
+201 0.409385 -0.418933 0.100000 0.040000 MRO34
+202 0.063270 -0.599845 0.100000 0.040000 MRO41
+203 0.172480 -0.589865 0.100000 0.040000 MRO42
+204 0.279919 -0.563495 0.100000 0.040000 MRO43
+205 0.386742 -0.520993 0.100000 0.040000 MRO44
+206 0.121969 -0.676100 0.100000 0.040000 MRO51
+207 0.240331 -0.658743 0.100000 0.040000 MRO52
+208 0.356156 -0.623026 0.100000 0.040000 MRO53
+209 0.071855 -0.088269 0.100000 0.040000 MRP11
+210 0.180874 -0.026656 0.100000 0.040000 MRP12
+211 0.047839 -0.157479 0.100000 0.040000 MRP21
+212 0.147221 -0.093053 0.100000 0.040000 MRP22
+213 0.252807 -0.012686 0.100000 0.040000 MRP23
+214 0.082012 -0.222790 0.100000 0.040000 MRP31
+215 0.136825 -0.166819 0.100000 0.040000 MRP32
+216 0.210796 -0.134697 0.100000 0.040000 MRP33
+217 0.277587 -0.083946 0.100000 0.040000 MRP34
+218 0.322867 -0.024718 0.100000 0.040000 MRP35
+219 0.162954 -0.240118 0.100000 0.040000 MRP41
+220 0.230510 -0.205793 0.100000 0.040000 MRP42
+221 0.296283 -0.169213 0.100000 0.040000 MRP43
+222 0.351532 -0.101316 0.100000 0.040000 MRP44
+223 0.395383 -0.032706 0.100000 0.040000 MRP45
+224 0.048690 -0.312307 0.100000 0.040000 MRP51
+225 0.137008 -0.312230 0.100000 0.040000 MRP52
+226 0.214275 -0.287336 0.100000 0.040000 MRP53
+227 0.290637 -0.248388 0.100000 0.040000 MRP54
+228 0.360555 -0.199475 0.100000 0.040000 MRP55
+229 0.419086 -0.126737 0.100000 0.040000 MRP56
+230 0.463976 -0.050387 0.100000 0.040000 MRP57
+231 0.628409 0.323946 0.100000 0.040000 MRT11
+232 0.609835 0.205866 0.100000 0.040000 MRT12
+233 0.571838 0.105198 0.100000 0.040000 MRT13
+234 0.544252 -0.054539 0.100000 0.040000 MRT14
+235 0.500732 -0.143104 0.100000 0.040000 MRT15
+236 0.427582 -0.225716 0.100000 0.040000 MRT16
+237 0.685440 0.421411 0.100000 0.040000 MRT21
+238 0.705800 0.208084 0.100000 0.040000 MRT22
+239 0.667392 0.088109 0.100000 0.040000 MRT23
+240 0.637062 -0.030086 0.100000 0.040000 MRT24
+241 0.588417 -0.159092 0.100000 0.040000 MRT25
+242 0.522350 -0.247039 0.100000 0.040000 MRT26
+243 0.422093 -0.318167 0.100000 0.040000 MRT27
+244 0.789789 0.482334 0.100000 0.040000 MRT31
+245 0.786599 0.293212 0.100000 0.040000 MRT32
+246 0.770320 0.070984 0.100000 0.040000 MRT33
+247 0.731214 -0.061690 0.100000 0.040000 MRT34
+248 0.674802 -0.172109 0.100000 0.040000 MRT35
+249 0.607500 -0.268226 0.100000 0.040000 MRT36
+250 0.510484 -0.353209 0.100000 0.040000 MRT37
+251 0.910695 0.324672 0.100000 0.040000 MRT41
+252 0.867982 0.137317 0.100000 0.040000 MRT42
+253 0.839920 -0.060661 0.100000 0.040000 MRT43
+254 0.773256 -0.189639 0.100000 0.040000 MRT44
+255 0.698444 -0.293384 0.100000 0.040000 MRT45
+256 0.604482 -0.385347 0.100000 0.040000 MRT46
+257 0.489291 -0.462983 0.100000 0.040000 MRT47
+258 1.000000 0.135648 0.100000 0.040000 MRT51
+259 0.959092 -0.049055 0.100000 0.040000 MRT52
+260 0.886964 -0.204289 0.100000 0.040000 MRT53
+261 0.796842 -0.324881 0.100000 0.040000 MRT54
+262 0.698769 -0.420596 0.100000 0.040000 MRT55
+263 0.582500 -0.506810 0.100000 0.040000 MRT56
+264 0.467934 -0.572706 0.100000 0.040000 MRT57
+265 0.016063 0.355556 0.100000 0.040000 MZC01
+266 0.014747 0.217488 0.100000 0.040000 MZC02
+267 0.013199 0.087763 0.100000 0.040000 MZC03
+268 0.011197 -0.046263 0.100000 0.040000 MZC04
+269 0.022267 0.897778 0.100000 0.040000 MZF01
+270 0.019840 0.730557 0.100000 0.040000 MZF02
+271 0.017559 0.517279 0.100000 0.040000 MZF03
+272 0.007392 -0.378522 0.100000 0.040000 MZO01
+273 0.005634 -0.528155 0.100000 0.040000 MZO02
+274 0.003722 -0.675585 0.100000 0.040000 MZO03
+275 0.008864 -0.248776 0.100000 0.040000 MZP01
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EEG1005.lay b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EEG1005.lay
new file mode 100644
index 0000000..a600468
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EEG1005.lay
@@ -0,0 +1,337 @@
+1	-0.485328	1.493835	0.069221	0.051916	Fp1
+2	0.000000	1.570696	0.069221	0.051916	Fpz
+3	0.485501	1.493884	0.069221	0.051916	Fp2
+4	-1.154207	1.588656	0.069221	0.051916	AF9
+5	-0.923319	1.270781	0.069221	0.051916	AF7
+6	-0.706117	1.226029	0.069221	0.051916	AF5
+7	-0.477022	1.197254	0.069221	0.051916	AF3
+8	-0.240008	1.182594	0.069221	0.051916	AF1
+9	0.000000	1.178022	0.069221	0.051916	AFz
+10	0.240008	1.182594	0.069221	0.051916	AF2
+11	0.476904	1.197159	0.069221	0.051916	AF4
+12	0.706117	1.226029	0.069221	0.051916	AF6
+13	0.923319	1.270781	0.069221	0.051916	AF8
+14	1.154207	1.588656	0.069221	0.051916	AF10
+15	-1.588376	1.154294	0.069221	0.051916	F9
+16	-1.270781	0.923319	0.069221	0.051916	F7
+17	-0.968950	0.852434	0.069221	0.051916	F5
+18	-0.652084	0.812357	0.069221	0.051916	F3
+19	-0.327689	0.791876	0.069221	0.051916	F1
+20	0.000000	0.785398	0.069221	0.051916	Fz
+21	0.327689	0.791876	0.069221	0.051916	F2
+22	0.652084	0.812357	0.069221	0.051916	F4
+23	0.968950	0.852434	0.069221	0.051916	F6
+24	1.270781	0.923319	0.069221	0.051916	F8
+25	1.588496	1.154168	0.069221	0.051916	F10
+26	-1.867677	0.606883	0.069221	0.051916	FT9
+27	-1.493930	0.485359	0.069221	0.051916	FT7
+28	-1.126134	0.436152	0.069221	0.051916	FC5
+29	-0.752811	0.409634	0.069221	0.051916	FC3
+30	-0.376942	0.396836	0.069221	0.051916	FC1
+31	0.000000	0.392844	0.069221	0.051916	FCz
+32	0.376942	0.396836	0.069221	0.051916	FC2
+33	0.752811	0.409634	0.069221	0.051916	FC4
+34	1.126134	0.436152	0.069221	0.051916	FC6
+35	1.493930	0.485359	0.069221	0.051916	FT8
+36	1.867677	0.606883	0.069221	0.051916	FT10
+37	-1.963487	-0.000213	0.069221	0.051916	T9
+38	-1.570796	0.000000	0.069221	0.051916	T7
+39	-1.178106	0.000128	0.069221	0.051916	C5
+40	-0.785398	0.000111	0.069221	0.051916	C3
+41	-0.392736	0.000205	0.069221	0.051916	C1
+42	0.000000	0.000200	0.069221	0.051916	Cz
+43	0.392736	0.000103	0.069221	0.051916	C2
+44	0.785398	0.000111	0.069221	0.051916	C4
+45	1.178106	0.000128	0.069221	0.051916	C6
+46	1.570796	-0.000000	0.069221	0.051916	T8
+47	1.963487	-0.000000	0.069221	0.051916	T10
+48	-1.867677	-0.606883	0.069221	0.051916	TP9
+49	-1.494026	-0.485389	0.069221	0.051916	TP7
+50	-1.126048	-0.435839	0.069221	0.051916	CP5
+51	-0.752775	-0.409460	0.069221	0.051916	CP3
+52	-0.376804	-0.396486	0.069221	0.051916	CP1
+53	-0.000000	-0.392551	0.069221	0.051916	CPz
+54	0.376804	-0.396486	0.069221	0.051916	CP2
+55	0.752795	-0.409357	0.069221	0.051916	CP4
+56	1.126048	-0.435839	0.069221	0.051916	CP6
+57	1.494026	-0.485389	0.069221	0.051916	TP8
+58	1.867603	-0.607072	0.069221	0.051916	TP10
+59	-1.588496	-1.154168	0.069221	0.051916	P9
+60	-1.270862	-0.923378	0.069221	0.051916	P7
+61	-0.969077	-0.852293	0.069221	0.051916	P5
+62	-0.652231	-0.811998	0.069221	0.051916	P3
+63	-0.327776	-0.791360	0.069221	0.051916	P1
+64	-0.000000	-0.785257	0.069221	0.051916	Pz
+65	0.327776	-0.791360	0.069221	0.051916	P2
+66	0.652231	-0.811998	0.069221	0.051916	P4
+67	0.969077	-0.852293	0.069221	0.051916	P6
+68	1.270862	-0.923378	0.069221	0.051916	P8
+69	1.588496	-1.154168	0.069221	0.051916	P10
+70	-1.154207	-1.588656	0.069221	0.051916	PO9
+71	-0.923319	-1.270781	0.069221	0.051916	PO7
+72	-0.706303	-1.225606	0.069221	0.051916	PO5
+73	-0.476710	-1.197888	0.069221	0.051916	PO3
+74	-0.240097	-1.182523	0.069221	0.051916	PO1
+75	-0.000000	-1.178022	0.069221	0.051916	POz
+76	0.240223	-1.182505	0.069221	0.051916	PO2
+77	0.476710	-1.197888	0.069221	0.051916	PO4
+78	0.706303	-1.225606	0.069221	0.051916	PO6
+79	0.923319	-1.270781	0.069221	0.051916	PO8
+80	1.154207	-1.588656	0.069221	0.051916	PO10
+81	-0.485359	-1.493930	0.069221	0.051916	O1
+82	-0.000000	-1.570796	0.069221	0.051916	Oz
+83	0.485359	-1.493930	0.069221	0.051916	O2
+84	-0.606613	-1.867239	0.069221	0.051916	I1
+85	-0.000000	-1.963478	0.069221	0.051916	Iz
+86	0.606613	-1.867239	0.069221	0.051916	I2
+87	-0.802226	1.574520	0.069221	0.051916	AFp9h
+88	-0.626475	1.393612	0.069221	0.051916	AFp7h
+89	-0.451133	1.382849	0.069221	0.051916	AFp5h
+90	-0.271959	1.376738	0.069221	0.051916	AFp3h
+91	-0.090887	1.374548	0.069221	0.051916	AFp1h
+92	0.090887	1.374548	0.069221	0.051916	AFp2h
+93	0.271959	1.376738	0.069221	0.051916	AFp4h
+94	0.451133	1.382849	0.069221	0.051916	AFp6h
+95	0.626475	1.393612	0.069221	0.051916	AFp8h
+96	0.802226	1.574520	0.069221	0.051916	AFp10h
+97	-1.249550	1.249550	0.069221	0.051916	AFF9h
+98	-0.982948	1.075122	0.069221	0.051916	AFF7h
+99	-0.713694	1.024626	0.069221	0.051916	AFF5h
+100	-0.432315	0.996167	0.069221	0.051916	AFF3h
+101	-0.144727	0.983315	0.069221	0.051916	AFF1h
+102	0.144727	0.983315	0.069221	0.051916	AFF2h
+103	0.432315	0.996167	0.069221	0.051916	AFF4h
+104	0.713694	1.024626	0.069221	0.051916	AFF6h
+105	0.982881	1.075049	0.069221	0.051916	AFF8h
+106	1.249550	1.249550	0.069221	0.051916	AFF10h
+107	-1.574645	0.802293	0.069221	0.051916	FFT9h
+108	-1.232019	0.675885	0.069221	0.051916	FFT7h
+109	-0.886990	0.627578	0.069221	0.051916	FFC5h
+110	-0.534535	0.601827	0.069221	0.051916	FFC3h
+111	-0.178478	0.590622	0.069221	0.051916	FFC1h
+112	0.178478	0.590622	0.069221	0.051916	FFC2h
+113	0.534535	0.601827	0.069221	0.051916	FFC4h
+114	0.886990	0.627578	0.069221	0.051916	FFC6h
+115	1.232019	0.675885	0.069221	0.051916	FFT8h
+116	1.574645	0.802293	0.069221	0.051916	FFT10h
+117	-1.745475	0.276484	0.069221	0.051916	FTT9h
+118	-1.358553	0.230430	0.069221	0.051916	FTT7h
+119	-0.971386	0.211155	0.069221	0.051916	FCC5h
+120	-0.583084	0.201295	0.069221	0.051916	FCC3h
+121	-0.194460	0.196994	0.069221	0.051916	FCC1h
+122	0.194460	0.196994	0.069221	0.051916	FCC2h
+123	0.583084	0.201295	0.069221	0.051916	FCC4h
+124	0.971386	0.211155	0.069221	0.051916	FCC6h
+125	1.358553	0.230430	0.069221	0.051916	FTT8h
+126	1.745475	0.276484	0.069221	0.051916	FTT10h
+127	-1.745506	-0.276309	0.069221	0.051916	TTP9h
+128	-1.358573	-0.230293	0.069221	0.051916	TTP7h
+129	-0.971375	-0.211008	0.069221	0.051916	CCP5h
+130	-0.583085	-0.200906	0.069221	0.051916	CCP3h
+131	-0.194448	-0.196679	0.069221	0.051916	CCP1h
+132	0.194448	-0.196679	0.069221	0.051916	CCP2h
+133	0.583078	-0.201010	0.069221	0.051916	CCP4h
+134	0.971375	-0.211008	0.069221	0.051916	CCP6h
+135	1.358573	-0.230293	0.069221	0.051916	TTP8h
+136	1.745475	-0.276484	0.069221	0.051916	TTP10h
+137	-1.574667	-0.802213	0.069221	0.051916	TPP9h
+138	-1.232021	-0.675979	0.069221	0.051916	TPP7h
+139	-0.887025	-0.627306	0.069221	0.051916	CPP5h
+140	-0.534524	-0.601312	0.069221	0.051916	CPP3h
+141	-0.178473	-0.590144	0.069221	0.051916	CPP1h
+142	0.178473	-0.590144	0.069221	0.051916	CPP2h
+143	0.534524	-0.601312	0.069221	0.051916	CPP4h
+144	0.887025	-0.627306	0.069221	0.051916	CPP6h
+145	1.231976	-0.676032	0.069221	0.051916	TPP8h
+146	1.574586	-0.802352	0.069221	0.051916	TPP10h
+147	-1.249639	-1.249639	0.069221	0.051916	PPO9h
+148	-0.983137	-1.074700	0.069221	0.051916	PPO7h
+149	-0.713821	-1.024109	0.069221	0.051916	PPO5h
+150	-0.432363	-0.995909	0.069221	0.051916	PPO3h
+151	-0.144761	-0.982953	0.069221	0.051916	PPO1h
+152	0.144761	-0.982953	0.069221	0.051916	PPO2h
+153	0.432253	-0.995937	0.069221	0.051916	PPO4h
+154	0.713967	-1.023998	0.069221	0.051916	PPO6h
+155	0.983137	-1.074700	0.069221	0.051916	PPO8h
+156	1.249639	-1.249639	0.069221	0.051916	PPO10h
+157	-0.802293	-1.574645	0.069221	0.051916	POO9h
+158	-0.626849	-1.393237	0.069221	0.051916	POO7h
+159	-0.451236	-1.382715	0.069221	0.051916	POO5h
+160	-0.271951	-1.377572	0.069221	0.051916	POO3h
+161	-0.090910	-1.374606	0.069221	0.051916	POO1h
+162	0.090910	-1.374606	0.069221	0.051916	POO2h
+163	0.271951	-1.377572	0.069221	0.051916	POO4h
+164	0.451236	-1.382715	0.069221	0.051916	POO6h
+165	0.626849	-1.393237	0.069221	0.051916	POO8h
+166	0.802293	-1.574645	0.069221	0.051916	POO10h
+167	-0.276453	-1.745460	0.069221	0.051916	OI1h
+168	0.276453	-1.745460	0.069221	0.051916	OI2h
+169	-0.245655	1.551367	0.069221	0.051916	Fp1h
+170	0.245655	1.551367	0.069221	0.051916	Fp2h
+171	-1.038573	1.429729	0.069221	0.051916	AF9h
+172	-0.816811	1.245775	0.069221	0.051916	AF7h
+173	-0.592502	1.210176	0.069221	0.051916	AF5h
+174	-0.359066	1.188527	0.069221	0.051916	AF3h
+175	-0.120203	1.179114	0.069221	0.051916	AF1h
+176	0.120212	1.179076	0.069221	0.051916	AF2h
+177	0.359066	1.188527	0.069221	0.051916	AF4h
+178	0.592545	1.210263	0.069221	0.051916	AF6h
+179	0.816811	1.245775	0.069221	0.051916	AF8h
+180	1.038668	1.429679	0.069221	0.051916	AF10h
+181	-1.429588	1.038701	0.069221	0.051916	F9h
+182	-1.122287	0.883303	0.069221	0.051916	F7h
+183	-0.811863	0.829210	0.069221	0.051916	F5h
+184	-0.490601	0.800049	0.069221	0.051916	F3h
+185	-0.164017	0.787126	0.069221	0.051916	F1h
+186	0.164017	0.787126	0.069221	0.051916	F2h
+187	0.490601	0.800049	0.069221	0.051916	F4h
+188	0.811863	0.829210	0.069221	0.051916	F6h
+189	1.122287	0.883303	0.069221	0.051916	F8h
+190	1.429588	1.038701	0.069221	0.051916	F10h
+191	-1.680799	0.546075	0.069221	0.051916	FT9h
+192	-1.310995	0.457012	0.069221	0.051916	FT7h
+193	-0.939857	0.420814	0.069221	0.051916	FC5h
+194	-0.565142	0.401905	0.069221	0.051916	FC3h
+195	-0.188491	0.393826	0.069221	0.051916	FC1h
+196	0.188491	0.393826	0.069221	0.051916	FC2h
+197	0.565142	0.401905	0.069221	0.051916	FC4h
+198	0.939857	0.420814	0.069221	0.051916	FC6h
+199	1.310995	0.457012	0.069221	0.051916	FT8h
+200	1.680740	0.546236	0.069221	0.051916	FT10h
+201	-1.767191	0.000000	0.069221	0.051916	T9h
+202	-1.374500	0.000000	0.069221	0.051916	T7h
+203	-0.981850	0.000118	0.069221	0.051916	C5h
+204	-0.589058	0.000212	0.069221	0.051916	C3h
+205	-0.196395	0.000101	0.069221	0.051916	C1h
+206	0.196395	0.000201	0.069221	0.051916	C2h
+207	0.589058	0.000212	0.069221	0.051916	C4h
+208	0.981850	0.000118	0.069221	0.051916	C6h
+209	1.374500	-0.000000	0.069221	0.051916	T8h
+210	1.767191	-0.000000	0.069221	0.051916	T10h
+211	-1.680646	-0.546088	0.069221	0.051916	TP9h
+212	-1.310970	-0.456960	0.069221	0.051916	TP7h
+213	-0.939815	-0.420500	0.069221	0.051916	CP5h
+214	-0.565062	-0.401491	0.069221	0.051916	CP3h
+215	-0.188515	-0.393352	0.069221	0.051916	CP1h
+216	0.188515	-0.393352	0.069221	0.051916	CP2h
+217	0.565062	-0.401491	0.069221	0.051916	CP4h
+218	0.939815	-0.420500	0.069221	0.051916	CP6h
+219	1.310970	-0.456960	0.069221	0.051916	TP8h
+220	1.680646	-0.546088	0.069221	0.051916	TP10h
+221	-1.429668	-1.038758	0.069221	0.051916	P9h
+222	-1.122286	-0.883271	0.069221	0.051916	P7h
+223	-0.812037	-0.829137	0.069221	0.051916	P5h
+224	-0.490726	-0.799336	0.069221	0.051916	P3h
+225	-0.164146	-0.786762	0.069221	0.051916	P1h
+226	0.164146	-0.786762	0.069221	0.051916	P2h
+227	0.490600	-0.799436	0.069221	0.051916	P4h
+228	0.812037	-0.829137	0.069221	0.051916	P6h
+229	1.122286	-0.883271	0.069221	0.051916	P8h
+230	1.429668	-1.038758	0.069221	0.051916	P10h
+231	-1.038821	-1.429709	0.069221	0.051916	PO9h
+232	-0.816502	-1.246067	0.069221	0.051916	PO7h
+233	-0.593079	-1.209372	0.069221	0.051916	PO5h
+234	-0.359230	-1.188332	0.069221	0.051916	PO3h
+235	-0.120221	-1.179168	0.069221	0.051916	PO1h
+236	0.120348	-1.179159	0.069221	0.051916	PO2h
+237	0.359230	-1.188332	0.069221	0.051916	PO4h
+238	0.593079	-1.209372	0.069221	0.051916	PO6h
+239	0.816502	-1.246067	0.069221	0.051916	PO8h
+240	1.038710	-1.429804	0.069221	0.051916	PO10h
+241	-0.245671	-1.551466	0.069221	0.051916	O1h
+242	0.245671	-1.551466	0.069221	0.051916	O2h
+243	-0.307129	-1.939338	0.069221	0.051916	I1h
+244	0.307129	-1.939338	0.069221	0.051916	I2h
+245	-0.891328	1.749684	0.069221	0.051916	AFp9
+246	-0.713143	1.399582	0.069221	0.051916	AFp7
+247	-0.539182	1.387878	0.069221	0.051916	AFp5
+248	-0.361777	1.379743	0.069221	0.051916	AFp3
+249	-0.181624	1.374948	0.069221	0.051916	AFp1
+250	0.000000	1.374461	0.069221	0.051916	AFpz
+251	0.181624	1.374948	0.069221	0.051916	AFp2
+252	0.361802	1.379839	0.069221	0.051916	AFp4
+253	0.539182	1.387878	0.069221	0.051916	AFp6
+254	0.713143	1.399582	0.069221	0.051916	AFp8
+255	0.891489	1.749582	0.069221	0.051916	AFp10
+256	-1.388504	1.388504	0.069221	0.051916	AFF9
+257	-1.110721	1.110721	0.069221	0.051916	AFF7
+258	-0.850463	1.046170	0.069221	0.051916	AFF5
+259	-0.574170	1.008058	0.069221	0.051916	AFF3
+260	-0.288981	0.988233	0.069221	0.051916	AFF1
+261	0.000000	0.981739	0.069221	0.051916	AFFz
+262	0.288981	0.988233	0.069221	0.051916	AFF2
+263	0.574170	1.008058	0.069221	0.051916	AFF4
+264	0.850463	1.046170	0.069221	0.051916	AFF6
+265	1.110721	1.110721	0.069221	0.051916	AFF8
+266	1.388504	1.388504	0.069221	0.051916	AFF10
+267	-1.749576	0.891591	0.069221	0.051916	FFT9
+268	-1.399582	0.713143	0.069221	0.051916	FFT7
+269	-1.060830	0.648168	0.069221	0.051916	FFC5
+270	-0.711350	0.612390	0.069221	0.051916	FFC3
+271	-0.356750	0.594619	0.069221	0.051916	FFC1
+272	0.000000	0.589085	0.069221	0.051916	FFCz
+273	0.356750	0.594619	0.069221	0.051916	FFC2
+274	0.711350	0.612390	0.069221	0.051916	FFC4
+275	1.060749	0.648119	0.069221	0.051916	FFC6
+276	1.399582	0.713143	0.069221	0.051916	FFT8
+277	1.749576	0.891591	0.069221	0.051916	FFT10
+278	-1.939489	0.307119	0.069221	0.051916	FTT9
+279	-1.551442	0.245824	0.069221	0.051916	FTT7
+280	-1.165132	0.219351	0.069221	0.051916	FCC5
+281	-0.777319	0.205363	0.069221	0.051916	FCC3
+282	-0.388766	0.198515	0.069221	0.051916	FCC1
+283	0.000000	0.196434	0.069221	0.051916	FCCz
+284	0.388766	0.198515	0.069221	0.051916	FCC2
+285	0.777319	0.205363	0.069221	0.051916	FCC4
+286	1.165132	0.219351	0.069221	0.051916	FCC6
+287	1.551466	0.245671	0.069221	0.051916	FTT8
+288	1.939489	0.307119	0.069221	0.051916	FTT10
+289	-1.939553	-0.307197	0.069221	0.051916	TTP9
+290	-1.551565	-0.245687	0.069221	0.051916	TTP7
+291	-1.165206	-0.219084	0.069221	0.051916	CCP5
+292	-0.777275	-0.205069	0.069221	0.051916	CCP3
+293	-0.388806	-0.198175	0.069221	0.051916	CCP1
+294	-0.000000	-0.196218	0.069221	0.051916	CCPz
+295	0.388801	-0.198275	0.069221	0.051916	CCP2
+296	0.777275	-0.205069	0.069221	0.051916	CCP4
+297	1.165206	-0.219084	0.069221	0.051916	CCP6
+298	1.551565	-0.245687	0.069221	0.051916	TTP8
+299	1.939553	-0.307197	0.069221	0.051916	TTP10
+300	-1.749664	-0.891531	0.069221	0.051916	TPP9
+301	-1.399671	-0.713188	0.069221	0.051916	TPP7
+302	-1.060852	-0.647970	0.069221	0.051916	CPP5
+303	-0.711356	-0.612379	0.069221	0.051916	CPP3
+304	-0.356663	-0.594548	0.069221	0.051916	CPP1
+305	-0.000000	-0.588863	0.069221	0.051916	CPPz
+306	0.356778	-0.594448	0.069221	0.051916	CPP2
+307	0.711384	-0.612287	0.069221	0.051916	CPP4
+308	1.060852	-0.647970	0.069221	0.051916	CPP6
+309	1.399671	-0.713188	0.069221	0.051916	TPP8
+310	1.749664	-0.891531	0.069221	0.051916	TPP10
+311	-1.388427	-1.388427	0.069221	0.051916	PPO9
+312	-1.110721	-1.110721	0.069221	0.051916	PPO7
+313	-0.850511	-1.046155	0.069221	0.051916	PPO5
+314	-0.574228	-1.007462	0.069221	0.051916	PPO3
+315	-0.289055	-0.987715	0.069221	0.051916	PPO1
+316	-0.000000	-0.981655	0.069221	0.051916	PPOz
+317	0.289055	-0.987715	0.069221	0.051916	PPO2
+318	0.574228	-1.007462	0.069221	0.051916	PPO4
+319	0.850454	-1.046223	0.069221	0.051916	PPO6
+320	1.110721	-1.110721	0.069221	0.051916	PPO8
+321	1.388427	-1.388427	0.069221	0.051916	PPO10
+322	-0.891143	-1.749540	0.069221	0.051916	POO9
+323	-0.713143	-1.399582	0.069221	0.051916	POO7
+324	-0.539360	-1.387717	0.069221	0.051916	POO5
+325	-0.362020	-1.379310	0.069221	0.051916	POO3
+326	-0.181486	-1.375484	0.069221	0.051916	POO1
+327	-0.000000	-1.374422	0.069221	0.051916	POOz
+328	0.181626	-1.375468	0.069221	0.051916	POO2
+329	0.362020	-1.379310	0.069221	0.051916	POO4
+330	0.539360	-1.387717	0.069221	0.051916	POO6
+331	0.713143	-1.399582	0.069221	0.051916	POO8
+332	0.891143	-1.749540	0.069221	0.051916	POO10
+333	-0.546073	-1.680586	0.069221	0.051916	OI1
+334	-0.000000	-1.767132	0.069221	0.051916	OIz
+335	0.546073	-1.680586	0.069221	0.051916	OI2
+336	-1.963487	1.749684	0.069221	0.051916	COMNT
+337	1.963487	1.749684	0.069221	0.051916	SCALE
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EGI256.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EGI256.lout
new file mode 100644
index 0000000..bc9076a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/EGI256.lout
@@ -0,0 +1,259 @@
+-42.19    43.52   -41.70    28.71
+001	0.235020883	0.231411875	0.023840595	0.024283894	EEG 001
+002	0.180062322	0.24066255	0.023840595	0.024283894	EEG 002
+003	0.134498312	0.239722125	0.023840595	0.024283894	EEG 003
+004	0.098183698	0.230899463	0.023840595	0.024283894	EEG 004
+005	0.066117291	0.206774428	0.023840595	0.024283894	EEG 005
+006	0.038417416	0.175224454	0.023840595	0.024283894	EEG 006
+007	0.019093339	0.142334211	0.023840595	0.024283894	EEG 007
+008	0	0.106825455	0.023840595	0.024283894	EEG 008
+009	-0.017539353	0.062826857	0.023840595	0.024283894	EEG 009
+010	0.181942866	0.296413546	0.023840595	0.024283894	EEG 010
+011	0.13038807	0.293232492	0.023840595	0.024283894	EEG 011
+012	0.084273706	0.277147412	0.023840595	0.024283894	EEG 012
+013	0.050175359	0.251802841	0.023840595	0.024283894	EEG 013
+014	0.021773201	0.21699757	0.023840595	0.024283894	EEG 014
+015	0	0.180469732	0.023840595	0.024283894	EEG 015
+016	-0.019093339	0.142334211	0.023840595	0.024283894	EEG 016
+017	-0.036255497	0.09269913	0.023840595	0.024283894	EEG 017
+018	0.113098849	0.348229946	0.023840595	0.024283894	EEG 018
+019	0.069000992	0.329792276	0.023840595	0.024283894	EEG 019
+020	0.029776066	0.297506089	0.023840595	0.024283894	EEG 020
+021	0	0.258687873	0.023840595	0.024283894	EEG 021
+022	-0.021773201	0.21699757	0.023840595	0.024283894	EEG 022
+023	-0.038417416	0.175224454	0.023840595	0.024283894	EEG 023
+024	-0.055153266	0.126645408	0.023840595	0.024283894	EEG 024
+025	0.036940443	0.37703699	0.023840595	0.024283894	EEG 025
+026	0	0.343720309	0.023840595	0.024283894	EEG 026
+027	-0.029776066	0.297506089	0.023840595	0.024283894	EEG 027
+028	-0.050175359	0.251802841	0.023840595	0.024283894	EEG 028
+029	-0.066117291	0.206774428	0.023840595	0.024283894	EEG 029
+030	-0.079525249	0.158534511	0.023840595	0.024283894	EEG 030
+031	0	0.415202995	0.023840595	0.024283894	EEG 031
+032	-0.036940443	0.37703699	0.023840595	0.024283894	EEG 032
+033	-0.069000992	0.329792276	0.023840595	0.024283894	EEG 033
+034	-0.084273706	0.277147412	0.023840595	0.024283894	EEG 034
+035	-0.098183698	0.230899463	0.023840595	0.024283894	EEG 035
+036	-0.098479668	0.187945851	0.023840595	0.024283894	EEG 036
+037	-0.113098849	0.348229946	0.023840595	0.024283894	EEG 037
+038	-0.13038807	0.293232492	0.023840595	0.024283894	EEG 038
+039	-0.134498312	0.239722125	0.023840595	0.024283894	EEG 039
+040	-0.130890927	0.191286703	0.023840595	0.024283894	EEG 040
+041	-0.116009122	0.150111634	0.023840595	0.024283894	EEG 041
+042	-0.094840856	0.116834626	0.023840595	0.024283894	EEG 042
+043	-0.076990927	0.086006856	0.023840595	0.024283894	EEG 043
+044	-0.055587556	0.053147386	0.023840595	0.024283894	EEG 044
+045	-0.029699902	0.019405615	0.023840595	0.024283894	EEG 045
+046	-0.181942866	0.296413546	0.023840595	0.024283894	EEG 046
+047	-0.180062322	0.24066255	0.023840595	0.024283894	EEG 047
+048	-0.17285275	0.187572361	0.023840595	0.024283894	EEG 048
+049	-0.156410469	0.141423921	0.023840595	0.024283894	EEG 049
+050	-0.132742164	0.104084677	0.023840595	0.024283894	EEG 050
+051	-0.108362109	0.07207399	0.023840595	0.024283894	EEG 051
+052	-0.087032894	0.041560718	0.023840595	0.024283894	EEG 052
+053	-0.057033727	0.006635523	0.023840595	0.024283894	EEG 053
+054	-0.235020883	0.231411875	0.023840595	0.024283894	EEG 054
+055	-0.21721779	0.1735557	0.023840595	0.024283894	EEG 055
+056	-0.196096643	0.121848964	0.023840595	0.024283894	EEG 056
+057	-0.169122926	0.084563661	0.023840595	0.024283894	EEG 057
+058	-0.142622009	0.056366314	0.023840595	0.024283894	EEG 058
+059	-0.11607512	0.026701856	0.023840595	0.024283894	EEG 059
+060	-0.086703907	-0.006962228	0.023840595	0.024283894	EEG 060
+061	-0.271241865	0.131933691	0.023840595	0.024283894	EEG 061
+062	-0.237546771	0.082946276	0.023840595	0.024283894	EEG 062
+063	-0.20434592	0.049982898	0.023840595	0.024283894	EEG 063
+064	-0.175001011	0.027246728	0.023840595	0.024283894	EEG 064
+065	-0.144183544	0.006552794	0.023840595	0.024283894	EEG 065
+066	-0.117629392	-0.020953359	0.023840595	0.024283894	EEG 066
+067	-0.32017538	0.064356008	0.023840595	0.024283894	EEG 067
+068	-0.277394242	0.035815905	0.023840595	0.024283894	EEG 068
+069	-0.241320281	0.000293927	0.023840595	0.024283894	EEG 069
+070	-0.202988841	-0.017932839	0.023840595	0.024283894	EEG 070
+071	-0.170816713	-0.027588171	0.023840595	0.024283894	EEG 071
+072	-0.142940198	-0.038849379	0.023840595	0.024283894	EEG 072
+073	-0.364333595	-0.009526546	0.023840595	0.024283894	EEG 073
+074	-0.227828247	-0.074709585	0.023840595	0.024283894	EEG 074
+075	-0.186334435	-0.079063391	0.023840595	0.024283894	EEG 075
+076	-0.152612576	-0.080357072	0.023840595	0.024283894	EEG 076
+077	-0.122986168	-0.070147895	0.023840595	0.024283894	EEG 077
+078	-0.092860036	-0.059724481	0.023840595	0.024283894	EEG 078
+079	-0.063373134	-0.044961361	0.023840595	0.024283894	EEG 079
+080	-0.033138055	-0.028518783	0.023840595	0.024283894	EEG 080
+081	0	-0.006448832	0.023840595	0.024283894	EEG 081
+082	-0.384631539	-0.115563191	0.023840595	0.024283894	EEG 082
+083	-0.230231782	-0.157310034	0.023840595	0.024283894	EEG 083
+084	-0.201004697	-0.132397774	0.023840595	0.024283894	EEG 084
+085	-0.158874627	-0.130476761	0.023840595	0.024283894	EEG 085
+086	-0.125435162	-0.117006671	0.023840595	0.024283894	EEG 086
+087	-0.093818787	-0.102184911	0.023840595	0.024283894	EEG 087
+088	-0.063690231	-0.085009427	0.023840595	0.024283894	EEG 088
+089	-0.034226984	-0.069230419	0.023840595	0.024283894	EEG 089
+090	0	-0.043222928	0.023840595	0.024283894	EEG 090
+091	-0.376606255	-0.236283155	0.023840595	0.024283894	EEG 091
+092	-0.320841548	-0.246056831	0.023840595	0.024283894	EEG 092
+093	-0.264511728	-0.247963981	0.023840595	0.024283894	EEG 093
+094	-0.235119884	-0.22133859	0.023840595	0.024283894	EEG 094
+095	-0.200260526	-0.201104991	0.023840595	0.024283894	EEG 095
+096	-0.16089296	-0.182074387	0.023840595	0.024283894	EEG 096
+097	-0.123315473	-0.169463521	0.023840595	0.024283894	EEG 097
+098	-0.093577895	-0.148219199	0.023840595	0.024283894	EEG 098
+099	-0.062757092	-0.127508907	0.023840595	0.024283894	EEG 099
+100	-0.033465994	-0.105718695	0.023840595	0.024283894	EEG 100
+101	0	-0.123212516	0.023840595	0.024283894	EEG 101
+102	-0.309236143	-0.330394078	0.023840595	0.024283894	EEG 102
+103	-0.264402365	-0.317489099	0.023840595	0.024283894	EEG 103
+104	-0.215607267	-0.297916345	0.023840595	0.024283894	EEG 104
+105	-0.194042397	-0.266008675	0.023840595	0.024283894	EEG 105
+106	-0.156365562	-0.241406814	0.023840595	0.024283894	EEG 106
+107	-0.117304936	-0.222733874	0.023840595	0.024283894	EEG 107
+108	-0.08375779	-0.200153314	0.023840595	0.024283894	EEG 108
+109	-0.056791169	-0.173578646	0.023840595	0.024283894	EEG 109
+110	-0.028490371	-0.146436894	0.023840595	0.024283894	EEG 110
+111	-0.235425173	-0.391140875	0.023840595	0.024283894	EEG 111
+112	-0.20031364	-0.367491502	0.023840595	0.024283894	EEG 112
+113	-0.160198907	-0.335751192	0.023840595	0.024283894	EEG 113
+114	-0.148968879	-0.297338854	0.023840595	0.024283894	EEG 114
+115	-0.09913078	-0.279612547	0.023840595	0.024283894	EEG 115
+116	-0.06561825	-0.2506161	0.023840595	0.024283894	EEG 116
+117	-0.036528871	-0.219887692	0.023840595	0.024283894	EEG 117
+118	-0.01914107	-0.187670154	0.023840595	0.024283894	EEG 118
+119	0	-0.159638357	0.023840595	0.024283894	EEG 119
+120	-0.178151028	-0.424680349	0.023840595	0.024283894	EEG 120
+121	-0.142872329	-0.395550026	0.023840595	0.024283894	EEG 121
+122	-0.106134228	-0.360226213	0.023840595	0.024283894	EEG 122
+123	-0.074015552	-0.317797572	0.023840595	0.024283894	EEG 123
+124	-0.049414286	-0.292978277	0.023840595	0.024283894	EEG 124
+125	-0.020856534	-0.260833466	0.023840595	0.024283894	EEG 125
+126	0	-0.223512279	0.023840595	0.024283894	EEG 126
+127	0.01914107	-0.187670154	0.023840595	0.024283894	EEG 127
+128	0.028490371	-0.146436894	0.023840595	0.024283894	EEG 128
+129	0.033465994	-0.105718695	0.023840595	0.024283894	EEG 129
+130	0.034226984	-0.069230419	0.023840595	0.024283894	EEG 130
+131	0.033138055	-0.028518783	0.023840595	0.024283894	EEG 131
+132	0.029699902	0.019405615	0.023840595	0.024283894	EEG 132
+133	-0.11640639	-0.433892117	0.023840595	0.024283894	EEG 133
+134	-0.085226238	-0.411234759	0.023840595	0.024283894	EEG 134
+135	-0.054701526	-0.36252645	0.023840595	0.024283894	EEG 135
+136	-0.02321088	-0.335534555	0.023840595	0.024283894	EEG 136
+137	0	-0.303018075	0.023840595	0.024283894	EEG 137
+138	0.020856534	-0.260833466	0.023840595	0.024283894	EEG 138
+139	0.036528871	-0.219887692	0.023840595	0.024283894	EEG 139
+140	0.056791169	-0.173578646	0.023840595	0.024283894	EEG 140
+141	0.062757092	-0.127508907	0.023840595	0.024283894	EEG 141
+142	0.063690231	-0.085009427	0.023840595	0.024283894	EEG 142
+143	0.063373134	-0.044961361	0.023840595	0.024283894	EEG 143
+144	0.057033727	0.006635523	0.023840595	0.024283894	EEG 144
+145	-0.061719572	-0.45	0.023840595	0.024283894	EEG 145
+146	-0.032116421	-0.419782634	0.023840595	0.024283894	EEG 146
+147	-9.99E-17	-0.379508917	0.023840595	0.024283894	EEG 147
+148	0.02321088	-0.335534555	0.023840595	0.024283894	EEG 148
+149	0.049414286	-0.292978277	0.023840595	0.024283894	EEG 149
+150	0.06561825	-0.2506161	0.023840595	0.024283894	EEG 150
+151	0.08375779	-0.200153314	0.023840595	0.024283894	EEG 151
+152	0.093577895	-0.148219199	0.023840595	0.024283894	EEG 152
+153	0.093818787	-0.102184911	0.023840595	0.024283894	EEG 153
+154	0.092860036	-0.059724481	0.023840595	0.024283894	EEG 154
+155	0.086703907	-0.006962228	0.023840595	0.024283894	EEG 155
+156	0.032116421	-0.419782634	0.023840595	0.024283894	EEG 156
+157	0.054701526	-0.36252645	0.023840595	0.024283894	EEG 157
+158	0.074015552	-0.317797572	0.023840595	0.024283894	EEG 158
+159	0.09913078	-0.279612547	0.023840595	0.024283894	EEG 159
+160	0.117304936	-0.222733874	0.023840595	0.024283894	EEG 160
+161	0.123315473	-0.169463521	0.023840595	0.024283894	EEG 161
+162	0.125435162	-0.117006671	0.023840595	0.024283894	EEG 162
+163	0.122986168	-0.070147895	0.023840595	0.024283894	EEG 163
+164	0.117629392	-0.020953359	0.023840595	0.024283894	EEG 164
+165	0.061719572	-0.45	0.023840595	0.024283894	EEG 165
+166	0.085226238	-0.411234759	0.023840595	0.024283894	EEG 166
+167	0.106134228	-0.360226213	0.023840595	0.024283894	EEG 167
+168	0.148968879	-0.297338854	0.023840595	0.024283894	EEG 168
+169	0.156365562	-0.241406814	0.023840595	0.024283894	EEG 169
+170	0.16089296	-0.182074387	0.023840595	0.024283894	EEG 170
+171	0.158874627	-0.130476761	0.023840595	0.024283894	EEG 171
+172	0.152612576	-0.080357072	0.023840595	0.024283894	EEG 172
+173	0.142940198	-0.038849379	0.023840595	0.024283894	EEG 173
+174	0.11640639	-0.433892117	0.023840595	0.024283894	EEG 174
+175	0.142872329	-0.395550026	0.023840595	0.024283894	EEG 175
+176	0.160198907	-0.335751192	0.023840595	0.024283894	EEG 176
+177	0.194042397	-0.266008675	0.023840595	0.024283894	EEG 177
+178	0.200260526	-0.201104991	0.023840595	0.024283894	EEG 178
+179	0.201004697	-0.132397774	0.023840595	0.024283894	EEG 179
+180	0.186334435	-0.079063391	0.023840595	0.024283894	EEG 180
+181	0.170816713	-0.027588171	0.023840595	0.024283894	EEG 181
+182	0.144183544	0.006552794	0.023840595	0.024283894	EEG 182
+183	0.11607512	0.026701856	0.023840595	0.024283894	EEG 183
+184	0.087032894	0.041560718	0.023840595	0.024283894	EEG 184
+185	0.055587556	0.053147386	0.023840595	0.024283894	EEG 185
+186	0.017539353	0.062826857	0.023840595	0.024283894	EEG 186
+187	0.178151028	-0.424680349	0.023840595	0.024283894	EEG 187
+188	0.20031364	-0.367491502	0.023840595	0.024283894	EEG 188
+189	0.215607267	-0.297916345	0.023840595	0.024283894	EEG 189
+190	0.235119884	-0.22133859	0.023840595	0.024283894	EEG 190
+191	0.230231782	-0.157310034	0.023840595	0.024283894	EEG 191
+192	0.227828247	-0.074709585	0.023840595	0.024283894	EEG 192
+193	0.202988841	-0.017932839	0.023840595	0.024283894	EEG 193
+194	0.175001011	0.027246728	0.023840595	0.024283894	EEG 194
+195	0.142622009	0.056366314	0.023840595	0.024283894	EEG 195
+196	0.108362109	0.07207399	0.023840595	0.024283894	EEG 196
+197	0.076990927	0.086006856	0.023840595	0.024283894	EEG 197
+198	0.036255497	0.09269913	0.023840595	0.024283894	EEG 198
+199	0.235425173	-0.391140875	0.023840595	0.024283894	EEG 199
+200	0.264402365	-0.317489099	0.023840595	0.024283894	EEG 200
+201	0.264511728	-0.247963981	0.023840595	0.024283894	EEG 201
+202	0.241320281	0.000293927	0.023840595	0.024283894	EEG 202
+203	0.20434592	0.049982898	0.023840595	0.024283894	EEG 203
+204	0.169122926	0.084563661	0.023840595	0.024283894	EEG 204
+205	0.132742164	0.104084677	0.023840595	0.024283894	EEG 205
+206	0.094840856	0.116834626	0.023840595	0.024283894	EEG 206
+207	0.055153266	0.126645408	0.023840595	0.024283894	EEG 207
+208	0.309236143	-0.330394078	0.023840595	0.024283894	EEG 208
+209	0.320841548	-0.246056831	0.023840595	0.024283894	EEG 209
+210	0.277394242	0.035815905	0.023840595	0.024283894	EEG 210
+211	0.237546771	0.082946276	0.023840595	0.024283894	EEG 211
+212	0.196096643	0.121848964	0.023840595	0.024283894	EEG 212
+213	0.156410469	0.141423921	0.023840595	0.024283894	EEG 213
+214	0.116009122	0.150111634	0.023840595	0.024283894	EEG 214
+215	0.079525249	0.158534511	0.023840595	0.024283894	EEG 215
+216	0.376606255	-0.236283155	0.023840595	0.024283894	EEG 216
+217	0.384631539	-0.115563191	0.023840595	0.024283894	EEG 217
+218	0.364333595	-0.009526546	0.023840595	0.024283894	EEG 218
+219	0.32017538	0.064356008	0.023840595	0.024283894	EEG 219
+220	0.271241865	0.131933691	0.023840595	0.024283894	EEG 220
+221	0.21721779	0.1735557	0.023840595	0.024283894	EEG 221
+222	0.17285275	0.187572361	0.023840595	0.024283894	EEG 222
+223	0.130890927	0.191286703	0.023840595	0.024283894	EEG 223
+224	0.098479668	0.187945851	0.023840595	0.024283894	EEG 224
+225	0.316289645	0.145736715	0.023840595	0.024283894	EEG 225
+226	0.302702771	0.230332844	0.023840595	0.024283894	EEG 226
+227	0.368412876	0.104246485	0.023840595	0.024283894	EEG 227
+228	0.409165374	0.012374488	0.023840595	0.024283894	EEG 228
+229	0.423731189	-0.12797492	0.023840595	0.024283894	EEG 229
+230	0.298254153	0.303894316	0.023840595	0.024283894	EEG 230
+231	0.362100214	0.20909316	0.023840595	0.024283894	EEG 231
+232	0.410199617	0.143137194	0.023840595	0.024283894	EEG 232
+233	0.447869069	0.013249996	0.023840595	0.024283894	EEG 233
+234	0.269381414	0.382730951	0.023840595	0.024283894	EEG 234
+235	0.342518502	0.308483235	0.023840595	0.024283894	EEG 235
+236	0.395968691	0.254174349	0.023840595	0.024283894	EEG 236
+237	0.45	0.157922288	0.023840595	0.024283894	EEG 237
+238	0.2187115	0.45	0.023840595	0.024283894	EEG 238
+239	0.327880174	0.384827106	0.023840595	0.024283894	EEG 239
+240	0.38583302	0.329449945	0.023840595	0.024283894	EEG 240
+241	-0.2187115	0.45	0.023840595	0.024283894	EEG 241
+242	-0.327880174	0.384827106	0.023840595	0.024283894	EEG 242
+243	-0.38583302	0.329449945	0.023840595	0.024283894	EEG 243
+244	-0.269381414	0.382730951	0.023840595	0.024283894	EEG 244
+245	-0.342518502	0.308483235	0.023840595	0.024283894	EEG 245
+246	-0.395968691	0.254174349	0.023840595	0.024283894	EEG 246
+247	-0.45	0.157922288	0.023840595	0.024283894	EEG 247
+248	-0.298254153	0.303894316	0.023840595	0.024283894	EEG 248
+249	-0.362100214	0.20909316	0.023840595	0.024283894	EEG 249
+250	-0.410199617	0.143137194	0.023840595	0.024283894	EEG 250
+251	-0.447869069	0.013249996	0.023840595	0.024283894	EEG 251
+252	-0.302702771	0.230332844	0.023840595	0.024283894	EEG 252
+253	-0.316289645	0.145736715	0.023840595	0.024283894	EEG 253
+254	-0.368412876	0.104246485	0.023840595	0.024283894	EEG 254
+255	-0.409165374	0.012374488	0.023840595	0.024283894	EEG 255
+256	-0.423731189	-0.12797492	0.023840595	0.024283894	EEG 256
+257	-0.45	-0.45	0.023840595	0.024283894	EEG 257
+258	0.45	-0.45	0.023840595	0.024283894	EEG 258
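
    (Aside on the .lout format, inferred purely from the data in this diff:
    the first line of each layout file gives the plotting bounding box as
    "xmin xmax ymin ymax", and every following line describes one channel as
    "id  x  y  width  height  name". mne ships its own reader; this minimal
    standalone parser is a sketch against that inferred format only.)

        def read_lout(path):
            """Minimal .lout parser (sketch only; mne.channels.read_layout
            is the real reader)."""
            with open(path) as f:
                # header line: plotting bounding box, xmin xmax ymin ymax
                box = tuple(float(v) for v in f.readline().split())
                ids, pos, names = [], [], []
                for line in f:
                    parts = line.split(None, 5)  # names contain a space ("EEG 010")
                    if len(parts) == 6:
                        ids.append(int(parts[0]))
                        pos.append([float(v) for v in parts[1:5]])
                        names.append(parts[5].strip())
            return box, ids, pos, names
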
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-157.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-157.lout
new file mode 100644
index 0000000..2cf5637
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-157.lout
@@ -0,0 +1,158 @@
+-42.19	43.52	-41.7	28.71
+001     9.78   -14.18     4.00     3.00 MEG 001
+002     3.31   -16.56     4.00     3.00 MEG 002
+003    12.02   -19.42     4.00     3.00 MEG 003
+004     8.08   -21.05     4.00     3.00 MEG 004
+005     4.12   -22.01     4.00     3.00 MEG 005
+006    15.80   -16.63     4.00     3.00 MEG 006
+007    10.21   -12.01     4.00     3.00 MEG 007
+008     7.23   -13.67     4.00     3.00 MEG 008
+009   -22.12    -3.07     4.00     3.00 MEG 009
+010   -13.99   -13.09     4.00     3.00 MEG 010
+011   -21.05    -7.51     4.00     3.00 MEG 011
+012   -18.85   -12.06     4.00     3.00 MEG 012
+013    -0.14   -16.77     4.00     3.00 MEG 013
+014    -6.69   -15.41     4.00     3.00 MEG 014
+015   -10.69   -15.56     4.00     3.00 MEG 015
+016    -3.91   -10.00     4.00     3.00 MEG 016
+017     0.80    -6.66     4.00     3.00 MEG 017
+018     3.74   -20.66     4.00     3.00 MEG 018
+019    15.01   -15.63     4.00     3.00 MEG 019
+020     4.16   -14.75     4.00     3.00 MEG 020
+021    16.72    -0.60     4.00     3.00 MEG 021
+022    14.31    -7.30     4.00     3.00 MEG 022
+023     1.27   -13.23     4.00     3.00 MEG 023
+024     9.63   -10.10     4.00     3.00 MEG 024
+025    -1.74   -14.94     4.00     3.00 MEG 025
+026    -4.68   -14.12     4.00     3.00 MEG 026
+027    -1.65    -8.33     4.00     3.00 MEG 027
+028    -6.53    -8.53     4.00     3.00 MEG 028
+029    -8.52    -6.61     4.00     3.00 MEG 029
+030   -10.18    -4.27     4.00     3.00 MEG 030
+031   -11.14    -1.21     4.00     3.00 MEG 031
+032    -4.02   -18.39     4.00     3.00 MEG 032
+033    19.69     0.13     4.00     3.00 MEG 033
+034     4.03    -8.21     4.00     3.00 MEG 034
+035     3.56     0.14     4.00     3.00 MEG 035
+036     4.19   -12.79     4.00     3.00 MEG 036
+037    19.43    -3.03     4.00     3.00 MEG 037
+038    20.99    -9.54     4.00     3.00 MEG 038
+039    15.93   -11.27     4.00     3.00 MEG 039
+040    22.46    -5.52     4.00     3.00 MEG 040
+041    -9.37    -8.82     4.00     3.00 MEG 041
+042    -6.93   -10.92     4.00     3.00 MEG 042
+043    -1.56   -13.07     4.00     3.00 MEG 043
+044    -7.75   -20.89     4.00     3.00 MEG 044
+045   -11.74   -19.07     4.00     3.00 MEG 045
+046     0.31   -22.23     4.00     3.00 MEG 046
+047    -3.75   -21.89     4.00     3.00 MEG 047
+048    -3.89    -5.28     4.00     3.00 MEG 048
+049    23.23    -0.95     4.00     3.00 MEG 049
+050    13.94   -14.13     4.00     3.00 MEG 050
+051     7.41   -17.72     4.00     3.00 MEG 051
+052    19.50    -8.59     4.00     3.00 MEG 052
+053    18.26    -7.47     4.00     3.00 MEG 053
+054    18.19    -2.34     4.00     3.00 MEG 054
+055    14.76    -9.91     4.00     3.00 MEG 055
+056    21.32    -0.18     4.00     3.00 MEG 056
+057    -1.88    -3.98     4.00     3.00 MEG 057
+058     3.56    -3.73     4.00     3.00 MEG 058
+059   -12.57    -8.25     4.00     3.00 MEG 059
+060    -7.56   -12.70     4.00     3.00 MEG 060
+061   -15.02    -1.73     4.00     3.00 MEG 061
+062   -11.53   -17.47     4.00     3.00 MEG 062
+063    -0.18   -18.90     4.00     3.00 MEG 063
+064    -6.61    -0.05     4.00     3.00 MEG 064
+065     6.73    -9.47     4.00     3.00 MEG 065
+066     1.16    -8.63     4.00     3.00 MEG 066
+067    18.43     8.05     4.00     3.00 MEG 067
+068    16.27    12.00     4.00     3.00 MEG 068
+069    19.53     3.47     4.00     3.00 MEG 069
+070    11.49     5.68     4.00     3.00 MEG 070
+071    12.54    -0.07     4.00     3.00 MEG 071
+072    12.40     3.05     4.00     3.00 MEG 072
+073   -15.98    -9.55     4.00     3.00 MEG 073
+074   -18.65    -1.75     4.00     3.00 MEG 074
+075   -17.81    -5.83     4.00     3.00 MEG 075
+076     -1.09     0.06     4.00     3.00 MEG 076
+077    -1.11     2.07     4.00     3.00 MEG 077
+078   -17.59   -10.78     4.00     3.00 MEG 078
+079   -20.36    -2.47     4.00     3.00 MEG 079
+080   -16.06    10.29     4.00     3.00 MEG 080
+081    10.71    -5.93     4.00     3.00 MEG 081
+082    12.02    -3.35     4.00     3.00 MEG 082
+083    19.99     8.66     4.00     3.00 MEG 083
+084    15.61    15.53     4.00     3.00 MEG 084
+085     5.76    -4.95     4.00     3.00 MEG 085
+086    12.48    13.62     4.00     3.00 MEG 086
+087    18.03     3.69     4.00     3.00 MEG 087
+088    14.69    11.11     4.00     3.00 MEG 088
+089   -19.42     6.89     4.00     3.00 MEG 089
+090   -16.09    14.39     4.00     3.00 MEG 090
+091    -6.70    -5.77     4.00     3.00 MEG 091
+092   -12.37   -11.31     4.00     3.00 MEG 092
+093    -1.72     9.34     4.00     3.00 MEG 093
+094    -4.12     1.65     4.00     3.00 MEG 094
+095   -18.66     2.58     4.00     3.00 MEG 095
+096   -17.76     6.59     4.00     3.00 MEG 096
+097     8.82    -5.11     4.00     3.00 MEG 097
+098     8.79    -7.85     4.00     3.00 MEG 098
+099    15.43     6.10     4.00     3.00 MEG 099
+100    11.93    11.57     4.00     3.00 MEG 100
+101    16.58     7.80     4.00     3.00 MEG 101
+102     8.27     6.69     4.00     3.00 MEG 102
+103    11.62    -8.00     4.00     3.00 MEG 103
+104    13.11    -5.40     4.00     3.00 MEG 104
+105   -13.38     0.11     4.00     3.00 MEG 105
+106   -12.78    -3.22     4.00     3.00 MEG 106
+107   -12.98     3.35     4.00     3.00 MEG 107
+108   -11.84     6.58     4.00     3.00 MEG 108
+109   -10.08     9.11     4.00     3.00 MEG 109
+110   -16.27    -5.03     4.00     3.00 MEG 110
+111   -11.45    -6.21     4.00     3.00 MEG 111
+112    -0.59     5.83     4.00     3.00 MEG 112
+113    14.18    -2.06     4.00     3.00 MEG 113
+114    14.48     1.15     4.00     3.00 MEG 114
+115    12.68     7.37     4.00     3.00 MEG 115
+116    13.93     4.46     4.00     3.00 MEG 116
+117     8.98    11.57     4.00     3.00 MEG 117
+118     6.35    12.95     4.00     3.00 MEG 118
+119    11.01     9.71     4.00     3.00 MEG 119
+120     0.01    16.08     4.00     3.00 MEG 120
+121   -16.87     2.69     4.00     3.00 MEG 121
+122   -16.02     6.38     4.00     3.00 MEG 122
+123   -14.38     9.83     4.00     3.00 MEG 123
+124   -12.23    12.65     4.00     3.00 MEG 124
+125   -10.14     5.19     4.00     3.00 MEG 125
+126    -5.63    12.72     4.00     3.00 MEG 126
+127    -2.90    13.72     4.00     3.00 MEG 127
+128    -7.93    11.11     4.00     3.00 MEG 128
+129     6.83    14.86     4.00     3.00 MEG 129
+130     7.63     3.51     4.00     3.00 MEG 130
+131     8.56     0.40     4.00     3.00 MEG 131
+132    -2.70     7.01     4.00     3.00 MEG 132
+133     3.09    11.73     4.00     3.00 MEG 133
+134     8.14     9.62     4.00     3.00 MEG 134
+135     2.84     2.47     4.00     3.00 MEG 135
+136     4.05     6.89     4.00     3.00 MEG 136
+137    -6.16    14.64     4.00     3.00 MEG 137
+138   -11.02     2.49     4.00     3.00 MEG 138
+139    -6.78     6.65     4.00     3.00 MEG 139
+140    -6.24     3.18     4.00     3.00 MEG 140
+141    -6.83     9.47     4.00     3.00 MEG 141
+142    -2.48    11.64     4.00     3.00 MEG 142
+143   -17.59    14.92     4.00     3.00 MEG 143
+144   -22.23     2.07     4.00     3.00 MEG 144
+145     3.20    13.71     4.00     3.00 MEG 145
+146     2.06     5.84     4.00     3.00 MEG 146
+147     5.76     1.93     4.00     3.00 MEG 147
+148    23.08     3.86     4.00     3.00 MEG 148
+149    21.96     8.34     4.00     3.00 MEG 149
+150    20.00    12.43     4.00     3.00 MEG 150
+151    17.22    16.08     4.00     3.00 MEG 151
+152     3.91     9.37     4.00     3.00 MEG 152
+153   -21.58     6.32     4.00     3.00 MEG 153
+154   -20.17    10.61     4.00     3.00 MEG 154
+155   -11.01    10.95     4.00     3.00 MEG 155
+156   -14.51     5.43     4.00     3.00 MEG 156
+157     1.28     9.74     4.00     3.00 MEG 157
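
    (In practice these files are not parsed by hand: mne resolves a layout
    by name from its bundled layout directory, so the files added in this
    diff are found without an explicit path. A usage sketch, assuming the
    read_layout API of the mne release packaged here behaves as in current
    releases:)

        from mne.channels import read_layout

        layout = read_layout('KIT-157')   # name without the .lout extension
        print(layout.names[:3])           # ['MEG 001', 'MEG 002', 'MEG 003']
        print(layout.pos.shape)           # one (x, y, width, height) row per channel
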
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-AD.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-AD.lout
new file mode 100644
index 0000000..e06356a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/KIT-AD.lout
@@ -0,0 +1,209 @@
+    0.00     1.00     0.00     1.00
+001     0.61     0.56     0.02     0.04 MEG 001
+002     0.59     0.50     0.02     0.04 MEG 002
+003     0.48     0.42     0.02     0.04 MEG 003
+004     0.52     0.43     0.02     0.04 MEG 004
+005     0.43     0.44     0.02     0.04 MEG 005
+006     0.39     0.48     0.02     0.04 MEG 006
+007     0.52     0.70     0.02     0.04 MEG 007
+008     0.58     0.59     0.02     0.04 MEG 008
+009     0.47     0.71     0.02     0.04 MEG 009
+010     0.53     0.49     0.02     0.04 MEG 010
+011     0.57     0.53     0.02     0.04 MEG 011
+012     0.43     0.50     0.02     0.04 MEG 012
+013     0.40     0.55     0.02     0.04 MEG 013
+014     0.57     0.39     0.02     0.04 MEG 014
+015     0.38     0.41     0.02     0.04 MEG 015
+016     0.48     0.37     0.02     0.04 MEG 016
+017     0.16     0.84     0.02     0.04 MEG 017
+018     0.53     0.63     0.02     0.04 MEG 018
+019     0.48     0.53     0.02     0.04 MEG 019
+020     0.44     0.63     0.02     0.04 MEG 020
+021     0.53     0.56     0.02     0.04 MEG 021
+022     0.44     0.57     0.02     0.04 MEG 022
+023     0.56     0.46     0.02     0.04 MEG 023
+024     0.59     0.68     0.02     0.04 MEG 024
+025     0.34     0.86     0.02     0.04 MEG 025
+026     0.39     0.89     0.02     0.04 MEG 026
+027     0.50     0.91     0.02     0.04 MEG 027
+028     0.61     0.87     0.02     0.04 MEG 028
+029     0.66     0.84     0.02     0.04 MEG 029
+030     0.59     0.76     0.02     0.04 MEG 030
+031     0.39     0.62     0.02     0.04 MEG 031
+032     0.55     0.85     0.02     0.04 MEG 032
+033     0.28     0.39     0.02     0.04 MEG 033
+034     0.37     0.52     0.02     0.04 MEG 034
+035     0.36     0.59     0.02     0.04 MEG 035
+036     0.38     0.70     0.02     0.04 MEG 036
+037     0.07     0.87     0.02     0.04 MEG 037
+038     0.24     0.61     0.02     0.04 MEG 038
+039     0.32     0.68     0.02     0.04 MEG 039
+040     0.30     0.81     0.02     0.04 MEG 040
+041     0.43     0.96     0.02     0.04 MEG 041
+042     0.55     0.95     0.02     0.04 MEG 042
+043     0.42     0.74     0.02     0.04 MEG 043
+044     0.56     0.72     0.02     0.04 MEG 044
+045     0.47     0.76     0.02     0.04 MEG 045
+046     0.52     0.75     0.02     0.04 MEG 046
+047     0.45     0.85     0.02     0.04 MEG 047
+048     0.40     0.79     0.02     0.04 MEG 048
+049     0.24     0.79     0.02     0.04 MEG 049
+050     0.21     0.46     0.02     0.04 MEG 050
+051     0.32     0.76     0.02     0.04 MEG 051
+052     0.20     0.63     0.02     0.04 MEG 052
+053     0.27     0.33     0.02     0.04 MEG 053
+054     0.17     0.74     0.02     0.04 MEG 054
+055     0.05     0.65     0.02     0.04 MEG 055
+056     0.28     0.63     0.02     0.04 MEG 056
+057     0.70     0.62     0.02     0.04 MEG 057
+058     0.94     0.38     0.02     0.04 MEG 058
+059     0.91     0.73     0.02     0.04 MEG 059
+060     0.82     0.93     0.02     0.04 MEG 060
+061     0.93     0.63     0.02     0.04 MEG 061
+062     0.75     0.78     0.02     0.04 MEG 062
+063     0.69     0.78     0.02     0.04 MEG 063
+064     0.43     0.00     0.02     0.04 MEG 064
+065     0.18     0.40     0.02     0.04 MEG 065
+066     0.19     0.29     0.02     0.04 MEG 066
+067     0.15     0.56     0.02     0.04 MEG 067
+068     0.33     0.53     0.02     0.04 MEG 068
+069     0.35     0.47     0.02     0.04 MEG 069
+070     0.25     0.89     0.02     0.04 MEG 070
+071     0.24     0.53     0.02     0.04 MEG 071
+072     0.16     0.95     0.02     0.04 MEG 072
+073     0.67     0.75     0.02     0.04 MEG 073
+074     0.74     0.86     0.02     0.04 MEG 074
+075     0.81     0.71     0.02     0.04 MEG 075
+076     0.78     0.62     0.02     0.04 MEG 076
+077     0.65     0.65     0.02     0.04 MEG 077
+078     0.83     0.81     0.02     0.04 MEG 078
+079     0.82     0.53     0.02     0.04 MEG 079
+080     0.78     0.36     0.02     0.04 MEG 080
+081     0.56     0.65     0.02     0.04 MEG 081
+082     0.35     0.74     0.02     0.04 MEG 082
+083     0.21     0.71     0.02     0.04 MEG 083
+084     0.12     0.75     0.02     0.04 MEG 084
+085     0.11     0.66     0.02     0.04 MEG 085
+086     0.21     0.92     0.02     0.04 MEG 086
+087     0.13     0.96     0.02     0.04 MEG 087
+088     0.03     0.76     0.02     0.04 MEG 088
+089     0.66     0.89     0.02     0.04 MEG 089
+090     0.61     0.93     0.02     0.04 MEG 090
+091     0.63     0.79     0.02     0.04 MEG 091
+092     0.71     0.84     0.02     0.04 MEG 092
+093     0.44     0.91     0.02     0.04 MEG 093
+094     0.56     0.89     0.02     0.04 MEG 094
+095     0.42     0.68     0.02     0.04 MEG 095
+096     0.54     0.79     0.02     0.04 MEG 096
+097     0.11     0.86     0.02     0.04 MEG 097
+098     0.14     0.36     0.02     0.04 MEG 098
+099     0.32     0.60     0.02     0.04 MEG 099
+100     0.25     0.45     0.02     0.04 MEG 100
+101     0.19     0.54     0.02     0.04 MEG 101
+102     0.27     0.85     0.02     0.04 MEG 102
+103     0.27     0.75     0.02     0.04 MEG 103
+104     0.01     0.64     0.02     0.04 MEG 104
+105     0.69     0.68     0.02     0.04 MEG 105
+106     0.88     0.82     0.02     0.04 MEG 106
+107     0.45     0.80     0.02     0.04 MEG 107
+108     0.50     0.86     0.02     0.04 MEG 108
+109     0.36     0.80     0.02     0.04 MEG 109
+110     0.49     0.96     0.02     0.04 MEG 110
+111     0.37     0.93     0.02     0.04 MEG 111
+112     0.32     0.90     0.02     0.04 MEG 112
+113     0.07     0.42     0.02     0.04 MEG 113
+114     0.73     0.72     0.02     0.04 MEG 114
+115     0.19     0.12     0.02     0.04 MEG 115
+116     0.01     0.51     0.02     0.04 MEG 116
+117     0.07     0.29     0.02     0.04 MEG 117
+118     0.16     0.47     0.02     0.04 MEG 118
+119     0.22     0.33     0.02     0.04 MEG 119
+120     0.10     0.54     0.02     0.04 MEG 120
+121     0.78     0.89     0.02     0.04 MEG 121
+122     0.87     0.63     0.02     0.04 MEG 122
+123     0.86     0.72     0.02     0.04 MEG 123
+124     0.77     0.70     0.02     0.04 MEG 124
+125     0.63     0.71     0.02     0.04 MEG 125
+126     0.89     0.27     0.02     0.04 MEG 126
+127     0.97     0.62     0.02     0.04 MEG 127
+128     0.83     0.62     0.02     0.04 MEG 128
+129     0.77     0.11     0.02     0.04 MEG 129
+130     0.86     0.95     0.02     0.04 MEG 130
+131     0.71     0.42     0.02     0.04 MEG 131
+132     0.78     0.53     0.02     0.04 MEG 132
+133     0.65     0.57     0.02     0.04 MEG 133
+134     0.16     0.67     0.02     0.04 MEG 134
+135     0.29     0.71     0.02     0.04 MEG 135
+136     0.16     0.23     0.02     0.04 MEG 136
+137     0.82     0.34     0.02     0.04 MEG 137
+138     0.87     0.52     0.02     0.04 MEG 138
+139     0.81     0.22     0.02     0.04 MEG 139
+140     0.90     0.40     0.02     0.04 MEG 140
+141     0.97     0.49     0.02     0.04 MEG 141
+142     0.74     0.30     0.02     0.04 MEG 142
+143     0.81     0.44     0.02     0.04 MEG 143
+144     0.95     0.75     0.02     0.04 MEG 144
+145     0.13     0.19     0.02     0.04 MEG 145
+146     0.28     0.56     0.02     0.04 MEG 146
+147     0.74     0.15     0.02     0.04 MEG 147
+148     0.10     0.33     0.02     0.04 MEG 148
+149     0.35     0.02     0.02     0.04 MEG 149
+150     0.03     0.39     0.02     0.04 MEG 150
+151     0.27     0.06     0.02     0.04 MEG 151
+152     0.31     0.43     0.02     0.04 MEG 152
+153     0.77     0.26     0.02     0.04 MEG 153
+154     0.67     0.10     0.02     0.04 MEG 154
+155     0.76     0.44     0.02     0.04 MEG 155
+156     0.83     0.18     0.02     0.04 MEG 156
+157     0.61     0.02     0.02     0.04 MEG 157
+158     0.91     0.86     0.02     0.04 MEG 158
+159     0.92     0.51     0.02     0.04 MEG 159
+160     0.86     0.30     0.02     0.04 MEG 160
+161     0.44     0.12     0.02     0.04 MEG 161
+162     0.37     0.30     0.02     0.04 MEG 162
+163     0.30     0.17     0.02     0.04 MEG 163
+164     0.36     0.25     0.02     0.04 MEG 164
+165     0.41     0.22     0.02     0.04 MEG 165
+166     0.31     0.28     0.02     0.04 MEG 166
+167     0.05     0.53     0.02     0.04 MEG 167
+168     0.08     0.76     0.02     0.04 MEG 168
+169     0.69     0.24     0.02     0.04 MEG 169
+170     0.57     0.18     0.02     0.04 MEG 170
+171     0.50     0.17     0.02     0.04 MEG 171
+172     0.64     0.20     0.02     0.04 MEG 172
+173     0.65     0.42     0.02     0.04 MEG 173
+174     0.69     0.53     0.02     0.04 MEG 174
+175     0.61     0.44     0.02     0.04 MEG 175
+176     0.70     0.32     0.02     0.04 MEG 176
+177     0.44     0.17     0.02     0.04 MEG 177
+178     0.38     0.18     0.02     0.04 MEG 178
+179     0.32     0.22     0.02     0.04 MEG 179
+180     0.44     0.06     0.02     0.04 MEG 180
+181     0.22     0.16     0.02     0.04 MEG 181
+182     0.36     0.07     0.02     0.04 MEG 182
+183     0.28     0.11     0.02     0.04 MEG 183
+184     0.42     0.27     0.02     0.04 MEG 184
+185     0.52     0.32     0.02     0.04 MEG 185
+186     0.57     0.33     0.02     0.04 MEG 186
+187     0.47     0.32     0.02     0.04 MEG 187
+188     0.62     0.37     0.02     0.04 MEG 188
+189     0.73     0.49     0.02     0.04 MEG 189
+190     0.67     0.36     0.02     0.04 MEG 190
+191     0.74     0.57     0.02     0.04 MEG 191
+192     0.64     0.49     0.02     0.04 MEG 192
+193     0.59     0.06     0.02     0.04 MEG 193
+194     0.52    -0.00     0.02     0.04 MEG 194
+195     0.58     0.29     0.02     0.04 MEG 195
+196     0.53     0.27     0.02     0.04 MEG 196
+197     0.47     0.26     0.02     0.04 MEG 197
+198     0.34     0.39     0.02     0.04 MEG 198
+199     0.42     0.33     0.02     0.04 MEG 199
+200     0.38     0.35     0.02     0.04 MEG 200
+201     0.53     0.22     0.02     0.04 MEG 201
+202     0.59     0.24     0.02     0.04 MEG 202
+203     0.65     0.27     0.02     0.04 MEG 203
+204     0.27     0.26     0.02     0.04 MEG 204
+205     0.51     0.11     0.02     0.04 MEG 205
+206     0.65     0.15     0.02     0.04 MEG 206
+207     0.51     0.05     0.02     0.04 MEG 207
+208     0.69     0.05     0.02     0.04 MEG 208
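
    (Note the header line of KIT-AD above, "0.00 1.00 0.00 1.00": unlike
    KIT-157, its positions are already normalized to the unit square.
    Mapping raw coordinates such as KIT-157's into that box is a one-liner
    per axis; a sketch:)

        import numpy as np

        def normalize(pos, box):
            # pos: (n, 4) array of x, y, width, height
            # box: xmin, xmax, ymin, ymax from the file header
            xmin, xmax, ymin, ymax = box
            out = np.asarray(pos, dtype=float).copy()
            out[:, 0] = (out[:, 0] - xmin) / (xmax - xmin)
            out[:, 1] = (out[:, 1] - ymin) / (ymax - ymin)
            return out
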
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-all.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-all.lout
new file mode 100644
index 0000000..b6395fb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-all.lout
@@ -0,0 +1,307 @@
+-85.000000 90.000000 -83.000000 75.000000
+113	-73.416206	33.416687	6.000000	5.000000	MEG 0113
+112	-73.416206	38.416687	6.000000	5.000000	MEG 0112
+111	-67.416206	35.916687	6.000000	5.000000	MEG 0111
+122	-59.602242	38.489067	6.000000	5.000000	MEG 0122
+123	-59.602242	43.489067	6.000000	5.000000	MEG 0123
+121	-53.602242	40.989067	6.000000	5.000000	MEG 0121
+132	-68.018288	18.676970	6.000000	5.000000	MEG 0132
+133	-68.018288	23.676970	6.000000	5.000000	MEG 0133
+131	-62.018288	21.176970	6.000000	5.000000	MEG 0131
+143	-80.582848	8.095787	6.000000	5.000000	MEG 0143
+142	-80.582848	13.095787	6.000000	5.000000	MEG 0142
+141	-74.582848	10.595787	6.000000	5.000000	MEG 0141
+213	-56.595154	17.019251	6.000000	5.000000	MEG 0213
+212	-56.595154	22.019251	6.000000	5.000000	MEG 0212
+211	-50.595154	19.519251	6.000000	5.000000	MEG 0211
+222	-44.599728	17.543873	6.000000	5.000000	MEG 0222
+223	-44.599728	22.543873	6.000000	5.000000	MEG 0223
+221	-38.599728	20.043873	6.000000	5.000000	MEG 0221
+232	-47.416420	-0.216784	6.000000	5.000000	MEG 0232
+233	-47.416420	4.783216	6.000000	5.000000	MEG 0233
+231	-41.416420	2.283216	6.000000	5.000000	MEG 0231
+243	-59.280643	-2.761772	6.000000	5.000000	MEG 0243
+242	-59.280643	2.238228	6.000000	5.000000	MEG 0242
+241	-53.280643	-0.261772	6.000000	5.000000	MEG 0241
+313	-39.790501	47.430138	6.000000	5.000000	MEG 0313
+312	-39.790501	52.430138	6.000000	5.000000	MEG 0312
+311	-33.790501	49.930138	6.000000	5.000000	MEG 0311
+322	-38.014336	32.768585	6.000000	5.000000	MEG 0322
+323	-38.014336	37.768585	6.000000	5.000000	MEG 0323
+321	-32.014336	35.268585	6.000000	5.000000	MEG 0321
+333	-27.679966	28.868065	6.000000	5.000000	MEG 0333
+332	-27.679966	33.868065	6.000000	5.000000	MEG 0332
+331	-21.679966	31.368065	6.000000	5.000000	MEG 0331
+343	-49.684467	34.078434	6.000000	5.000000	MEG 0343
+342	-49.684467	39.078434	6.000000	5.000000	MEG 0342
+341	-43.684467	36.578434	6.000000	5.000000	MEG 0341
+413	-32.997990	15.607347	6.000000	5.000000	MEG 0413
+412	-32.997990	20.607347	6.000000	5.000000	MEG 0412
+411	-26.997990	18.107347	6.000000	5.000000	MEG 0411
+422	-21.084751	13.953575	6.000000	5.000000	MEG 0422
+423	-21.084751	18.953575	6.000000	5.000000	MEG 0423
+421	-15.084751	16.453575	6.000000	5.000000	MEG 0421
+432	-21.930935	-0.085500	6.000000	5.000000	MEG 0432
+433	-21.930935	4.914500	6.000000	5.000000	MEG 0433
+431	-15.930935	2.414500	6.000000	5.000000	MEG 0431
+443	-34.824663	0.362587	6.000000	5.000000	MEG 0443
+442	-34.824663	5.362587	6.000000	5.000000	MEG 0442
+441	-28.824663	2.862587	6.000000	5.000000	MEG 0441
+513	-27.861498	55.439636	6.000000	5.000000	MEG 0513
+512	-27.861498	60.439636	6.000000	5.000000	MEG 0512
+511	-21.861498	57.939636	6.000000	5.000000	MEG 0511
+523	-15.506709	59.619865	6.000000	5.000000	MEG 0523
+522	-15.506709	64.619865	6.000000	5.000000	MEG 0522
+521	-9.506709	62.119865	6.000000	5.000000	MEG 0521
+532	-14.616095	49.308380	6.000000	5.000000	MEG 0532
+533	-14.616095	54.308380	6.000000	5.000000	MEG 0533
+531	-8.616095	51.808380	6.000000	5.000000	MEG 0531
+542	-27.240477	43.863430	6.000000	5.000000	MEG 0542
+543	-27.240477	48.863430	6.000000	5.000000	MEG 0543
+541	-21.240477	46.363430	6.000000	5.000000	MEG 0541
+613	-14.782405	38.147827	6.000000	5.000000	MEG 0613
+612	-14.782405	43.147827	6.000000	5.000000	MEG 0612
+611	-8.782405	40.647827	6.000000	5.000000	MEG 0611
+622	-2.967276	27.260933	6.000000	5.000000	MEG 0622
+623	-2.967276	32.260933	6.000000	5.000000	MEG 0623
+621	3.032724	29.760933	6.000000	5.000000	MEG 0621
+633	-9.094766	14.700909	6.000000	5.000000	MEG 0633
+632	-9.094766	19.700909	6.000000	5.000000	MEG 0632
+631	-3.094766	17.200909	6.000000	5.000000	MEG 0631
+642	-15.199021	26.631405	6.000000	5.000000	MEG 0642
+643	-15.199021	31.631405	6.000000	5.000000	MEG 0643
+641	-9.199021	29.131405	6.000000	5.000000	MEG 0641
+713	-9.246834	1.693846	6.000000	5.000000	MEG 0713
+712	-9.246834	6.693846	6.000000	5.000000	MEG 0712
+711	-3.246834	4.193846	6.000000	5.000000	MEG 0711
+723	3.314525	1.573887	6.000000	5.000000	MEG 0723
+722	3.314525	6.573887	6.000000	5.000000	MEG 0722
+721	9.314525	4.073887	6.000000	5.000000	MEG 0721
+733	3.387173	-10.588106	6.000000	5.000000	MEG 0733
+732	3.387173	-5.588106	6.000000	5.000000	MEG 0732
+731	9.387173	-8.088106	6.000000	5.000000	MEG 0731
+743	-9.422897	-10.519942	6.000000	5.000000	MEG 0743
+742	-9.422897	-5.519942	6.000000	5.000000	MEG 0742
+741	-3.422897	-8.019942	6.000000	5.000000	MEG 0741
+813	-2.962408	61.007698	6.000000	5.000000	MEG 0813
+812	-2.962408	66.007698	6.000000	5.000000	MEG 0812
+811	3.037592	63.507698	6.000000	5.000000	MEG 0811
+822	-2.965545	50.641838	6.000000	5.000000	MEG 0822
+823	-2.965545	55.641838	6.000000	5.000000	MEG 0823
+821	3.034455	53.141838	6.000000	5.000000	MEG 0821
+913	9.504830	59.655254	6.000000	5.000000	MEG 0913
+912	9.504830	64.655254	6.000000	5.000000	MEG 0912
+911	15.504830	62.155254	6.000000	5.000000	MEG 0911
+923	21.967310	55.408710	6.000000	5.000000	MEG 0923
+922	21.967310	60.408710	6.000000	5.000000	MEG 0922
+921	27.967310	57.908710	6.000000	5.000000	MEG 0921
+932	21.254196	43.889683	6.000000	5.000000	MEG 0932
+933	21.254196	48.889683	6.000000	5.000000	MEG 0933
+931	27.254196	46.389683	6.000000	5.000000	MEG 0931
+942	8.661931	49.358044	6.000000	5.000000	MEG 0942
+943	8.661931	54.358044	6.000000	5.000000	MEG 0943
+941	14.661931	51.858044	6.000000	5.000000	MEG 0941
+1013	-2.967087	39.669956	6.000000	5.000000	MEG 1013
+1012	-2.967087	44.669956	6.000000	5.000000	MEG 1012
+1011	3.032913	42.169956	6.000000	5.000000	MEG 1011
+1023	8.751018	38.154079	6.000000	5.000000	MEG 1023
+1022	8.751018	43.154079	6.000000	5.000000	MEG 1022
+1021	14.751018	40.654079	6.000000	5.000000	MEG 1021
+1032	9.123913	26.648697	6.000000	5.000000	MEG 1032
+1033	9.123913	31.648697	6.000000	5.000000	MEG 1033
+1031	15.123913	29.148697	6.000000	5.000000	MEG 1031
+1043	3.200539	14.795620	6.000000	5.000000	MEG 1043
+1042	3.200539	19.795620	6.000000	5.000000	MEG 1042
+1041	9.200539	17.295620	6.000000	5.000000	MEG 1041
+1112	15.014965	13.912239	6.000000	5.000000	MEG 1112
+1113	15.014965	18.912239	6.000000	5.000000	MEG 1113
+1111	21.014965	16.412239	6.000000	5.000000	MEG 1111
+1123	26.958527	15.562130	6.000000	5.000000	MEG 1123
+1122	26.958527	20.562130	6.000000	5.000000	MEG 1122
+1121	32.958527	18.062130	6.000000	5.000000	MEG 1121
+1133	28.757563	0.227141	6.000000	5.000000	MEG 1133
+1132	28.757563	5.227141	6.000000	5.000000	MEG 1132
+1131	34.757563	2.727141	6.000000	5.000000	MEG 1131
+1142	15.882982	0.037700	6.000000	5.000000	MEG 1142
+1143	15.882982	5.037700	6.000000	5.000000	MEG 1143
+1141	21.882982	2.537700	6.000000	5.000000	MEG 1141
+1213	33.958897	47.388790	6.000000	5.000000	MEG 1213
+1212	33.958897	52.388790	6.000000	5.000000	MEG 1212
+1211	39.958897	49.888790	6.000000	5.000000	MEG 1211
+1223	43.923473	33.914738	6.000000	5.000000	MEG 1223
+1222	43.923473	38.914738	6.000000	5.000000	MEG 1222
+1221	49.923473	36.414738	6.000000	5.000000	MEG 1221
+1232	32.014336	32.768585	6.000000	5.000000	MEG 1232
+1233	32.014336	37.768585	6.000000	5.000000	MEG 1233
+1231	38.014336	35.268585	6.000000	5.000000	MEG 1231
+1243	21.600079	28.898149	6.000000	5.000000	MEG 1243
+1242	21.600079	33.898149	6.000000	5.000000	MEG 1242
+1241	27.600079	31.398149	6.000000	5.000000	MEG 1241
+1312	38.599728	17.543867	6.000000	5.000000	MEG 1312
+1313	38.599728	22.543867	6.000000	5.000000	MEG 1313
+1311	44.599728	20.043867	6.000000	5.000000	MEG 1311
+1323	50.558392	16.887651	6.000000	5.000000	MEG 1323
+1322	50.558392	21.887651	6.000000	5.000000	MEG 1322
+1321	56.558392	19.387651	6.000000	5.000000	MEG 1321
+1333	53.420483	-2.919475	6.000000	5.000000	MEG 1333
+1332	53.420483	2.080525	6.000000	5.000000	MEG 1332
+1331	59.420483	-0.419475	6.000000	5.000000	MEG 1331
+1342	41.371586	-0.216817	6.000000	5.000000	MEG 1342
+1343	41.371586	4.783183	6.000000	5.000000	MEG 1343
+1341	47.371586	2.283183	6.000000	5.000000	MEG 1341
+1412	53.704369	38.563030	6.000000	5.000000	MEG 1412
+1413	53.704369	43.563030	6.000000	5.000000	MEG 1413
+1411	59.704369	41.063030	6.000000	5.000000	MEG 1411
+1423	67.119286	33.843739	6.000000	5.000000	MEG 1423
+1422	67.119286	38.843739	6.000000	5.000000	MEG 1422
+1421	73.119286	36.343739	6.000000	5.000000	MEG 1421
+1433	74.438919	8.335863	6.000000	5.000000	MEG 1433
+1432	74.438919	13.335863	6.000000	5.000000	MEG 1432
+1431	80.438919	10.835863	6.000000	5.000000	MEG 1431
+1442	61.883209	18.562304	6.000000	5.000000	MEG 1442
+1443	61.883209	23.562304	6.000000	5.000000	MEG 1443
+1441	67.883209	21.062304	6.000000	5.000000	MEG 1441
+1512	-71.298943	-4.707253	6.000000	5.000000	MEG 1512
+1513	-71.298943	0.292747	6.000000	5.000000	MEG 1513
+1511	-65.298943	-2.207253	6.000000	5.000000	MEG 1511
+1522	-67.281609	-25.407852	6.000000	5.000000	MEG 1522
+1523	-67.281609	-20.407852	6.000000	5.000000	MEG 1523
+1521	-61.281609	-22.907852	6.000000	5.000000	MEG 1521
+1533	-71.702820	-40.152336	6.000000	5.000000	MEG 1533
+1532	-71.702820	-35.152336	6.000000	5.000000	MEG 1532
+1531	-65.702820	-37.652336	6.000000	5.000000	MEG 1531
+1543	-79.907913	-17.418098	6.000000	5.000000	MEG 1543
+1542	-79.907913	-12.418098	6.000000	5.000000	MEG 1542
+1541	-73.907913	-14.918098	6.000000	5.000000	MEG 1541
+1613	-56.916454	-20.312164	6.000000	5.000000	MEG 1613
+1612	-56.916454	-15.312164	6.000000	5.000000	MEG 1612
+1611	-50.916454	-17.812164	6.000000	5.000000	MEG 1611
+1622	-45.631779	-16.320436	6.000000	5.000000	MEG 1622
+1623	-45.631779	-11.320436	6.000000	5.000000	MEG 1623
+1621	-39.631779	-13.820436	6.000000	5.000000	MEG 1621
+1632	-37.896103	-30.578358	6.000000	5.000000	MEG 1632
+1633	-37.896103	-25.578358	6.000000	5.000000	MEG 1633
+1631	-31.896103	-28.078358	6.000000	5.000000	MEG 1631
+1643	-48.859089	-36.176094	6.000000	5.000000	MEG 1643
+1642	-48.859089	-31.176094	6.000000	5.000000	MEG 1642
+1641	-42.859089	-33.676094	6.000000	5.000000	MEG 1641
+1713	-56.796040	-59.082275	6.000000	5.000000	MEG 1713
+1712	-56.796040	-54.082275	6.000000	5.000000	MEG 1712
+1711	-50.796040	-56.582275	6.000000	5.000000	MEG 1711
+1722	-57.188797	-44.057373	6.000000	5.000000	MEG 1722
+1723	-57.188797	-39.057373	6.000000	5.000000	MEG 1723
+1721	-51.188797	-41.557373	6.000000	5.000000	MEG 1721
+1732	-41.902962	-58.279526	6.000000	5.000000	MEG 1732
+1733	-41.902962	-53.279526	6.000000	5.000000	MEG 1733
+1731	-35.902962	-55.779526	6.000000	5.000000	MEG 1731
+1743	-37.408134	-72.449036	6.000000	5.000000	MEG 1743
+1742	-37.408134	-67.449036	6.000000	5.000000	MEG 1742
+1741	-31.408134	-69.949036	6.000000	5.000000	MEG 1741
+1813	-33.801163	-13.768716	6.000000	5.000000	MEG 1813
+1812	-33.801163	-8.768716	6.000000	5.000000	MEG 1812
+1811	-27.801163	-11.268716	6.000000	5.000000	MEG 1811
+1822	-21.685101	-12.619589	6.000000	5.000000	MEG 1822
+1823	-21.685101	-7.619589	6.000000	5.000000	MEG 1823
+1821	-15.685101	-10.119589	6.000000	5.000000	MEG 1821
+1832	-9.600111	-22.190945	6.000000	5.000000	MEG 1832
+1833	-9.600111	-17.190945	6.000000	5.000000	MEG 1833
+1831	-3.600111	-19.690945	6.000000	5.000000	MEG 1831
+1843	-24.483526	-26.850609	6.000000	5.000000	MEG 1843
+1842	-24.483526	-21.850609	6.000000	5.000000	MEG 1842
+1841	-18.483526	-24.350609	6.000000	5.000000	MEG 1841
+1912	-25.866816	-40.850040	6.000000	5.000000	MEG 1912
+1913	-25.866816	-35.850040	6.000000	5.000000	MEG 1913
+1911	-19.866816	-38.350040	6.000000	5.000000	MEG 1911
+1923	-20.513481	-56.355225	6.000000	5.000000	MEG 1923
+1922	-20.513481	-51.355225	6.000000	5.000000	MEG 1922
+1921	-14.513481	-53.855225	6.000000	5.000000	MEG 1921
+1932	-23.428471	-67.375893	6.000000	5.000000	MEG 1932
+1933	-23.428471	-62.375893	6.000000	5.000000	MEG 1933
+1931	-17.428471	-64.875893	6.000000	5.000000	MEG 1931
+1943	-36.237587	-48.444530	6.000000	5.000000	MEG 1943
+1942	-36.237587	-43.444530	6.000000	5.000000	MEG 1942
+1941	-30.237587	-45.944530	6.000000	5.000000	MEG 1941
+2013	-10.441930	-34.308243	6.000000	5.000000	MEG 2013
+2012	-10.441930	-29.308243	6.000000	5.000000	MEG 2012
+2011	-4.441930	-31.808243	6.000000	5.000000	MEG 2011
+2023	4.357624	-34.289736	6.000000	5.000000	MEG 2023
+2022	4.357624	-29.289736	6.000000	5.000000	MEG 2022
+2021	10.357624	-31.789736	6.000000	5.000000	MEG 2021
+2032	4.645295	-46.290749	6.000000	5.000000	MEG 2032
+2033	4.645295	-41.290749	6.000000	5.000000	MEG 2033
+2031	10.645295	-43.790749	6.000000	5.000000	MEG 2031
+2042	-10.645079	-46.244335	6.000000	5.000000	MEG 2042
+2043	-10.645079	-41.244335	6.000000	5.000000	MEG 2043
+2041	-4.645079	-43.744335	6.000000	5.000000	MEG 2041
+2113	-3.052351	-58.889515	6.000000	5.000000	MEG 2113
+2112	-3.052351	-53.889515	6.000000	5.000000	MEG 2112
+2111	2.947649	-56.389515	6.000000	5.000000	MEG 2111
+2122	-2.999999	-70.362061	6.000000	5.000000	MEG 2122
+2123	-2.999999	-65.362061	6.000000	5.000000	MEG 2123
+2121	3.000001	-67.862061	6.000000	5.000000	MEG 2121
+2133	8.918572	-79.441826	6.000000	5.000000	MEG 2133
+2132	8.918572	-74.441826	6.000000	5.000000	MEG 2132
+2131	14.918572	-76.941826	6.000000	5.000000	MEG 2131
+2143	-14.987089	-79.428932	6.000000	5.000000	MEG 2143
+2142	-14.987089	-74.428932	6.000000	5.000000	MEG 2142
+2141	-8.987089	-76.928932	6.000000	5.000000	MEG 2141
+2212	15.641460	-12.579389	6.000000	5.000000	MEG 2212
+2213	15.641460	-7.579389	6.000000	5.000000	MEG 2213
+2211	21.641460	-10.079389	6.000000	5.000000	MEG 2211
+2223	27.786499	-13.669980	6.000000	5.000000	MEG 2223
+2222	27.786499	-8.669980	6.000000	5.000000	MEG 2222
+2221	33.786499	-11.169980	6.000000	5.000000	MEG 2221
+2233	18.501518	-26.949615	6.000000	5.000000	MEG 2233
+2232	18.501518	-21.949615	6.000000	5.000000	MEG 2232
+2231	24.501518	-24.449615	6.000000	5.000000	MEG 2231
+2242	3.641699	-22.206125	6.000000	5.000000	MEG 2242
+2243	3.641699	-17.206125	6.000000	5.000000	MEG 2243
+2241	9.641699	-19.706125	6.000000	5.000000	MEG 2241
+2312	19.852789	-40.871220	6.000000	5.000000	MEG 2312
+2313	19.852789	-35.871220	6.000000	5.000000	MEG 2313
+2311	25.852789	-38.371220	6.000000	5.000000	MEG 2311
+2323	30.078903	-48.474960	6.000000	5.000000	MEG 2323
+2322	30.078903	-43.474960	6.000000	5.000000	MEG 2322
+2321	36.078903	-45.974960	6.000000	5.000000	MEG 2321
+2332	17.363274	-67.365387	6.000000	5.000000	MEG 2332
+2333	17.363274	-62.365387	6.000000	5.000000	MEG 2333
+2331	23.363274	-64.865387	6.000000	5.000000	MEG 2331
+2343	14.329920	-56.380260	6.000000	5.000000	MEG 2343
+2342	14.329920	-51.380260	6.000000	5.000000	MEG 2342
+2341	20.329920	-53.880260	6.000000	5.000000	MEG 2341
+2412	39.644810	-16.175139	6.000000	5.000000	MEG 2412
+2413	39.644810	-11.175139	6.000000	5.000000	MEG 2413
+2411	45.644810	-13.675139	6.000000	5.000000	MEG 2411
+2423	50.812263	-20.401899	6.000000	5.000000	MEG 2423
+2422	50.812263	-15.401899	6.000000	5.000000	MEG 2422
+2421	56.812263	-17.901899	6.000000	5.000000	MEG 2421
+2433	42.694180	-36.278580	6.000000	5.000000	MEG 2433
+2432	42.694180	-31.278580	6.000000	5.000000	MEG 2432
+2431	48.694180	-33.778580	6.000000	5.000000	MEG 2431
+2442	31.896111	-30.578348	6.000000	5.000000	MEG 2442
+2443	31.896111	-25.578348	6.000000	5.000000	MEG 2443
+2441	37.896111	-28.078348	6.000000	5.000000	MEG 2441
+2512	35.812634	-58.300888	6.000000	5.000000	MEG 2512
+2513	35.812634	-53.300888	6.000000	5.000000	MEG 2513
+2511	41.812634	-55.800888	6.000000	5.000000	MEG 2511
+2522	51.171906	-43.981274	6.000000	5.000000	MEG 2522
+2523	51.171906	-38.981274	6.000000	5.000000	MEG 2523
+2521	57.171906	-41.481274	6.000000	5.000000	MEG 2521
+2533	50.704624	-59.132656	6.000000	5.000000	MEG 2533
+2532	50.704624	-54.132656	6.000000	5.000000	MEG 2532
+2531	56.704624	-56.632656	6.000000	5.000000	MEG 2531
+2543	31.320171	-72.484848	6.000000	5.000000	MEG 2543
+2542	31.320171	-67.484848	6.000000	5.000000	MEG 2542
+2541	37.320171	-69.984848	6.000000	5.000000	MEG 2541
+2612	65.137360	-4.702045	6.000000	5.000000	MEG 2612
+2613	65.137360	0.297955	6.000000	5.000000	MEG 2613
+2611	71.137360	-2.202045	6.000000	5.000000	MEG 2611
+2623	73.822243	-17.329140	6.000000	5.000000	MEG 2623
+2622	73.822243	-12.329140	6.000000	5.000000	MEG 2622
+2621	79.822243	-14.829140	6.000000	5.000000	MEG 2621
+2633	65.490112	-40.332645	6.000000	5.000000	MEG 2633
+2632	65.490112	-35.332645	6.000000	5.000000	MEG 2632
+2631	71.490112	-37.832645	6.000000	5.000000	MEG 2631
+2642	61.220192	-25.385981	6.000000	5.000000	MEG 2642
+2643	61.220192	-20.385981	6.000000	5.000000	MEG 2643
+2641	67.220192	-22.885981	6.000000	5.000000	MEG 2641
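
    (The Vectorview-all layout above lists channels in triplets at nearly
    identical positions because each Elekta/Neuromag sensor location
    carries two planar gradiometers, names ending in 2 and 3, and one
    magnetometer, ending in 1; the Vectorview-grad and Vectorview-mag
    files that follow are exactly those subsets. Splitting by name is
    enough to separate them; a sketch:)

        from mne.channels import read_layout

        names = read_layout('Vectorview-all').names
        mags = [n for n in names if n.endswith('1')]       # e.g. 'MEG 0111'
        grads = [n for n in names if n[-1] in ('2', '3')]  # e.g. 'MEG 0112', 'MEG 0113'
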
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-grad.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-grad.lout
new file mode 100644
index 0000000..1f133a1
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-grad.lout
@@ -0,0 +1,205 @@
+-55.000000 55.000000 -65.000000 60.000000
+113	-48.186871	26.886379	6.000000	5.000000	MEG 0113
+112	-48.186871	31.886379	6.000000	5.000000	MEG 0112
+122	-39.322296	31.036510	6.000000	5.000000	MEG 0122
+123	-39.322296	36.036510	6.000000	5.000000	MEG 0123
+132	-44.722965	14.826612	6.000000	5.000000	MEG 0132
+133	-44.722965	19.826612	6.000000	5.000000	MEG 0133
+143	-52.785782	6.169280	6.000000	5.000000	MEG 0143
+142	-52.785782	11.169280	6.000000	5.000000	MEG 0142
+213	-37.392612	13.470296	6.000000	5.000000	MEG 0213
+212	-37.392612	18.470296	6.000000	5.000000	MEG 0212
+222	-29.695013	13.899532	6.000000	5.000000	MEG 0222
+223	-29.695013	18.899532	6.000000	5.000000	MEG 0223
+232	-31.502516	-0.631914	6.000000	5.000000	MEG 0232
+233	-31.502516	4.368086	6.000000	5.000000	MEG 0233
+243	-39.115921	-2.709978	6.000000	5.000000	MEG 0243
+242	-39.115921	2.290022	6.000000	5.000000	MEG 0242
+313	-26.608879	38.351933	6.000000	5.000000	MEG 0313
+312	-26.608879	43.351933	6.000000	5.000000	MEG 0312
+322	-25.469093	26.356115	6.000000	5.000000	MEG 0322
+323	-25.469093	31.356115	6.000000	5.000000	MEG 0323
+333	-18.837411	23.164780	6.000000	5.000000	MEG 0333
+332	-18.837411	28.164780	6.000000	5.000000	MEG 0332
+343	-32.957949	27.427811	6.000000	5.000000	MEG 0343
+342	-32.957949	32.427811	6.000000	5.000000	MEG 0342
+413	-22.250046	12.315103	6.000000	5.000000	MEG 0413
+412	-22.250046	17.315103	6.000000	5.000000	MEG 0412
+422	-14.605187	10.962016	6.000000	5.000000	MEG 0422
+423	-14.605187	15.962016	6.000000	5.000000	MEG 0423
+432	-15.148193	-0.524500	6.000000	5.000000	MEG 0432
+433	-15.148193	4.475500	6.000000	5.000000	MEG 0433
+443	-23.422245	-0.157884	6.000000	5.000000	MEG 0443
+442	-23.422245	4.842116	6.000000	5.000000	MEG 0442
+513	-18.953902	44.905155	6.000000	5.000000	MEG 0513
+512	-18.953902	49.905155	6.000000	5.000000	MEG 0512
+523	-11.025696	48.325344	6.000000	5.000000	MEG 0523
+522	-11.025696	53.325344	6.000000	5.000000	MEG 0522
+532	-10.454178	39.888676	6.000000	5.000000	MEG 0532
+533	-10.454178	44.888676	6.000000	5.000000	MEG 0533
+542	-18.555386	35.433716	6.000000	5.000000	MEG 0542
+543	-18.555386	40.433716	6.000000	5.000000	MEG 0543
+613	-10.560901	30.757313	6.000000	5.000000	MEG 0613
+612	-10.560901	35.757313	6.000000	5.000000	MEG 0612
+622	-2.979000	21.849854	6.000000	5.000000	MEG 0622
+623	-2.979000	26.849854	6.000000	5.000000	MEG 0623
+633	-6.911079	11.573471	6.000000	5.000000	MEG 0633
+632	-6.911079	16.573471	6.000000	5.000000	MEG 0632
+642	-10.828249	21.334785	6.000000	5.000000	MEG 0642
+643	-10.828249	26.334785	6.000000	5.000000	MEG 0643
+713	-7.008664	0.931329	6.000000	5.000000	MEG 0713
+712	-7.008664	5.931329	6.000000	5.000000	MEG 0712
+723	1.052102	0.833180	6.000000	5.000000	MEG 0723
+722	1.052102	5.833180	6.000000	5.000000	MEG 0722
+733	1.098721	-8.987786	6.000000	5.000000	MEG 0733
+732	1.098721	-3.987786	6.000000	5.000000	MEG 0732
+743	-7.121645	-8.933109	6.000000	5.000000	MEG 0743
+742	-7.121645	-3.933109	6.000000	5.000000	MEG 0742
+813	-2.975877	49.460842	6.000000	5.000000	MEG 0813
+812	-2.975877	54.460842	6.000000	5.000000	MEG 0812
+822	-2.977890	40.979687	6.000000	5.000000	MEG 0822
+823	-2.977890	45.979687	6.000000	5.000000	MEG 0823
+913	5.024490	48.354298	6.000000	5.000000	MEG 0913
+912	5.024490	53.354298	6.000000	5.000000	MEG 0912
+923	13.021803	44.879852	6.000000	5.000000	MEG 0923
+922	13.021803	49.879852	6.000000	5.000000	MEG 0922
+932	12.564190	35.455193	6.000000	5.000000	MEG 0932
+933	12.564190	40.455193	6.000000	5.000000	MEG 0933
+942	4.483593	39.929310	6.000000	5.000000	MEG 0942
+943	4.483593	44.929310	6.000000	5.000000	MEG 0943
+1013	-2.978879	32.002693	6.000000	5.000000	MEG 1013
+1012	-2.978879	37.002693	6.000000	5.000000	MEG 1012
+1023	4.540760	30.762428	6.000000	5.000000	MEG 1023
+1022	4.540760	35.762428	6.000000	5.000000	MEG 1022
+1032	4.780051	21.348934	6.000000	5.000000	MEG 1032
+1033	4.780051	26.348934	6.000000	5.000000	MEG 1033
+1043	0.978956	11.650963	6.000000	5.000000	MEG 1043
+1042	0.978956	16.650963	6.000000	5.000000	MEG 1042
+1112	8.560405	10.928195	6.000000	5.000000	MEG 1112
+1113	8.560405	15.928195	6.000000	5.000000	MEG 1113
+1123	16.224724	12.278107	6.000000	5.000000	MEG 1123
+1122	16.224724	17.278107	6.000000	5.000000	MEG 1122
+1133	17.379185	-0.268703	6.000000	5.000000	MEG 1133
+1132	17.379185	4.731297	6.000000	5.000000	MEG 1132
+1142	9.117422	-0.423700	6.000000	5.000000	MEG 1142
+1143	9.117422	4.576300	6.000000	5.000000	MEG 1143
+1213	20.716938	38.318100	6.000000	5.000000	MEG 1213
+1212	20.716938	43.318100	6.000000	5.000000	MEG 1212
+1223	27.111319	27.293877	6.000000	5.000000	MEG 1223
+1222	27.111319	32.293877	6.000000	5.000000	MEG 1222
+1232	19.469093	26.356115	6.000000	5.000000	MEG 1232
+1233	19.469093	31.356115	6.000000	5.000000	MEG 1233
+1243	12.786146	23.189396	6.000000	5.000000	MEG 1243
+1242	12.786146	28.189396	6.000000	5.000000	MEG 1242
+1312	23.695013	13.899529	6.000000	5.000000	MEG 1312
+1313	23.695013	18.899529	6.000000	5.000000	MEG 1313
+1323	31.369019	13.362624	6.000000	5.000000	MEG 1323
+1322	31.369019	18.362624	6.000000	5.000000	MEG 1322
+1333	33.205658	-2.836478	6.000000	5.000000	MEG 1333
+1332	33.205658	2.163522	6.000000	5.000000	MEG 1332
+1342	25.473745	-0.631941	6.000000	5.000000	MEG 1342
+1343	25.473745	4.368059	6.000000	5.000000	MEG 1343
+1412	33.387833	31.097027	6.000000	5.000000	MEG 1412
+1413	33.387833	36.097027	6.000000	5.000000	MEG 1413
+1423	41.996334	27.235786	6.000000	5.000000	MEG 1423
+1422	41.996334	32.235786	6.000000	5.000000	MEG 1422
+1433	46.693424	6.365705	6.000000	5.000000	MEG 1433
+1432	46.693424	11.365705	6.000000	5.000000	MEG 1432
+1442	38.636284	14.732794	6.000000	5.000000	MEG 1442
+1443	38.636284	19.732794	6.000000	5.000000	MEG 1443
+1512	-46.828197	-4.270524	6.000000	5.000000	MEG 1512
+1513	-46.828197	0.729476	6.000000	5.000000	MEG 1513
+1522	-44.250233	-20.875282	6.000000	5.000000	MEG 1522
+1523	-44.250233	-15.875282	6.000000	5.000000	MEG 1523
+1533	-47.087372	-32.702410	6.000000	5.000000	MEG 1533
+1532	-47.087372	-27.702410	6.000000	5.000000	MEG 1532
+1543	-52.352669	-14.466389	6.000000	5.000000	MEG 1543
+1542	-52.352669	-9.466389	6.000000	5.000000	MEG 1542
+1613	-37.598797	-16.787832	6.000000	5.000000	MEG 1613
+1612	-37.598797	-11.787832	6.000000	5.000000	MEG 1612
+1622	-30.357292	-13.585911	6.000000	5.000000	MEG 1622
+1623	-30.357292	-8.585911	6.000000	5.000000	MEG 1623
+1632	-25.393221	-25.022747	6.000000	5.000000	MEG 1632
+1633	-25.393221	-20.022747	6.000000	5.000000	MEG 1633
+1643	-32.428291	-29.512911	6.000000	5.000000	MEG 1643
+1642	-32.428291	-24.512911	6.000000	5.000000	MEG 1642
+1713	-37.521523	-47.886852	6.000000	5.000000	MEG 1713
+1712	-37.521523	-42.886852	6.000000	5.000000	MEG 1712
+1722	-37.773560	-35.834789	6.000000	5.000000	MEG 1722
+1723	-37.773560	-30.834789	6.000000	5.000000	MEG 1723
+1732	-27.964468	-47.242935	6.000000	5.000000	MEG 1732
+1733	-27.964468	-42.242935	6.000000	5.000000	MEG 1733
+1743	-25.080088	-58.608849	6.000000	5.000000	MEG 1743
+1742	-25.080088	-53.608849	6.000000	5.000000	MEG 1742
+1813	-22.765453	-11.539077	6.000000	5.000000	MEG 1813
+1812	-22.765453	-6.539077	6.000000	5.000000	MEG 1812
+1822	-14.990439	-10.617317	6.000000	5.000000	MEG 1822
+1823	-14.990439	-5.617317	6.000000	5.000000	MEG 1823
+1832	-7.235366	-18.294876	6.000000	5.000000	MEG 1832
+1833	-7.235366	-13.294876	6.000000	5.000000	MEG 1833
+1843	-16.786220	-22.032574	6.000000	5.000000	MEG 1843
+1842	-16.786220	-17.032574	6.000000	5.000000	MEG 1842
+1912	-17.673892	-33.262066	6.000000	5.000000	MEG 1912
+1913	-17.673892	-28.262066	6.000000	5.000000	MEG 1913
+1923	-14.238597	-45.699379	6.000000	5.000000	MEG 1923
+1922	-14.238597	-40.699379	6.000000	5.000000	MEG 1922
+1932	-16.109179	-54.539486	6.000000	5.000000	MEG 1932
+1933	-16.109179	-49.539486	6.000000	5.000000	MEG 1933
+1943	-24.328934	-39.353901	6.000000	5.000000	MEG 1943
+1942	-24.328934	-34.353901	6.000000	5.000000	MEG 1942
+2013	-7.775570	-28.014633	6.000000	5.000000	MEG 2013
+2012	-7.775570	-23.014633	6.000000	5.000000	MEG 2012
+2023	1.721470	-27.999788	6.000000	5.000000	MEG 2023
+2022	1.721470	-22.999788	6.000000	5.000000	MEG 2022
+2032	1.906072	-37.626270	6.000000	5.000000	MEG 2032
+2033	1.906072	-32.626270	6.000000	5.000000	MEG 2033
+2042	-7.905933	-37.589039	6.000000	5.000000	MEG 2042
+2043	-7.905933	-32.589039	6.000000	5.000000	MEG 2043
+2113	-3.033595	-47.732231	6.000000	5.000000	MEG 2113
+2112	-3.033595	-42.732231	6.000000	5.000000	MEG 2112
+2122	-2.999999	-56.934807	6.000000	5.000000	MEG 2122
+2123	-2.999999	-51.934807	6.000000	5.000000	MEG 2123
+2133	4.648282	-64.218044	6.000000	5.000000	MEG 2133
+2132	4.648282	-59.218044	6.000000	5.000000	MEG 2132
+2143	-10.692250	-64.207703	6.000000	5.000000	MEG 2143
+2142	-10.692250	-59.207703	6.000000	5.000000	MEG 2142
+2212	8.962435	-10.585071	6.000000	5.000000	MEG 2212
+2213	8.962435	-5.585071	6.000000	5.000000	MEG 2213
+2223	16.756042	-11.459877	6.000000	5.000000	MEG 2223
+2222	16.756042	-6.459877	6.000000	5.000000	MEG 2222
+2233	10.797766	-22.111992	6.000000	5.000000	MEG 2233
+2232	10.797766	-17.111992	6.000000	5.000000	MEG 2232
+2242	1.262053	-18.307052	6.000000	5.000000	MEG 2242
+2243	1.262053	-13.307052	6.000000	5.000000	MEG 2243
+2312	11.664891	-33.279053	6.000000	5.000000	MEG 2312
+2313	11.664891	-28.279053	6.000000	5.000000	MEG 2313
+2323	18.227104	-39.378311	6.000000	5.000000	MEG 2323
+2322	18.227104	-34.378311	6.000000	5.000000	MEG 2322
+2332	10.067341	-54.531059	6.000000	5.000000	MEG 2332
+2333	10.067341	-49.531059	6.000000	5.000000	MEG 2333
+2343	8.120804	-45.719460	6.000000	5.000000	MEG 2343
+2342	8.120804	-40.719460	6.000000	5.000000	MEG 2342
+2412	24.365654	-13.469363	6.000000	5.000000	MEG 2412
+2413	24.365654	-8.469363	6.000000	5.000000	MEG 2413
+2423	31.531933	-16.859812	6.000000	5.000000	MEG 2423
+2422	31.531933	-11.859812	6.000000	5.000000	MEG 2422
+2433	26.322470	-29.595119	6.000000	5.000000	MEG 2433
+2432	26.322470	-24.595119	6.000000	5.000000	MEG 2432
+2442	19.393225	-25.022739	6.000000	5.000000	MEG 2442
+2443	19.393225	-20.022739	6.000000	5.000000	MEG 2443
+2512	21.906504	-47.260071	6.000000	5.000000	MEG 2512
+2513	21.906504	-42.260071	6.000000	5.000000	MEG 2513
+2522	31.762718	-35.773750	6.000000	5.000000	MEG 2522
+2523	31.762718	-30.773750	6.000000	5.000000	MEG 2523
+2533	31.462860	-47.927265	6.000000	5.000000	MEG 2533
+2532	31.462860	-42.927265	6.000000	5.000000	MEG 2532
+2543	19.023640	-58.637577	6.000000	5.000000	MEG 2543
+2542	19.023640	-53.637577	6.000000	5.000000	MEG 2542
+2612	40.724506	-4.266347	6.000000	5.000000	MEG 2612
+2613	40.724506	0.733653	6.000000	5.000000	MEG 2613
+2623	46.297695	-14.395032	6.000000	5.000000	MEG 2623
+2622	46.297695	-9.395032	6.000000	5.000000	MEG 2622
+2633	40.950874	-32.847042	6.000000	5.000000	MEG 2633
+2632	40.950874	-27.847042	6.000000	5.000000	MEG 2632
+2642	38.210819	-20.857738	6.000000	5.000000	MEG 2642
+2643	38.210819	-15.857738	6.000000	5.000000	MEG 2643
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-mag.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-mag.lout
new file mode 100644
index 0000000..c5f4c60
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/Vectorview-mag.lout
@@ -0,0 +1,103 @@
+-50.000000 50.000000 -50.000000 38.000000
+111	-41.408840	17.090919	6.000000	5.000000	MEG 0111
+121	-33.873951	19.857674	6.000000	5.000000	MEG 0121
+131	-38.464523	9.051075	6.000000	5.000000	MEG 0131
+141	-45.317917	3.279520	6.000000	5.000000	MEG 0141
+211	-32.233719	8.146864	6.000000	5.000000	MEG 0211
+221	-25.690760	8.433022	6.000000	5.000000	MEG 0221
+231	-27.227139	-1.254610	6.000000	5.000000	MEG 0231
+241	-33.698534	-2.642785	6.000000	5.000000	MEG 0241
+311	-23.067547	24.734621	6.000000	5.000000	MEG 0311
+321	-22.098728	16.737410	6.000000	5.000000	MEG 0321
+331	-16.461800	14.609854	6.000000	5.000000	MEG 0331
+341	-28.464256	17.451874	6.000000	5.000000	MEG 0341
+411	-19.362539	7.376735	6.000000	5.000000	MEG 0411
+421	-12.864409	6.474677	6.000000	5.000000	MEG 0421
+431	-13.325964	-1.183000	6.000000	5.000000	MEG 0431
+441	-20.358908	-0.938589	6.000000	5.000000	MEG 0441
+511	-16.560817	29.103437	6.000000	5.000000	MEG 0511
+521	-9.821842	31.383564	6.000000	5.000000	MEG 0521
+531	-9.336051	25.759117	6.000000	5.000000	MEG 0531
+541	-16.222077	22.789145	6.000000	5.000000	MEG 0541
+611	-9.426766	19.671541	6.000000	5.000000	MEG 0611
+621	-2.982150	13.733236	6.000000	5.000000	MEG 0621
+631	-6.324418	6.882314	6.000000	5.000000	MEG 0631
+641	-9.654012	13.389857	6.000000	5.000000	MEG 0641
+711	-6.407364	-0.212448	6.000000	5.000000	MEG 0711
+721	0.444286	-0.277880	6.000000	5.000000	MEG 0721
+731	0.483912	-6.911695	6.000000	5.000000	MEG 0731
+741	-6.503398	-6.874514	6.000000	5.000000	MEG 0741
+811	-2.979496	32.140564	6.000000	5.000000	MEG 0811
+821	-2.981206	26.486458	6.000000	5.000000	MEG 0821
+911	3.820817	31.402866	6.000000	5.000000	MEG 0911
+921	10.618533	29.086569	6.000000	5.000000	MEG 0921
+931	10.229562	22.803463	6.000000	5.000000	MEG 0931
+941	3.361053	25.786205	6.000000	5.000000	MEG 0941
+1011	-2.982047	20.501795	6.000000	5.000000	MEG 1011
+1021	3.409646	19.674952	6.000000	5.000000	MEG 1021
+1031	3.613043	13.399289	6.000000	5.000000	MEG 1031
+1041	0.382112	6.933975	6.000000	5.000000	MEG 1041
+1111	6.826344	6.452130	6.000000	5.000000	MEG 1111
+1121	13.341015	7.352071	6.000000	5.000000	MEG 1121
+1131	14.322306	-1.012468	6.000000	5.000000	MEG 1131
+1141	7.299809	-1.115800	6.000000	5.000000	MEG 1141
+1211	17.159397	24.712067	6.000000	5.000000	MEG 1211
+1221	22.594622	17.362583	6.000000	5.000000	MEG 1221
+1231	16.098728	16.737411	6.000000	5.000000	MEG 1231
+1241	10.418224	14.626265	6.000000	5.000000	MEG 1241
+1311	19.690762	8.433019	6.000000	5.000000	MEG 1311
+1321	26.213667	8.075083	6.000000	5.000000	MEG 1321
+1331	27.774809	-2.728805	6.000000	5.000000	MEG 1331
+1341	21.202684	-1.254627	6.000000	5.000000	MEG 1341
+1411	27.929657	19.898018	6.000000	5.000000	MEG 1411
+1421	35.246883	17.323858	6.000000	5.000000	MEG 1421
+1431	39.239410	3.410470	6.000000	5.000000	MEG 1431
+1441	32.390839	8.988529	6.000000	5.000000	MEG 1441
+1511	-40.253967	-3.703956	6.000000	5.000000	MEG 1511
+1521	-38.062698	-14.995193	6.000000	5.000000	MEG 1521
+1531	-40.474266	-23.037640	6.000000	5.000000	MEG 1531
+1541	-44.949768	-10.637144	6.000000	5.000000	MEG 1541
+1611	-32.408976	-12.215726	6.000000	5.000000	MEG 1611
+1621	-26.253698	-10.038419	6.000000	5.000000	MEG 1621
+1631	-22.034237	-17.815468	6.000000	5.000000	MEG 1631
+1641	-28.014048	-20.868780	6.000000	5.000000	MEG 1641
+1711	-32.343294	-33.363060	6.000000	5.000000	MEG 1711
+1721	-32.557526	-25.167658	6.000000	5.000000	MEG 1721
+1731	-24.219797	-32.925196	6.000000	5.000000	MEG 1731
+1741	-21.768074	-40.654018	6.000000	5.000000	MEG 1741
+1811	-19.800634	-8.646573	6.000000	5.000000	MEG 1811
+1821	-13.191874	-8.019776	6.000000	5.000000	MEG 1821
+1831	-6.600061	-13.240516	6.000000	5.000000	MEG 1831
+1841	-14.718287	-15.782150	6.000000	5.000000	MEG 1841
+1911	-15.472808	-23.418205	6.000000	5.000000	MEG 1911
+1921	-12.552808	-31.875578	6.000000	5.000000	MEG 1921
+1931	-14.142802	-37.886852	6.000000	5.000000	MEG 1931
+1941	-21.129593	-27.560652	6.000000	5.000000	MEG 1941
+2011	-7.059234	-19.849951	6.000000	5.000000	MEG 2011
+2021	1.013249	-19.839857	6.000000	5.000000	MEG 2021
+2031	1.170161	-26.385864	6.000000	5.000000	MEG 2031
+2041	-7.170043	-26.360546	6.000000	5.000000	MEG 2041
+2111	-3.028555	-33.257917	6.000000	5.000000	MEG 2111
+2121	-3.000000	-39.515667	6.000000	5.000000	MEG 2121
+2131	3.501040	-44.468269	6.000000	5.000000	MEG 2131
+2141	-9.538412	-44.461239	6.000000	5.000000	MEG 2141
+2211	7.168070	-7.997848	6.000000	5.000000	MEG 2211
+2221	13.792637	-8.592716	6.000000	5.000000	MEG 2221
+2231	8.728101	-15.836154	6.000000	5.000000	MEG 2231
+2241	0.622745	-13.248796	6.000000	5.000000	MEG 2241
+2311	9.465158	-23.429756	6.000000	5.000000	MEG 2311
+2321	15.043037	-27.577251	6.000000	5.000000	MEG 2321
+2331	8.107240	-37.881119	6.000000	5.000000	MEG 2331
+2341	6.452683	-31.889233	6.000000	5.000000	MEG 2341
+2411	20.260805	-9.959167	6.000000	5.000000	MEG 2411
+2421	26.352144	-12.264672	6.000000	5.000000	MEG 2421
+2431	21.924099	-20.924681	6.000000	5.000000	MEG 2431
+2441	16.034241	-17.815463	6.000000	5.000000	MEG 2441
+2511	18.170528	-32.936850	6.000000	5.000000	MEG 2511
+2521	26.548311	-25.126150	6.000000	5.000000	MEG 2521
+2531	26.293430	-33.390539	6.000000	5.000000	MEG 2531
+2541	15.720093	-40.673553	6.000000	5.000000	MEG 2541
+2611	34.165833	-3.701116	6.000000	5.000000	MEG 2611
+2621	38.903042	-10.588621	6.000000	5.000000	MEG 2621
+2631	34.358242	-23.135988	6.000000	5.000000	MEG 2631
+2641	32.029198	-14.983262	6.000000	5.000000	MEG 2641
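[Note: the .lout layout format added above is easy to read by hand: the first line gives the plotting bounding box (xmin xmax ymin ymax), and every following line gives a box id, x/y position, box width and height, and the channel name. A minimal parsing sketch follows; the helper name read_lout is illustrative only, not part of mne:

    def read_lout(fname):
        """Parse a .lout layout file into a bounding box and channel list."""
        with open(fname) as fid:
            lines = fid.read().splitlines()
        # First line: plotting bounding box -> xmin, xmax, ymin, ymax.
        box = tuple(float(v) for v in lines[0].split())
        channels = []
        for line in lines[1:]:
            parts = line.split()
            if len(parts) < 6:
                continue  # skip blank or malformed lines
            ident = parts[0]
            x, y, w, h = (float(v) for v in parts[1:5])
            # Channel names may contain spaces, e.g. 'MEG 0111'.
            name = ' '.join(parts[5:])
            channels.append((ident, x, y, w, h, name))
        return box, channels

In practice mne ships its own reader for these files (mne.channels.read_layout in this version), so hand parsing is only needed outside the package.]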
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/biosemi.lay b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/biosemi.lay
new file mode 100644
index 0000000..ca74816
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/biosemi.lay
@@ -0,0 +1,64 @@
+1	-0.496189	1.527114	0.290000	0.230000	Fp1
+2	-0.943808	1.299041	0.290000	0.230000	AF7
+3	-0.545830	1.170536	0.290000	0.230000	AF3
+4	-0.326906	0.809121	0.290000	0.230000	F1
+5	-0.659023	0.813825	0.290000	0.230000	F3
+6	-0.987913	0.858779	0.290000	0.230000	F5
+7	-1.299041	0.943808	0.290000	0.230000	F7
+8	-1.527114	0.496189	0.290000	0.230000	FT7
+9	-1.173172	0.450338	0.290000	0.230000	FC5
+10	-0.770517	0.409691	0.290000	0.230000	FC3
+11	-0.394923	0.394923	0.290000	0.230000	FC1
+12	-0.401426	-0.000000	0.290000	0.230000	C1
+13	-0.802851	-0.000000	0.290000	0.230000	C3
+14	-1.204277	-0.000000	0.290000	0.230000	C5
+15	-1.605703	-0.000000	0.290000	0.230000	T7
+16	-1.527114	-0.496189	0.290000	0.230000	TP7
+17	-1.173172	-0.450338	0.290000	0.230000	CP5
+18	-0.770517	-0.409691	0.290000	0.230000	CP3
+19	-0.394923	-0.394923	0.290000	0.230000	CP1
+20	-0.326906	-0.809121	0.290000	0.230000	P1
+21	-0.659023	-0.813825	0.290000	0.230000	P3
+22	-0.987913	-0.858779	0.290000	0.230000	P5
+23	-1.299041	-0.943808	0.290000	0.230000	P7
+24	-1.537550	-1.290157	0.290000	0.230000	P9
+25	-0.943808	-1.299041	0.290000	0.230000	PO7
+26	-0.545830	-1.170536	0.290000	0.230000	PO3
+27	-0.496189	-1.527114	0.290000	0.230000	O1
+28	0.000000	-2.007129	0.290000	0.230000	Iz
+29	0.000000	-1.605703	0.290000	0.230000	Oz
+30	0.000000	-1.204277	0.290000	0.230000	POz
+31	0.000000	-0.802851	0.290000	0.230000	Pz
+32	0.000000	-0.401426	0.290000	0.230000	CPz
+33	0.000000	1.605703	0.290000	0.230000	Fpz
+34	0.496189	1.527114	0.290000	0.230000	Fp2
+35	0.943808	1.299041	0.290000	0.230000	AF8
+36	0.545830	1.170536	0.290000	0.230000	AF4
+37	0.000000	1.204277	0.290000	0.230000	AFz
+38	0.000000	0.802851	0.290000	0.230000	Fz
+39	0.326906	0.809121	0.290000	0.230000	F2
+40	0.659023	0.813825	0.290000	0.230000	F4
+41	0.987913	0.858779	0.290000	0.230000	F6
+42	1.299041	0.943808	0.290000	0.230000	F8
+43	1.527114	0.496189	0.290000	0.230000	FT8
+44	1.173172	0.450338	0.290000	0.230000	FC6
+45	0.770517	0.409691	0.290000	0.230000	FC4
+46	0.394923	0.394923	0.290000	0.230000	FC2
+47	0.000000	0.401426	0.290000	0.230000	FCz
+48	0.000000	0.000000	0.290000	0.230000	Cz
+49	0.401426	0.000000	0.290000	0.230000	C2
+50	0.802851	0.000000	0.290000	0.230000	C4
+51	1.204277	0.000000	0.290000	0.230000	C6
+52	1.605703	0.000000	0.290000	0.230000	T8
+53	1.527114	-0.496189	0.290000	0.230000	TP8
+54	1.173172	-0.450338	0.290000	0.230000	CP6
+55	0.770517	-0.409691	0.290000	0.230000	CP4
+56	0.394923	-0.394923	0.290000	0.230000	CP2
+57	0.326906	-0.809121	0.290000	0.230000	P2
+58	0.659023	-0.813825	0.290000	0.230000	P4
+59	0.987913	-0.858779	0.290000	0.230000	P6
+60	1.299041	-0.943808	0.290000	0.230000	P8
+61	1.537550	-1.290157	0.290000	0.230000	P10
+62	0.943808	-1.299041	0.290000	0.230000	PO8
+63	0.545830	-1.170536	0.290000	0.230000	PO4
+64	0.496189	-1.527114	0.290000	0.230000	O2
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/magnesWH3600.lout b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/magnesWH3600.lout
new file mode 100644
index 0000000..577e953
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/layouts/magnesWH3600.lout
@@ -0,0 +1,249 @@
+  -42.19    43.52   -41.70    28.71
+001    -1.28    -5.13     4.00     3.00 MEG 001
+002    -1.22    -1.43     4.00     3.00 MEG 002
+003    -1.37     2.53     4.00     3.00 MEG 003
+004    -1.36     5.90     4.00     3.00 MEG 004
+005    -1.45     9.27     4.00     3.00 MEG 005
+006    -4.89     9.36     4.00     3.00 MEG 006
+007    -5.20     5.86     4.00     3.00 MEG 007
+008    -5.26     2.40     4.00     3.00 MEG 008
+009    -5.34    -1.29     4.00     3.00 MEG 009
+010    -5.12    -5.08     4.00     3.00 MEG 010
+011    -4.73    -8.47     4.00     3.00 MEG 011
+012    -1.31    -8.81     4.00     3.00 MEG 012
+013     2.04    -8.49     4.00     3.00 MEG 013
+014     2.54    -5.16     4.00     3.00 MEG 014
+015     2.69    -1.43     4.00     3.00 MEG 015
+016     2.62     2.56     4.00     3.00 MEG 016
+017     2.50     5.89     4.00     3.00 MEG 017
+018     2.10     9.34     4.00     3.00 MEG 018
+019    -1.45    12.55     4.00     3.00 MEG 019
+020    -5.76    12.42     4.00     3.00 MEG 020
+021    -8.30     9.98     4.00     3.00 MEG 021
+022    -9.16     5.97     4.00     3.00 MEG 022
+023    -9.32     2.49     4.00     3.00 MEG 023
+024    -9.42    -1.32     4.00     3.00 MEG 024
+025    -9.13    -5.11     4.00     3.00 MEG 025
+026    -8.43    -9.18     4.00     3.00 MEG 026
+027    -5.45   -12.10     4.00     3.00 MEG 027
+028    -1.40   -12.51     4.00     3.00 MEG 028
+029     2.64   -12.08     4.00     3.00 MEG 029
+030     5.77    -9.29     4.00     3.00 MEG 030
+031     6.50    -5.19     4.00     3.00 MEG 031
+032     6.85    -1.37     4.00     3.00 MEG 032
+033     6.70     2.65     4.00     3.00 MEG 033
+034     6.46     6.18     4.00     3.00 MEG 034
+035     5.61    10.08     4.00     3.00 MEG 035
+036     2.95    12.49     4.00     3.00 MEG 036
+037    -1.47    15.77     4.00     3.00 MEG 037
+038    -5.48    15.52     4.00     3.00 MEG 038
+039    -8.97    13.31     4.00     3.00 MEG 039
+040   -11.91    10.42     4.00     3.00 MEG 040
+041   -12.96     6.84     4.00     3.00 MEG 041
+042   -13.39     3.21     4.00     3.00 MEG 042
+043   -13.58    -0.70     4.00     3.00 MEG 043
+044   -13.08    -4.42     4.00     3.00 MEG 044
+045   -12.52    -8.05     4.00     3.00 MEG 045
+046   -11.13   -11.34     4.00     3.00 MEG 046
+047    -8.45   -14.21     4.00     3.00 MEG 047
+048    -5.08   -15.56     4.00     3.00 MEG 048
+049    -1.60   -16.17     4.00     3.00 MEG 049
+050     2.22   -15.61     4.00     3.00 MEG 050
+051     5.63   -14.28     4.00     3.00 MEG 051
+052     8.38   -11.70     4.00     3.00 MEG 052
+053     9.89    -8.24     4.00     3.00 MEG 053
+054    10.43    -4.42     4.00     3.00 MEG 054
+055    10.94    -0.62     4.00     3.00 MEG 055
+056    10.72     3.35     4.00     3.00 MEG 056
+057    10.22     7.01     4.00     3.00 MEG 057
+058     9.04    10.61     4.00     3.00 MEG 058
+059     6.20    13.42     4.00     3.00 MEG 059
+060     2.52    15.65     4.00     3.00 MEG 060
+061    -1.53    18.91     4.00     3.00 MEG 061
+062    -5.68    18.61     4.00     3.00 MEG 062
+063    -9.46    16.89     4.00     3.00 MEG 063
+064   -12.95    14.48     4.00     3.00 MEG 064
+065   -15.67    11.24     4.00     3.00 MEG 065
+066   -17.06     7.05     4.00     3.00 MEG 066
+067   -17.65     3.16     4.00     3.00 MEG 067
+068   -17.98    -1.20     4.00     3.00 MEG 068
+069   -17.13    -5.53     4.00     3.00 MEG 069
+070   -16.60    -9.33     4.00     3.00 MEG 070
+071   -14.32   -12.91     4.00     3.00 MEG 071
+072   -11.85   -15.75     4.00     3.00 MEG 072
+073    -8.78   -17.93     4.00     3.00 MEG 073
+074    -5.30   -19.40     4.00     3.00 MEG 074
+075    -1.58   -19.85     4.00     3.00 MEG 075
+076     2.41   -19.42     4.00     3.00 MEG 076
+077     5.94   -18.13     4.00     3.00 MEG 077
+078     9.16   -15.98     4.00     3.00 MEG 078
+079    11.79   -13.08     4.00     3.00 MEG 079
+080    13.62    -9.59     4.00     3.00 MEG 080
+081    14.57    -5.64     4.00     3.00 MEG 081
+082    15.42    -1.35     4.00     3.00 MEG 082
+083    15.05     3.30     4.00     3.00 MEG 083
+084    14.29     7.20     4.00     3.00 MEG 084
+085    12.81    11.43     4.00     3.00 MEG 085
+086     9.96    14.67     4.00     3.00 MEG 086
+087     6.46    17.06     4.00     3.00 MEG 087
+088     2.60    18.73     4.00     3.00 MEG 088
+089    -1.60    22.21     4.00     3.00 MEG 089
+090    -5.83    21.82     4.00     3.00 MEG 090
+091    -9.75    20.43     4.00     3.00 MEG 091
+092   -13.45    18.45     4.00     3.00 MEG 092
+093   -16.67    15.62     4.00     3.00 MEG 093
+094   -19.33    12.13     4.00     3.00 MEG 094
+095   -20.94     7.82     4.00     3.00 MEG 095
+096   -21.81     3.65     4.00     3.00 MEG 096
+097   -22.23    -1.27     4.00     3.00 MEG 097
+098   -21.14    -5.87     4.00     3.00 MEG 098
+099   -20.30    -9.97     4.00     3.00 MEG 099
+100   -18.46   -13.84     4.00     3.00 MEG 100
+101   -16.07   -17.08     4.00     3.00 MEG 101
+102   -12.88   -19.71     4.00     3.00 MEG 102
+103    -9.34   -21.89     4.00     3.00 MEG 103
+104    -5.64   -23.02     4.00     3.00 MEG 104
+105    -1.72   -23.54     4.00     3.00 MEG 105
+106     2.48   -23.24     4.00     3.00 MEG 106
+107     6.42   -22.00     4.00     3.00 MEG 107
+108     9.86   -20.19     4.00     3.00 MEG 108
+109    13.22   -17.32     4.00     3.00 MEG 109
+110    15.75   -14.15     4.00     3.00 MEG 110
+111    17.67   -10.19     4.00     3.00 MEG 111
+112    18.65    -6.08     4.00     3.00 MEG 112
+113    19.69    -1.27     4.00     3.00 MEG 113
+114    19.27     3.70     4.00     3.00 MEG 114
+115    18.30     8.05     4.00     3.00 MEG 115
+116    16.46    12.48     4.00     3.00 MEG 116
+117    13.74    15.93     4.00     3.00 MEG 117
+118    10.41    18.72     4.00     3.00 MEG 118
+119     6.64    20.69     4.00     3.00 MEG 119
+120     2.67    22.02     4.00     3.00 MEG 120
+121    -1.74    25.41     4.00     3.00 MEG 121
+122    -6.59    24.84     4.00     3.00 MEG 122
+123   -11.16    23.37     4.00     3.00 MEG 123
+124   -15.46    21.07     4.00     3.00 MEG 124
+125   -19.25    17.84     4.00     3.00 MEG 125
+126   -22.45    13.89     4.00     3.00 MEG 126
+127   -24.89     8.96     4.00     3.00 MEG 127
+128   -26.13     4.36     4.00     3.00 MEG 128
+129   -26.65    -1.22     4.00     3.00 MEG 129
+130   -25.30    -6.36     4.00     3.00 MEG 130
+131   -24.16   -11.45     4.00     3.00 MEG 131
+132   -21.98   -15.88     4.00     3.00 MEG 132
+133   -18.81   -19.82     4.00     3.00 MEG 133
+134   -15.20   -22.99     4.00     3.00 MEG 134
+135   -11.11   -25.29     4.00     3.00 MEG 135
+136    -6.51   -26.74     4.00     3.00 MEG 136
+137    -1.86   -27.28     4.00     3.00 MEG 137
+138     3.17   -26.90     4.00     3.00 MEG 138
+139     7.79   -25.55     4.00     3.00 MEG 139
+140    12.07   -23.15     4.00     3.00 MEG 140
+141    15.93   -20.09     4.00     3.00 MEG 141
+142    19.04   -16.25     4.00     3.00 MEG 142
+143    21.39   -11.67     4.00     3.00 MEG 143
+144    22.75    -6.58     4.00     3.00 MEG 144
+145    23.99    -1.23     4.00     3.00 MEG 145
+146    23.36     4.49     4.00     3.00 MEG 146
+147    22.02     9.37     4.00     3.00 MEG 147
+148    19.51    14.31     4.00     3.00 MEG 148
+149    16.20    18.23     4.00     3.00 MEG 149
+150    12.16    21.54     4.00     3.00 MEG 150
+151     7.85    23.69     4.00     3.00 MEG 151
+152     3.16    25.01     4.00     3.00 MEG 152
+153   -23.01    18.82     4.00     3.00 MEG 153
+154   -26.06    15.31     4.00     3.00 MEG 154
+155   -28.76    10.18     4.00     3.00 MEG 155
+156   -31.71     3.39     4.00     3.00 MEG 156
+157   -32.05    -2.89     4.00     3.00 MEG 157
+158   -31.42    -8.67     4.00     3.00 MEG 158
+159   -26.22   -15.24     4.00     3.00 MEG 159
+160   -23.31   -19.72     4.00     3.00 MEG 160
+161   -19.33   -23.66     4.00     3.00 MEG 161
+162   -14.75   -26.73     4.00     3.00 MEG 162
+163    -9.92   -28.91     4.00     3.00 MEG 163
+164    -4.52   -30.10     4.00     3.00 MEG 164
+165     1.25   -30.15     4.00     3.00 MEG 165
+166     6.17   -29.40     4.00     3.00 MEG 166
+167    11.43   -27.39     4.00     3.00 MEG 167
+168    16.20   -24.37     4.00     3.00 MEG 168
+169    20.37   -20.27     4.00     3.00 MEG 169
+170    23.54   -15.56     4.00     3.00 MEG 170
+171    28.66    -8.94     4.00     3.00 MEG 171
+172    29.46    -3.00     4.00     3.00 MEG 172
+173    29.04     3.51     4.00     3.00 MEG 173
+174    25.94    10.77     4.00     3.00 MEG 174
+175    23.08    15.80     4.00     3.00 MEG 175
+176    19.78    19.54     4.00     3.00 MEG 176
+177   -26.70    20.52     4.00     3.00 MEG 177
+178   -29.66    16.81     4.00     3.00 MEG 178
+179   -32.55    11.68     4.00     3.00 MEG 179
+180   -32.47   -13.23     4.00     3.00 MEG 180
+181   -27.63   -19.12     4.00     3.00 MEG 181
+182   -23.75   -23.89     4.00     3.00 MEG 182
+183   -18.94   -27.77     4.00     3.00 MEG 183
+184   -13.64   -30.59     4.00     3.00 MEG 184
+185    -7.93   -32.70     4.00     3.00 MEG 185
+186    -2.12   -33.31     4.00     3.00 MEG 186
+187     4.06   -32.74     4.00     3.00 MEG 187
+188    10.04   -31.14     4.00     3.00 MEG 188
+189    15.57   -28.41     4.00     3.00 MEG 189
+190    20.44   -24.69     4.00     3.00 MEG 190
+191    24.62   -19.81     4.00     3.00 MEG 191
+192    29.49   -13.87     4.00     3.00 MEG 192
+193    29.48    12.54     4.00     3.00 MEG 193
+194    26.49    17.54     4.00     3.00 MEG 194
+195    23.28    21.40     4.00     3.00 MEG 195
+196   -36.84     4.15     4.00     3.00 MEG 196
+197   -37.22    -3.16     4.00     3.00 MEG 197
+198   -36.14    -9.68     4.00     3.00 MEG 198
+199   -28.42   -23.63     4.00     3.00 MEG 199
+200   -23.68   -28.05     4.00     3.00 MEG 200
+201   -18.03   -31.89     4.00     3.00 MEG 201
+202   -11.97   -34.42     4.00     3.00 MEG 202
+203    -5.32   -35.88     4.00     3.00 MEG 203
+204     1.03   -36.08     4.00     3.00 MEG 204
+205     7.92   -35.00     4.00     3.00 MEG 205
+206    13.99   -32.64     4.00     3.00 MEG 206
+207    19.78   -29.06     4.00     3.00 MEG 207
+208    24.79   -24.52     4.00     3.00 MEG 208
+209    33.39   -10.13     4.00     3.00 MEG 209
+210    34.62    -3.11     4.00     3.00 MEG 210
+211    34.23     4.57     4.00     3.00 MEG 211
+212   -32.38    19.14     4.00     3.00 MEG 212
+213   -35.90    13.21     4.00     3.00 MEG 213
+214   -36.70   -14.70     4.00     3.00 MEG 214
+215   -32.93   -22.44     4.00     3.00 MEG 215
+216   -28.17   -28.07     4.00     3.00 MEG 216
+217   -22.65   -32.41     4.00     3.00 MEG 217
+218   -16.53   -35.71     4.00     3.00 MEG 218
+219    -9.52   -37.92     4.00     3.00 MEG 219
+220    -2.58   -38.82     4.00     3.00 MEG 220
+221     4.65   -38.54     4.00     3.00 MEG 221
+222    11.78   -36.65     4.00     3.00 MEG 222
+223    18.43   -33.60     4.00     3.00 MEG 223
+224    24.26   -29.21     4.00     3.00 MEG 224
+225    29.52   -23.44     4.00     3.00 MEG 225
+226    33.73   -15.36     4.00     3.00 MEG 226
+227    33.02    14.20     4.00     3.00 MEG 227
+228    29.24    19.93     4.00     3.00 MEG 228
+229   -36.80    18.24     4.00     3.00 MEG 229
+230   -40.03    12.76     4.00     3.00 MEG 230
+231   -41.35     5.03     4.00     3.00 MEG 231
+232   -41.79    -3.17     4.00     3.00 MEG 232
+233   -40.48   -10.59     4.00     3.00 MEG 233
+234   -32.92   -26.79     4.00     3.00 MEG 234
+235   -27.40   -32.12     4.00     3.00 MEG 235
+236   -20.92   -36.72     4.00     3.00 MEG 236
+237   -14.11   -39.49     4.00     3.00 MEG 237
+238    -6.76   -41.18     4.00     3.00 MEG 238
+239     1.45   -41.40     4.00     3.00 MEG 239
+240     8.96   -40.25     4.00     3.00 MEG 240
+241    16.27   -37.84     4.00     3.00 MEG 241
+242    22.75   -33.68     4.00     3.00 MEG 242
+243    29.08   -28.20     4.00     3.00 MEG 243
+244    37.59   -11.05     4.00     3.00 MEG 244
+245    39.12    -3.16     4.00     3.00 MEG 245
+246    38.59     5.47     4.00     3.00 MEG 246
+247    37.16    13.60     4.00     3.00 MEG 247
+248    33.62    18.93     4.00     3.00 MEG 248
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-128.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-128.sfp
new file mode 100644
index 0000000..56c94f8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-128.sfp
@@ -0,0 +1,131 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	5.787677636	5.520863216	-2.577468644
+E2	5.291804727	6.709097557	0.307434896
+E3	3.864122447	7.63424051	3.067770143
+E4	2.868837559	7.145708546	4.989564557
+E5	1.479340453	5.68662139	6.812878187
+E6	0	3.806770224	7.891304964
+E7	-1.223800252	1.558864431	8.44043914
+E8	4.221901505	7.998817387	-1.354789681
+E9	2.695405558	8.884820317	1.088308144
+E10	1.830882336	8.708839134	3.18709115
+E11	0	7.96264703	5.044718001
+E12	-1.479340453	5.68662139	6.812878187
+E13	-2.435870762	3.254307219	7.608766206
+E14	1.270447661	9.479016328	-0.947183306
+E15	0	9.087440894	1.333345013
+E16	0	9.076490798	3.105438474
+E17	0	9.271139705	-2.211516434
+E18	-1.830882336	8.708839134	3.18709115
+E19	-2.868837559	7.145708546	4.989564557
+E20	-3.825797111	5.121648995	5.942844877
+E21	-1.270447661	9.479016328	-0.947183306
+E22	-2.695405558	8.884820317	1.088308144
+E23	-3.864122447	7.63424051	3.067770143
+E24	-4.459387187	6.021159964	4.365321482
+E25	-4.221901505	7.998817387	-1.354789681
+E26	-5.291804727	6.709097557	0.307434896
+E27	-5.682547954	5.453384344	2.836565436
+E28	-5.546670402	4.157847823	4.627615703
+E29	-4.762196763	2.697832099	6.297663028
+E30	-3.695490968	0.960411022	7.627828134
+E31	-1.955187826	-0.684381878	8.564858511
+E32	-5.787677636	5.520863216	-2.577468644
+E33	-6.399087198	4.127248875	-0.356852241
+E34	-6.823959684	2.968422112	2.430080351
+E35	-6.414469893	1.490027747	4.741794544
+E36	-5.47913021	0.284948655	6.38332782
+E37	-3.909902609	-1.519049882	7.764134929
+E38	-6.550732888	3.611543152	-3.353155926
+E39	-7.191620108	0.850096251	-0.882936903
+E40	-7.391919265	0.032151584	2.143634599
+E41	-6.905051715	-0.800953972	4.600056501
+E42	-5.956055073	-2.338984312	6.00361353
+E43	-6.518995129	2.417299399	-5.253637073
+E44	-6.840717711	1.278489412	-3.5553823
+E45	-7.304625099	-1.866238006	-0.629182006
+E46	-7.312517928	-2.298574078	2.385298838
+E47	-6.737313764	-3.011819533	4.178390203
+E48	-5.934584124	2.22697797	-7.934360742
+E49	-6.298127313	0.41663451	-6.069156425
+E50	-6.78248072	-4.023512045	-0.232191092
+E51	-6.558030032	-4.667036048	2.749989597
+E52	-5.831241498	-4.494821698	4.955347697
+E53	-4.193518856	-4.037020083	6.982920038
+E54	-2.270752074	-3.414835627	8.204556551
+E55	0	-2.138343513	8.791875902
+E56	-6.174969392	-2.458138877	-5.637380998
+E57	-6.580438308	-3.739554155	-2.991084431
+E58	-6.034746843	-5.755782196	0.051843011
+E59	-5.204501802	-6.437833018	2.984444293
+E60	-4.116929504	-6.061561438	5.365757296
+E61	-2.344914884	-5.481057427	7.057748614
+E62	0	-6.676694032	6.465208258
+E63	-5.333266171	-4.302240169	-5.613509789
+E64	-5.404091392	-5.870302681	-2.891640039
+E65	-4.645302298	-7.280552408	0.130139701
+E66	-3.608293164	-7.665487704	3.129931648
+E67	-1.844644417	-7.354417376	5.224001733
+E68	-3.784983913	-6.401014415	-5.260040689
+E69	-3.528848027	-7.603010836	-2.818037873
+E70	-2.738838019	-8.607966849	0.239368223
+E71	-1.404967401	-8.437486994	3.277284901
+E72	0	-7.829896826	4.687622229
+E73	-1.929652202	-7.497197868	-5.136777648
+E74	-1.125731192	-8.455208629	-2.632832329
+E75	0	-8.996686498	0.487952047
+E76	1.404967401	-8.437486994	3.277284901
+E77	1.844644417	-7.354417376	5.224001733
+E78	2.344914884	-5.481057427	7.057748614
+E79	2.270752074	-3.414835627	8.204556551
+E80	1.955187826	-0.684381878	8.564858511
+E81	0	-7.85891896	-4.945387489
+E82	1.125731192	-8.455208629	-2.632832329
+E83	2.738838019	-8.607966849	0.239368223
+E84	3.608293164	-7.665487704	3.129931648
+E85	4.116929504	-6.061561438	5.365757296
+E86	4.193518856	-4.037020083	6.982920038
+E87	3.909902609	-1.519049882	7.764134929
+E88	1.929652202	-7.497197868	-5.136777648
+E89	3.528848027	-7.603010836	-2.818037873
+E90	4.645302298	-7.280552408	0.130139701
+E91	5.204501802	-6.437833018	2.984444293
+E92	5.831241498	-4.494821698	4.955347697
+E93	5.956055073	-2.338984312	6.00361353
+E94	3.784983913	-6.401014415	-5.260040689
+E95	5.404091392	-5.870302681	-2.891640039
+E96	6.034746843	-5.755782196	0.051843011
+E97	6.558030032	-4.667036048	2.749989597
+E98	6.737313764	-3.011819533	4.178390203
+E99	5.333266171	-4.302240169	-5.613509789
+E100	6.580438308	-3.739554155	-2.991084431
+E101	6.78248072	-4.023512045	-0.232191092
+E102	7.312517928	-2.298574078	2.385298838
+E103	6.905051715	-0.800953972	4.600056501
+E104	5.47913021	0.284948655	6.38332782
+E105	3.695490968	0.960411022	7.627828134
+E106	1.223800252	1.558864431	8.44043914
+E107	6.174969392	-2.458138877	-5.637380998
+E108	7.304625099	-1.866238006	-0.629182006
+E109	7.391919265	0.032151584	2.143634599
+E110	6.414469893	1.490027747	4.741794544
+E111	4.762196763	2.697832099	6.297663028
+E112	2.435870762	3.254307219	7.608766206
+E113	6.298127313	0.41663451	-6.069156425
+E114	6.840717711	1.278489412	-3.5553823
+E115	7.191620108	0.850096251	-0.882936903
+E116	6.823959684	2.968422112	2.430080351
+E117	5.546670402	4.157847823	4.627615703
+E118	3.825797111	5.121648995	5.942844877
+E119	5.934584124	2.22697797	-7.934360742
+E120	6.518995129	2.417299399	-5.253637073
+E121	6.550732888	3.611543152	-3.353155926
+E122	6.399087198	4.127248875	-0.356852241
+E123	5.682547954	5.453384344	2.836565436
+E124	4.459387187	6.021159964	4.365321482
+E125	6.118458137	4.523870113	-4.409174427
+E126	3.743504949	6.649204911	-6.530243068
+E127	-3.743504949	6.649204911	-6.530243068
+E128	-6.118458137	4.523870113	-4.409174427
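[Note: the .sfp montage files added here are one electrode per line, "name x y z", with the three rows prefixed Fid giving the fiducial landmarks (FidNz the nasion, FidT9/FidT10 the left/right preauricular points). A minimal parsing sketch, with the helper name read_sfp chosen for illustration:

    def read_sfp(fname):
        """Parse an EGI .sfp montage file into fiducials and electrodes."""
        fiducials, electrodes = {}, {}
        with open(fname) as fid:
            for line in fid:
                parts = line.split()
                if len(parts) != 4:
                    continue  # skip blank or malformed lines
                name = parts[0]
                pos = tuple(float(v) for v in parts[1:])
                if name.startswith('Fid'):
                    fiducials[name] = pos  # nasion / preauricular landmarks
                else:
                    electrodes[name] = pos
        return fiducials, electrodes
]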
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-129.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-129.sfp
new file mode 100644
index 0000000..fb222db
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-129.sfp
@@ -0,0 +1,132 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	5.787677636	5.520863216	-2.577468644
+E2	5.291804727	6.709097557	0.307434896
+E3	3.864122447	7.63424051	3.067770143
+E4	2.868837559	7.145708546	4.989564557
+E5	1.479340453	5.68662139	6.812878187
+E6	0	3.806770224	7.891304964
+E7	-1.223800252	1.558864431	8.44043914
+E8	4.221901505	7.998817387	-1.354789681
+E9	2.695405558	8.884820317	1.088308144
+E10	1.830882336	8.708839134	3.18709115
+E11	0	7.96264703	5.044718001
+E12	-1.479340453	5.68662139	6.812878187
+E13	-2.435870762	3.254307219	7.608766206
+E14	1.270447661	9.479016328	-0.947183306
+E15	0	9.087440894	1.333345013
+E16	0	9.076490798	3.105438474
+E17	0	9.271139705	-2.211516434
+E18	-1.830882336	8.708839134	3.18709115
+E19	-2.868837559	7.145708546	4.989564557
+E20	-3.825797111	5.121648995	5.942844877
+E21	-1.270447661	9.479016328	-0.947183306
+E22	-2.695405558	8.884820317	1.088308144
+E23	-3.864122447	7.63424051	3.067770143
+E24	-4.459387187	6.021159964	4.365321482
+E25	-4.221901505	7.998817387	-1.354789681
+E26	-5.291804727	6.709097557	0.307434896
+E27	-5.682547954	5.453384344	2.836565436
+E28	-5.546670402	4.157847823	4.627615703
+E29	-4.762196763	2.697832099	6.297663028
+E30	-3.695490968	0.960411022	7.627828134
+E31	-1.955187826	-0.684381878	8.564858511
+E32	-5.787677636	5.520863216	-2.577468644
+E33	-6.399087198	4.127248875	-0.356852241
+E34	-6.823959684	2.968422112	2.430080351
+E35	-6.414469893	1.490027747	4.741794544
+E36	-5.47913021	0.284948655	6.38332782
+E37	-3.909902609	-1.519049882	7.764134929
+E38	-6.550732888	3.611543152	-3.353155926
+E39	-7.191620108	0.850096251	-0.882936903
+E40	-7.391919265	0.032151584	2.143634599
+E41	-6.905051715	-0.800953972	4.600056501
+E42	-5.956055073	-2.338984312	6.00361353
+E43	-6.518995129	2.417299399	-5.253637073
+E44	-6.840717711	1.278489412	-3.5553823
+E45	-7.304625099	-1.866238006	-0.629182006
+E46	-7.312517928	-2.298574078	2.385298838
+E47	-6.737313764	-3.011819533	4.178390203
+E48	-5.934584124	2.22697797	-7.934360742
+E49	-6.298127313	0.41663451	-6.069156425
+E50	-6.78248072	-4.023512045	-0.232191092
+E51	-6.558030032	-4.667036048	2.749989597
+E52	-5.831241498	-4.494821698	4.955347697
+E53	-4.193518856	-4.037020083	6.982920038
+E54	-2.270752074	-3.414835627	8.204556551
+E55	0	-2.138343513	8.791875902
+E56	-6.174969392	-2.458138877	-5.637380998
+E57	-6.580438308	-3.739554155	-2.991084431
+E58	-6.034746843	-5.755782196	0.051843011
+E59	-5.204501802	-6.437833018	2.984444293
+E60	-4.116929504	-6.061561438	5.365757296
+E61	-2.344914884	-5.481057427	7.057748614
+E62	0	-6.676694032	6.465208258
+E63	-5.333266171	-4.302240169	-5.613509789
+E64	-5.404091392	-5.870302681	-2.891640039
+E65	-4.645302298	-7.280552408	0.130139701
+E66	-3.608293164	-7.665487704	3.129931648
+E67	-1.844644417	-7.354417376	5.224001733
+E68	-3.784983913	-6.401014415	-5.260040689
+E69	-3.528848027	-7.603010836	-2.818037873
+E70	-2.738838019	-8.607966849	0.239368223
+E71	-1.404967401	-8.437486994	3.277284901
+E72	0	-7.829896826	4.687622229
+E73	-1.929652202	-7.497197868	-5.136777648
+E74	-1.125731192	-8.455208629	-2.632832329
+E75	0	-8.996686498	0.487952047
+E76	1.404967401	-8.437486994	3.277284901
+E77	1.844644417	-7.354417376	5.224001733
+E78	2.344914884	-5.481057427	7.057748614
+E79	2.270752074	-3.414835627	8.204556551
+E80	1.955187826	-0.684381878	8.564858511
+E81	0	-7.85891896	-4.945387489
+E82	1.125731192	-8.455208629	-2.632832329
+E83	2.738838019	-8.607966849	0.239368223
+E84	3.608293164	-7.665487704	3.129931648
+E85	4.116929504	-6.061561438	5.365757296
+E86	4.193518856	-4.037020083	6.982920038
+E87	3.909902609	-1.519049882	7.764134929
+E88	1.929652202	-7.497197868	-5.136777648
+E89	3.528848027	-7.603010836	-2.818037873
+E90	4.645302298	-7.280552408	0.130139701
+E91	5.204501802	-6.437833018	2.984444293
+E92	5.831241498	-4.494821698	4.955347697
+E93	5.956055073	-2.338984312	6.00361353
+E94	3.784983913	-6.401014415	-5.260040689
+E95	5.404091392	-5.870302681	-2.891640039
+E96	6.034746843	-5.755782196	0.051843011
+E97	6.558030032	-4.667036048	2.749989597
+E98	6.737313764	-3.011819533	4.178390203
+E99	5.333266171	-4.302240169	-5.613509789
+E100	6.580438308	-3.739554155	-2.991084431
+E101	6.78248072	-4.023512045	-0.232191092
+E102	7.312517928	-2.298574078	2.385298838
+E103	6.905051715	-0.800953972	4.600056501
+E104	5.47913021	0.284948655	6.38332782
+E105	3.695490968	0.960411022	7.627828134
+E106	1.223800252	1.558864431	8.44043914
+E107	6.174969392	-2.458138877	-5.637380998
+E108	7.304625099	-1.866238006	-0.629182006
+E109	7.391919265	0.032151584	2.143634599
+E110	6.414469893	1.490027747	4.741794544
+E111	4.762196763	2.697832099	6.297663028
+E112	2.435870762	3.254307219	7.608766206
+E113	6.298127313	0.41663451	-6.069156425
+E114	6.840717711	1.278489412	-3.5553823
+E115	7.191620108	0.850096251	-0.882936903
+E116	6.823959684	2.968422112	2.430080351
+E117	5.546670402	4.157847823	4.627615703
+E118	3.825797111	5.121648995	5.942844877
+E119	5.934584124	2.22697797	-7.934360742
+E120	6.518995129	2.417299399	-5.253637073
+E121	6.550732888	3.611543152	-3.353155926
+E122	6.399087198	4.127248875	-0.356852241
+E123	5.682547954	5.453384344	2.836565436
+E124	4.459387187	6.021159964	4.365321482
+E125	6.118458137	4.523870113	-4.409174427
+E126	3.743504949	6.649204911	-6.530243068
+E127	-3.743504949	6.649204911	-6.530243068
+E128	-6.118458137	4.523870113	-4.409174427
+Cz	0	0	8.899186843
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-256.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-256.sfp
new file mode 100644
index 0000000..2464e89
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-256.sfp
@@ -0,0 +1,259 @@
+FidNz 0.00000 10.56381 -2.05108
+FidT9 -7.82694 0.45386 -3.76056
+FidT10 7.82694 0.45386 -3.76056
+E1 6.96223 5.38242 -2.19061
+E2 6.48414 6.40424 -0.14004
+E3 5.69945 7.20796 1.79088
+E4 4.81093 7.77321 3.65006
+E5 3.61962 7.47782 5.50947
+E6 2.25278 6.46157 6.96317
+E7 1.18879 5.21755 8.13378
+E8 0.00000 3.59608 8.75111
+E9 -1.15339 1.51369 9.19904
+E10 5.94022 7.38337 -1.51513
+E11 5.07624 8.37264 0.40595
+E12 3.87946 9.03611 2.51559
+E13 2.60756 8.97868 4.39107
+E14 1.23344 8.11574 6.06161
+E15 0.00000 6.81181 7.28186
+E16 -1.18879 5.21755 8.13378
+E17 -2.29559 2.91372 8.55810
+E18 4.06489 9.40559 -0.89098
+E19 2.86784 10.01456 0.85212
+E20 1.42153 10.06322 2.84803
+E21 0.00000 9.40339 4.65829
+E22 -1.23344 8.11574 6.06161
+E23 -2.25278 6.46157 6.96317
+E24 -3.34467 4.40891 7.67253
+E25 1.39547 10.65281 -0.61138
+E26 0.00000 10.68996 1.00542
+E27 -1.42153 10.06322 2.84803
+E28 -2.60756 8.97868 4.39107
+E29 -3.61962 7.47782 5.50947
+E30 -4.49828 5.59395 6.28801
+E31 0.00000 10.56381 -2.05108
+E32 -1.39547 10.65281 -0.61138
+E33 -2.86784 10.01456 0.85212
+E34 -3.87946 9.03611 2.51559
+E35 -4.81093 7.77321 3.65006
+E36 -5.10466 6.41586 4.77815
+E37 -4.06489 9.40559 -0.89098
+E38 -5.07624 8.37264 0.40595
+E39 -5.69945 7.20796 1.79088
+E40 -6.16984 6.11292 3.29612
+E41 -6.01447 4.93908 4.85771
+E42 -5.33943 3.80220 6.32664
+E43 -4.64127 2.57224 7.50868
+E44 -3.53746 1.07133 8.47419
+E45 -1.99458 -0.60998 9.28870
+E46 -5.94022 7.38337 -1.51513
+E47 -6.48414 6.40424 -0.14004
+E48 -6.97545 5.35131 1.30741
+E49 -7.10064 4.23342 2.91874
+E50 -6.86564 3.16240 4.76800
+E51 -6.11380 1.94213 6.23844
+E52 -5.31389 0.60081 7.48811
+E53 -3.72368 -1.14573 8.58697
+E54 -6.96223 5.38242 -2.19061
+E55 -7.31613 4.37155 -0.61128
+E56 -7.66385 3.29619 1.04415
+E57 -7.62423 2.30205 2.81799
+E58 -7.36570 1.34368 4.60382
+E59 -6.70292 0.06004 6.23992
+E60 -5.40372 -1.61247 7.47343
+E61 -7.54098 3.05323 -2.51935
+E62 -7.77059 2.06323 -0.80729
+E63 -7.96921 1.20744 0.97332
+E64 -8.06621 0.40109 2.78565
+E65 -7.60767 -0.56840 4.59939
+E66 -6.81554 -1.94522 5.93053
+E67 -7.69315 1.74041 -4.18153
+E68 -7.74468 1.05291 -2.47059
+E69 -7.93758 0.07220 -0.96992
+E70 -7.98893 -0.75212 0.84194
+E71 -8.05947 -1.50296 2.76753
+E72 -7.56445 -2.31141 4.30327
+E73 -7.52646 0.73096 -5.96025
+E74 -7.76752 -1.84131 -0.92719
+E75 -7.79279 -2.73175 1.10033
+E76 -7.46191 -3.49308 2.95937
+E77 -6.86934 -3.79448 4.89401
+E78 -5.65276 -3.84604 6.52108
+E79 -4.12465 -3.54800 7.95405
+E80 -2.23647 -2.95809 8.92461
+E81 0.00000 -1.93834 9.45867
+E82 -7.12806 -0.49186 -7.34929
+E83 -7.37920 -3.49709 -2.18347
+E84 -7.52183 -3.70044 -0.51432
+E85 -7.15214 -4.71132 1.51762
+E86 -6.48817 -5.15829 3.47294
+E87 -5.53051 -5.46184 5.50189
+E88 -4.03809 -5.23807 7.04455
+E89 -2.29514 -4.87829 8.27223
+E90 0.00000 -3.74195 9.02791
+E91 -6.82585 -1.86426 -8.69399
+E92 -6.74047 -2.84840 -6.74712
+E93 -6.78379 -4.01784 -5.01755
+E94 -7.03346 -4.45090 -3.54895
+E95 -6.99052 -5.01694 -1.88810
+E96 -6.67571 -5.73608 0.10234
+E97 -5.96851 -6.52864 2.03293
+E98 -5.10822 -6.74936 3.92134
+E99 -3.75216 -6.67734 5.63719
+E100 -2.14874 -6.29190 7.11453
+E101 0.00000 -7.15042 6.95434
+E102 -6.36989 -3.82470 -8.20622
+E103 -6.24349 -4.62250 -6.49623
+E104 -6.09726 -5.61090 -4.67894
+E105 -6.31441 -6.01299 -3.25921
+E106 -5.98418 -6.74733 -1.40314
+E107 -5.23709 -7.57398 0.46627
+E108 -4.29098 -8.11323 2.38442
+E109 -3.24277 -8.15293 4.22025
+E110 -1.73181 -7.63850 5.69360
+E111 -5.63580 -5.80367 -7.74857
+E112 -5.38718 -6.45180 -6.16689
+E113 -5.08285 -7.32643 -4.32109
+E114 -5.27282 -7.46584 -2.87485
+E115 -4.13620 -8.61230 -1.04503
+E116 -3.13323 -9.13629 0.81878
+E117 -1.94503 -9.23415 2.62135
+E118 -1.09312 -8.74110 4.13810
+E119 0.00000 -8.09146 5.34087
+E120 -4.70608 -7.21970 -7.52955
+E121 -4.20415 -7.81153 -5.84368
+E122 -3.62234 -8.59338 -4.04243
+E123 -3.02717 -9.45363 -1.95941
+E124 -2.20152 -9.70916 -0.63755
+E125 -1.01682 -9.71656 0.95467
+E126 0.00000 -9.23206 2.54671
+E127 1.09312 -8.74110 4.13810
+E128 1.73181 -7.63850 5.69360
+E129 2.14874 -6.29190 7.11453
+E130 2.29514 -4.87829 8.27223
+E131 2.23647 -2.95809 8.92461
+E132 1.99458 -0.60998 9.28870
+E133 -3.45625 -8.57317 -6.82654
+E134 -2.71528 -8.94646 -5.55376
+E135 -2.03205 -9.56166 -3.44989
+E136 -0.91885 -9.62744 -2.21054
+E137 0.00000 -9.58535 -0.88629
+E138 1.01682 -9.71656 0.95467
+E139 1.94503 -9.23415 2.62135
+E140 3.24277 -8.15293 4.22025
+E141 3.75216 -6.67734 5.63719
+E142 4.03809 -5.23807 7.04455
+E143 4.12465 -3.54800 7.95405
+E144 3.72368 -1.14573 8.58697
+E145 -1.88533 -9.22031 -6.79889
+E146 -1.06111 -9.53369 -5.45325
+E147 0.00000 -9.48329 -3.84204
+E148 0.91885 -9.62744 -2.21054
+E149 2.20152 -9.70916 -0.63755
+E150 3.13323 -9.13629 0.81878
+E151 4.29098 -8.11323 2.38442
+E152 5.10822 -6.74936 3.92134
+E153 5.53051 -5.46184 5.50189
+E154 5.65276 -3.84604 6.52108
+E155 5.40372 -1.61247 7.47343
+E156 1.06111 -9.53369 -5.45325
+E157 2.03205 -9.56166 -3.44989
+E158 3.02717 -9.45363 -1.95941
+E159 4.13620 -8.61230 -1.04503
+E160 5.23709 -7.57398 0.46627
+E161 5.96851 -6.52864 2.03293
+E162 6.48817 -5.15829 3.47294
+E163 6.86934 -3.79448 4.89401
+E164 6.81554 -1.94522 5.93053
+E165 1.88533 -9.22031 -6.79889
+E166 2.71528 -8.94646 -5.55376
+E167 3.62234 -8.59338 -4.04243
+E168 5.27282 -7.46584 -2.87485
+E169 5.98418 -6.74733 -1.40314
+E170 6.67571 -5.73608 0.10234
+E171 7.15214 -4.71132 1.51762
+E172 7.46191 -3.49308 2.95937
+E173 7.56445 -2.31141 4.30327
+E174 3.45625 -8.57317 -6.82654
+E175 4.20415 -7.81153 -5.84368
+E176 5.08285 -7.32643 -4.32109
+E177 6.31441 -6.01299 -3.25921
+E178 6.99052 -5.01694 -1.88810
+E179 7.52183 -3.70044 -0.51432
+E180 7.79279 -2.73175 1.10033
+E181 8.05947 -1.50296 2.76753
+E182 7.60767 -0.56840 4.59939
+E183 6.70292 0.06004 6.23992
+E184 5.31389 0.60081 7.48811
+E185 3.53746 1.07133 8.47419
+E186 1.15339 1.51369 9.19904
+E187 4.70608 -7.21970 -7.52955
+E188 5.38718 -6.45180 -6.16689
+E189 6.09726 -5.61090 -4.67894
+E190 7.03346 -4.45090 -3.54895
+E191 7.37920 -3.49709 -2.18347
+E192 7.76752 -1.84131 -0.92719
+E193 7.98893 -0.75212 0.84194
+E194 8.06621 0.40109 2.78565
+E195 7.36570 1.34368 4.60382
+E196 6.11380 1.94213 6.23844
+E197 4.64127 2.57224 7.50868
+E198 2.29559 2.91372 8.55810
+E199 5.63580 -5.80367 -7.74857
+E200 6.24349 -4.62250 -6.49623
+E201 6.78379 -4.01784 -5.01755
+E202 7.93758 0.07220 -0.96992
+E203 7.96921 1.20744 0.97332
+E204 7.62423 2.30205 2.81799
+E205 6.86564 3.16240 4.76800
+E206 5.33943 3.80220 6.32664
+E207 3.34467 4.40891 7.67253
+E208 6.36989 -3.82470 -8.20622
+E209 6.74047 -2.84840 -6.74712
+E210 7.74468 1.05291 -2.47059
+E211 7.77059 2.06323 -0.80729
+E212 7.66385 3.29619 1.04415
+E213 7.10064 4.23342 2.91874
+E214 6.01447 4.93908 4.85771
+E215 4.49828 5.59395 6.28801
+E216 6.82585 -1.86426 -8.69399
+E217 7.12806 -0.49186 -7.34929
+E218 7.52646 0.73096 -5.96025
+E219 7.69315 1.74041 -4.18153
+E220 7.54098 3.05323 -2.51935
+E221 7.31613 4.37155 -0.61128
+E222 6.97545 5.35131 1.30741
+E223 6.16984 6.11292 3.29612
+E224 5.10466 6.41586 4.77815
+E225 7.62652 3.24782 -4.40493
+E226 7.24346 4.80120 -4.77214
+E227 7.55603 2.52648 -6.26962
+E228 7.38028 1.35743 -7.84943
+E229 6.86103 -0.14155 -9.14913
+E230 6.74159 5.99080 -5.83258
+E231 7.22458 4.14855 -6.88918
+E232 7.31422 3.19647 -8.44268
+E233 7.09051 1.66694 -9.77213
+E234 5.88750 7.22674 -6.54736
+E235 6.65934 5.64059 -7.65729
+E236 6.75138 4.62427 -9.03070
+E237 6.58044 3.33743 -10.39707
+E238 4.69146 8.22723 -6.78260
+E239 5.81346 6.42065 -8.65026
+E240 6.04363 5.37051 -9.81363
+E241 -4.69146 8.22723 -6.78260
+E242 -5.81346 6.42065 -8.65026
+E243 -6.04363 5.37051 -9.81363
+E244 -5.88750 7.22674 -6.54736
+E245 -6.65934 5.64059 -7.65729
+E246 -6.75138 4.62427 -9.03070
+E247 -6.58044 3.33743 -10.39707
+E248 -6.74159 5.99080 -5.83258
+E249 -7.22458 4.14855 -6.88918
+E250 -7.31422 3.19647 -8.44268
+E251 -7.09051 1.66694 -9.77213
+E252 -7.24346 4.80120 -4.77214
+E253 -7.62652 3.24782 -4.40493
+E254 -7.55603 2.52648 -6.26962
+E255 -7.38028 1.35743 -7.84943
+E256 -6.86103 -0.14155 -9.14913
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-257.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-257.sfp
new file mode 100644
index 0000000..98c6b1a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-257.sfp
@@ -0,0 +1,260 @@
+FidNz 0.00000 10.56381 -2.05108
+FidT9 -7.82694 0.45386 -3.76056
+FidT10 7.82694 0.45386 -3.76056
+E1 6.96223 5.38242 -2.19061
+E2 6.48414 6.40424 -0.14004
+E3 5.69945 7.20796 1.79088
+E4 4.81093 7.77321 3.65006
+E5 3.61962 7.47782 5.50947
+E6 2.25278 6.46157 6.96317
+E7 1.18879 5.21755 8.13378
+E8 0.00000 3.59608 8.75111
+E9 -1.15339 1.51369 9.19904
+E10 5.94022 7.38337 -1.51513
+E11 5.07624 8.37264 0.40595
+E12 3.87946 9.03611 2.51559
+E13 2.60756 8.97868 4.39107
+E14 1.23344 8.11574 6.06161
+E15 0.00000 6.81181 7.28186
+E16 -1.18879 5.21755 8.13378
+E17 -2.29559 2.91372 8.55810
+E18 4.06489 9.40559 -0.89098
+E19 2.86784 10.01456 0.85212
+E20 1.42153 10.06322 2.84803
+E21 0.00000 9.40339 4.65829
+E22 -1.23344 8.11574 6.06161
+E23 -2.25278 6.46157 6.96317
+E24 -3.34467 4.40891 7.67253
+E25 1.39547 10.65281 -0.61138
+E26 0.00000 10.68996 1.00542
+E27 -1.42153 10.06322 2.84803
+E28 -2.60756 8.97868 4.39107
+E29 -3.61962 7.47782 5.50947
+E30 -4.49828 5.59395 6.28801
+E31 0.00000 10.56381 -2.05108
+E32 -1.39547 10.65281 -0.61138
+E33 -2.86784 10.01456 0.85212
+E34 -3.87946 9.03611 2.51559
+E35 -4.81093 7.77321 3.65006
+E36 -5.10466 6.41586 4.77815
+E37 -4.06489 9.40559 -0.89098
+E38 -5.07624 8.37264 0.40595
+E39 -5.69945 7.20796 1.79088
+E40 -6.16984 6.11292 3.29612
+E41 -6.01447 4.93908 4.85771
+E42 -5.33943 3.80220 6.32664
+E43 -4.64127 2.57224 7.50868
+E44 -3.53746 1.07133 8.47419
+E45 -1.99458 -0.60998 9.28870
+E46 -5.94022 7.38337 -1.51513
+E47 -6.48414 6.40424 -0.14004
+E48 -6.97545 5.35131 1.30741
+E49 -7.10064 4.23342 2.91874
+E50 -6.86564 3.16240 4.76800
+E51 -6.11380 1.94213 6.23844
+E52 -5.31389 0.60081 7.48811
+E53 -3.72368 -1.14573 8.58697
+E54 -6.96223 5.38242 -2.19061
+E55 -7.31613 4.37155 -0.61128
+E56 -7.66385 3.29619 1.04415
+E57 -7.62423 2.30205 2.81799
+E58 -7.36570 1.34368 4.60382
+E59 -6.70292 0.06004 6.23992
+E60 -5.40372 -1.61247 7.47343
+E61 -7.54098 3.05323 -2.51935
+E62 -7.77059 2.06323 -0.80729
+E63 -7.96921 1.20744 0.97332
+E64 -8.06621 0.40109 2.78565
+E65 -7.60767 -0.56840 4.59939
+E66 -6.81554 -1.94522 5.93053
+E67 -7.69315 1.74041 -4.18153
+E68 -7.74468 1.05291 -2.47059
+E69 -7.93758 0.07220 -0.96992
+E70 -7.98893 -0.75212 0.84194
+E71 -8.05947 -1.50296 2.76753
+E72 -7.56445 -2.31141 4.30327
+E73 -7.52646 0.73096 -5.96025
+E74 -7.76752 -1.84131 -0.92719
+E75 -7.79279 -2.73175 1.10033
+E76 -7.46191 -3.49308 2.95937
+E77 -6.86934 -3.79448 4.89401
+E78 -5.65276 -3.84604 6.52108
+E79 -4.12465 -3.54800 7.95405
+E80 -2.23647 -2.95809 8.92461
+E81 0.00000 -1.93834 9.45867
+E82 -7.12806 -0.49186 -7.34929
+E83 -7.37920 -3.49709 -2.18347
+E84 -7.52183 -3.70044 -0.51432
+E85 -7.15214 -4.71132 1.51762
+E86 -6.48817 -5.15829 3.47294
+E87 -5.53051 -5.46184 5.50189
+E88 -4.03809 -5.23807 7.04455
+E89 -2.29514 -4.87829 8.27223
+E90 0.00000 -3.74195 9.02791
+E91 -6.82585 -1.86426 -8.69399
+E92 -6.74047 -2.84840 -6.74712
+E93 -6.78379 -4.01784 -5.01755
+E94 -7.03346 -4.45090 -3.54895
+E95 -6.99052 -5.01694 -1.88810
+E96 -6.67571 -5.73608 0.10234
+E97 -5.96851 -6.52864 2.03293
+E98 -5.10822 -6.74936 3.92134
+E99 -3.75216 -6.67734 5.63719
+E100 -2.14874 -6.29190 7.11453
+E101 0.00000 -7.15042 6.95434
+E102 -6.36989 -3.82470 -8.20622
+E103 -6.24349 -4.62250 -6.49623
+E104 -6.09726 -5.61090 -4.67894
+E105 -6.31441 -6.01299 -3.25921
+E106 -5.98418 -6.74733 -1.40314
+E107 -5.23709 -7.57398 0.46627
+E108 -4.29098 -8.11323 2.38442
+E109 -3.24277 -8.15293 4.22025
+E110 -1.73181 -7.63850 5.69360
+E111 -5.63580 -5.80367 -7.74857
+E112 -5.38718 -6.45180 -6.16689
+E113 -5.08285 -7.32643 -4.32109
+E114 -5.27282 -7.46584 -2.87485
+E115 -4.13620 -8.61230 -1.04503
+E116 -3.13323 -9.13629 0.81878
+E117 -1.94503 -9.23415 2.62135
+E118 -1.09312 -8.74110 4.13810
+E119 0.00000 -8.09146 5.34087
+E120 -4.70608 -7.21970 -7.52955
+E121 -4.20415 -7.81153 -5.84368
+E122 -3.62234 -8.59338 -4.04243
+E123 -3.02717 -9.45363 -1.95941
+E124 -2.20152 -9.70916 -0.63755
+E125 -1.01682 -9.71656 0.95467
+E126 0.00000 -9.23206 2.54671
+E127 1.09312 -8.74110 4.13810
+E128 1.73181 -7.63850 5.69360
+E129 2.14874 -6.29190 7.11453
+E130 2.29514 -4.87829 8.27223
+E131 2.23647 -2.95809 8.92461
+E132 1.99458 -0.60998 9.28870
+E133 -3.45625 -8.57317 -6.82654
+E134 -2.71528 -8.94646 -5.55376
+E135 -2.03205 -9.56166 -3.44989
+E136 -0.91885 -9.62744 -2.21054
+E137 0.00000 -9.58535 -0.88629
+E138 1.01682 -9.71656 0.95467
+E139 1.94503 -9.23415 2.62135
+E140 3.24277 -8.15293 4.22025
+E141 3.75216 -6.67734 5.63719
+E142 4.03809 -5.23807 7.04455
+E143 4.12465 -3.54800 7.95405
+E144 3.72368 -1.14573 8.58697
+E145 -1.88533 -9.22031 -6.79889
+E146 -1.06111 -9.53369 -5.45325
+E147 0.00000 -9.48329 -3.84204
+E148 0.91885 -9.62744 -2.21054
+E149 2.20152 -9.70916 -0.63755
+E150 3.13323 -9.13629 0.81878
+E151 4.29098 -8.11323 2.38442
+E152 5.10822 -6.74936 3.92134
+E153 5.53051 -5.46184 5.50189
+E154 5.65276 -3.84604 6.52108
+E155 5.40372 -1.61247 7.47343
+E156 1.06111 -9.53369 -5.45325
+E157 2.03205 -9.56166 -3.44989
+E158 3.02717 -9.45363 -1.95941
+E159 4.13620 -8.61230 -1.04503
+E160 5.23709 -7.57398 0.46627
+E161 5.96851 -6.52864 2.03293
+E162 6.48817 -5.15829 3.47294
+E163 6.86934 -3.79448 4.89401
+E164 6.81554 -1.94522 5.93053
+E165 1.88533 -9.22031 -6.79889
+E166 2.71528 -8.94646 -5.55376
+E167 3.62234 -8.59338 -4.04243
+E168 5.27282 -7.46584 -2.87485
+E169 5.98418 -6.74733 -1.40314
+E170 6.67571 -5.73608 0.10234
+E171 7.15214 -4.71132 1.51762
+E172 7.46191 -3.49308 2.95937
+E173 7.56445 -2.31141 4.30327
+E174 3.45625 -8.57317 -6.82654
+E175 4.20415 -7.81153 -5.84368
+E176 5.08285 -7.32643 -4.32109
+E177 6.31441 -6.01299 -3.25921
+E178 6.99052 -5.01694 -1.88810
+E179 7.52183 -3.70044 -0.51432
+E180 7.79279 -2.73175 1.10033
+E181 8.05947 -1.50296 2.76753
+E182 7.60767 -0.56840 4.59939
+E183 6.70292 0.06004 6.23992
+E184 5.31389 0.60081 7.48811
+E185 3.53746 1.07133 8.47419
+E186 1.15339 1.51369 9.19904
+E187 4.70608 -7.21970 -7.52955
+E188 5.38718 -6.45180 -6.16689
+E189 6.09726 -5.61090 -4.67894
+E190 7.03346 -4.45090 -3.54895
+E191 7.37920 -3.49709 -2.18347
+E192 7.76752 -1.84131 -0.92719
+E193 7.98893 -0.75212 0.84194
+E194 8.06621 0.40109 2.78565
+E195 7.36570 1.34368 4.60382
+E196 6.11380 1.94213 6.23844
+E197 4.64127 2.57224 7.50868
+E198 2.29559 2.91372 8.55810
+E199 5.63580 -5.80367 -7.74857
+E200 6.24349 -4.62250 -6.49623
+E201 6.78379 -4.01784 -5.01755
+E202 7.93758 0.07220 -0.96992
+E203 7.96921 1.20744 0.97332
+E204 7.62423 2.30205 2.81799
+E205 6.86564 3.16240 4.76800
+E206 5.33943 3.80220 6.32664
+E207 3.34467 4.40891 7.67253
+E208 6.36989 -3.82470 -8.20622
+E209 6.74047 -2.84840 -6.74712
+E210 7.74468 1.05291 -2.47059
+E211 7.77059 2.06323 -0.80729
+E212 7.66385 3.29619 1.04415
+E213 7.10064 4.23342 2.91874
+E214 6.01447 4.93908 4.85771
+E215 4.49828 5.59395 6.28801
+E216 6.82585 -1.86426 -8.69399
+E217 7.12806 -0.49186 -7.34929
+E218 7.52646 0.73096 -5.96025
+E219 7.69315 1.74041 -4.18153
+E220 7.54098 3.05323 -2.51935
+E221 7.31613 4.37155 -0.61128
+E222 6.97545 5.35131 1.30741
+E223 6.16984 6.11292 3.29612
+E224 5.10466 6.41586 4.77815
+E225 7.62652 3.24782 -4.40493
+E226 7.24346 4.80120 -4.77214
+E227 7.55603 2.52648 -6.26962
+E228 7.38028 1.35743 -7.84943
+E229 6.86103 -0.14155 -9.14913
+E230 6.74159 5.99080 -5.83258
+E231 7.22458 4.14855 -6.88918
+E232 7.31422 3.19647 -8.44268
+E233 7.09051 1.66694 -9.77213
+E234 5.88750 7.22674 -6.54736
+E235 6.65934 5.64059 -7.65729
+E236 6.75138 4.62427 -9.03070
+E237 6.58044 3.33743 -10.39707
+E238 4.69146 8.22723 -6.78260
+E239 5.81346 6.42065 -8.65026
+E240 6.04363 5.37051 -9.81363
+E241 -4.69146 8.22723 -6.78260
+E242 -5.81346 6.42065 -8.65026
+E243 -6.04363 5.37051 -9.81363
+E244 -5.88750 7.22674 -6.54736
+E245 -6.65934 5.64059 -7.65729
+E246 -6.75138 4.62427 -9.03070
+E247 -6.58044 3.33743 -10.39707
+E248 -6.74159 5.99080 -5.83258
+E249 -7.22458 4.14855 -6.88918
+E250 -7.31422 3.19647 -8.44268
+E251 -7.09051 1.66694 -9.77213
+E252 -7.24346 4.80120 -4.77214
+E253 -7.62652 3.24782 -4.40493
+E254 -7.55603 2.52648 -6.26962
+E255 -7.38028 1.35743 -7.84943
+E256 -6.86103 -0.14155 -9.14913
+Cz 0.00000 0.00000 9.68308
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-32.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-32.sfp
new file mode 100644
index 0000000..214fb1b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-32.sfp
@@ -0,0 +1,36 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	-2.695405558	8.884820317	1.088308144
+E2	2.695405558	8.884820317	1.088308144
+E3	-4.459387187	6.021159964	4.365321482
+E4	4.459387187	6.021159964	4.365321482
+E5	-5.47913021	0.284948655	6.38332782
+E6	5.47913021	0.284948655	6.38332782
+E7	-5.831241498	-4.494821698	4.955347697
+E8	5.831241498	-4.494821698	4.955347697
+E9	-2.738838019	-8.607966849	0.239368223
+E10	2.738838019	-8.607966849	0.239368223
+E11	-6.399087198	4.127248875	-0.356852241
+E12	6.399087198	4.127248875	-0.356852241
+E13	-7.304625099	-1.866238006	-0.629182006
+E14	7.304625099	-1.866238006	-0.629182006
+E15	-6.034746843	-5.755782196	0.051843011
+E16	6.034746843	-5.755782196	0.051843011
+E17	0	7.96264703	5.044718001
+E18	0	9.271139705	-2.211516434
+E19	0	-6.676694032	6.465208258
+E20	0	-8.996686498	0.487952047
+E21	-6.518995129	2.417299399	-5.253637073
+E22	6.518995129	2.417299399	-5.253637073
+E23	-6.174969392	-2.458138877	-5.637380998
+E24	6.174969392	-2.458138877	-5.637380998
+E25	-3.784983913	-6.401014415	-5.260040689
+E26	3.784983913	-6.401014415	-5.260040689
+E27	0	9.087440894	1.333345013
+E28	0	3.806770224	7.891304964
+E29	-3.743504949	6.649204911	-6.530243068
+E30	3.743504949	6.649204911	-6.530243068
+E31	-6.118458137	4.523870113	-4.409174427
+E32	6.118458137	4.523870113	-4.409174427
+Cz	0	0	8.899186843
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp
new file mode 100644
index 0000000..004dcb0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp
@@ -0,0 +1,67 @@
+FidNz      0.00000      10.3556     -2.69376
+FidT9     -7.18083    0.0461216     -3.71184
+FidT10      6.24270    0.0461216     -3.71184
+E1      6.60688      6.30230     -2.94229
+E2      4.41106      8.71481      3.50199
+E3      3.27490      8.15713      5.69580
+E4      0.00000      4.34559      9.00826
+E5      3.07692      10.1424      1.24235
+E6      0.00000      9.08970      5.75876
+E7     -2.78065      3.71493      8.68573
+E8      0.00000      10.3612      3.54499
+E9     -3.2749      8.15713      5.6958
+E10     -3.07692      10.1424      1.24235
+E11     -4.41106      8.71481      3.50199
+E12     -5.09058      6.87341      4.98320
+E13     -6.48687      6.22527      3.23806
+E14     -6.33176      4.74636      5.28262
+E15     -5.43625      3.07969      7.18905
+E16     -4.21856      1.09635      8.70749
+E17     -6.60688      6.30230     -2.94229
+E18     -7.30483      4.71143    -0.407362
+E19     -7.78984      3.38858      2.77404
+E20     -6.25466     0.325281      7.28684
+E21     -4.46332     -1.73406      8.86309
+E22     -7.88241    -0.914323      5.25116
+E23     -7.80897      1.45945     -4.05862
+E24     -8.33854     -2.13039    -0.718238
+E25     -8.34755     -2.62392      2.72292
+E26     -7.69093     -3.43812      4.76981
+E27     -7.48627     -5.32762      3.13923
+E28     -6.65661     -5.13103      5.65674
+E29     -7.51185     -4.26886     -3.41445
+E30     -6.88892     -6.57047    0.0591810
+E31     -4.69965     -6.91953      6.12524
+E32     -6.16900     -6.70120     -3.30093
+E33     -2.10574     -8.39538      5.96342
+E34      0.00000     -4.98271      9.28085
+E35     -3.12650     -9.82636     0.273249
+E36      0.00000     -8.93816      5.35112
+E37      0.00000     -10.2701     0.557018
+E38      2.10574     -8.39538      5.96342
+E39      3.12650     -9.82636     0.273249
+E40      4.69965     -6.91953      6.12524
+E41      4.46332     -1.73406      8.86309
+E42      6.65661     -5.13103      5.65674
+E43      6.16900     -6.70120     -3.30093
+E44      6.88892     -6.57047    0.0591810
+E45      7.48627     -5.32762      3.13923
+E46      7.69093     -3.43812      4.76981
+E47      7.51185     -4.26886     -3.41445
+E48      8.34755     -2.62392      2.72292
+E49      7.88241    -0.914323      5.25116
+E50      6.25466     0.325281      7.28684
+E51      4.21856      1.09635      8.70749
+E52      8.33854     -2.13039    -0.718238
+E53      5.43625      3.07969      7.18905
+E54      2.78065      3.71493      8.68573
+E55      7.80897      1.45945     -4.05862
+E56      7.78984      3.38858      2.77404
+E57      6.33176      4.74636      5.28262
+E58      7.30483      4.71143    -0.407362
+E59      6.48687      6.22527      3.23806
+E60      5.09058      6.87341      4.98320
+E61      6.98448      5.16419     -5.03326
+E62      4.27337      7.59035     -7.45455
+E63     -4.27337      7.59035     -7.45455
+E64     -6.98448      5.16419     -5.03326
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp
new file mode 100644
index 0000000..c1c455d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp
@@ -0,0 +1,68 @@
+FidNz      0.00000      10.3556     -2.69376
+FidT9     -7.18083    0.0461216     -3.71184
+FidT10      6.24270    0.0461216     -3.71184
+E1      6.60688      6.30230     -2.94229
+E2      4.41106      8.71481      3.50199
+E3      3.27490      8.15713      5.69580
+E4      0.00000      4.34559      9.00826
+E5      3.07692      10.1424      1.24235
+E6      0.00000      9.08970      5.75876
+E7     -2.78065      3.71493      8.68573
+E8      0.00000      10.3612      3.54499
+E9     -3.2749      8.15713      5.6958
+E10     -3.07692      10.1424      1.24235
+E11     -4.41106      8.71481      3.50199
+E12     -5.09058      6.87341      4.98320
+E13     -6.48687      6.22527      3.23806
+E14     -6.33176      4.74636      5.28262
+E15     -5.43625      3.07969      7.18905
+E16     -4.21856      1.09635      8.70749
+E17     -6.60688      6.30230     -2.94229
+E18     -7.30483      4.71143    -0.407362
+E19     -7.78984      3.38858      2.77404
+E20     -6.25466     0.325281      7.28684
+E21     -4.46332     -1.73406      8.86309
+E22     -7.88241    -0.914323      5.25116
+E23     -7.80897      1.45945     -4.05862
+E24     -8.33854     -2.13039    -0.718238
+E25     -8.34755     -2.62392      2.72292
+E26     -7.69093     -3.43812      4.76981
+E27     -7.48627     -5.32762      3.13923
+E28     -6.65661     -5.13103      5.65674
+E29     -7.51185     -4.26886     -3.41445
+E30     -6.88892     -6.57047    0.0591810
+E31     -4.69965     -6.91953      6.12524
+E32     -6.16900     -6.70120     -3.30093
+E33     -2.10574     -8.39538      5.96342
+E34      0.00000     -4.98271      9.28085
+E35     -3.12650     -9.82636     0.273249
+E36      0.00000     -8.93816      5.35112
+E37      0.00000     -10.2701     0.557018
+E38      2.10574     -8.39538      5.96342
+E39      3.12650     -9.82636     0.273249
+E40      4.69965     -6.91953      6.12524
+E41      4.46332     -1.73406      8.86309
+E42      6.65661     -5.13103      5.65674
+E43      6.16900     -6.70120     -3.30093
+E44      6.88892     -6.57047    0.0591810
+E45      7.48627     -5.32762      3.13923
+E46      7.69093     -3.43812      4.76981
+E47      7.51185     -4.26886     -3.41445
+E48      8.34755     -2.62392      2.72292
+E49      7.88241    -0.914323      5.25116
+E50      6.25466     0.325281      7.28684
+E51      4.21856      1.09635      8.70749
+E52      8.33854     -2.13039    -0.718238
+E53      5.43625      3.07969      7.18905
+E54      2.78065      3.71493      8.68573
+E55      7.80897      1.45945     -4.05862
+E56      7.78984      3.38858      2.77404
+E57      6.33176      4.74636      5.28262
+E58      7.30483      4.71143    -0.407362
+E59      6.48687      6.22527      3.23806
+E60      5.09058      6.87341      4.98320
+E61      6.98448      5.16419     -5.03326
+E62      4.27337      7.59035     -7.45455
+E63     -4.27337      7.59035     -7.45455
+E64     -6.98448      5.16419     -5.03326
+Cz      0.00000      0.00000      10.1588
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi128.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi128.txt
new file mode 100644
index 0000000..69739c6
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi128.txt
@@ -0,0 +1,132 @@
+Site	 Theta	Phi
+A1	0	0
+A2	11.5	-90
+A3	23	-90
+A4	34.5	-90
+A5	-46	67.5
+A6	-46	45
+A7	-57.5	45
+A8	-69	54
+A9	-80.5	54
+A10	-92	54
+A11	-103.5	54
+A12	-115	54
+A13	-115	72
+A14	-103.5	72
+A15	-92	72
+A16	-80.5	72
+A17	-69	72
+A18	-57.5	67.5
+A19	46	-90
+A20	57.5	-90
+A21	69	-90
+A22	80.5	-90
+A23	92	-90
+A24	103.5	-90
+A25	115	-90
+A26	115	-72
+A27	103.5	-72
+A28	92	-72
+A29	80.5	-72
+A30	69	-72
+A31	57.5	-67.5
+A32	46	-67.5
+B1	11.5	-18
+B2	23	-45
+B3	46	-45
+B4	57.5	-45
+B5	69	-54
+B6	80.5	-54
+B7	92	-54
+B8	103.5	-54
+B9	115	-54
+B10	103.5	-36
+B11	92	-36
+B12	80.5	-36
+B13	69	-36
+B14	92	-18
+B15	80.5	-18
+B16	69	-18
+B17	57.5	-22.5
+B18	46	-22.5
+B19	34.5	-30
+B20	23	0
+B21	34.5	0
+B22	46	0
+B23	57.5	0
+B24	69	0
+B25	80.5	0
+B26	92	0
+B27	92	18
+B28	80.5	18
+B29	69	18
+B30	57.5	22.5
+B31	46	22.5
+B32	34.5	30
+C1	11.5	54
+C2	23	45
+C3	46	45
+C4	57.5	45
+C5	69	36
+C6	80.5	36
+C7	92	36
+C8	92	54
+C9	80.5	54
+C10	69	54
+C11	34.5	60
+C12	46	67.5
+C13	57.5	67.5
+C14	69	72
+C15	80.5	72
+C16	92	72
+C17	92	90
+C18	80.5	90
+C19	69	90
+C20	57.5	90
+C21	46	90
+C22	34.5	90
+C23	23	90
+C24	-34.5	-60
+C25	-46	-67.5
+C26	-57.5	-67.5
+C27	-69	-72
+C28	-80.5	-72
+C29	-92	-72
+C30	-92	-54
+C31	-80.5	-54
+C32	-69	-54
+D1	-11.5	-54
+D2	-23	-45
+D3	-46	-45
+D4	-57.5	-45
+D5	-69	-36
+D6	-80.5	-36
+D7	-92	-36
+D8	-92	-18
+D9	-80.5	-18
+D10	-69	-18
+D11	-57.5	-22.5
+D12	-46	-22.5
+D13	-34.5	-30
+D14	-23	0
+D15	-11.5	18
+D16	-23	45
+D17	-34.5	30
+D18	-34.5	0
+D19	-46	0
+D20	-57.5	0
+D21	-69	0
+D22	-80.5	0
+D23	-92	0
+D24	-92	18
+D25	-80.5	18
+D26	-69	18
+D27	-57.5	22.5
+D28	-46	22.5
+D29	-69	36
+D30	-80.5	36
+D31	-92	36
+D32	-103.5	36
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi16.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi16.txt
new file mode 100644
index 0000000..d8a6769
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi16.txt
@@ -0,0 +1,20 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+Fp2	  92	 72
+F4	  60	 51
+Fz	  46	 90
+F3	 -60	-51
+T7	 -92	  0
+C3	 -46	  0
+Cz	   0	  0
+C4	  46	  0
+T8	  92	  0
+P4	  60	-51
+Pz	  46	-90
+P3	 -60	 51
+O1	 -92	 72
+Oz	  92	-90
+O2	  92	-72
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi160.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi160.txt
new file mode 100644
index 0000000..04fefc7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi160.txt
@@ -0,0 +1,164 @@
+Site	 Theta	Phi
+A1	0	0
+A2	11.5	-90
+A3	23	-90
+A4	34.5	-90
+A5	-46	72
+A6	-46	54
+A7	-57.5	54
+A8	-69	60
+A9	-80.5	60
+A10	-92	60
+A11	-103.5	60
+A12	-115	60
+A13	-115	75
+A14	-103.5	75
+A15	-92	75
+A16	-80.5	75
+A17	-69	75
+A18	-57.5	72
+A19	46	-90
+A20	57.5	-90
+A21	69	-90
+A22	80.5	-90
+A23	92	-90
+A24	103.5	-90
+A25	115	-90
+A26	115	-75
+A27	103.5	-75
+A28	92	-75
+A29	80.5	-75
+A30	69	-75
+A31	57.5	-72
+A32	46	-72
+B1	11.5	-18
+B2	23	-60
+B3	46	-54
+B4	57.5	-54
+B5	69	-60
+B6	80.5	-60
+B7	92	-60
+B8	103.5	-60
+B9	115	-60
+B10	115	-45
+B11	103.5	-45
+B12	92	-45
+B13	80.5	-45
+B14	69	-45
+B15	69	-30
+B16	80.5	-30
+B17	92	-30
+B18	103.5	-30
+B19	92	-15
+B20	80.5	-15
+B21	69	-15
+B22	57.5	-36
+B23	46	-36
+B24	34.5	-45
+B25	23	-30
+B26	34.5	-22.5
+B27	46	-18
+B28	57.5	-18
+B29	57.5	0
+B30	69	0
+B31	80.5	0
+B32	92	0
+C1	11.5	54
+C2	23	30
+C3	23	0
+C4	34.5	0
+C5	34.5	22.5
+C6	46	18
+C7	46	0
+C8	57.5	18
+C9	69	15
+C10	80.5	15
+C11	92	15
+C12	92	30
+C13	80.5	30
+C14	69	30
+C15	69	45
+C16	80.5	45
+C17	92	45
+C18	92	60
+C19	80.5	60
+C20	69	60
+C21	57.5	54
+C22	57.5	36
+C23	46	36
+C24	34.5	45
+C25	23	60
+C26	34.5	67.5
+C27	46	54
+C28	46	72
+C29	57.5	72
+C30	69	75
+C31	80.5	75
+C32	92	75
+D1	-11.5	-54
+D2	23	90
+D3	34.5	90
+D4	46	90
+D5	57.5	90
+D6	69	90
+D7	80.5	90
+D8	92	90
+D9	-92	-75
+D10	-80.5	-75
+D11	-69	-75
+D12	-57.5	-72
+D13	-46	-72
+D14	-34.5	-67.5
+D15	-23	-60
+D16	-23	-30
+D17	-34.5	-45
+D18	-46	-54
+D19	-57.5	-54
+D20	-69	-60
+D21	-80.5	-60
+D22	-92	-60
+D23	-92	-45
+D24	-80.5	-45
+D25	-69	-45
+D26	-57.5	-36
+D27	-46	-36
+D28	-34.5	-22.5
+D29	-46	-18
+D30	-69	-30
+D31	-80.5	-30
+D32	-92	-30
+E1	-11.5	18
+E2	-23	0
+E3	-34.5	0
+E4	-46	0
+E5	-57.5	-18
+E6	-69	-15
+E7	-80.5	-15
+E8	-92	-15
+E9	-92	0
+E10	-80.5	0
+E11	-69	0
+E12	-57.5	0
+E13	-57.5	18
+E14	-69	15
+E15	-80.5	15
+E16	-92	15
+E17	-103.5	30
+E18	-92	30
+E19	-80.5	30
+E20	-69	30
+E21	-46	18
+E22	-34.5	22.5
+E23	-23	30
+E24	-23	60
+E25	-34.5	45
+E26	-46	36
+E27	-57.5	36
+E28	-69	45
+E29	-80.5	45
+E30	-92	45
+E31	-103.5	45
+E32	-115	45
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi256.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi256.txt
new file mode 100644
index 0000000..50085a2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi256.txt
@@ -0,0 +1,260 @@
+Site	 Theta	Phi
+A1	0	0
+A2	9.2	-90
+A3	18.4	-90
+A4	27.6	-90
+A5	36.8	-90
+A6	46	-90
+A7	-46	75
+A8	-55.2	75
+A9	-64.4	78
+A10	-73.6	78
+A11	-82.8	78.75
+A12	-92	78.75
+A13	-101.2	78.75
+A14	-110.4	78
+A15	-119.6	78
+A16	119.6	-90
+A17	110.4	-90
+A18	101.2	-90
+A19	92	-90
+A20	82.8	-90
+A21	73.6	-90
+A22	64.4	-90
+A23	55.2	-90
+A24	46	-75
+A25	55.2	-75
+A26	64.4	-78
+A27	73.6	-78
+A28	82.8	-78.75
+A29	92	-78.75
+A30	101.2	-78.75
+A31	110.4	-78
+A32	119.6	-78
+B1	18.4	-54
+B2	27.6	-66
+B3	36.8	-54
+B4	46	-60
+B5	55.2	-60
+B6	64.4	-66
+B7	73.6	-66
+B8	82.8	-67.5
+B9	92	-67.5
+B10	101.2	-67.5
+B11	110.4	-66
+B12	119.6	-66
+B13	110.4	-54
+B14	101.2	-56.25
+B15	92	-56.25
+B16	82.8	-56.25
+B17	73.6	-54
+B18	64.4	-54
+B19	55.2	-45
+B20	46	-45
+B21	27.6	-42
+B22	36.8	-36
+B23	46	-30
+B24	55.2	-30
+B25	64.4	-42
+B26	73.6	-42
+B27	82.8	-45
+B28	92	-45
+B29	101.2	-45
+B30	110.4	-42
+B31	110.4	-30
+B32	101.2	-33.75
+C1	9.2	-18
+C2	18.4	-18
+C3	27.6	-18
+C4	36.8	-18
+C5	46	-15
+C6	55.2	-15
+C7	64.4	-18
+C8	64.4	-30
+C9	73.6	-30
+C10	82.8	-33.75
+C11	92	-33.75
+C12	101.2	-22.5
+C13	92	-22.5
+C14	82.8	-22.5
+C15	73.6	-18
+C16	82.8	-11.25
+C17	92	-11.25
+C18	92	0
+C19	82.8	0
+C20	73.6	-6
+C21	64.4	-6
+C22	55.2	0
+C23	46	0
+C24	36.8	0
+C25	27.6	6
+C26	36.8	18
+C27	46	15
+C28	55.2	15
+C29	64.4	6
+C30	73.6	6
+C31	82.8	11.25
+C32	92	11.25
+D1	9.2	54
+D2	18.4	18
+D3	27.6	30
+D4	36.8	36
+D5	46	30
+D6	64.4	18
+D7	73.6	18
+D8	82.8	22.5
+D9	92	22.5
+D10	101.2	22.5
+D11	101.2	33.75
+D12	92	33.75
+D13	82.8	33.75
+D14	73.6	30
+D15	64.4	30
+D16	55.2	30
+D17	46	45
+D18	55.2	45
+D19	64.4	42
+D20	73.6	42
+D21	82.8	45
+D22	92	45
+D23	101.2	45
+D24	92	56.25
+D25	82.8	56.25
+D26	73.6	54
+D27	64.4	54
+D28	55.2	60
+D29	64.4	66
+D30	73.6	66
+D31	82.8	67.5
+D32	92	67.5
+E1	18.4	90
+E2	18.4	54
+E3	27.6	54
+E4	36.8	54
+E5	46	60
+E6	46	75
+E7	55.2	75
+E8	64.4	78
+E9	73.6	78
+E10	82.8	78.75
+E11	92	78.75
+E12	92	90
+E13	82.8	90
+E14	73.6	90
+E15	64.4	90
+E16	55.2	90
+E17	46	90
+E18	36.8	90
+E19	36.8	72
+E20	27.6	78
+E21	-27.6	-78
+E22	-36.8	-72
+E23	-46	-75
+E24	-55.2	-75
+E25	-64.4	-78
+E26	-73.6	-78
+E27	-82.8	-78.75
+E28	-92	-78.75
+E29	-92	-67.5
+E30	-82.8	-67.5
+E31	-73.6	-66
+E32	-64.4	-66
+F1	-9.2	-54
+F2	-18.4	-54
+F3	-27.6	-54
+F4	-36.8	-54
+F5	-46	-60
+F6	-55.2	-60
+F7	-64.4	-54
+F8	-73.6	-54
+F9	-82.8	-56.25
+F10	-92	-56.25
+F11	-101.2	-45
+F12	-92	-45
+F13	-82.8	-45
+F14	-73.6	-42
+F15	-64.4	-42
+F16	-55.2	-45
+F17	-46	-45
+F18	-36.8	-36
+F19	-27.6	-30
+F20	-18.4	-18
+F21	-27.6	-6
+F22	-36.8	-18
+F23	-46	-30
+F24	-55.2	-30
+F25	-64.4	-30
+F26	-73.6	-30
+F27	-82.8	-33.75
+F28	-92	-33.75
+F29	-101.2	-33.75
+F30	-101.2	-22.5
+F31	-92	-22.5
+F32	-82.8	-22.5
+G1	-9.2	18
+G2	-18.4	18
+G3	-27.6	18
+G4	-36.8	0
+G5	-46	-15
+G6	-55.2	-15
+G7	-64.4	-18
+G8	-73.6	-18
+G9	-82.8	-11.25
+G10	-92	-11.25
+G11	-92	0
+G12	-82.8	0
+G13	-73.6	-6
+G14	-64.4	-6
+G15	-55.2	0
+G16	-46	0
+G17	-55.2	15
+G18	-64.4	6
+G19	-73.6	6
+G20	-82.8	11.25
+G21	-92	11.25
+G22	-101.2	22.5
+G23	-92	22.5
+G24	-82.8	22.5
+G25	-73.6	18
+G26	-64.4	18
+G27	-64.4	30
+G28	-73.6	30
+G29	-82.8	33.75
+G30	-92	33.75
+G31	-101.2	33.75
+G32	-110.4	30
+H1	-18.4	54
+H2	-27.6	42
+H3	-36.8	36
+H4	-36.8	18
+H5	-46	15
+H6	-46	30
+H7	-55.2	30
+H8	-64.4	42
+H9	-73.6	42
+H10	-82.8	45
+H11	-92	45
+H12	-101.2	45
+H13	-110.4	42
+H14	-110.4	54
+H15	-101.2	56.25
+H16	-92	56.25
+H17	-82.8	56.25
+H18	-73.6	54
+H19	-64.4	54
+H20	-55.2	45
+H21	-46	45
+H22	-36.8	54
+H23	-27.6	66
+H24	-46	60
+H25	-55.2	60
+H26	-64.4	66
+H27	-73.6	66
+H28	-82.8	67.5
+H29	-92	67.5
+H30	-101.2	67.5
+H31	-110.4	66
+H32	-119.6	66
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi32.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi32.txt
new file mode 100644
index 0000000..d2e0a14
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi32.txt
@@ -0,0 +1,36 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+AF3	 -74	-65
+F7	 -92	-36
+F3	 -60	-51
+FC1	 -32	-45
+FC5	 -72	-21
+T7	 -92	  0
+C3	 -46	  0
+CP1	 -32	 45
+CP5	 -72	 21
+P7	 -92	 36
+P3	 -60	 51
+Pz	  46	-90
+PO3	 -74	 65
+O1	 -92	 72
+Oz	  92	-90
+O2	  92	-72
+PO4	  74	-65
+P4	  60	-51
+P8	  92	-36
+CP6	  72	-21
+CP2	  32	-45
+C4	  46	  0
+T8 	  92	  0
+FC6	  72	 21
+FC2	  32	 45
+F4	  60	 51
+F8	  92	 36
+AF4	  74	 65
+Fp2	  92	 72
+Fz	  46	 90
+Cz	   0	  0
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi64.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi64.txt
new file mode 100644
index 0000000..4071cfb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/biosemi64.txt
@@ -0,0 +1,68 @@
+Site	 Theta	Phi
+Fp1	-92	-72
+AF7	-92	-54
+AF3	-74	-65
+F1	-50	-68
+F3	-60	-51
+F5	-75	-41
+F7	-92	-36
+FT7	-92	-18
+FC5	-72	-21
+FC3	-50	-28
+FC1	-32	-45
+C1	-23	0
+C3	-46	0
+C5	-69	0
+T7	-92	0
+TP7	-92	18
+CP5	-72	21
+CP3	-50	28
+CP1	-32	45
+P1	-50	68
+P3	-60	51
+P5	-75	41
+P7	-92	36
+P9	-115	36
+PO7	-92	54
+PO3	-74	65
+O1	-92	72
+Iz	115	-90
+Oz	92	-90
+POz	69	-90
+Pz	46	-90
+CPz	23	-90
+Fpz	92	90
+Fp2	92	72
+AF8	92	54
+AF4	74	65
+AFz	69	90
+Fz	46	90
+F2	50	68
+F4	60	51
+F6	75	41
+F8	92	36
+FT8	92	18
+FC6	72	21
+FC4	50	28
+FC2	32	45
+FCz	23	90
+Cz	0	0
+C2	23	0
+C4	46	0
+C6	69	0
+T8	92	0
+TP8	92	-18
+CP6	72	-21
+CP4	50	-28
+CP2	32	-45
+P2	50	-68
+P4	60	-51
+P6	75	-41
+P8	92	-36
+P10	115	-36
+PO8	92	-54
+PO4	74	-65
+O2	92	-72
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M1.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M1.txt
new file mode 100644
index 0000000..271dc0f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M1.txt
@@ -0,0 +1,75 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+Fp2	  92	 72
+F3	 -60	-51
+F4	  60	 51
+C3	 -46	  0
+C4	  46	  0
+P3	 -60	 51
+P4	  60	-51
+O1	 -92	 72
+O2	  92	-72
+F7	 -92	-36
+F8	  92	 36
+T7	 -92	  0
+T8	  92	  0
+P7	 -92	 36
+P8	  92	-36
+Fz	  46	 90
+Cz	   0	  0
+Pz	  46	-90
+F1	 -50	-68
+F2	  50	 68
+FC1	 -32	-45
+FC2	  32	 45
+C1	 -23	  0
+C2	  23	  0
+CP1	 -32	 45
+CP2	  32	-45
+P1	 -50	 68
+P2	  50	-68
+AF3	 -74	-65
+AF4	  74	 65
+FC3	 -53	-33
+FC4	  53	 33
+CP3	 -52	 33
+CP4	  52	-33
+PO3	 -74	 65
+PO4	  74	-65
+F5	 -75	-41
+F6	  75	 41
+FC5	 -72	-21
+FC6	  72	 21
+C5	 -69	  0
+C6	  69	  0
+CP5	 -72	 21
+CP6	  72	-21
+P5	 -75	 41
+P6	  75	-41
+AF7	 -92	-54
+AF8	  92	 54
+FT7	 -92	-18
+FT8	  92	 18
+TP7	 -92	 18
+TP8	  92	-18
+PO7	 -92	 54
+PO8	  92	-54
+F9	-115	-36
+F10	 115	 36
+FT9	-115	-18
+FT10	 115	 18
+TP9	-115	 18
+TP10	 115	-18
+P9	-115	 36
+P10	 115	-36
+PO9	-115	 54
+PO10	 115	-54
+O9	-115	 72
+O10	 115	-72
+Fpz	  92	 90
+AFz	  69	 90
+FCz	  23	 90
+CPz	  23	-90
+POz	  69	-90
+Oz	  92	-90
+Iz	 115	-90
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M10.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M10.txt
new file mode 100644
index 0000000..7019fc6
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/easycap-M10.txt
@@ -0,0 +1,62 @@
+Site	 Theta	Phi
+1	   0	   0
+2	  23	  90
+3	  23	  30
+4	  23	 -30
+5	  23	 -90
+6	 -23	  30
+7	 -23	 -30
+8	  46	  90
+9	  46	  66
+10	  46	  33
+11	  46	   0
+12	  46	 -33
+13	  46	 -66
+14	  46	 -90
+15	 -46	  66
+16	 -46	  33
+17	 -46	   0
+18	 -46	 -33
+19	 -46	 -66
+20	  69	  90
+21	  69	  66
+22	  69	  42
+23	  69	  18
+24	  69	  -6
+25	  69	 -30
+26	  69	 -54
+27	  69	 -78
+28	 -69	  78
+29	 -69	  54
+30	 -69	  30
+31	 -69	   6
+32	 -69	 -18
+33	 -69	 -42
+34	 -69	 -66
+35	  92	  90
+36	  92	  68
+37	  92	  45
+38	  92	  22
+39	  92	   0
+40	  92	 -22
+41	  92	 -45
+42	  92	 -68
+43	  92	 -90
+44	 -92	  68
+45	 -92	  45
+46	 -92	  22
+47	 -92	   0
+48	 -92	 -22
+49	 -92	 -45
+50	 -92	 -68
+51	 115	  35
+52	 115	  10
+53	 115	 -15
+54	 115	 -40
+55	 115	 -65
+56	 115	 -90
+57	-115	  65
+58	-115	  40
+59	-115	  15
+60	-115	 -10
+61	-115	 -35
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1005.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1005.elc
new file mode 100644
index 0000000..4e69532
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1005.elc
@@ -0,0 +1,698 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	346
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-48.9708 64.0872 -47.6830
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+50.4352 63.8698 -48.0050
+-70.1019 41.6523 -49.9520
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+72.1141 42.0667 -50.4520
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-85.6192 -46.5147 -45.7070
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+86.1618 -47.0353 -45.8690
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-43.2897 75.8552 -28.2440
+-38.5517 79.9532 -4.9950
+-27.9857 82.4591 2.7020
+-17.1947 84.8491 10.0270
+-5.9317 86.8780 16.2000
+7.1053 87.0740 16.4690
+18.9233 85.5969 11.4430
+28.6443 82.9759 2.8280
+39.3203 80.6868 -4.7250
+43.8223 76.5418 -28.3070
+-63.2538 53.8573 -30.3160
+-61.3508 58.7992 0.8970
+-50.7998 64.0412 23.0890
+-34.3157 68.3931 41.1880
+-11.4357 70.7561 50.3480
+13.4793 71.2010 51.1750
+36.1833 69.1509 41.2540
+52.3972 65.0708 22.8620
+62.9152 60.0448 0.6300
+64.3342 54.5998 -30.4440
+-79.0669 28.0813 -31.2530
+-74.4999 31.3003 4.8460
+-65.2379 36.4282 36.1440
+-44.4098 40.7622 61.6900
+-15.4238 43.6600 77.6820
+17.5922 44.0540 77.7880
+45.8532 41.6228 60.6470
+67.1281 37.7998 35.2960
+78.0531 32.9817 4.4830
+80.0971 28.5137 -31.3380
+-84.1250 -1.8467 -29.7940
+-82.3550 0.8263 8.5790
+-74.6920 4.3033 45.3070
+-51.0509 7.1772 74.3770
+-18.2190 9.0941 92.5290
+18.7870 9.2479 91.5620
+51.8851 7.7978 73.5070
+77.0020 5.3357 45.3500
+83.8880 1.9457 8.5010
+84.1230 -1.8083 -29.6380
+-86.9731 -32.2157 -27.8480
+-85.5651 -30.6287 11.1530
+-76.4071 -29.7307 49.2170
+-52.9281 -28.9058 80.3040
+-18.3541 -28.3219 98.2200
+20.2199 -28.1481 98.1720
+55.1139 -28.3862 80.4740
+79.0059 -28.9863 49.6280
+85.9999 -29.8203 11.2480
+88.6249 -32.2723 -28.0000
+-78.1602 -60.7567 -23.8240
+-76.6802 -60.8317 12.8800
+-68.1152 -62.9747 47.2520
+-46.9142 -64.6908 75.2960
+-15.8202 -65.5999 91.1640
+19.4198 -65.5950 92.4050
+50.6738 -64.4822 76.1300
+71.0958 -62.6243 47.3280
+78.5198 -60.4323 12.9020
+78.9027 -60.9553 -23.8050
+-64.5973 -87.6558 -19.0140
+-62.9593 -87.5028 12.9520
+-54.0103 -89.8988 37.3320
+-35.8874 -91.6669 55.5040
+-12.0474 -92.6069 65.5080
+13.9226 -92.6940 66.9580
+37.7986 -91.6291 56.7330
+54.6087 -89.6402 37.0350
+63.1117 -87.2282 12.8560
+65.0137 -87.8062 -18.9520
+-42.8624 -108.0730 -13.1510
+-40.1204 -107.1290 12.0610
+-31.9514 -108.2520 23.0470
+-19.8624 -108.9420 29.7600
+-6.9194 -109.2600 32.7100
+6.8036 -109.1630 31.5820
+20.2936 -108.9140 28.9440
+32.1756 -108.2520 22.2550
+41.0976 -107.2450 12.1380
+43.8946 -109.1270 -13.1700
+-14.8504 -117.9870 -6.9200
+15.0946 -118.0180 -6.9330
+-14.8107 87.2351 -4.4770
+15.1623 88.0910 -4.5510
+-54.8298 66.4132 -29.7040
+-51.1757 70.8362 -1.7550
+-39.6407 74.8671 13.6780
+-27.2187 78.7091 28.3750
+-9.1977 80.6051 35.1330
+10.4823 80.8650 35.3590
+28.5803 79.3029 28.4700
+40.9403 75.7399 13.8600
+52.0293 71.8468 -1.9200
+55.7542 67.1698 -29.8240
+-71.5079 41.1193 -30.8540
+-68.5558 45.2843 3.0020
+-58.4878 50.6722 30.1920
+-39.9798 55.2601 52.6000
+-13.3838 57.9021 64.3320
+15.8342 58.4559 64.9920
+41.7942 56.2259 51.4990
+60.0522 52.0858 28.7080
+71.9592 47.1917 2.4750
+72.7981 41.8218 -31.0260
+-82.9559 13.3203 -30.8080
+-80.1139 16.3903 6.8500
+-71.2099 20.8203 41.3240
+-48.5119 24.5292 69.1360
+-17.3439 27.0241 86.9230
+18.4181 27.2709 86.4370
+49.5481 25.2378 68.4300
+73.2191 22.0067 41.2970
+81.5801 17.6837 6.5640
+83.3711 13.5477 -30.7490
+-85.1321 -17.0557 -28.7310
+-82.9461 -14.8827 10.0090
+-75.2941 -12.6397 47.9040
+-51.5811 -10.7548 78.0350
+-18.2790 -9.4319 97.3560
+19.6780 -9.3041 95.7060
+53.8059 -10.1442 77.7300
+78.1249 -11.7353 47.8400
+85.1369 -13.9063 9.8900
+86.0999 -17.0883 -28.7560
+-84.8102 -47.2457 -26.2200
+-82.7042 -46.2977 11.9740
+-73.3012 -46.7917 49.1090
+-51.0492 -47.1758 80.0160
+-17.3542 -47.3419 97.4100
+20.6798 -47.2321 98.0720
+53.9968 -46.8902 80.0770
+76.5498 -46.3733 49.1400
+85.1998 -45.8073 12.1020
+85.4428 -47.2213 -26.1760
+-72.1773 -74.6277 -21.5360
+-70.1133 -74.8677 12.9990
+-61.7283 -77.6238 43.0280
+-41.6733 -79.7528 66.7150
+-13.9613 -81.0029 81.0030
+17.2977 -80.9810 81.6410
+44.7477 -79.6111 67.6550
+63.6267 -77.3022 43.1190
+72.1037 -74.4993 13.0250
+73.2817 -75.0773 -21.5760
+-54.7754 -98.9768 -16.1930
+-51.9284 -98.4438 12.3040
+-43.3424 -100.1629 30.0090
+-28.0074 -101.3610 42.3790
+-9.5034 -102.0600 49.4180
+10.2356 -102.0290 48.9420
+28.6476 -101.3901 42.1380
+44.2206 -100.2191 29.8080
+52.8386 -98.5360 12.2500
+55.8596 -99.8940 -16.2080
+-14.8054 -115.1000 11.8290
+15.1456 -115.1910 11.8330
+-15.1584 -118.2420 -26.0480
+15.1286 -118.1510 -26.0810
+-36.1247 72.3801 -45.8520
+-43.5117 78.5802 -9.2400
+-33.2847 81.2071 -1.1400
+-22.3517 83.5621 6.0710
+-12.2417 86.1941 14.1880
+0.1703 87.3220 17.4420
+13.6223 86.7579 15.3020
+24.1013 84.3769 7.4330
+33.9133 81.8119 -1.0350
+43.9483 79.2958 -9.3000
+37.7123 72.1679 -46.1970
+-59.3398 52.6802 -48.7700
+-63.2618 55.9922 -11.1730
+-55.8198 61.3962 11.8840
+-43.3817 66.3672 32.8110
+-23.5817 69.9171 47.2930
+0.2763 71.2800 52.0920
+25.5583 70.5559 47.8270
+45.1522 67.2748 32.7310
+58.0002 62.5998 11.9000
+64.6732 57.2738 -11.4600
+60.6012 52.2668 -49.0380
+-78.4839 28.7703 -50.5220
+-76.6149 28.6533 -11.5080
+-71.5059 33.9263 20.9930
+-55.9399 38.7162 49.7880
+-30.6548 42.4151 71.0400
+0.3512 44.0740 79.1410
+32.6451 43.1009 70.7950
+57.5042 39.8518 48.8110
+74.2501 35.4997 20.3800
+79.0341 30.3437 -11.9970
+79.9201 28.9417 -50.9140
+-87.3620 -0.5147 -49.8370
+-82.6680 -0.9417 -10.2840
+-80.1330 2.5853 27.3120
+-64.1610 5.8313 60.8850
+-35.7490 8.3091 85.4590
+0.3911 9.5080 95.5600
+36.0700 8.6519 83.8320
+65.1640 6.6198 60.0520
+81.5440 3.6637 27.2010
+83.1680 0.1817 -10.3640
+85.3930 -0.9523 -49.5200
+-86.6321 -31.2377 -47.1780
+-85.9331 -31.0927 -8.4740
+-81.5431 -30.1727 30.2730
+-66.1281 -29.2957 65.8980
+-36.9301 -28.5699 91.7340
+0.3959 -28.1630 101.2690
+38.5399 -28.2251 90.9760
+68.8539 -28.6403 66.4100
+84.5529 -29.3783 30.8780
+85.9999 -30.2803 -8.4350
+86.7619 -31.7313 -47.2530
+-80.7152 -60.6457 -43.5940
+-78.5992 -59.7237 -4.7580
+-73.6642 -61.9227 30.3800
+-59.4112 -63.9248 62.6720
+-32.7283 -65.3199 85.9440
+0.3658 -65.7500 94.0580
+35.8918 -65.1381 85.9800
+62.2558 -63.6152 62.7190
+76.6708 -61.5483 30.5430
+79.3188 -59.3033 -4.8400
+81.5598 -61.2153 -43.8000
+-64.5703 -86.4318 -38.3240
+-64.5833 -86.2218 0.0330
+-58.7123 -88.7048 25.1930
+-46.1603 -90.8878 47.4460
+-24.6483 -92.2919 62.0760
+0.2727 -92.7580 67.3420
+26.4367 -92.2951 63.1990
+47.1437 -90.7122 47.6780
+60.8127 -88.5042 25.6620
+65.1517 -85.9432 -0.0090
+65.0377 -86.7182 -38.4480
+-43.1284 -107.5160 -32.3870
+-42.9764 -106.4930 5.7730
+-36.2344 -107.7160 17.7500
+-25.9844 -108.6160 26.5440
+-13.6644 -109.2660 32.8560
+0.1676 -109.2760 32.7900
+13.6506 -109.1060 30.9360
+26.6636 -108.6680 26.4150
+37.7006 -107.8400 18.0690
+43.6696 -106.5990 5.7260
+43.1766 -107.4440 -32.4630
+-29.3914 -114.5110 -10.0200
+0.0525 -119.3430 -3.9360
+29.5526 -113.6360 -10.0510
+-84.1611 -16.0187 -9.3460
+-72.4343 -73.4527 -2.4870
+85.0799 -15.0203 -9.4900
+73.0557 -73.0683 -2.5400
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+AF9
+AF7
+AF5
+AF3
+AF1
+AFz
+AF2
+AF4
+AF6
+AF8
+AF10
+F9
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F10
+FT9
+FT7
+FC5
+FC3
+FC1
+FCz
+FC2
+FC4
+FC6
+FT8
+FT10
+T9
+T7
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T8
+T10
+TP9
+TP7
+CP5
+CP3
+CP1
+CPz
+CP2
+CP4
+CP6
+TP8
+TP10
+P9
+P7
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+P8
+P10
+PO9
+PO7
+PO5
+PO3
+PO1
+POz
+PO2
+PO4
+PO6
+PO8
+PO10
+O1
+Oz
+O2
+I1
+Iz
+I2
+AFp9h
+AFp7h
+AFp5h
+AFp3h
+AFp1h
+AFp2h
+AFp4h
+AFp6h
+AFp8h
+AFp10h
+AFF9h
+AFF7h
+AFF5h
+AFF3h
+AFF1h
+AFF2h
+AFF4h
+AFF6h
+AFF8h
+AFF10h
+FFT9h
+FFT7h
+FFC5h
+FFC3h
+FFC1h
+FFC2h
+FFC4h
+FFC6h
+FFT8h
+FFT10h
+FTT9h
+FTT7h
+FCC5h
+FCC3h
+FCC1h
+FCC2h
+FCC4h
+FCC6h
+FTT8h
+FTT10h
+TTP9h
+TTP7h
+CCP5h
+CCP3h
+CCP1h
+CCP2h
+CCP4h
+CCP6h
+TTP8h
+TTP10h
+TPP9h
+TPP7h
+CPP5h
+CPP3h
+CPP1h
+CPP2h
+CPP4h
+CPP6h
+TPP8h
+TPP10h
+PPO9h
+PPO7h
+PPO5h
+PPO3h
+PPO1h
+PPO2h
+PPO4h
+PPO6h
+PPO8h
+PPO10h
+POO9h
+POO7h
+POO5h
+POO3h
+POO1h
+POO2h
+POO4h
+POO6h
+POO8h
+POO10h
+OI1h
+OI2h
+Fp1h
+Fp2h
+AF9h
+AF7h
+AF5h
+AF3h
+AF1h
+AF2h
+AF4h
+AF6h
+AF8h
+AF10h
+F9h
+F7h
+F5h
+F3h
+F1h
+F2h
+F4h
+F6h
+F8h
+F10h
+FT9h
+FT7h
+FC5h
+FC3h
+FC1h
+FC2h
+FC4h
+FC6h
+FT8h
+FT10h
+T9h
+T7h
+C5h
+C3h
+C1h
+C2h
+C4h
+C6h
+T8h
+T10h
+TP9h
+TP7h
+CP5h
+CP3h
+CP1h
+CP2h
+CP4h
+CP6h
+TP8h
+TP10h
+P9h
+P7h
+P5h
+P3h
+P1h
+P2h
+P4h
+P6h
+P8h
+P10h
+PO9h
+PO7h
+PO5h
+PO3h
+PO1h
+PO2h
+PO4h
+PO6h
+PO8h
+PO10h
+O1h
+O2h
+I1h
+I2h
+AFp9
+AFp7
+AFp5
+AFp3
+AFp1
+AFpz
+AFp2
+AFp4
+AFp6
+AFp8
+AFp10
+AFF9
+AFF7
+AFF5
+AFF3
+AFF1
+AFFz
+AFF2
+AFF4
+AFF6
+AFF8
+AFF10
+FFT9
+FFT7
+FFC5
+FFC3
+FFC1
+FFCz
+FFC2
+FFC4
+FFC6
+FFT8
+FFT10
+FTT9
+FTT7
+FCC5
+FCC3
+FCC1
+FCCz
+FCC2
+FCC4
+FCC6
+FTT8
+FTT10
+TTP9
+TTP7
+CCP5
+CCP3
+CCP1
+CCPz
+CCP2
+CCP4
+CCP6
+TTP8
+TTP10
+TPP9
+TPP7
+CPP5
+CPP3
+CPP1
+CPPz
+CPP2
+CPP4
+CPP6
+TPP8
+TPP10
+PPO9
+PPO7
+PPO5
+PPO3
+PPO1
+PPOz
+PPO2
+PPO4
+PPO6
+PPO8
+PPO10
+POO9
+POO7
+POO5
+POO3
+POO1
+POOz
+POO2
+POO4
+POO6
+POO8
+POO10
+OI1
+OIz
+OI2
+T3
+T5
+T4
+T6
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1020.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1020.elc
new file mode 100644
index 0000000..2f68b51
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_1020.elc
@@ -0,0 +1,200 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	97
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-48.9708 64.0872 -47.6830
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+50.4352 63.8698 -48.0050
+-70.1019 41.6523 -49.9520
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+72.1141 42.0667 -50.4520
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-85.6192 -46.5147 -45.7070
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+86.1618 -47.0353 -45.8690
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-84.1611 -16.0187 -9.3460
+-72.4343 -73.4527 -2.4870
+85.0799 -15.0203 -9.4900
+73.0557 -73.0683 -2.5400
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+AF9
+AF7
+AF5
+AF3
+AF1
+AFz
+AF2
+AF4
+AF6
+AF8
+AF10
+F9
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F10
+FT9
+FT7
+FC5
+FC3
+FC1
+FCz
+FC2
+FC4
+FC6
+FT8
+FT10
+T9
+T7
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T8
+T10
+TP9
+TP7
+CP5
+CP3
+CP1
+CPz
+CP2
+CP4
+CP6
+TP8
+TP10
+P9
+P7
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+P8
+P10
+PO9
+PO7
+PO5
+PO3
+PO1
+POz
+PO2
+PO4
+PO6
+PO8
+PO10
+O1
+Oz
+O2
+O9
+Iz
+O10
+T3
+T5
+T4
+T6
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_alphabetic.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_alphabetic.elc
new file mode 100644
index 0000000..55367e4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_alphabetic.elc
@@ -0,0 +1,142 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	68
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-33.7007 76.8371 21.2270
+0.2313 80.7710 35.4170
+35.7123 77.7259 21.9560
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+-54.8404 -97.5279 2.7920
+-36.5114 -100.8529 37.1670
+0.2156 -102.1780 50.6080
+36.7816 -100.8491 36.3970
+55.6666 -97.6251 2.7300
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+B3
+B1
+Bz
+B2
+B4
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+D7
+D5
+D3
+D1
+Dz
+D2
+D4
+D6
+D8
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+E7
+E5
+E3
+E1
+Ez
+E2
+E4
+E6
+E8
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+H3
+H1
+Hz
+H2
+H4
+O1
+Oz
+O2
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_postfixed.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_postfixed.elc
new file mode 100644
index 0000000..3ed4d32
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_postfixed.elc
@@ -0,0 +1,212 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	103
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+F7a
+F5a
+F3a
+F1a
+Fza
+F2a
+F4a
+F6a
+F8a
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F7p
+F5p
+F3p
+F1p
+Fzp
+F2p
+F4p
+F6p
+F8p
+T1
+T3a
+C5a
+C3a
+C1a
+Cza
+C2a
+C4a
+C6a
+T4a
+T2
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+T3p
+C5p
+C3p
+C1p
+Czp
+C2p
+C4p
+C6p
+T4p
+T5a
+P5a
+P3a
+P1a
+Pza
+P2a
+P4a
+P6a
+T6a
+Cb1a
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+Cb2a
+Cb1
+O1a
+P5p
+P3p
+P1p
+Pzp
+P2p
+P4p
+P6p
+O2a
+Cb2
+O1
+Oz
+O2
+Cb1p
+Iz
+Cb2p
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_prefixed.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_prefixed.elc
new file mode 100644
index 0000000..67563c0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_prefixed.elc
@@ -0,0 +1,160 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	77
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-33.7007 76.8371 21.2270
+0.2313 80.7710 35.4170
+35.7123 77.7259 21.9560
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-36.5114 -100.8529 37.1670
+0.2156 -102.1780 50.6080
+36.7816 -100.8491 36.3970
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+0.0045 -118.5650 -23.0780
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+aF3
+aF1
+aFz
+aF2
+aF4
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+iT1
+T1
+pF5
+pF3
+pF1
+pFz
+pF2
+pF4
+pF6
+T2
+iT2
+iT3
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+iT4
+T3A
+pC5
+pC3
+pC1
+pCz
+pC2
+pC4
+pC6
+T4A
+iT5
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+iT6
+pO5
+pO3
+pO1
+pOz
+pO2
+pO4
+pO6
+O1
+Oz
+O2
+Iz
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_primed.elc b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_primed.elc
new file mode 100644
index 0000000..00ec918
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/montages/standard_primed.elc
@@ -0,0 +1,212 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	103
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+F7'
+F5'
+F3'
+F1'
+Fz'
+F2'
+F4'
+F6'
+F8'
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F7''
+F5''
+F3''
+F1''
+Fz''
+F2''
+F4''
+F6''
+F8''
+T1
+T3'
+C5'
+C3'
+C1'
+Cz'
+C2'
+C4'
+C6'
+T4'
+T2
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+T3''
+C5''
+C3''
+C1''
+Cz''
+C2''
+C4''
+C6''
+T4''
+T5'
+P5'
+P3'
+P1'
+Pz'
+P2'
+P4'
+P6'
+T6'
+Cb1'
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+Cb2'
+Cb1
+O1'
+P5''
+P3''
+P1''
+Pz''
+P2''
+P4''
+P6''
+O2'
+Cb2
+O1
+Oz
+O2
+Cb1''
+Iz
+Cb2''
+M1
+M2
+A1
+A2
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-157_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-157_neighb.mat
new file mode 100644
index 0000000..1cae3fc
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-157_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-208_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-208_neighb.mat
new file mode 100644
index 0000000..81de840
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/KIT-208_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi16_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi16_neighb.mat
new file mode 100644
index 0000000..56b7fb6
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi16_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi32_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi32_neighb.mat
new file mode 100644
index 0000000..1c29040
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi32_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi64_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi64_neighb.mat
new file mode 100644
index 0000000..4afbf6f
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/biosemi64_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti148_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti148_neighb.mat
new file mode 100644
index 0000000..527e435
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti148_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248_neighb.mat
new file mode 100644
index 0000000..9bde76b
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248grad_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248grad_neighb.mat
new file mode 100644
index 0000000..4e5d620
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/bti248grad_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf151_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf151_neighb.mat
new file mode 100644
index 0000000..611a0bc
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf151_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf275_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf275_neighb.mat
new file mode 100644
index 0000000..91cf84e
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf275_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf64_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf64_neighb.mat
new file mode 100644
index 0000000..fd001e6
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/ctf64_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat
new file mode 100644
index 0000000..020392d
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat
new file mode 100644
index 0000000..62c88f0
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat
new file mode 100644
index 0000000..e59536c
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM11_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM11_neighb.mat
new file mode 100644
index 0000000..28131e7
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM11_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM14_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM14_neighb.mat
new file mode 100644
index 0000000..be2ad3d
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM14_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM15_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM15_neighb.mat
new file mode 100644
index 0000000..7dfa554
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM15_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM1_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM1_neighb.mat
new file mode 100644
index 0000000..f60d60d
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/easycapM1_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag122_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag122_neighb.mat
new file mode 100644
index 0000000..e8bbb75
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag122_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306mag_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306mag_neighb.mat
new file mode 100644
index 0000000..d7ffc98
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306mag_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306planar_neighb.mat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306planar_neighb.mat
new file mode 100644
index 0000000..aa0529e
Binary files /dev/null and b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/data/neighbors/neuromag306planar_neighb.mat differ
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/interpolation.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/interpolation.py
new file mode 100644
index 0000000..0b355a4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/interpolation.py
@@ -0,0 +1,207 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy.polynomial.legendre import legval
+from scipy import linalg
+
+from ..utils import logger
+from ..io.pick import pick_types, pick_channels
+from ..surface import _normalize_vectors
+from ..bem import _fit_sphere
+from ..forward import _map_meg_channels
+
+
+def _calc_g(cosang, stiffness=4, num_lterms=50):
+    """Calculate spherical spline g function between points on a sphere.
+
+    Parameters
+    ----------
+    cosang : array-like of float, shape(n_channels, n_channels)
+        cosine of angles between pairs of points on a spherical surface. This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline.
+    num_lterms : int
+        number of Legendre terms to evaluate.
+
+    Returns
+    -------
+    G : np.ndarray of float, shape(n_channels, n_channels)
+        The G matrix.
+    """
+    factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness *
+                              4 * np.pi) for n in range(1, num_lterms + 1)]
+    return legval(cosang, [0] + factors)
+
+
+def _calc_h(cosang, stiffness=4, num_lterms=50):
+    """Calculate spherical spline h function between points on a sphere.
+
+    Parameters
+    ----------
+    cosang : array-like of float, shape(n_channels, n_channels)
+        cosine of angles between pairs of points on a spherical surface. This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline. Also referred to as `m`.
+    num_lterms : int
+        number of Legendre terms to evaluate.
+
+    Returns
+    -------
+    H : np.ndarray of float, shape(n_channels, n_channels)
+        The H matrix.
+    """
+    factors = [(2 * n + 1) /
+               (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi)
+               for n in range(1, num_lterms + 1)]
+    return legval(cosang, [0] + factors)
+
+
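+# The two helpers above evaluate truncated spherical-spline series from
+# Perrin et al. (1989). In closed form (a sketch recovered from the `factors`
+# definitions above, with m = stiffness, P_n the Legendre polynomials
+# evaluated by `legval`, and N = num_lterms):
+#
+#   g(x) = sum_{n=1}^{N} (2n + 1) / (4 * pi * (n * (n + 1)) ** m) * P_n(x)
+#   h(x) = sum_{n=1}^{N} (2n + 1) / (4 * pi * (n * (n + 1)) ** (m - 1)) * P_n(x)
+#
+# where x is the cosine of the angle between two sensor positions.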
+def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
+    """Compute interpolation matrix based on spherical splines
+
+    Implementation based on [1]
+
+    Parameters
+    ----------
+    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
+        The positions to interpolate from.
+    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
+        The positions to interpolate to.
+    alpha : float
+        Regularization parameter. Defaults to 1e-5.
+
+    Returns
+    -------
+    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
+        The interpolation matrix that maps good signals to the location
+        of bad signals.
+
+    References
+    ----------
+    [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).
+        Spherical splines for scalp potential and current density mapping.
+        Electroencephalography and Clinical Neurophysiology, Feb; 72(2):184-7.
+    """
+
+    pos_from = pos_from.copy()
+    pos_to = pos_to.copy()
+
+    # normalize sensor positions to sphere
+    _normalize_vectors(pos_from)
+    _normalize_vectors(pos_to)
+
+    # cosine of the angles between sensor positions (from-from and to-from)
+    cosang_from = pos_from.dot(pos_from.T)
+    cosang_to_from = pos_to.dot(pos_from.T)
+    G_from = _calc_g(cosang_from)
+    G_to_from, H_to_from = (f(cosang_to_from) for f in (_calc_g, _calc_h))
+
+    if alpha is not None:
+        G_from.flat[::len(G_from) + 1] += alpha
+
+    C_inv = linalg.pinv(G_from)
+    interpolation = G_to_from.dot(C_inv)
+    return interpolation
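+
+# Editorial usage sketch (not part of the original module): map two "bad"
+# positions onto four "good" ones. The positions below are hypothetical;
+# applying the matrix to good-channel data estimates the bad channels.
+#
+#     >>> import numpy as np
+#     >>> rng = np.random.RandomState(0)
+#     >>> pos_good = rng.randn(4, 3)
+#     >>> pos_bad = rng.randn(2, 3)
+#     >>> _make_interpolation_matrix(pos_good, pos_bad).shape
+#     (2, 4)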
+
+
+def _do_interp_dots(inst, interpolation, goods_idx, bads_idx):
+    """Dot product of channel mapping matrix to channel data
+    """
+    from ..io.base import _BaseRaw
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+
+    if isinstance(inst, _BaseRaw):
+        inst._data[bads_idx] = interpolation.dot(inst._data[goods_idx])
+    elif isinstance(inst, _BaseEpochs):
+        inst._data[:, bads_idx, :] = np.einsum('ij,xjy->xiy', interpolation,
+                                               inst._data[:, goods_idx, :])
+    elif isinstance(inst, Evoked):
+        inst.data[bads_idx] = interpolation.dot(inst.data[goods_idx])
+    else:
+        raise ValueError('Inputs of type {0} are not supported'
+                         .format(type(inst)))
+
+
+def _interpolate_bads_eeg(inst):
+    """Interpolate bad EEG channels
+
+    Operates in place.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    """
+    bads_idx = np.zeros(len(inst.ch_names), dtype=np.bool)
+    goods_idx = np.zeros(len(inst.ch_names), dtype=np.bool)
+
+    picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
+    inst.info._check_consistency()
+    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]
+
+    if len(picks) == 0 or not bads_idx.any():
+        return
+
+    goods_idx[picks] = True
+    goods_idx[bads_idx] = False
+
+    pos = inst._get_channel_positions(picks)
+
+    # Make sure only EEG channels are used
+    bads_idx_pos = bads_idx[picks]
+    goods_idx_pos = goods_idx[picks]
+
+    pos_good = pos[goods_idx_pos]
+    pos_bad = pos[bads_idx_pos]
+
+    # test spherical fit
+    radius, center = _fit_sphere(pos_good)
+    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
+    distance = np.mean(distance / radius)
+    if np.abs(1. - distance) > 0.1:
+        logger.warning('Your spherical fit is poor, interpolation results are '
+                       'likely to be inaccurate.')
+
+    logger.info('Computing interpolation matrix from {0} sensor '
+                'positions'.format(len(pos_good)))
+
+    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
+
+    logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
+    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
+
+
+def _interpolate_bads_meg(inst, mode='accurate', verbose=None):
+    """Interpolate bad channels from data in good channels.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used for interpolation. `'fast'` should
+        be sufficient for most applications.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    picks_meg = pick_types(inst.info, meg=True, eeg=False, exclude=[])
+    ch_names = [inst.info['ch_names'][p] for p in picks_meg]
+    picks_good = pick_types(inst.info, meg=True, eeg=False, exclude='bads')
+
+    # select the bad meg channels to be interpolated
+    if len(inst.info['bads']) == 0:
+        picks_bad = []
+    else:
+        picks_bad = pick_channels(ch_names, inst.info['bads'],
+                                  exclude=[])
+
+    # return without doing anything if there are no meg or bad channels
+    if len(picks_meg) == 0 or len(picks_bad) == 0:
+        return
+
+    mapping = _map_meg_channels(inst, picks_good, picks_bad, mode=mode)
+
+    _do_interp_dots(inst, mapping, picks_good, picks_bad)
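+
+# Editorial usage sketch (not part of the original module): these private
+# helpers back the public ``interpolate_bads`` method. A typical call path,
+# assuming ``raw`` is a hypothetical preloaded Raw with bad channels marked:
+#
+#     >>> raw.info['bads'] = ['EEG 053', 'MEG 2443']  # hypothetical names
+#     >>> _interpolate_bads_eeg(raw)                  # operates in place
+#     >>> _interpolate_bads_meg(raw, mode='fast')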
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/layout.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/layout.py
new file mode 100644
index 0000000..fb21ac8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/layout.py
@@ -0,0 +1,825 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: Simplified BSD
+
+import logging
+from collections import defaultdict
+from itertools import combinations
+import os.path as op
+
+import numpy as np
+
+from ..transforms import _polar_to_cartesian, _cartesian_to_sphere
+from ..io.pick import pick_types
+from ..io.constants import FIFF
+from ..utils import _clean_names
+from ..externals.six.moves import map
+
+
+class Layout(object):
+    """Sensor layouts
+
+    Layouts are typically loaded from a file using read_layout. Only use this
+    class directly if you're constructing a new layout.
+
+    Parameters
+    ----------
+    box : tuple of length 4
+        The box dimension (x_min, x_max, y_min, y_max).
+    pos : array, shape=(n_channels, 4)
+        The positions of the channels in 2d (x, y, width, height).
+    names : list
+        The channel names.
+    ids : list
+        The channel ids.
+    kind : str
+        The type of Layout (e.g. 'Vectorview-all').
+    """
+    def __init__(self, box, pos, names, ids, kind):
+        self.box = box
+        self.pos = pos
+        self.names = names
+        self.ids = ids
+        self.kind = kind
+
+    def save(self, fname):
+        """Save Layout to disk
+
+        Parameters
+        ----------
+        fname : str
+            The file name (e.g. 'my_layout.lout').
+
+        See Also
+        --------
+        read_layout
+        """
+        x = self.pos[:, 0]
+        y = self.pos[:, 1]
+        width = self.pos[:, 2]
+        height = self.pos[:, 3]
+        if fname.endswith('.lout'):
+            out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
+        elif fname.endswith('.lay'):
+            out_str = ''
+        else:
+            raise ValueError('Unknown layout type. Should be of type '
+                             '.lout or .lay.')
+
+        for ii in range(x.shape[0]):
+            out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
+                        x[ii], y[ii], width[ii], height[ii], self.names[ii]))
+
+        with open(fname, 'w') as f:
+            f.write(out_str)
+
+    def __repr__(self):
+        return '<Layout | %s - Channels: %s ...>' % (self.kind,
+                                                     ', '.join(self.names[:3]))
+
+
+def _read_lout(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box_line = f.readline()  # first line contains box dimension
+        box = tuple(map(float, box_line.split()))
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            if len(splits) == 7:
+                cid, x, y, dx, dy, chkind, nb = splits
+                name = chkind + ' ' + nb
+            else:
+                cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def _read_lay(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box = None
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            if len(splits) == 7:
+                cid, x, y, dx, dy, chkind, nb = splits
+                name = chkind + ' ' + nb
+            else:
+                cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def read_layout(kind, path=None, scale=True):
+    """Read layout from a file
+
+    Parameters
+    ----------
+    kind : str
+        The name of the .lout file (e.g. kind='Vectorview-all' for
+        'Vectorview-all.lout').
+
+    path : str | None
+        The path of the folder containing the Layout file. Defaults to the
+        mne/channels/data/layouts folder inside your mne-python installation.
+
+    scale : bool
+        Apply useful scaling for out-of-the-box plotting using layout.pos.
+        Defaults to True.
+
+    Returns
+    -------
+    layout : instance of Layout
+        The layout.
+
+    See Also
+    --------
+    Layout.save
+    """
+    if path is None:
+        path = op.join(op.dirname(__file__), 'data', 'layouts')
+
+    if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
+        kind += '.lout'
+    elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
+        kind += '.lay'
+
+    if kind.endswith('.lout'):
+        fname = op.join(path, kind)
+        kind = kind[:-5]
+        box, pos, names, ids = _read_lout(fname)
+    elif kind.endswith('.lay'):
+        fname = op.join(path, kind)
+        kind = kind[:-4]
+        box, pos, names, ids = _read_lay(fname)
+    else:
+        raise ValueError('Unknown layout type. Should be of type '
+                         '.lout or .lay.')
+
+    if scale:
+        pos[:, 0] -= np.min(pos[:, 0])
+        pos[:, 1] -= np.min(pos[:, 1])
+        scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
+        pos /= scaling
+        pos[:, :2] += 0.03
+        pos[:, :2] *= 0.97 / 1.03
+        pos[:, 2:] *= 0.94
+
+    return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
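+
+# Editorial usage sketch (not part of the original module): read one of the
+# layouts bundled under mne/channels/data/layouts by name; the extension is
+# resolved automatically.
+#
+#     >>> layout = read_layout('CTF-275')
+#     >>> layout.kind
+#     'CTF-275'
+#     >>> layout.save('my_layout.lout')  # hypothetical path, see Layout.save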
+
+
+def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
+    """Create .lout file from EEG electrode digitization
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info (e.g., raw.info).
+    radius : float
+        Viewport radius as a fraction of main figure height. Defaults to 0.5.
+    width : float | None
+        Width of sensor axes as a fraction of main figure height. By default,
+        this will be the maximum width possible without axes overlapping.
+    height : float | None
+        Height of sensor axes as a fraction of main figure height. By default,
+        this will be the maximum height possible without axes overlapping.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any.
+        If 'bads', exclude channels in info['bads'] (default).
+
+    Returns
+    -------
+    layout : Layout
+        The generated Layout.
+
+    See Also
+    --------
+    make_grid_layout, generate_2d_layout
+    """
+    if not (0 <= radius <= 0.5):
+        raise ValueError('The radius parameter should be between 0 and 0.5.')
+    if width is not None and not (0 <= width <= 1.0):
+        raise ValueError('The width parameter should be between 0 and 1.')
+    if height is not None and not (0 <= height <= 1.0):
+        raise ValueError('The height parameter should be between 0 and 1.')
+
+    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                       exclude=exclude)
+    loc2d = _auto_topomap_coords(info, picks)
+    names = [info['chs'][i]['ch_name'] for i in picks]
+
+    # Scale [x, y] to [-0.5, 0.5]
+    loc2d_min = np.min(loc2d, axis=0)
+    loc2d_max = np.max(loc2d, axis=0)
+    loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
+
+    # If no width or height specified, calculate the maximum value possible
+    # without axes overlapping.
+    if width is None or height is None:
+        width, height = _box_size(loc2d, width, height, padding=0.1)
+
+    # Scale to viewport radius
+    loc2d *= 2 * radius
+
+    # Some subplot centers will be at the figure edge. Shrink everything so it
+    # fits in the figure.
+    scaling = min(1 / (1. + width), 1 / (1. + height))
+    loc2d *= scaling
+    width *= scaling
+    height *= scaling
+
+    # Shift to center
+    loc2d += 0.5
+
+    n_channels = loc2d.shape[0]
+    pos = np.c_[loc2d[:, 0] - 0.5 * width,
+                loc2d[:, 1] - 0.5 * height,
+                width * np.ones(n_channels),
+                height * np.ones(n_channels)]
+
+    box = (0, 1, 0, 1)
+    ids = 1 + np.arange(n_channels)
+    layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
+    return layout
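+
+# Editorial usage sketch (not part of the original module): build a layout
+# from the EEG electrode positions in a measurement info; ``raw`` is a
+# hypothetical Raw instance containing EEG channels.
+#
+#     >>> layout = make_eeg_layout(raw.info, radius=0.5)
+#     >>> layout.pos.shape   # (n_eeg_channels, 4): x, y, width, height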
+
+
+def make_grid_layout(info, picks=None, n_col=None):
+    """ Generate .lout file for custom data, i.e., ICA sources
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info | None
+        Measurement info (e.g., raw.info). If None, default names will be
+        employed.
+    picks : array-like of int | None
+        The indices of the channels to be included. If None, all misc channels
+        will be included.
+    n_col : int | None
+        Number of columns to generate. If None, a square grid will be produced.
+
+    Returns
+    -------
+    layout : Layout
+        The generated layout.
+
+    See Also
+    --------
+    make_eeg_layout, generate_2d_layout
+    """
+    if picks is None:
+        picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
+
+    names = [info['chs'][k]['ch_name'] for k in picks]
+
+    if not names:
+        raise ValueError('No misc data channels found.')
+
+    ids = list(range(len(picks)))
+    size = len(picks)
+
+    if n_col is None:
+        # prepare square-like layout
+        n_row = n_col = np.sqrt(size)  # try square
+        if n_col % 1:
+            # try n * (n-1) rectangle
+            n_col, n_row = int(n_col + 1), int(n_row)
+
+        if n_col * n_row < size:  # jump to the next full square
+            n_row += 1
+    else:
+        n_row = np.ceil(size / float(n_col))
+
+    # setup position grid
+    x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
+                       np.linspace(-0.5, 0.5, n_row))
+    x, y = x.ravel()[:size], y.ravel()[:size]
+    width, height = _box_size(np.c_[x, y], padding=0.1)
+
+    # Some axes will be at the figure edge. Shrink everything so it fits in the
+    # figure. Add 0.01 border around everything
+    border_x, border_y = (0.01, 0.01)
+    x_scaling = 1 / (1. + width + border_x)
+    y_scaling = 1 / (1. + height + border_y)
+    x = x * x_scaling
+    y = y * y_scaling
+    width *= x_scaling
+    height *= y_scaling
+
+    # Shift to center
+    x += 0.5
+    y += 0.5
+
+    # calculate pos
+    pos = np.c_[x - 0.5 * width, y - 0.5 * height,
+                width * np.ones(size), height * np.ones(size)]
+    box = (0, 1, 0, 1)
+
+    layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
+    return layout
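+
+# Editorial usage sketch (not part of the original module): grid layouts
+# suit channels without meaningful positions, e.g. ICA sources stored as
+# misc channels in a hypothetical ``info``.
+#
+#     >>> layout = make_grid_layout(info, n_col=8)
+#     >>> layout.kind
+#     'grid-misc'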
+
+
+def find_layout(info, ch_type=None, exclude='bads'):
+    """Choose a layout based on the channels in the info 'chs' field
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
+        The channel type for selecting single channel layouts.
+        Defaults to None. Note, this argument will only be considered for
+        VectorView type layout. Use `meg` to force using the full layout
+        in situations where the info contains only one sensor type.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads'].
+
+    Returns
+    -------
+    layout : Layout instance | None
+        None if layout not found.
+    """
+    our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`', '`eeg`'])
+    if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
+        raise ValueError('Invalid channel type (%s) requested. '
+                         '`ch_type` must be %s.' % (ch_type, our_types))
+
+    chs = info['chs']
+    coil_types = set([ch['coil_type'] for ch in chs])
+    channel_types = set([ch['kind'] for ch in chs])
+
+    has_vv_mag = any(k in coil_types for k in
+                     [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
+                      FIFF.FIFFV_COIL_VV_MAG_T3])
+    has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
+                                                FIFF.FIFFV_COIL_VV_PLANAR_T2,
+                                                FIFF.FIFFV_COIL_VV_PLANAR_T3])
+    has_vv_meg = has_vv_mag and has_vv_grad
+    has_vv_only_mag = has_vv_mag and not has_vv_grad
+    has_vv_only_grad = has_vv_grad and not has_vv_mag
+    is_old_vv = ' ' in chs[0]['ch_name']
+
+    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
+    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
+                       FIFF.FIFFV_COIL_CTF_REF_GRAD,
+                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
+    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
+                    (FIFF.FIFFV_MEG_CH in channel_types and
+                     any(k in ctf_other_types for k in coil_types)))
+    # hack due to MNE-C bug in IO of CTF
+    n_kit_grads = sum(ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD
+                      for ch in chs)
+
+    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
+                       n_kit_grads])
+    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
+                     FIFF.FIFFV_EEG_CH in channel_types)
+    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
+    has_eeg_coils_only = has_eeg_coils and not has_any_meg
+
+    if ch_type == "meg" and not has_any_meg:
+        raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
+
+    if ch_type == "eeg" and not has_eeg_coils:
+        raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
+
+    if ((has_vv_meg and ch_type is None) or
+            (any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
+        layout_name = 'Vectorview-all'
+    elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
+        layout_name = 'Vectorview-mag'
+    elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
+        layout_name = 'Vectorview-grad'
+    elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
+          (has_eeg_coils_and_meg and ch_type == 'eeg')):
+        if not isinstance(info, dict):
+            raise RuntimeError('Cannot make EEG layout, no measurement info '
+                               'was passed to `find_layout`')
+        return make_eeg_layout(info, exclude=exclude)
+    elif has_4D_mag:
+        layout_name = 'magnesWH3600'
+    elif has_CTF_grad:
+        layout_name = 'CTF-275'
+    elif n_kit_grads == 157:
+        layout_name = 'KIT-157'
+    elif n_kit_grads == 208:
+        layout_name = 'KIT-AD'
+    else:
+        return None
+
+    layout = read_layout(layout_name)
+    if not is_old_vv:
+        layout.names = _clean_names(layout.names, remove_whitespace=True)
+    if has_CTF_grad:
+        layout.names = _clean_names(layout.names, before_dash=True)
+
+    return layout
+
+
+def _box_size(points, width=None, height=None, padding=0.0):
+    """ Given a series of points, calculate an appropriate box size.
+
+    Parameters
+    ----------
+    points : array, shape (n_points, 2)
+        The centers of the axes as a list of (x, y) coordinate pairs. Normally
+        these are points in the range [0, 1] centered at 0.5.
+    width : float | None
+        An optional box width to enforce. When set, only the box height will be
+        calculated by the function.
+    height : float | None
+        An optional box height to enforce. When set, only the box width will be
+        calculated by the function.
+    padding : float
+        Portion of the box to reserve for padding. The value can range between
+        0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
+
+    Returns
+    -------
+    width : float
+        Width of the box
+    height : float
+        Height of the box
+    """
+    from scipy.spatial.distance import pdist
+
+    def xdiff(a, b):
+        return np.abs(a[0] - b[0])
+
+    def ydiff(a, b):
+        return np.abs(a[1] - b[1])
+
+    points = np.asarray(points)
+    all_combinations = list(combinations(points, 2))
+
+    if width is None and height is None:
+        if len(points) <= 1:
+            # Trivial case first
+            width = 1.0
+            height = 1.0
+        else:
+            # Find the closest two points A and B.
+            a, b = all_combinations[np.argmin(pdist(points))]
+
+            # The closest points define either the max width or max height.
+            w, h = xdiff(a, b), ydiff(a, b)
+            if w > h:
+                width = w
+            else:
+                height = h
+
+    # At this point, either width or height is known, or both are known.
+    if height is None:
+        # Find all axes that could potentially overlap horizontally.
+        hdist = pdist(points, xdiff)
+        candidates = [all_combinations[i] for i, d in enumerate(hdist)
+                      if d < width]
+
+        if len(candidates) == 0:
+            # No axes overlap, take all the height you want.
+            height = 1.0
+        else:
+            # Find an appropriate height so that none of the found axes will
+            # overlap.
+            height = np.min([ydiff(*c) for c in candidates])
+
+    elif width is None:
+        # Find all axes that could potentially overlap vertically.
+        vdist = pdist(points, ydiff)
+        candidates = [all_combinations[i] for i, d in enumerate(vdist)
+                      if d < height]
+
+        if len(candidates) == 0:
+            # No axes overlap, take all the width you want.
+            width = 1.0
+        else:
+            # Find an appropriate width so that none of the found axes will
+            # overlap.
+            width = np.min([xdiff(*c) for c in candidates])
+
+    # Add a bit of padding between boxes
+    width *= 1 - padding
+    height *= 1 - padding
+
+    return width, height
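+
+# Editorial usage sketch (not part of the original module): for two box
+# centers 0.2 apart horizontally, the width is capped by that distance
+# while the full height remains available; both shrink by the padding.
+#
+#     >>> import numpy as np
+#     >>> _box_size(np.array([[0.4, 0.5], [0.6, 0.5]]), padding=0.1)
+#     (0.18..., 0.9...)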
+
+
+def _find_topomap_coords(info, picks, layout=None):
+    """Try to guess the E/MEG layout and return appropriate topomap coordinates
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    picks : list of int
+        Channel indices to generate topomap coords for.
+    layout : None | instance of Layout
+        Enforce using a specific layout. If None, the coordinates are
+        generated automatically from the channel positions in info.
+
+    Returns
+    -------
+    coords : array, shape = (n_chs, 2)
+        2 dimensional coordinates for each sensor for a topomap plot.
+    """
+    if len(picks) == 0:
+        raise ValueError("Need more than 0 channels.")
+
+    if layout is not None:
+        chs = [info['chs'][i] for i in picks]
+        pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
+        pos = np.asarray(pos)
+    else:
+        pos = _auto_topomap_coords(info, picks)
+
+    return pos
+
+
+def _auto_topomap_coords(info, picks):
+    """Make a 2 dimensional sensor map from sensor positions in an info dict.
+    The default is to use the electrode locations. The fallback option is to
+    attempt using digitization points of kind FIFFV_POINT_EEG. This only works
+    with EEG and requires an equal number of digitization points and sensors.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    picks : list of int
+        The channel indices to generate topomap coords for.
+
+    Returns
+    -------
+    locs : array, shape = (n_sensors, 2)
+        An array of positions of the 2 dimensional map.
+    """
+    from scipy.spatial.distance import pdist
+
+    chs = [info['chs'][i] for i in picks]
+
+    # Use channel locations if available
+    locs3d = np.array([ch['loc'][:3] for ch in chs])
+
+    # If electrode locations are not available, use digitization points
+    if len(locs3d) == 0 or np.allclose(locs3d, 0):
+        logging.warning('Did not find any electrode locations in the info, '
+                        'will attempt to use digitization points instead. '
+                        'However, if digitization points do not correspond to '
+                        'the EEG electrodes, this will lead to bad results. '
+                        'Please verify that the sensor locations in the plot '
+                        'are accurate.')
+
+        # MEG/EOG/ECG sensors don't have digitization points; all requested
+        # channels must be EEG
+        for ch in chs:
+            if ch['kind'] != FIFF.FIFFV_EEG_CH:
+                raise ValueError("Cannot determine location of MEG/EOG/ECG "
+                                 "channels using digitization points.")
+
+        eeg_ch_names = [ch['ch_name'] for ch in info['chs']
+                        if ch['kind'] == FIFF.FIFFV_EEG_CH]
+
+        # Get EEG digitization points
+        if info['dig'] is None or len(info['dig']) == 0:
+            raise RuntimeError('No digitization points found.')
+
+        locs3d = np.array([point['r'] for point in info['dig']
+                           if point['kind'] == FIFF.FIFFV_POINT_EEG])
+
+        if len(locs3d) == 0:
+            raise RuntimeError('Did not find any digitization points of '
+                               'kind FIFFV_POINT_EEG (%d) in the info.'
+                               % FIFF.FIFFV_POINT_EEG)
+
+        if len(locs3d) != len(eeg_ch_names):
+            raise ValueError("Number of EEG digitization points (%d) "
+                             "doesn't match the number of EEG channels "
+                             "(%d)" % (len(locs3d), len(eeg_ch_names)))
+
+        # Center digitization points on head origin
+        dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
+                     FIFF.FIFFV_POINT_EEG,
+                     FIFF.FIFFV_POINT_EXTRA)
+        from ..preprocessing.maxfilter import fit_sphere_to_headshape
+        _, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds)
+        origin_head /= 1000.  # to meters
+        locs3d -= origin_head
+
+        # Match the digitization points with the requested
+        # channels.
+        eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
+        locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
+
+    # Duplicate points cause all kinds of trouble during visualization
+    if np.min(pdist(locs3d)) < 1e-10:
+        raise ValueError('Electrode positions must be unique.')
+
+    x, y, z = locs3d.T
+    az, el, r = _cartesian_to_sphere(x, y, z)
+    locs2d = np.c_[_polar_to_cartesian(az, np.pi / 2 - el)]
+    return locs2d
+
+
+def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
+    """Find the picks for pairing grad channels
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        An info dictionary containing channel information.
+    layout : Layout | None
+        The layout if available. Defaults to None.
+    topomap_coords : bool
+        Return the coordinates for a topomap plot along with the picks. If
+        False, only picks are returned. Defaults to True.
+    exclude : list of str | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
+
+    Returns
+    -------
+    picks : array of int
+        Picks for the grad channels, ordered in pairs.
+    coords : array, shape = (n_grad_channels, 3)
+        Coordinates for a topomap plot (optional, only returned if
+        topomap_coords == True).
+    """
+    # find all complete pairs of grad channels
+    pairs = defaultdict(list)
+    grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
+    for i in grad_picks:
+        ch = info['chs'][i]
+        name = ch['ch_name']
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(ch)
+    pairs = [p for p in pairs.values() if len(p) == 2]
+    if len(pairs) == 0:
+        raise ValueError("No 'grad' channel pairs found.")
+
+    # find the picks corresponding to the grad channels
+    grad_chs = sum(pairs, [])
+    ch_names = info['ch_names']
+    picks = [ch_names.index(c['ch_name']) for c in grad_chs]
+
+    if topomap_coords:
+        shape = (len(pairs), 2, -1)
+        coords = (_find_topomap_coords(info, picks, layout)
+                  .reshape(shape).mean(axis=1))
+        return picks, coords
+    else:
+        return picks
+
+
+# This function is used to pair grad channels when no info is available,
+# as is the case for Projection objects, which do not carry an info dict.
+def _pair_grad_sensors_from_ch_names(ch_names):
+    """Find the indexes for pairing grad channels
+
+    Parameters
+    ----------
+    ch_names : list of str
+        A list of channel names.
+
+    Returns
+    -------
+    indices : list of int
+        Indices of the grad channels, ordered in pairs.
+    """
+    pairs = defaultdict(list)
+    for i, name in enumerate(ch_names):
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(i)
+
+    pairs = [p for p in pairs.values() if len(p) == 2]
+
+    grad_chs = sum(pairs, [])
+    return grad_chs
+
+
+def _merge_grad_data(data):
+    """Merge data from channel pairs using the RMS
+
+    Parameters
+    ----------
+    data : array, shape = (n_channels, n_times)
+        Data for channels, ordered in pairs.
+
+    Returns
+    -------
+    data : array, shape = (n_channels / 2, n_times)
+        The root mean square for each pair.
+    """
+    data = data.reshape((len(data) // 2, 2, -1))
+    data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
+    return data
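+
+# Editorial usage sketch (not part of the original module): RMS-combine one
+# gradiometer pair over three time points: sqrt((3**2 + 4**2) / 2).
+#
+#     >>> import numpy as np
+#     >>> d = np.array([[3., 3., 3.], [4., 4., 4.]])  # one channel pair
+#     >>> _merge_grad_data(d)
+#     array([[ 3.53553391,  3.53553391,  3.53553391]])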
+
+
+def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
+                       ch_indices=None, name='ecog', bg_image=None):
+    """Generate a custom 2D layout from xy points.
+
+    Generates a 2-D layout for plotting with plot_topo methods and
+    functions. XY points will be normalized between 0 and 1, where
+    normalization extremes will be either the min/max of xy, or
+    the width/height of bg_image.
+
+    Parameters
+    ----------
+    xy : ndarray (N x 2)
+        The xy coordinates of sensor locations.
+    w : float
+        The width of each sensor's axis (between 0 and 1)
+    h : float
+        The height of each sensor's axis (between 0 and 1)
+    pad : float
+        Portion of the box to reserve for padding. The value can range between
+        0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
+    ch_names : list
+        The names of each channel. Must be a list of strings, with one
+        string per channel.
+    ch_indices : list
+        Index of each channel - must be a collection of unique integers,
+        one index per channel.
+    name : string
+        The name of this layout type.
+    bg_image : str | ndarray
+        The image over which sensor axes will be plotted. Either a path to an
+        image file, or an array that can be plotted with plt.imshow. If
+        provided, xy points will be normalized by the width/height of this
+        image. If not, xy points will be normalized by their own min/max.
+
+    Returns
+    -------
+    layout : Layout
+        A Layout object that can be plotted with plot_topo
+        functions and methods.
+
+    See Also
+    --------
+    make_eeg_layout, make_grid_layout
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    from scipy.ndimage import imread
+
+    if ch_indices is None:
+        ch_indices = np.arange(xy.shape[0])
+    if ch_names is None:
+        ch_names = ['{0}'.format(i) for i in ch_indices]
+
+    if len(ch_names) != len(ch_indices):
+        raise ValueError('# ch names and indices must be equal')
+    if len(ch_names) != len(xy):
+        raise ValueError('# ch names and xy vals must be equal')
+
+    x, y = xy.copy().astype(float).T
+
+    # Normalize xy to 0-1
+    if bg_image is not None:
+        # Normalize by image dimensions
+        if isinstance(bg_image, str):
+            img = imread(bg_image)
+        else:
+            img = bg_image
+        x /= img.shape[1]
+        y /= img.shape[0]
+    else:
+        # Normalize x and y by their maxes
+        for i_dim in [x, y]:
+            i_dim -= i_dim.min(0)
+            i_dim /= (i_dim.max(0) - i_dim.min(0))
+
+    # Create box and pos variable
+    box = _box_size(np.vstack([x, y]).T, padding=pad)
+    box = (0, 0, box[0], box[1])
+    w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
+    loc_params = np.vstack([x, y, w, h]).T
+
+    layout = Layout(box, loc_params, ch_names, ch_indices, name)
+    return layout
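+
+# Editorial usage sketch (not part of the original module): build an ECoG
+# layout from hypothetical 2D grid coordinates; names default to indices.
+#
+#     >>> import numpy as np
+#     >>> xy = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
+#     >>> generate_2d_layout(xy, name='ecog').names
+#     ['0', '1', '2', '3']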
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/montage.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/montage.py
new file mode 100644
index 0000000..b3ac08d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/montage.py
@@ -0,0 +1,533 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: Simplified BSD
+
+import os
+import os.path as op
+
+import numpy as np
+
+from ..viz import plot_montage
+from .channels import _contains_ch_type
+from ..transforms import (_sphere_to_cartesian, apply_trans,
+                          get_ras_to_neuromag_trans)
+from ..io.meas_info import _make_dig_points, _read_dig_points
+from ..externals.six import string_types
+from ..externals.six.moves import map
+
+
+class Montage(object):
+    """Montage for EEG cap
+
+    Montages are typically loaded from a file using read_montage. Only use this
+    class directly if you're constructing a new montage.
+
+    Parameters
+    ----------
+    pos : array, shape (n_channels, 3)
+        The positions of the channels in 3d.
+    ch_names : list
+        The channel names.
+    kind : str
+        The type of montage (e.g. 'standard_1005').
+    selection : array of int
+        The indices of the selected channels in the montage file.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    def __init__(self, pos, ch_names, kind, selection):
+        self.pos = pos
+        self.ch_names = ch_names
+        self.kind = kind
+        self.selection = selection
+
+    def __repr__(self):
+        s = '<Montage | %s - %d Channels: %s ...>'
+        s %= self.kind, len(self.ch_names), ', '.join(self.ch_names[:3])
+        return s
+
+    def plot(self, scale_factor=1.5, show_names=False):
+        """Plot EEG sensor montage
+
+        Parameters
+        ----------
+        scale_factor : float
+            Determines the size of the points. Defaults to 1.5.
+        show_names : bool
+            Whether to show the channel names. Defaults to False.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            The figure object.
+        """
+        return plot_montage(self, scale_factor=scale_factor,
+                            show_names=show_names)
+
+
+def read_montage(kind, ch_names=None, path=None, unit='m', transform=False):
+    """Read montage from a file
+
+    This function can be used to read electrode positions from a user specified
+    file using the `kind` and `path` parameters. Alternatively, use only the
+    `kind` parameter to load one of the built-in montages:
+
+    ===================   =====================================================
+    Kind                  description
+    ===================   =====================================================
+    standard_1005         Electrodes are named and positioned according to the
+                          international 10-05 system.
+    standard_1020         Electrodes are named and positioned according to the
+                          international 10-20 system.
+    standard_alphabetic   Electrodes are named with LETTER-NUMBER combinations
+                          (A1, B2, F4, etc.)
+    standard_postfixed    Electrodes are named according to the international
+                          10-20 system using postfixes for intermediate
+                          positions.
+    standard_prefixed     Electrodes are named according to the international
+                          10-20 system using prefixes for intermediate
+                          positions.
+    standard_primed       Electrodes are named according to the international
+                          10-20 system using prime marks (' and '') for
+                          intermediate positions.
+
+    biosemi16             BioSemi cap with 16 electrodes
+    biosemi32             BioSemi cap with 32 electrodes
+    biosemi64             BioSemi cap with 64 electrodes
+    biosemi128            BioSemi cap with 128 electrodes
+    biosemi160            BioSemi cap with 160 electrodes
+    biosemi256            BioSemi cap with 256 electrodes
+
+    easycap-M10           Brainproducts EasyCap with electrodes named
+                          according to the 10-05 system
+    easycap-M1            Brainproducts EasyCap with numbered electrodes
+
+    EGI_256               Geodesic Sensor Net with 256 channels
+
+    GSN-HydroCel-32       HydroCel Geodesic Sensor Net with 32 electrodes
+    GSN-HydroCel-64_1.0   HydroCel Geodesic Sensor Net with 64 electrodes
+    GSN-HydroCel-65_1.0   HydroCel Geodesic Sensor Net with 64 electrodes + Cz
+    GSN-HydroCel-128      HydroCel Geodesic Sensor Net with 128 electrodes
+    GSN-HydroCel-129      HydroCel Geodesic Sensor Net with 128 electrodes + Cz
+    GSN-HydroCel-256      HydroCel Geodesic Sensor Net with 256 electrodes
+    GSN-HydroCel-257      HydroCel Geodesic Sensor Net with 256 electrodes + Cz
+    ===================   =====================================================
+
+    Parameters
+    ----------
+    kind : str
+        The name of the montage file (e.g. kind='easycap-M10' for
+        'easycap-M10.txt'). Files with extensions '.elc', '.txt', '.csd',
+        '.elp', '.hpts' or '.sfp' are supported.
+    ch_names : list of str | None
+        If not all electrodes defined in the montage are present in the EEG
+        data, use this parameter to select a subset of electrode positions to
+        load. If None (default), all defined electrode positions are returned.
+    path : str | None
+        The path of the folder containing the montage file. Defaults to the
+        mne/channels/data/montages folder in your mne-python installation.
+    unit : 'm' | 'cm' | 'mm'
+        Unit of the input file. If not 'm' (default), coordinates will be
+        rescaled to 'm'.
+    transform : bool
+        If True, points will be transformed to Neuromag space.
+        The fiducials ('nasion', 'lpa', 'rpa') must be specified in
+        the montage file. Useful for points captured using Polhemus FastSCAN.
+        Default is False.
+
+    Returns
+    -------
+    montage : instance of Montage
+        The montage.
+
+    Notes
+    -----
+    Built-in montages are not scaled or transformed by default.
+
+    .. versionadded:: 0.9.0
+    """
+
+    if path is None:
+        path = op.join(op.dirname(__file__), 'data', 'montages')
+    if not op.isabs(kind):
+        supported = ('.elc', '.txt', '.csd', '.sfp', '.elp', '.hpts')
+        montages = [op.splitext(f) for f in os.listdir(path)]
+        montages = [m for m in montages if m[1] in supported and kind == m[0]]
+        if len(montages) != 1:
+            raise ValueError('Could not find the montage. Please provide the '
+                             'full path.')
+        kind, ext = montages[0]
+        fname = op.join(path, kind + ext)
+    else:
+        kind, ext = op.splitext(kind)
+        fname = op.join(path, kind + ext)
+
+    if ext == '.sfp':
+        # EGI geodesic
+        dtype = np.dtype('S4, f8, f8, f8')
+        data = np.loadtxt(fname, dtype=dtype)
+        pos = np.c_[data['f1'], data['f2'], data['f3']]
+        ch_names_ = data['f0'].astype(np.str)
+    elif ext == '.elc':
+        # 10-5 system
+        ch_names_ = []
+        pos = []
+        with open(fname) as fid:
+            for line in fid:
+                if 'Positions\n' in line:
+                    break
+            pos = []
+            for line in fid:
+                if 'Labels\n' in line:
+                    break
+                pos.append(list(map(float, line.split())))
+            for line in fid:
+                if not line or not set(line) - set([' ']):
+                    break
+                ch_names_.append(line.strip(' ').strip('\n'))
+        pos = np.array(pos)
+    elif ext == '.txt':
+        # easycap
+        try:  # newer version
+            data = np.genfromtxt(fname, dtype='str', skip_header=1)
+        except TypeError:
+            data = np.genfromtxt(fname, dtype='str', skiprows=1)
+        ch_names_ = list(data[:, 0])
+        theta, phi = data[:, 1].astype(float), data[:, 2].astype(float)
+        x = 85. * np.cos(np.deg2rad(phi)) * np.sin(np.deg2rad(theta))
+        y = 85. * np.sin(np.deg2rad(theta)) * np.sin(np.deg2rad(phi))
+        z = 85. * np.cos(np.deg2rad(theta))
+        pos = np.c_[x, y, z]
+    elif ext == '.csd':
+        # CSD toolbox
+        dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
+                 ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
+                 ('off_sph', 'f8')]
+        try:  # newer version
+            table = np.loadtxt(fname, skip_header=2, dtype=dtype)
+        except TypeError:
+            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
+        ch_names_ = table['label']
+        theta = (2 * np.pi * table['theta']) / 360.
+        phi = (2 * np.pi * table['phi']) / 360.
+        pos = _sphere_to_cartesian(theta, phi, r=1.0)
+        pos = np.asarray(pos).T
+    elif ext == '.elp':
+        # standard BESA spherical
+        dtype = np.dtype('S8, S8, f8, f8, f8')
+        try:
+            data = np.loadtxt(fname, dtype=dtype, skip_header=1)
+        except TypeError:
+            data = np.loadtxt(fname, dtype=dtype, skiprows=1)
+
+        az = data['f2']
+        horiz = data['f3']
+
+        radius = np.abs(az / 180.)
+        angles = np.array([90. - h if a >= 0. else -90. - h
+                           for h, a in zip(horiz, az)])
+
+        sph_phi = (0.5 - radius) * 180.
+        sph_theta = angles
+
+        azimuth = sph_theta / 180.0 * np.pi
+        elevation = sph_phi / 180.0 * np.pi
+        r = 85.
+
+        y, x, z = _sphere_to_cartesian(azimuth, elevation, r)
+
+        pos = np.c_[x, y, z]
+        ch_names_ = data['f1'].astype(np.str)
+    elif ext == '.hpts':
+        # MNE-C specified format for generic digitizer data
+        dtype = [('type', 'S8'), ('name', 'S8'),
+                 ('x', 'f8'), ('y', 'f8'), ('z', 'f8')]
+        data = np.loadtxt(fname, dtype=dtype)
+        pos = np.vstack((data['x'], data['y'], data['z'])).T
+        ch_names_ = data['name'].astype(np.str)
+    else:
+        raise ValueError('Currently the "%s" template is not supported.' %
+                         kind)
+    selection = np.arange(len(pos))
+
+    if unit == 'mm':
+        pos /= 1e3
+    elif unit == 'cm':
+        pos /= 1e2
+    elif unit != 'm':
+        raise ValueError("'unit' should be either 'm', 'cm', or 'mm'.")
+    if transform:
+        names_lower = [name.lower() for name in list(ch_names_)]
+        if ext == '.hpts':
+            fids = ('2', '1', '3')  # Alternate cardinal point names
+        else:
+            fids = ('nz', 'lpa', 'rpa')
+
+        missing = [name for name in fids
+                   if name not in names_lower]
+        if missing:
+            raise ValueError("The points %s are missing, but are needed "
+                             "to transform the points to the MNE coordinate "
+                             "system. Either add the points, or read the "
+                             "montage with transform=False. " % missing)
+        nasion = pos[names_lower.index(fids[0])]
+        lpa = pos[names_lower.index(fids[1])]
+        rpa = pos[names_lower.index(fids[2])]
+
+        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+        pos = apply_trans(neuromag_trans, pos)
+
+    if ch_names is not None:
+        sel, ch_names_ = zip(*[(i, e) for i, e in enumerate(ch_names_)
+                             if e in ch_names])
+        sel = list(sel)
+        pos = pos[sel]
+        selection = selection[sel]
+    else:
+        ch_names_ = list(ch_names_)
+    kind = op.split(kind)[-1]
+    return Montage(pos=pos, ch_names=ch_names_, kind=kind, selection=selection)
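+
+# Editorial usage sketch (not part of the original module): load a built-in
+# montage by name, optionally restricted to channels actually present in
+# the data.
+#
+#     >>> montage = read_montage('standard_1020')
+#     >>> sub = read_montage('standard_1020', ch_names=['Cz', 'Pz', 'Oz'])
+#     >>> len(sub.ch_names)
+#     3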
+
+
+class DigMontage(object):
+    """Montage for Digitized data
+
+    Montages are typically loaded from a file using read_dig_montage. Only use
+    this class directly if you're constructing a new montage.
+
+    Parameters
+    ----------
+    hsp : array, shape (n_points, 3)
+        The positions of the channels in 3d.
+    hpi : array, shape (n_hpi, 3)
+        The positions of the head-position indicator coils in 3d.
+        These points are in the MEG device space.
+    elp : array, shape (n_hpi, 3)
+        The positions of the head-position indicator coils in 3d.
+        This is typically in the acquisition digitizer space.
+    point_names : list, shape (n_elp)
+        The names of the digitized points for hpi and elp.
+    nasion : array, shape (1, 3)
+        The position of the nasion fiducial point in the RAS head space.
+    lpa : array, shape (1, 3)
+        The position of the left periauricular fiducial point in
+        the RAS head space.
+    rpa : array, shape (1, 3)
+        The position of the right periauricular fiducial point in
+        the RAS head space.
+    dev_head_t : array, shape (4, 4)
+        A Device-to-Head transformation matrix.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    def __init__(self, hsp, hpi, elp, point_names,
+                 nasion=None, lpa=None, rpa=None, dev_head_t=None):
+        self.hsp = hsp
+        self.hpi = hpi
+        self.elp = elp
+        self.point_names = point_names
+
+        self.nasion = nasion
+        self.lpa = lpa
+        self.rpa = rpa
+        if dev_head_t is None:
+            self.dev_head_t = np.identity(4)
+        else:
+            self.dev_head_t = dev_head_t
+
+    def __repr__(self):
+        s = '<DigMontage | %d Dig Points, %d HPI points: %s ...>'
+        s %= (len(self.hsp), len(self.point_names),
+              ', '.join(self.point_names[:3]))
+        return s
+
+    def plot(self, scale_factor=1.5, show_names=False):
+        """Plot EEG sensor montage
+
+        Parameters
+        ----------
+        scale_factor : float
+            Determines the size of the points. Defaults to 1.5.
+        show_names : bool
+            Whether to show the channel names. Defaults to False.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            The figure object.
+        """
+        from ..viz import plot_montage
+        return plot_montage(self, scale_factor=scale_factor,
+                            show_names=show_names)
+
+
+def read_dig_montage(hsp=None, hpi=None, elp=None, point_names=None,
+                     unit='mm', transform=True, dev_head_t=False):
+    """Read montage from a file
+
+    Parameters
+    ----------
+    hsp : None | str | array, shape (n_points, 3)
+        If str, this corresponds to the filename of the headshape points.
+        This is typically used with the Polhemus FastSCAN system.
+        If numpy.array, this corresponds to an array of positions of the
+        channels in 3d.
+    hpi : None | str | array, shape (n_hpi, 3)
+        If str, this corresponds to the filename of hpi points. If
+        numpy.array, this corresponds to an array of hpi points. These points
+        are in device space.
+    elp : None | str | array, shape (n_fids + n_hpi, 3)
+        If str, this corresponds to the filename of hpi points.
+        This is typically used with the Polhemus FastSCAN system.
+        If numpy.array, this corresponds to an array of hpi points. These
+        points are in head space. Fiducials should be listed first, then the
+        points corresponding to the hpi.
+    point_names : None | list
+        If list, this corresponds to a list of point names. This must be
+        specified if elp is defined.
+    unit : 'm' | 'cm' | 'mm'
+        Unit of the input file. If not 'm', coordinates will be rescaled
+        to 'm'. Default is 'mm'. This is applied only for hsp and elp files.
+    transform : bool
+        If True, points will be transformed to Neuromag space.
+        The fiducials ('nasion', 'lpa', 'rpa') must be specified in
+        the montage file. Useful for points captured using Polhemus FastSCAN.
+        Default is True.
+    dev_head_t : bool
+        If True, a Dev-to-Head transformation matrix will be added to the
+        montage. To get a proper `dev_head_t`, the hpi and the elp points
+        must be in the same order. If False, an identity matrix will be added
+        to the montage. Default is False.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The digitizer montage.
+
+    Notes
+    -----
+    All digitized points will be transformed to head-based coordinate system
+    if transform is True and fiducials are present.
+
+    .. versionadded:: 0.9.0
+    """
+    if isinstance(hsp, string_types):
+        hsp = _read_dig_points(hsp)
+    if hsp is not None:
+        if unit == 'mm':
+            hsp *= 1e-3
+        if unit == 'cm':
+            hsp *= 1e-2
+    if isinstance(hpi, string_types):
+        ext = op.splitext(hpi)[-1]
+        if ext == '.txt':
+            hpi = _read_dig_points(hpi)
+        elif ext in ('.sqd', '.mrk'):
+            from ..io.kit import read_mrk
+            hpi = read_mrk(hpi)
+        else:
+            raise TypeError('HPI file is not supported.')
+    if isinstance(elp, string_types):
+        elp = _read_dig_points(elp)
+    if elp is not None:
+        if len(elp) != len(point_names):
+            raise ValueError("The elp file contains %i points, but %i names "
+                             "were specified." % (len(elp), len(point_names)))
+        if unit == 'mm':
+            elp *= 1e-3
+        elif unit == 'cm':
+            elp *= 1e-2
+
+    if transform:
+        if elp is None:
+            raise ValueError("ELP points are not specified. Points are needed "
+                             "for transformation.")
+        names_lower = [name.lower() for name in point_names]
+
+        # check that all needed points are present
+        missing = tuple(name for name in ('nasion', 'lpa', 'rpa')
+                        if name not in names_lower)
+        if missing:
+            raise ValueError("The points %s are missing, but are needed "
+                             "to transform the points to the MNE coordinate "
+                             "system. Either add the points, or read the "
+                             "montage with transform=False." % str(missing))
+
+        nasion = elp[names_lower.index('nasion')]
+        lpa = elp[names_lower.index('lpa')]
+        rpa = elp[names_lower.index('rpa')]
+        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+
+        fids = np.array([nasion, lpa, rpa])
+        fids = apply_trans(neuromag_trans, fids)
+        elp = apply_trans(neuromag_trans, elp)
+        hsp = apply_trans(neuromag_trans, hsp)
+    else:
+        fids = [None] * 3
+    if dev_head_t:
+        from ..coreg import fit_matched_points
+        trans = fit_matched_points(tgt_pts=elp[3:], src_pts=hpi, out='trans')
+    else:
+        trans = np.identity(4)
+
+    return DigMontage(hsp, hpi, elp, point_names, fids[0], fids[1], fids[2],
+                      trans)
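+
+# Editorial usage sketch (not part of the original module): the file names
+# below are hypothetical Polhemus FastSCAN exports; transform=True requires
+# the fiducials to be listed first among the elp point names.
+#
+#     >>> montage = read_dig_montage(hsp='hsp.txt', elp='elp.txt',
+#     ...                            point_names=['nasion', 'lpa', 'rpa',
+#     ...                                         'HPI001', 'HPI002'],
+#     ...                            unit='mm', transform=True)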
+
+
+def _set_montage(info, montage):
+    """Apply montage to data.
+
+    With a Montage, this function will replace the EEG channel names and
+    locations with the values specified for the particular montage.
+
+    With a DigMontage, this function will replace the digitizer info with
+    the values specified for the particular montage.
+
+    Note: This function will change the info variable in place.
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement info to update.
+    montage : instance of Montage
+        The montage to apply.
+    """
+    if isinstance(montage, Montage):
+        if not _contains_ch_type(info, 'eeg'):
+            raise ValueError('No EEG channels found.')
+
+        sensors_found = False
+        for pos, ch_name in zip(montage.pos, montage.ch_names):
+            if ch_name not in info['ch_names']:
+                continue
+
+            ch_idx = info['ch_names'].index(ch_name)
+            info['chs'][ch_idx]['loc'] = np.r_[pos, [0.] * 9]
+            sensors_found = True
+
+        if not sensors_found:
+            raise ValueError('None of the sensors defined in the montage were '
+                             'found in the info structure. Check the channel '
+                             'names.')
+    elif isinstance(montage, DigMontage):
+        dig = _make_dig_points(nasion=montage.nasion, lpa=montage.lpa,
+                               rpa=montage.rpa, hpi=montage.hpi,
+                               dig_points=montage.hsp)
+        info['dig'] = dig
+        info['dev_head_t']['trans'] = montage.dev_head_t
+    else:
+        raise TypeError("Montage must be a 'Montage' or 'DigMontage' "
+                        "instead of '%s'." % type(montage))
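+
+# Editorial usage sketch (not part of the original module): this helper
+# backs the public ``set_montage`` method; ``raw`` is a hypothetical Raw
+# instance whose EEG channel names match the montage.
+#
+#     >>> montage = read_montage('standard_1020')
+#     >>> _set_montage(raw.info, montage)  # updates raw.info in place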
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_channels.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_channels.py
new file mode 100644
index 0000000..3a37858
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_channels.py
@@ -0,0 +1,152 @@
+# Author: Daniel G Wakeman <dwakeman at nmr.mgh.harvard.edu>
+#         Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_array_equal
+from nose.tools import assert_raises, assert_true, assert_equal
+
+from mne.channels import rename_channels, read_ch_connectivity
+from mne.channels.channels import _ch_neighbor_connectivity
+from mne.io import read_info, Raw
+from mne.io.constants import FIFF
+from mne.fixes import partial, savemat
+from mne.utils import _TempDir, run_tests_if_main
+from mne import pick_types
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+
+def test_rename_channels():
+    """Test rename channels
+    """
+    info = read_info(raw_fname)
+    # Error Tests
+    # Test channel name exists in ch_names
+    mapping = {'EEG 160': 'EEG060'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test improper mapping configuration
+    mapping = {'MEG 2641': 1.0}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test non-unique mapping configuration
+    mapping = {'MEG 2641': 'MEG 2642'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test bad input
+    assert_raises(ValueError, rename_channels, info, 1.)
+
+    # Test successful changes
+    # Test ch_name and ch_names are changed
+    info2 = deepcopy(info)  # for consistency at the start of each test
+    info2['bads'] = ['EEG 060', 'EOG 061']
+    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
+    rename_channels(info2, mapping)
+    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
+    assert_true(info2['ch_names'][374] == 'EEG060')
+    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
+    assert_true(info2['ch_names'][375] == 'EOG061')
+    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
+    info2 = deepcopy(info)
+    rename_channels(info2, lambda x: x.replace(' ', ''))
+    assert_true(info2['chs'][373]['ch_name'] == 'EEG059')
+    info2 = deepcopy(info)
+    info2['bads'] = ['EEG 060', 'EEG 060']
+    rename_channels(info2, mapping)
+    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
+
+
+def test_set_channel_types():
+    """Test set_channel_types
+    """
+    raw = Raw(raw_fname)
+    # Error Tests
+    # Test channel name exists in ch_names
+    mapping = {'EEG 160': 'EEG060'}
+    assert_raises(ValueError, raw.set_channel_types, mapping)
+    # Test change to illegal channel type
+    mapping = {'EOG 061': 'xxx'}
+    assert_raises(ValueError, raw.set_channel_types, mapping)
+    # Test type change
+    raw2 = Raw(raw_fname)
+    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
+    mapping = {'EEG 060': 'eog', 'EEG 059': 'ecg', 'EOG 061': 'seeg'}
+    raw2.set_channel_types(mapping)
+    info = raw2.info
+    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
+    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
+    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
+    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
+    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
+    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
+    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
+    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
+    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
+
+
+def test_read_ch_connectivity():
+    "Test reading channel connectivity templates"
+    tempdir = _TempDir()
+    a = partial(np.array, dtype='<U7')
+    # no pep8
+    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
+                     (['MEG0121'], [[a(['MEG0111'])],
+                                    [a(['MEG0131'])]]),
+                     (['MEG0131'], [[a(['MEG0111'])],
+                                    [a(['MEG0121'])]])]],
+                   dtype=[('label', 'O'), ('neighblabel', 'O')])
+    mat = dict(neighbours=nbh)
+    mat_fname = op.join(tempdir, 'test_mat.mat')
+    savemat(mat_fname, mat, oned_as='row')
+
+    ch_connectivity, ch_names = read_ch_connectivity(mat_fname)
+    x = ch_connectivity
+    assert_equal(x.shape[0], len(ch_names))
+    assert_equal(x.shape, (3, 3))
+    assert_equal(x[0, 1], False)
+    assert_equal(x[0, 2], True)
+    assert_true(np.all(x.diagonal()))
+    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
+    ch_connectivity, ch_names = read_ch_connectivity(mat_fname, picks=[0, 2])
+    assert_equal(ch_connectivity.shape[0], 2)
+    assert_equal(len(ch_names), 2)
+
+    ch_names = ['EEG01', 'EEG02', 'EEG03']
+    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
+    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names[:2],
+                  neighbors)
+    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
+    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
+    assert_equal(connectivity.shape, (102, 102))
+    assert_equal(len(ch_names), 102)
+    assert_raises(ValueError, read_ch_connectivity, 'bananas!')
+
+
+def test_get_set_sensor_positions():
+    """Test get/set functions for sensor positions
+    """
+    raw1 = Raw(raw_fname)
+    picks = pick_types(raw1.info, meg=False, eeg=True)
+    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
+    raw_pos = raw1._get_channel_positions(picks=picks)
+    assert_array_equal(raw_pos, pos)
+
+    ch_name = raw1.info['ch_names'][13]
+    assert_raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
+    raw2 = Raw(raw_fname)
+    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
+    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
+    assert_array_equal(raw1.info['chs'][13]['loc'],
+                       raw2.info['chs'][13]['loc'])
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_interpolation.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_interpolation.py
new file mode 100644
index 0000000..2b2a881
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_interpolation.py
@@ -0,0 +1,120 @@
+import os.path as op
+import numpy as np
+from numpy.testing import (assert_allclose, assert_array_equal)
+from nose.tools import assert_raises, assert_equal, assert_true
+
+from mne import io, pick_types, pick_channels, read_events, Epochs
+from mne.channels.interpolation import _make_interpolation_matrix
+from mne.utils import run_tests_if_main, slow_test
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+
+
+def _load_data():
+    """Helper function to load data."""
+    # It is more memory efficient to load data in a separate
+    # function so it's loaded on-demand
+    raw = io.Raw(raw_fname, add_eeg_ref=False)
+    events = read_events(event_name)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude=[])
+    # select every second channel for speed, and compensate by using
+    # mode='accurate'.
+    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1::2]
+    picks = pick_types(raw.info, meg=True, eeg=True, exclude=[])
+
+    epochs_eeg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_eeg,
+                        preload=True, reject=dict(eeg=80e-6))
+    epochs_meg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,
+                        preload=True, reject=dict(grad=1000e-12, mag=4e-12))
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=dict(eeg=80e-6, grad=1000e-12,
+                                              mag=4e-12))
+    return raw, epochs, epochs_eeg, epochs_meg
+
+
+@slow_test
+def test_interpolation():
+    """Test interpolation"""
+    raw, epochs, epochs_eeg, epochs_meg = _load_data()
+
+    # It's a trade-off between speed and accuracy. If every second channel is
+    # selected the tests are more than 3x faster but the correlation
+    # drops to 0.8
+    thresh = 0.80
+
+    # create good and bad channels for EEG
+    epochs_eeg.info['bads'] = []
+    goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool)
+    goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False
+    bads_idx = ~goods_idx
+
+    evoked_eeg = epochs_eeg.average()
+    ave_before = evoked_eeg.data[bads_idx]
+
+    # interpolate bad channels for EEG
+    pos = epochs_eeg._get_channel_positions()
+    pos_good = pos[goods_idx]
+    pos_bad = pos[bads_idx]
+    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
+    assert_equal(interpolation.shape, (1, len(epochs_eeg.ch_names) - 1))
+    ave_after = np.dot(interpolation, evoked_eeg.data[goods_idx])
+
+    epochs_eeg.info['bads'] = ['EEG 012']
+    evoked_eeg = epochs_eeg.average()
+    assert_array_equal(ave_after, evoked_eeg.interpolate_bads().data[bads_idx])
+
+    assert_allclose(ave_before, ave_after, atol=2e-6)
+
+    # check that interpolation fails when preload is False
+    epochs_eeg.preload = False
+    assert_raises(ValueError, epochs_eeg.interpolate_bads)
+    epochs_eeg.preload = True
+
+    # check that interpolation changes the data in raw
+    raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info)
+    raw_before = raw_eeg._data[bads_idx]
+    raw_after = raw_eeg.interpolate_bads()._data[bads_idx]
+    assert_equal(np.all(raw_before == raw_after), False)
+
+    # check that interpolation fails when preload is False
+    for inst in [raw, epochs]:
+        assert hasattr(inst, 'preload')
+        inst.preload = False
+        inst.info['bads'] = [inst.ch_names[1]]
+        assert_raises(ValueError, inst.interpolate_bads)
+
+    # check that interpolation works for MEG
+    epochs_meg.info['bads'] = ['MEG 0141']
+    evoked = epochs_meg.average()
+    pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads'])
+
+    # MEG -- raw
+    raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info)
+    raw_meg.info['bads'] = ['MEG 0141']
+    data1 = raw_meg[pick, :][0][0]
+    # reset_bads=False here because epochs_meg appears to share the same info
+    # dict with raw and we want to test the epochs functionality too
+    data2 = raw_meg.interpolate_bads(reset_bads=False)[pick, :][0][0]
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+    # still the same number of bads as before (reset_bads=False keeps them)
+    assert_true(len(raw_meg.info['bads']) == 1)
+
+    # MEG -- epochs
+    data1 = epochs_meg.get_data()[:, pick, :].ravel()
+    epochs_meg.interpolate_bads()
+    data2 = epochs_meg.get_data()[:, pick, :].ravel()
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+    assert_true(len(raw_meg.info['bads']) == 0)
+
+    # MEG -- evoked
+    data1 = evoked.data[pick]
+    data2 = evoked.interpolate_bads().data[pick]
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+
+run_tests_if_main()
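
What the EEG part of this test exercises, in isolation: the interpolation
matrix maps good-channel data to bad-channel estimates via a single matrix
product. A minimal sketch with made-up unit-sphere positions (not real
sensor locations):

    import numpy as np
    from mne.channels.interpolation import _make_interpolation_matrix

    rng = np.random.RandomState(0)
    pos = rng.randn(10, 3)
    pos /= np.sqrt((pos ** 2).sum(axis=1))[:, np.newaxis]  # unit sphere
    pos_good, pos_bad = pos[:9], pos[9:]
    interp = _make_interpolation_matrix(pos_good, pos_bad)  # shape (1, 9)
    data_good = rng.randn(9, 100)
    data_bad_est = np.dot(interp, data_good)  # spherical-spline estimate
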
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_layout.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_layout.py
new file mode 100644
index 0000000..ccc388d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_layout.py
@@ -0,0 +1,380 @@
+from __future__ import print_function
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import copy
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
+from nose.tools import assert_true, assert_raises, assert_equal
+from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
+                          find_layout)
+from mne.channels.layout import (_box_size, _auto_topomap_coords,
+                                 generate_2d_layout)
+from mne.utils import run_tests_if_main
+from mne import pick_types, pick_info
+from mne.io import Raw, read_raw_kit
+from mne.io.meas_info import _empty_info
+from mne.io.constants import FIFF
+from mne.preprocessing.maxfilter import fit_sphere_to_headshape
+from mne.utils import _TempDir
+
+warnings.simplefilter('always')
+
+fif_fname = op.join(op.dirname(__file__), '..', '..', 'io',
+                    'tests', 'data', 'test_raw.fif')
+
+lout_path = op.join(op.dirname(__file__), '..', '..', 'io',
+                    'tests', 'data')
+
+bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
+                  'tests', 'data')
+
+fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                        'data', 'test_ctf_comp_raw.fif')
+
+fname_kit_157 = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
+                        'tests', 'data', 'test.sqd')
+
+test_info = _empty_info()
+test_info.update({
+    'ch_names': ['ICA 001', 'ICA 002', 'EOG 061'],
+    'chs': [{'cal': 1,
+             'ch_name': 'ICA 001',
+             'coil_type': 0,
+             'coord_frame': 0,
+             'kind': 502,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 1,
+             'range': 1.0,
+             'scanno': 1,
+             'unit': -1,
+             'unit_mul': 0},
+            {'cal': 1,
+             'ch_name': 'ICA 002',
+             'coil_type': 0,
+             'coord_frame': 0,
+             'kind': 502,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 2,
+             'range': 1.0,
+             'scanno': 2,
+             'unit': -1,
+             'unit_mul': 0},
+            {'cal': 0.002142000012099743,
+             'ch_name': 'EOG 061',
+             'coil_type': 1,
+             'coord_frame': 0,
+             'kind': 202,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 61,
+             'range': 1.0,
+             'scanno': 376,
+             'unit': 107,
+             'unit_mul': 0}],
+    'nchan': 3})
+
+
+def test_io_layout_lout():
+    """Test IO with .lout files"""
+    tempdir = _TempDir()
+    layout = read_layout('Vectorview-all', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lout'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_equal(layout.names, layout_read.names)
+
+    print(layout)  # test repr
+
+
+def test_io_layout_lay():
+    """Test IO with .lay files"""
+    tempdir = _TempDir()
+    layout = read_layout('CTF151', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lay'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_equal(layout.names, layout_read.names)
+
+
+def test_auto_topomap_coords():
+    """Test mapping of coordinates in 3D space to 2D"""
+    info = Raw(fif_fname).info.copy()
+    picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
+
+    # Remove extra digitization point, so EEG digitization points match up
+    # with the EEG channels
+    del info['dig'][85]
+
+    # Remove head origin from channel locations, so mapping with digitization
+    # points yields the same result
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
+                 FIFF.FIFFV_POINT_EEG,
+                 FIFF.FIFFV_POINT_EXTRA)
+    _, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds)
+    origin_head /= 1000.  # to meters
+    for ch in info['chs']:
+        ch['loc'][:3] -= origin_head
+
+    # Use channel locations
+    l0 = _auto_topomap_coords(info, picks)
+
+    # Remove electrode position information, use digitization points from now
+    # on.
+    for ch in info['chs']:
+        ch['loc'].fill(0)
+
+    l1 = _auto_topomap_coords(info, picks)
+    assert_allclose(l1, l0, atol=1e-3)
+
+    # Test plotting mag topomap without channel locations: it should fail
+    mag_picks = pick_types(info, meg='mag')
+    assert_raises(ValueError, _auto_topomap_coords, info, mag_picks)
+
+    # Test function with too many EEG digitization points: it should fail
+    info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Test function with too little EEG digitization points: it should fail
+    info['dig'] = info['dig'][:-2]
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Electrode positions must be unique
+    info['dig'].append(info['dig'][-1])
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Test function without EEG digitization points: it should fail
+    info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+
+    # Test function without any digitization points, it should fail
+    info['dig'] = None
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+    info['dig'] = []
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+
+
+def test_make_eeg_layout():
+    """Test creation of EEG layout"""
+    tempdir = _TempDir()
+    tmp_name = 'foo'
+    lout_name = 'test_raw'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    info = Raw(fif_fname).info
+    info['bads'].append(info['ch_names'][360])
+    layout = make_eeg_layout(info, exclude=[])
+    assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
+                                               if ch.startswith('EE')]))
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_allclose(layout.pos, lout_new.pos, atol=0.1)
+    assert_array_equal(lout_orig.names, lout_new.names)
+
+    # Test input validation
+    assert_raises(ValueError, make_eeg_layout, info, radius=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, radius=0.6)
+    assert_raises(ValueError, make_eeg_layout, info, width=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, width=1.1)
+    assert_raises(ValueError, make_eeg_layout, info, height=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, height=1.1)
+
+
+def test_make_grid_layout():
+    """Test creation of grid layout"""
+    tempdir = _TempDir()
+    tmp_name = 'bar'
+    lout_name = 'test_ica'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    layout = make_grid_layout(test_info)
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_array_equal(lout_orig.pos, lout_new.pos)
+    assert_array_equal(lout_orig.names, lout_new.names)
+
+    # Test creating grid layout with specified number of columns
+    layout = make_grid_layout(test_info, n_col=2)
+    # Vertical positions should be equal
+    assert_true(layout.pos[0, 1] == layout.pos[1, 1])
+    # Horizontal positions should be unequal
+    assert_true(layout.pos[0, 0] != layout.pos[1, 0])
+    # Box sizes should be equal
+    assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
+
+
+def test_find_layout():
+    """Test finding layout"""
+    assert_raises(ValueError, find_layout, test_info, ch_type='meep')
+
+    sample_info = Raw(fif_fname).info
+    grads = pick_types(sample_info, meg='grad')
+    sample_info2 = pick_info(sample_info, grads)
+
+    mags = pick_types(sample_info, meg='mag')
+    sample_info3 = pick_info(sample_info, mags)
+
+    # mock new convention
+    sample_info4 = copy.deepcopy(sample_info)
+    for ii, name in enumerate(sample_info4['ch_names']):
+        new = name.replace(' ', '')
+        sample_info4['ch_names'][ii] = new
+        sample_info4['chs'][ii]['ch_name'] = new
+
+    eegs = pick_types(sample_info, meg=False, eeg=True)
+    sample_info5 = pick_info(sample_info, eegs)
+
+    lout = find_layout(sample_info, ch_type=None)
+    assert_true(lout.kind == 'Vectorview-all')
+    assert_true(all(' ' in k for k in lout.names))
+
+    lout = find_layout(sample_info2, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    # test new vector-view
+    lout = find_layout(sample_info4, ch_type=None)
+    assert_true(lout.kind == 'Vectorview-all')
+    assert_true(all(' ' not in k for k in lout.names))
+
+    lout = find_layout(sample_info, ch_type='grad')
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2)
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2, ch_type='grad')
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    lout = find_layout(sample_info, ch_type='mag')
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3)
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3, ch_type='mag')
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    lout = find_layout(sample_info, ch_type='eeg')
+    assert_true(lout.kind == 'EEG')
+    lout = find_layout(sample_info5)
+    assert_true(lout.kind == 'EEG')
+    lout = find_layout(sample_info5, ch_type='eeg')
+    assert_true(lout.kind == 'EEG')
+    # no common layout, 'meg' option not supported
+
+    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
+    lout = find_layout(Raw(fname_bti_raw).info)
+    assert_true(lout.kind == 'magnesWH3600')
+
+    lout = find_layout(Raw(fname_ctf_raw).info)
+    assert_true(lout.kind == 'CTF-275')
+
+    lout = find_layout(read_raw_kit(fname_kit_157).info)
+    assert_true(lout.kind == 'KIT-157')
+
+
+def test_box_size():
+    """Test calculation of box sizes."""
+    # No points. Box size should be 1,1.
+    assert_allclose(_box_size([]), (1.0, 1.0))
+
+    # Create one point. Box size should be 1,1.
+    point = [(0, 0)]
+    assert_allclose(_box_size(point), (1.0, 1.0))
+
+    # Create two points. Box size should be 0.5,1.
+    points = [(0.25, 0.5), (0.75, 0.5)]
+    assert_allclose(_box_size(points), (0.5, 1.0))
+
+    # Create three points. Box size should be (0.5, 0.5).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points), (0.5, 0.5))
+
+    # Create a grid of points. Box size should be (0.1, 0.1).
+    x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
+    x, y = x.ravel(), y.ravel()
+    assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
+
+    # Create a random set of points. This should never break the function.
+    rng = np.random.RandomState(42)
+    points = rng.rand(100, 2)
+    width, height = _box_size(points)
+    assert_true(width is not None)
+    assert_true(height is not None)
+
+    # Test specifying an existing width.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
+
+    # Test specifying an existing width that has influence on the calculated
+    # height.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
+
+    # Test specifying an existing height.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
+
+    # Test specifying an existing height that has influence on the calculated
+    # width.
+    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
+    assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
+
+    # Test specifying both width and height. The function should simply return
+    # these.
+    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
+    assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
+
+    # Test specifying a width that will cause unfixable horizontal overlap and
+    # essentially breaks the function (height will be 0).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_array_equal(_box_size(points, width=1), (1, 0))
+
+    # Test adding some padding.
+    # Create three points. Box size should be a little less than (0.5, 0.5).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
+
+
+def test_generate_2d_layout():
+    """Test creation of a layout from 2d points."""
+    snobg = 10
+    sbg = 15
+    side = range(snobg)
+    bg_image = np.random.randn(sbg, sbg)
+    w, h = [.2, .5]
+
+    # Generate fake data
+    xy = np.array([(i, j) for i in side for j in side])
+    lt = generate_2d_layout(xy, w=w, h=h)
+
+    # Correct points ordering / minmaxing
+    comp_1, comp_2 = [(5, 0), (7, 0)]
+    assert_true(lt.pos[:, :2].max() == 1)
+    assert_true(lt.pos[:, :2].min() == 0)
+    with np.errstate(invalid='ignore'):  # divide by zero
+        assert_allclose(xy[comp_2] / float(xy[comp_1]),
+                        lt.pos[comp_2] / float(lt.pos[comp_1]))
+    assert_allclose(lt.pos[0, [2, 3]], [w, h])
+
+    # Correct number elements
+    assert_true(lt.pos.shape[1] == 4)
+    assert_true(len(lt.box) == 4)
+
+    # Make sure background image normalizing is correct
+    lt_bg = generate_2d_layout(xy, bg_image=bg_image)
+    assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_montage.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_montage.py
new file mode 100644
index 0000000..23da88f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/channels/tests/test_montage.py
@@ -0,0 +1,209 @@
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_equal
+
+import numpy as np
+from numpy.testing import (assert_array_equal, assert_almost_equal,
+                           assert_allclose, assert_array_almost_equal)
+
+from mne.channels.montage import read_montage, _set_montage, read_dig_montage
+from mne.utils import _TempDir
+from mne import create_info, EvokedArray
+from mne.coreg import fit_matched_points
+from mne.transforms import apply_trans, get_ras_to_neuromag_trans
+from mne.io.constants import FIFF
+from mne.io.meas_info import _read_dig_points
+from mne.io.kit import read_mrk
+
+
+p_dir = op.dirname(__file__)
+elp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_elp.txt')
+hsp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_hsp.txt')
+hpi = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_mrk.sqd')
+
+
+def test_montage():
+    """Test making montages"""
+    tempdir = _TempDir()
+    # no pep8
+    input_str = ["""FidNz 0.00000 10.56381 -2.05108
+    FidT9 -7.82694 0.45386 -3.76056
+    FidT10 7.82694 0.45386 -3.76056""",
+    """// MatLab   Sphere coordinates [degrees]         Cartesian coordinates
+    // Label       Theta       Phi    Radius         X         Y         Z       off sphere surface
+      E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011
+      E2      44.600      -0.880       1.000    0.7119    0.7021   -0.0154   0.00000000000000000
+      E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000""",  # noqa
+    """# ASA electrode file
+    ReferenceLabel  avg
+    UnitPosition    mm
+    NumberPositions=    68
+    Positions
+    -86.0761 -19.9897 -47.9860
+    85.7939 -20.0093 -48.0310
+    0.0083 86.8110 -39.9830
+    Labels
+    LPA
+    RPA
+    Nz
+    """,
+    """Site  Theta  Phi
+    Fp1  -92    -72
+    Fp2   92     72
+    F3   -60    -51
+    """,
+    """346
+     EEG	      F3	 -62.027	 -50.053	      85
+     EEG	      Fz	  45.608	      90	      85
+     EEG	      F4	   62.01	  50.103	      85
+    """,
+    """
+    eeg Fp1 -95.0 -31.0 -3.0
+    eeg AF7 -81 -59 -3
+    eeg AF3 -87 -41 28
+    """]
+    kinds = ['test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp',
+             'test.hpts']
+    for kind, text in zip(kinds, input_str):
+        fname = op.join(tempdir, kind)
+        with open(fname, 'w') as fid:
+            fid.write(text)
+        montage = read_montage(fname)
+        assert_equal(len(montage.ch_names), 3)
+        assert_equal(len(montage.ch_names), len(montage.pos))
+        assert_equal(montage.pos.shape, (3, 3))
+        assert_equal(montage.kind, op.splitext(kind)[0])
+        if kind.endswith('csd'):
+            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
+                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
+                     ('off_sph', 'f8')]
+            try:
+                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
+            except TypeError:
+                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
+            pos2 = np.c_[table['x'], table['y'], table['z']]
+            assert_array_almost_equal(pos2, montage.pos, 4)
+    # test transform
+    input_str = """
+    eeg Fp1 -95.0 -31.0 -3.0
+    eeg AF7 -81 -59 -3
+    eeg AF3 -87 -41 28
+    cardinal 2 -91 0 -42
+    cardinal 1 0 -91 -42
+    cardinal 3 0 91 -42
+    """
+    kind = 'test_fid.hpts'
+    fname = op.join(tempdir, kind)
+    with open(fname, 'w') as fid:
+        fid.write(input_str)
+    montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
+    # check coordinate transformation
+    pos = np.array([-95.0, -31.0, -3.0])
+    nasion = np.array([-91, 0, -42])
+    lpa = np.array([0, -91, -42])
+    rpa = np.array([0, 91, -42])
+    fids = np.vstack((nasion, lpa, rpa))
+    trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
+    pos = apply_trans(trans, pos)
+    assert_array_equal(montage.pos[0], pos)
+    idx = montage.ch_names.index('2')
+    assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
+    idx = montage.ch_names.index('1')
+    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
+    idx = montage.ch_names.index('3')
+    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
+    pos = np.array([-95.0, -31.0, -3.0])
+    montage_fname = op.join(tempdir, 'test_fid.hpts')
+    montage = read_montage(montage_fname, unit='mm')
+    assert_array_equal(montage.pos[0], pos * 1e-3)
+
+    # test with last
+    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
+    _set_montage(info, montage)
+    pos2 = np.array([c['loc'][:3] for c in info['chs']])
+    assert_array_equal(pos2, montage.pos)
+    assert_equal(montage.ch_names, info['ch_names'])
+
+    info = create_info(
+        montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
+
+    evoked = EvokedArray(
+        data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)
+    evoked.set_montage(montage)
+    pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
+    assert_array_equal(pos3, montage.pos)
+    assert_equal(montage.ch_names, evoked.info['ch_names'])
+
+
+def test_read_dig_montage():
+    """Test read_dig_montage"""
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=False)
+    elp_points = _read_dig_points(elp)
+    hsp_points = _read_dig_points(hsp)
+    hpi_points = read_mrk(hpi)
+    assert_equal(montage.point_names, names)
+    assert_array_equal(montage.elp, elp_points)
+    assert_array_equal(montage.hsp, hsp_points)
+    assert_array_equal(montage.hpi, hpi_points)
+    assert_array_equal(montage.dev_head_t, np.identity(4))
+    montage = read_dig_montage(hsp, hpi, elp, names,
+                               transform=True, dev_head_t=True)
+    # check coordinate transformation
+    # nasion
+    assert_almost_equal(montage.elp[0, 0], 0)
+    assert_almost_equal(montage.nasion[0], 0)
+    assert_almost_equal(montage.elp[0, 2], 0)
+    assert_almost_equal(montage.nasion[2], 0)
+    # lpa and rpa
+    assert_allclose(montage.elp[1:3, 1:], 0, atol=1e-16)
+    assert_allclose(montage.lpa[1:], 0, atol=1e-16)
+    assert_allclose(montage.rpa[1:], 0, atol=1e-16)
+    # device head transform
+    dev_head_t = fit_matched_points(tgt_pts=montage.elp[3:],
+                                    src_pts=montage.hpi, out='trans')
+    assert_array_equal(montage.dev_head_t, dev_head_t)
+
+
+def test_set_dig_montage():
+    """Test applying DigMontage to inst
+
+    Extensive testing of applying `dig` to info is done in test_meas_info
+    with `test_make_dig_points`.
+    """
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    hsp_points = _read_dig_points(hsp)
+    elp_points = _read_dig_points(elp)
+    hpi_points = read_mrk(hpi)
+    p0, p1, p2 = elp_points[:3]
+    nm_trans = get_ras_to_neuromag_trans(p0, p1, p2)
+    elp_points = apply_trans(nm_trans, elp_points)
+    nasion_point, lpa_point, rpa_point = elp_points[:3]
+    hsp_points = apply_trans(nm_trans, hsp_points)
+
+    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=True)
+    info = create_info(['Test Ch'], 1e3, ['eeg'])
+    _set_montage(info, montage)
+    hs = np.array([p['r'] for i, p in enumerate(info['dig'])
+                   if p['kind'] == FIFF.FIFFV_POINT_EXTRA])
+    nasion_dig = np.array([p['r'] for p in info['dig']
+                           if all([p['ident'] == FIFF.FIFFV_POINT_NASION,
+                                   p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    lpa_dig = np.array([p['r'] for p in info['dig']
+                        if all([p['ident'] == FIFF.FIFFV_POINT_LPA,
+                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    rpa_dig = np.array([p['r'] for p in info['dig']
+                        if all([p['ident'] == FIFF.FIFFV_POINT_RPA,
+                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    hpi_dig = np.array([p['r'] for p in info['dig']
+                        if p['kind'] == FIFF.FIFFV_POINT_HPI])
+    assert_array_equal(hs, hsp_points)
+    assert_array_equal(nasion_dig.ravel(), nasion_point)
+    assert_array_equal(lpa_dig.ravel(), lpa_point)
+    assert_array_equal(rpa_dig.ravel(), rpa_point)
+    assert_array_equal(hpi_dig, hpi_points)
+    assert_array_equal(montage.dev_head_t, info['dev_head_t']['trans'])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/chpi.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/chpi.py
new file mode 100644
index 0000000..13e4bf3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/chpi.py
@@ -0,0 +1,440 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from os import path as op
+from scipy import linalg
+
+from .io.pick import pick_types, pick_channels
+from .io.base import _BaseRaw
+from .io.constants import FIFF
+from .forward import (_magnetic_dipole_field_vec, _create_meg_coils,
+                      _concatenate_coils)
+from .cov import make_ad_hoc_cov, _get_whitener_data
+from .transforms import apply_trans, invert_transform
+from .utils import verbose, logger, check_version
+from .fixes import partial
+from .externals.six import string_types
+
+
+# ############################################################################
+# Reading from text or FIF file
+
+@verbose
+def get_chpi_positions(raw, t_step=None, verbose=None):
+    """Extract head positions
+
+    Note that the raw instance must have CHPI channels recorded.
+
+    Parameters
+    ----------
+    raw : instance of Raw | str
+        Raw instance to extract the head positions from. Can also be a
+        path to a Maxfilter log file (str).
+    t_step : float | None
+        Sampling interval to use when converting data. If None, it will
+        be automatically determined. By default, a sampling interval of
+        1 second is used when processing raw data. If processing a
+        Maxfilter log file, this must be None because the log file
+        itself will determine the sampling interval.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The head-frame position Y is related to the device-frame position X as:
+
+        Y = np.dot(rotation, X) + translation
+
+    Note that if a Maxfilter log file is being processed, the start time
+    may not use the same reference point as the rest of mne-python (i.e.,
+    it could be referenced relative to raw.first_samp or something else).
+    """
+    if isinstance(raw, _BaseRaw):
+        # for simplicity, we'll sample at 1 sec intervals like maxfilter
+        if t_step is None:
+            t_step = 1.0
+        t_step = float(t_step)
+        picks = pick_types(raw.info, meg=False, ref_meg=False,
+                           chpi=True, exclude=[])
+        if len(picks) == 0:
+            raise RuntimeError('raw file has no CHPI channels')
+        time_idx = raw.time_as_index(np.arange(0, raw.times[-1], t_step))
+        data = [raw[picks, ti] for ti in time_idx]
+        t = np.array([d[1] for d in data])
+        data = np.array([d[0][:, 0] for d in data])
+        data = np.c_[t, data]
+    else:
+        if not isinstance(raw, string_types):
+            raise TypeError('raw must be an instance of Raw or string')
+        if not op.isfile(raw):
+            raise IOError('File "%s" does not exist' % raw)
+        if t_step is not None:
+            raise ValueError('t_step must be None if processing a log')
+        data = np.loadtxt(raw, skiprows=1)  # first line is header, skip it
+    return _quats_to_trans_rot_t(data)
+
+
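
A short usage sketch for the function above (the file name is hypothetical;
any raw recording with CHPI channels, or a Maxfilter log path, would do):

    import mne
    from mne.chpi import get_chpi_positions

    raw = mne.io.Raw('raw_with_chpi.fif')  # hypothetical file name
    translation, rotation, t = get_chpi_positions(raw, t_step=1.0)
    # head-frame position of a device-frame point x at time index i:
    #     y = np.dot(rotation[i], x) + translation[i]
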
+def _quats_to_trans_rot_t(quats):
+    """Convert Maxfilter-formatted head position quaternions
+
+    Parameters
+    ----------
+    quats : ndarray, shape (N, 10)
+        Maxfilter-formatted quaternions.
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    See Also
+    --------
+    _calculate_chpi_positions, get_chpi_positions
+    """
+    t = quats[:, 0].copy()
+    rotation = _quat_to_rot(quats[:, 1:4])
+    translation = quats[:, 4:7].copy()
+    return translation, rotation, t
+
+
+def _quat_to_rot(q):
+    """Helper to convert quaternions to rotations"""
+    # z = a + bi + cj + dk
+    b, c, d = q[..., 0], q[..., 1], q[..., 2]
+    bb, cc, dd = b * b, c * c, d * d
+    # use max() here to be safe in case roundoff errs put us over
+    aa = np.maximum(1. - bb - cc - dd, 0.)
+    a = np.sqrt(aa)
+    ab_2 = 2 * a * b
+    ac_2 = 2 * a * c
+    ad_2 = 2 * a * d
+    bc_2 = 2 * b * c
+    bd_2 = 2 * b * d
+    cd_2 = 2 * c * d
+    rotation = np.array([(aa + bb - cc - dd, bc_2 - ad_2, bd_2 + ac_2),
+                         (bc_2 + ad_2, aa + cc - bb - dd, cd_2 - ab_2),
+                         (bd_2 - ac_2, cd_2 + ab_2, aa + dd - bb - cc),
+                         ])
+    if q.ndim > 1:
+        rotation = np.rollaxis(np.rollaxis(rotation, 1, q.ndim + 1), 0, q.ndim)
+    return rotation
+
+
+def _rot_to_quat(rot):
+    """Here we derive qw from qx, qy, qz"""
+    qw_4 = np.sqrt(1 + rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]) * 2
+    qx = (rot[..., 2, 1] - rot[..., 1, 2]) / qw_4
+    qy = (rot[..., 0, 2] - rot[..., 2, 0]) / qw_4
+    qz = (rot[..., 1, 0] - rot[..., 0, 1]) / qw_4
+    return np.rollaxis(np.array((qx, qy, qz)), 0, rot.ndim - 1)
+
+
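
The two helpers above are inverses on unit quaternions with non-negative
real part, since _quat_to_rot reconstructs qw from the 3-element form; a
quick sanity sketch:

    import numpy as np
    from mne.chpi import _quat_to_rot, _rot_to_quat

    q = np.array([0.1, -0.2, 0.3])  # (qx, qy, qz) with norm < 1, so qw > 0
    rot = _quat_to_rot(q)
    print(np.allclose(np.dot(rot, rot.T), np.eye(3)))  # orthogonal: True
    print(np.allclose(_rot_to_quat(rot), q))           # round-trip:  True
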
+# ############################################################################
+# Estimate positions from data
+
+def _get_hpi_info(info):
+    """Helper to get HPI information from raw"""
+    if len(info['hpi_meas']) == 0 or \
+            ('coil_freq' not in info['hpi_meas'][0]['hpi_coils'][0]):
+        raise RuntimeError('Appropriate cHPI information not found in '
+                           'raw.info["hpi_meas"], cannot process cHPI')
+    hpi_result = info['hpi_results'][-1]
+    hpi_coils = info['hpi_meas'][-1]['hpi_coils']
+    hpi_num = np.array([h['number'] for h in hpi_coils])
+    pos_order = np.searchsorted(hpi_num, hpi_result['order'])
+    hpi_dig = [d for d in info['dig'] if d['kind'] == FIFF.FIFFV_POINT_HPI]
+    # this shouldn't happen, eventually we could add the transforms
+    # necessary to put it in head coords
+    if not all(d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig):
+        raise RuntimeError('cHPI coordinate frame incorrect')
+    hpi_rrs = np.array([d['r'] for d in hpi_dig])[pos_order]
+    hpi_freqs = np.array([float(x['coil_freq']) for x in hpi_coils])
+    # how cHPI active is indicated in the FIF file
+    hpi_sub = info['hpi_subsystem']
+    hpi_pick = pick_channels(info['ch_names'], [hpi_sub['event_channel']])[0]
+    hpi_on = np.sum([coil['event_bits'][0] for coil in hpi_sub['hpi_coils']])
+    return hpi_freqs, hpi_rrs, hpi_pick, hpi_on, pos_order
+
+
+def _magnetic_dipole_objective(x, B, B2, w, coils):
+    """Project data onto right eigenvectors of whitened forward"""
+    fwd = np.dot(_magnetic_dipole_field_vec(x[np.newaxis, :], coils), w.T)
+    one = np.dot(linalg.svd(fwd, full_matrices=False)[2], B)
+    Bm2 = np.sum(one * one)
+    return B2 - Bm2
+
+
+def _fit_magnetic_dipole(B_orig, w, coils, x0):
+    """Fit a single bit of data (x0 = pos)"""
+    from scipy.optimize import fmin_cobyla
+    B = np.dot(w, B_orig)
+    B2 = np.dot(B, B)
+    objective = partial(_magnetic_dipole_objective, B=B, B2=B2,
+                        w=w, coils=coils)
+    x = fmin_cobyla(objective, x0, (), rhobeg=1e-2, rhoend=1e-4, disp=False)
+    return x, 1. - objective(x) / B2
+
+
+def _chpi_objective(x, est_pos_dev, hpi_head_rrs):
+    """Helper objective function"""
+    rot = _quat_to_rot(x[:3]).T
+    d = np.dot(est_pos_dev, rot) + x[3:] - hpi_head_rrs
+    return np.sum(d * d)
+
+
+def _fit_chpi_pos(est_pos_dev, hpi_head_rrs, x0):
+    """Fit rotation and translation parameters for cHPI coils"""
+    from scipy.optimize import fmin_cobyla
+    denom = np.sum((hpi_head_rrs - np.mean(hpi_head_rrs, axis=0)) ** 2)
+    objective = partial(_chpi_objective, est_pos_dev=est_pos_dev,
+                        hpi_head_rrs=hpi_head_rrs)
+    x = fmin_cobyla(objective, x0, (), rhobeg=1e-2, rhoend=1e-6, disp=False)
+    return x, 1. - objective(x) / denom
+
+
+def _angle_between_quats(x, y):
+    """Compute the angle between two quaternions w/3-element representations"""
+    # convert to complete quaternion representation
+    # use max() here to be safe in case roundoff errs put us over
+    x0 = np.sqrt(np.maximum(1. - x[..., 0] ** 2 -
+                            x[..., 1] ** 2 - x[..., 2] ** 2, 0.))
+    y0 = np.sqrt(np.maximum(1. - y[..., 0] ** 2 -
+                            y[..., 1] ** 2 - y[..., 2] ** 2, 0.))
+    # the difference z = x * conj(y), and theta = np.arccos(z0)
+    z0 = np.maximum(np.minimum(y0 * x0 + (x * y).sum(axis=-1), 1.), -1)
+    return 2 * np.arccos(z0)
+
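
As a quick check of the formula above: a rotation about the z axis by an
angle theta has the 3-element quaternion (0, 0, sin(theta / 2)), and its
angle to the identity rotation (0, 0, 0) recovers theta:

    import numpy as np
    from mne.chpi import _angle_between_quats

    theta = 0.3
    q0 = np.zeros(3)
    q1 = np.array([0., 0., np.sin(theta / 2.)])
    print(np.allclose(_angle_between_quats(q0, q1), theta))  # True
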
+
+@verbose
+def _calculate_chpi_positions(raw, t_step_min=0.1, t_step_max=10.,
+                              t_window=0.2, dist_limit=0.005, gof_limit=0.98,
+                              verbose=None):
+    """Calculate head positions using cHPI coils
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data with cHPI information.
+    t_step_min : float
+        Minimum time step to use. If correlations are sufficiently high,
+        t_step_max will be used.
+    t_step_max : float
+        Maximum time step to use.
+    t_window : float
+        Time window to use to estimate the head positions.
+    dist_limit : float
+        Minimum distance (m) to accept for coil position fitting.
+    gof_limit : float
+        Minimum goodness of fit to accept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The number of time points ``N`` will depend on the velocity of head
+    movements as well as ``t_step_max`` and ``t_step_min``.
+
+    See Also
+    --------
+    get_chpi_positions
+    """
+    from scipy.spatial.distance import cdist
+    if not (check_version('numpy', '1.7') and check_version('scipy', '0.11')):
+        raise RuntimeError('numpy>=1.7 and scipy>=0.11 required')
+    hpi_freqs, orig_head_rrs, hpi_pick, hpi_on, order = _get_hpi_info(raw.info)
+    sfreq, ch_names = raw.info['sfreq'], raw.info['ch_names']
+    # initial transforms
+    dev_head_t = raw.info['dev_head_t']['trans']
+    head_dev_t = invert_transform(raw.info['dev_head_t'])['trans']
+    # determine timing
+    n_window = int(round(t_window * sfreq))
+    fit_starts = np.round(np.arange(0, raw.last_samp / sfreq, t_step_min) *
+                          sfreq).astype(int)
+    fit_starts = fit_starts[fit_starts < raw.n_times - n_window]
+    fit_times = (fit_starts + (n_window + 1) // 2) / sfreq
+    n_freqs = len(hpi_freqs)
+    logger.info('HPIFIT: %s coils digitized in order %s'
+                % (n_freqs, ' '.join(str(o + 1) for o in order)))
+    logger.info('Coordinate transformation:')
+    for d in (dev_head_t[0, :3], dev_head_t[1, :3], dev_head_t[2, :3],
+              dev_head_t[:3, 3] * 1000.):
+        logger.info('{0:8.4f} {1:8.4f} {2:8.4f}'.format(*d))
+    logger.info('Using %s HPI coils: %s Hz'
+                % (n_freqs, ' '.join(str(int(s)) for s in hpi_freqs)))
+    # Set up amplitude fits
+    slope = np.arange(n_window).astype(np.float64)[:, np.newaxis]
+    f_t = 2 * np.pi * hpi_freqs[np.newaxis, :] * (slope / sfreq)
+    model = np.concatenate([np.sin(f_t), np.cos(f_t),
+                            slope, np.ones((n_window, 1))], axis=1)
+    inv_model = linalg.pinv(model)
+    del slope, f_t
+
+    # Set up magnetic dipole fits
+    picks = pick_types(raw.info, meg=True, eeg=False)
+    picks_chpi = np.concatenate([picks, [hpi_pick]])
+    logger.info('Found %s total and %s good MEG channels'
+                % (len(ch_names), len(picks)))
+    megchs = [ch for ci, ch in enumerate(raw.info['chs']) if ci in picks]
+    coils = _concatenate_coils(_create_meg_coils(megchs, 'normal'))
+
+    cov = make_ad_hoc_cov(raw.info, verbose=False)
+    whitener = _get_whitener_data(raw.info, cov, picks, verbose=False)
+    dev_head_quat = np.concatenate([_rot_to_quat(dev_head_t[:3, :3]),
+                                    dev_head_t[:3, 3]])
+    orig_dists = cdist(orig_head_rrs, orig_head_rrs)
+    last_quat = dev_head_quat.copy()
+    last_data_fit = None  # this indicates it's the first run
+    last_time = -t_step_min
+    last_head_rrs = orig_head_rrs.copy()
+    corr_limit = 0.98
+    quats = []
+    est_pos_dev = apply_trans(head_dev_t, orig_head_rrs)
+    for start, t in zip(fit_starts, fit_times):
+        #
+        # 1. Fit amplitudes for each channel from each of the N cHPI sinusoids
+        #
+        meg_chpi_data = raw[picks_chpi, start:start + n_window][0]
+        this_data = meg_chpi_data[:-1]
+        chpi_data = meg_chpi_data[-1]
+        if not (chpi_data == hpi_on).all():
+            logger.info('HPI not turned on (t=%7.3f)' % t)
+            continue
+        X = np.dot(inv_model, this_data.T)
+        data_diff = np.dot(model, X).T - this_data
+        data_diff *= data_diff
+        this_data *= this_data
+        g_chan = (1 - np.sqrt(data_diff.sum(axis=1) / this_data.sum(axis=1)))
+        g_sin = (1 - np.sqrt(data_diff.sum() / this_data.sum()))
+        del data_diff, this_data
+        X_sin, X_cos = X[:n_freqs], X[n_freqs:2 * n_freqs]
+        s_fit = np.sqrt(X_cos * X_cos + X_sin * X_sin)
+        if last_data_fit is None:  # first iteration
+            corr = 0.
+        else:
+            corr = np.corrcoef(s_fit.ravel(), last_data_fit.ravel())[0, 1]
+
+        # check to see if we need to continue
+        if t - last_time <= t_step_max - 1e-7 and corr > corr_limit and \
+                t != fit_times[-1]:
+            continue  # don't need to re-fit data
+        last_data_fit = s_fit.copy()  # save *before* inplace sign transform
+
+        # figure out principal direction of the vectors and align
+        # for s, c, fit in zip(X_sin, X_cos, s_fit):
+        #     fit *= np.sign(linalg.svd([s, c], full_matrices=False)[2][0])
+        s_fit *= np.sign(np.arctan2(X_sin, X_cos))
+
+        #
+        # 2. Fit magnetic dipole for each coil to obtain coil positions
+        #    in device coordinates
+        #
+        logger.info('HPI amplitude correlation %s: %s (%s chnls > 0.95)'
+                    % (t, g_sin, (g_chan > 0.95).sum()))
+        outs = [_fit_magnetic_dipole(f, whitener, coils, pos)
+                for f, pos in zip(s_fit, est_pos_dev)]
+        est_pos_dev = np.array([o[0] for o in outs])
+        g_coils = [o[1] for o in outs]
+        these_dists = cdist(est_pos_dev, est_pos_dev)
+        these_dists = np.abs(orig_dists - these_dists)
+        # there is probably a better algorithm for finding the bad ones...
+        good = False
+        use_mask = np.ones(n_freqs, bool)
+        while not good:
+            d = (these_dists[use_mask][:, use_mask] <= dist_limit)
+            good = d.all()
+            if not good:
+                if use_mask.sum() == 2:
+                    use_mask[:] = False
+                    break  # failure
+                # exclude next worst point
+                badness = these_dists[use_mask][:, use_mask].sum(axis=0)
+                exclude = np.where(use_mask)[0][np.argmax(badness)]
+                use_mask[exclude] = False
+        good = use_mask.sum() >= 3
+        if not good:
+            logger.warning('    %s/%s acceptable hpi fits found, cannot '
+                           'determine the transformation! (t=%7.3f)'
+                           % (use_mask.sum(), n_freqs, t))
+            continue
+
+        #
+        # 3. Fit the head translation and rotation params (minimize error
+        #    between coil positions and the head coil digitization positions)
+        #
+        dev_head_quat, g = _fit_chpi_pos(est_pos_dev[use_mask],
+                                         orig_head_rrs[use_mask],
+                                         dev_head_quat)
+        if g < gof_limit:
+            logger.info('    Bad coil fit (g = %0.3f)! (t=%7.3f)' % (g, t))
+            continue
+        this_dev_head_t = np.concatenate((_quat_to_rot(dev_head_quat[:3]),
+                                          dev_head_quat[3:][:, np.newaxis]),
+                                         axis=1)
+        this_dev_head_t = np.concatenate((this_dev_head_t, [[0, 0, 0, 1.]]))
+        this_head_rrs = apply_trans(this_dev_head_t, est_pos_dev)
+        dt = t - last_time
+        vs = tuple(1000. * np.sqrt(np.sum((last_head_rrs -
+                                           this_head_rrs) ** 2, axis=1)) / dt)
+        logger.info('Hpi fit OK, movements [mm/s] = ' +
+                    ' / '.join(['%0.1f'] * n_freqs) % vs)
+        errs = [0] * n_freqs  # XXX eventually calculate this
+        e = 0.  # XXX eventually calculate this
+        d = 100 * np.sqrt(np.sum((last_quat[3:] - dev_head_quat[3:]) ** 2))  # cm
+        r = _angle_between_quats(last_quat[:3], dev_head_quat[:3]) / dt
+        v = d / dt  # cm/sec
+        for ii in range(n_freqs):
+            if use_mask[ii]:
+                start, end = ' ', '/'
+            else:
+                start, end = '(', ')'
+            log_str = (start +
+                       '{0:6.1f} {1:6.1f} {2:6.1f} / ' +
+                       '{3:6.1f} {4:6.1f} {5:6.1f} / ' +
+                       'g = {6:0.3f} err = {7:4.1f} ' +
+                       end)
+            if ii <= 2:
+                log_str += '{8:6.3f} {9:6.3f} {10:6.3f}'
+            elif ii == 3:
+                log_str += '{8:6.1f} {9:6.1f} {10:6.1f}'
+            vals = np.concatenate((1000 * orig_head_rrs[ii],
+                                   1000 * this_head_rrs[ii],
+                                   [g_coils[ii], errs[ii]]))
+            if ii <= 2:
+                vals = np.concatenate((vals, this_dev_head_t[ii, :3]))
+            elif ii == 3:
+                vals = np.concatenate((vals, this_dev_head_t[:3, 3] * 1000.))
+            logger.debug(log_str.format(*vals))
+        logger.info('#t = %0.3f, #e = %0.2f cm, #g = %0.3f, #v = %0.2f cm/s, '
+                    '#r = %0.2f rad/s, #d = %0.2f cm' % (t, e, g, v, r, d))
+        quats.append(np.concatenate(([t], dev_head_quat, [g], [1. - g], [v])))
+        last_time = t
+        last_head_rrs = this_head_rrs.copy()
+    quats = np.array(quats)
+    quats = np.zeros((0, 10)) if quats.size == 0 else quats
+    return _quats_to_trans_rot_t(quats)
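
For reference, a minimal sketch of driving the fitter above; it is a
private helper, and it requires a raw file whose info carries the cHPI
frequency and subsystem entries (the file name below is hypothetical):

    import mne
    from mne.chpi import _calculate_chpi_positions

    raw = mne.io.Raw('raw_with_chpi.fif')  # hypothetical file name
    translation, rotation, t = _calculate_chpi_positions(
        raw, t_step_min=0.1, t_step_max=10., t_window=0.2)
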
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/__init__.py
new file mode 100644
index 0000000..eb018c3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/__init__.py
@@ -0,0 +1 @@
+from . import utils
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_browse_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_browse_raw.py
new file mode 100644
index 0000000..409aabf
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_browse_raw.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""Browse raw data
+
+For example, you can run:
+
+$ mne browse_raw --raw sample_audvis_raw.fif \
+                 --proj sample_audvis_ecg_proj.fif \
+                 --eve sample_audvis_raw-eve.fif
+"""
+
+# Authors : Eric Larson, PhD
+
+import sys
+import mne
+
+
+def run():
+    import matplotlib.pyplot as plt
+
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("--raw", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--proj", dest="proj_in",
+                      help="Projector file", metavar="FILE",
+                      default='')
+    parser.add_option("--eve", dest="eve_in",
+                      help="Events file", metavar="FILE",
+                      default='')
+    parser.add_option("-d", "--duration", dest="duration", type="float",
+                      help="Time window for plotting (sec)",
+                      default=10.0)
+    parser.add_option("-t", "--start", dest="start", type="float",
+                      help="Initial start time for plotting",
+                      default=0.0)
+    parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
+                      help="Number of channels to plot at a time",
+                      default=20)
+    parser.add_option("-o", "--order", dest="order",
+                      help="Order for plotting ('type' or 'original')",
+                      default='type')
+    parser.add_option("-p", "--preload", dest="preload",
+                      help="Preload raw data (for faster navigaton)",
+                      default=False)
+    parser.add_option("-s", "--show_options", dest="show_options",
+                      help="Show projection options dialog",
+                      default=False)
+    parser.add_option("--allowmaxshield", dest="maxshield",
+                      help="Allow loading MaxShield processed data",
+                      action="store_true")
+    parser.add_option("--highpass", dest="highpass", type="float",
+                      help="Display high-pass filter corner frequency",
+                      default=-1)
+    parser.add_option("--lowpass", dest="lowpass", type="float",
+                      help="Display low-pass filter corner frequency",
+                      default=-1)
+    parser.add_option("--filtorder", dest="filtorder", type="int",
+                      help="Display filtering IIR order",
+                      default=4)
+    parser.add_option("--clipping", dest="clipping",
+                      help="Enable trace clipping mode, either 'clip' or "
+                      "'transparent'", default=None)
+
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+    duration = options.duration
+    start = options.start
+    n_channels = options.n_channels
+    order = options.order
+    preload = options.preload
+    show_options = options.show_options
+    proj_in = options.proj_in
+    eve_in = options.eve_in
+    maxshield = options.maxshield
+    highpass = options.highpass
+    lowpass = options.lowpass
+    filtorder = options.filtorder
+    clipping = options.clipping
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(1)
+
+    raw = mne.io.Raw(raw_in, preload=preload, allow_maxshield=maxshield)
+    if len(proj_in) > 0:
+        projs = mne.read_proj(proj_in)
+        raw.info['projs'] = projs
+    if len(eve_in) > 0:
+        events = mne.read_events(eve_in)
+    else:
+        events = None
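+    # A corner frequency of -1 (the CLI default) disables the display filter:
+    # negative values (or a non-positive --filtorder) map to None below, and
+    # the filter order itself falls back to 4.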
+    highpass = None if highpass < 0 or filtorder <= 0 else highpass
+    lowpass = None if lowpass < 0 or filtorder <= 0 else lowpass
+    filtorder = 4 if filtorder <= 0 else filtorder
+    raw.plot(duration=duration, start=start, n_channels=n_channels,
+             order=order, show_options=show_options, events=events,
+             highpass=highpass, lowpass=lowpass, filtorder=filtorder,
+             clipping=clipping)
+    plt.show(block=True)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_bti2fiff.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_bti2fiff.py
new file mode 100644
index 0000000..98ccd05
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_bti2fiff.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Import BTi / 4D MagnesWH3600 data to fif file.
+
+example usage: mne bti2fiff --pdf C,rfDC -o my_raw.fif
+
+Notes:
+1) Currently, direct inclusion of reference channel weights is not
+supported. Please use \'mne_create_comp_data\' to include the weights,
+or use the low-level functions from this module to include them
+yourself.
+2) The informed guess for the 4D name is E31 for the ECG channel and
+E63, E64 for the EOG channels. Please check and adjust if those channels
+are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
+appear in the channel names of the raw object.
+"""
+
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Yuval Harpaz <yuvharpaz at gmail.com>
+#
+#          simplified bsd-3 license
+
+
+import sys
+
+from mne.io import read_raw_bti
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option('-p', '--pdf', dest='pdf_fname',
+                      help='Input data file name', metavar='FILE')
+    parser.add_option('-c', '--config', dest='config_fname',
+                      help='Input config file name', metavar='FILE',
+                      default='config')
+    parser.add_option('--head_shape', dest='head_shape_fname',
+                      help='Headshape file name', metavar='FILE',
+                      default='hs_file')
+    parser.add_option('-o', '--out_fname', dest='out_fname',
+                      help='Name of the resulting fiff file',
+                      default='as_data_fname')
+    parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
+                      help='Compensatory rotation about Neuromag x axis, deg',
+                      default=2.0)
+    parser.add_option('-T', '--translation', dest='translation', type='str',
+                      help='Default translation, meter',
+                      default=(0.00, 0.02, 0.11))
+    parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
+                      help='4D ECG channel name',
+                      default='E31')
+    parser.add_option('--eog_ch', dest='eog_ch', type='str',
+                      help='4D EOG channel names',
+                      default='E63,E64')
+
+    options, args = parser.parse_args()
+
+    pdf_fname = options.pdf_fname
+    if pdf_fname is None:
+        parser.print_help()
+        sys.exit(1)
+
+    config_fname = options.config_fname
+    head_shape_fname = options.head_shape_fname
+    out_fname = options.out_fname
+    rotation_x = options.rotation_x
+    translation = options.translation
+    ecg_ch = options.ecg_ch
+    eog_ch = options.eog_ch.split(',')
+
+    if out_fname == 'as_data_fname':
+        out_fname = pdf_fname + '_raw.fif'
+
+    raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname,
+                       head_shape_fname=head_shape_fname,
+                       rotation_x=rotation_x, translation=translation,
+                       ecg_ch=ecg_ch, eog_ch=eog_ch)
+
+    raw.save(out_fname)
+    raw.close()
+    if is_main:
+        sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_clean_eog_ecg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_clean_eog_ecg.py
new file mode 100644
index 0000000..3aa9397
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_clean_eog_ecg.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+"""Clean a raw file from EOG and ECG artifacts with PCA (ie SSP)
+"""
+from __future__ import print_function
+
+# Authors : Dr Engr. Sheraz Khan,  P.Eng, Ph.D.
+#           Engr. Nandita Shetty,  MS.
+#           Alexandre Gramfort, Ph.D.
+
+
+import sys
+
+import mne
+
+
+def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
+                  ecg_proj_fname=None, eog_proj_fname=None,
+                  ecg_event_fname=None, eog_event_fname=None, in_path='.',
+                  quiet=False):
+    """Clean ECG from raw fif file
+
+    Parameters
+    ----------
+    in_fif_fname : str
+        Raw fif File
+    eog_event_fname : str
+        name of EOG event file required.
+    eog : bool
+        Reject or not EOG artifacts.
+    ecg : bool
+        Reject or not ECG artifacts.
+    ecg_event_fname : str
+        name of ECG event file required.
+    in_path : str
+        Path where all the files are.
+    """
+    if not eog and not ecg:
+        raise Exception("EOG and ECG cannot be both disabled")
+
+    # Reading fif File
+    raw_in = mne.io.Raw(in_fif_fname)
+
+    if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
+        prefix = in_fif_fname[:-8]
+    else:
+        prefix = in_fif_fname[:-4]
+
+    if out_fif_fname is None:
+        out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
+    if ecg_proj_fname is None:
+        ecg_proj_fname = prefix + '_ecg-proj.fif'
+    if eog_proj_fname is None:
+        eog_proj_fname = prefix + '_eog-proj.fif'
+    if ecg_event_fname is None:
+        ecg_event_fname = prefix + '_ecg-eve.fif'
+    if eog_event_fname is None:
+        eog_event_fname = prefix + '_eog-eve.fif'
+
+    print('Implementing ECG and EOG artifact rejection on data')
+
+    kwargs = dict() if quiet else dict(stdout=None, stderr=None)
+    if ecg:
+        ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw_in)
+        print("Writing ECG events in %s" % ecg_event_fname)
+        mne.write_events(ecg_event_fname, ecg_events)
+        print('Computing ECG projector')
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--events', ecg_event_fname, '--makeproj',
+                   '--projtmin', '-0.08', '--projtmax', '0.08',
+                   '--saveprojtag', '_ecg-proj', '--projnmag', '2',
+                   '--projngrad', '1', '--projevent', '999', '--highpass', '5',
+                   '--lowpass', '35', '--projmagrej', '4000',
+                   '--projgradrej', '3000')
+        mne.utils.run_subprocess(command, **kwargs)
+    if eog:
+        eog_events = mne.preprocessing.find_eog_events(raw_in)
+        print("Writing EOG events in %s" % eog_event_fname)
+        mne.write_events(eog_event_fname, eog_events)
+        print('Computing EOG projector')
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--events', eog_event_fname, '--makeproj',
+                   '--projtmin', '-0.15', '--projtmax', '0.15',
+                   '--saveprojtag', '_eog-proj', '--projnmag', '2',
+                   '--projngrad', '2', '--projevent', '998', '--lowpass', '35',
+                   '--projmagrej', '4000', '--projgradrej', '3000')
+        mne.utils.run_subprocess(command, **kwargs)
+
+    if out_fif_fname is not None:
+        # Applying the ECG EOG projector
+        print('Applying ECG EOG projector')
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--proj', in_fif_fname, '--projoff', '--save',
+                   out_fif_fname, '--filteroff',
+                   '--proj', ecg_proj_fname, '--proj', eog_proj_fname)
+        mne.utils.run_subprocess(command, **kwargs)
+        print('Done removing artifacts.')
+        print("Cleaned raw data saved in: %s" % out_fif_fname)
+        print('IMPORTANT : Please eye-ball the data !!')
+    else:
+        print('Projection not applied to raw data.')
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-i", "--in", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("-o", "--out", dest="raw_out",
+                      help="Output raw FIF file", metavar="FILE",
+                      default=None)
+    parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
+                      help="Remove EOG", default=True)
+    parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
+                      help="Remove ECG", default=True)
+    parser.add_option("-q", "--quiet", dest="quiet", action="store_true",
+                      help="Suppress mne_process_raw output", default=False)
+
+    options, args = parser.parse_args()
+
+    if options.raw_in is None:
+        parser.print_help()
+        sys.exit(1)
+
+    raw_in = options.raw_in
+    raw_out = options.raw_out
+    eog = options.eog
+    ecg = options.ecg
+    quiet = options.quiet
+
+    clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compare_fiff.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compare_fiff.py
new file mode 100644
index 0000000..bc8a223
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compare_fiff.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+"""Compare FIFF files
+
+You can do for example:
+
+$ mne compare_fiff test_raw.fif test_raw_sss.fif
+"""
+
+# Authors : Eric Larson, PhD
+
+import sys
+import mne
+
+
+def run():
+    parser = mne.commands.utils.get_optparser(
+        __file__, usage='mne compare_fiff <file_a> <file_b>')
+    options, args = parser.parse_args()
+    if len(args) != 2:
+        parser.print_help()
+        sys.exit(1)
+    mne.viz.compare_fiff(args[0], args[1])
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_ecg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_ecg.py
new file mode 100644
index 0000000..735a6db
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_ecg.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+"""Compute SSP/PCA projections for ECG artifacts
+
+You can do for example:
+
+$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" \
+                       --l-freq 1 --h-freq 100 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+"""
+from __future__ import print_function
+
+# Authors : Alexandre Gramfort, Ph.D.
+#           Martin Luessi, Ph.D.
+
+from mne.externals.six import string_types
+import os
+import sys
+import mne
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-i", "--in", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--tmin", dest="tmin", type="float",
+                      help="Time before event in seconds",
+                      default=-0.2)
+    parser.add_option("--tmax", dest="tmax", type="float",
+                      help="Time after event in seconds",
+                      default=0.4)
+    parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
+                      help="Number of SSP vectors for gradiometers",
+                      default=2)
+    parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
+                      help="Number of SSP vectors for magnetometers",
+                      default=2)
+    parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
+                      help="Number of SSP vectors for EEG",
+                      default=2)
+    parser.add_option("--l-freq", dest="l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz",
+                      default=1)
+    parser.add_option("--h-freq", dest="h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz",
+                      default=100)
+    parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz used "
+                      "for ECG event detection",
+                      default=5)
+    parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz used "
+                      "for ECG event detection",
+                      default=35)
+    parser.add_option("-p", "--preload", dest="preload",
+                      help="Temporary file used during computation "
+                      "(to save memory)",
+                      default=True)
+    parser.add_option("-a", "--average", dest="average", action="store_true",
+                      help="Compute SSP after averaging",
+                      default=False)
+    parser.add_option("--proj", dest="proj",
+                      help="Use SSP projections from a fif file.",
+                      default=None)
+    parser.add_option("--filtersize", dest="filter_length", type="int",
+                      help="Number of taps to use for filtering",
+                      default=2048)
+    parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
+                      help="Number of jobs to run in parallel",
+                      default=1)
+    parser.add_option("-c", "--channel", dest="ch_name",
+                      help="Channel to use for ECG detection "
+                      "(Required if no ECG found)",
+                      default=None)
+    parser.add_option("--rej-grad", dest="rej_grad", type="float",
+                      help="Gradiometers rejection parameter "
+                      "in fT/cm (peak to peak amplitude)",
+                      default=2000)
+    parser.add_option("--rej-mag", dest="rej_mag", type="float",
+                      help="Magnetometers rejection parameter "
+                      "in fT (peak to peak amplitude)",
+                      default=3000)
+    parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
+                      help="EEG rejection parameter in uV "
+                      "(peak to peak amplitude)",
+                      default=50)
+    parser.add_option("--rej-eog", dest="rej_eog", type="float",
+                      help="EOG rejection parameter in uV "
+                      "(peak to peak amplitude)",
+                      default=250)
+    parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
+                      help="Add EEG average reference proj",
+                      default=False)
+    parser.add_option("--no-proj", dest="no_proj", action="store_true",
+                      help="Exclude the SSP projectors currently "
+                      "in the fiff file",
+                      default=False)
+    parser.add_option("--bad", dest="bad_fname",
+                      help="Text file containing bad channels list "
+                      "(one per line)",
+                      default=None)
+    parser.add_option("--event-id", dest="event_id", type="int",
+                      help="ID to use for events",
+                      default=999)
+    parser.add_option("--event-raw", dest="raw_event_fname",
+                      help="raw file to use for event detection",
+                      default=None)
+    parser.add_option("--tstart", dest="tstart", type="float",
+                      help="Start artifact detection after tstart seconds",
+                      default=0.)
+    parser.add_option("--qrsthr", dest="qrs_threshold", type="string",
+                      help="QRS detection threshold. Between 0 and 1. Can "
+                      "also be 'auto' for automatic selection",
+                      default='auto')
+
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(1)
+
+    tmin = options.tmin
+    tmax = options.tmax
+    n_grad = options.n_grad
+    n_mag = options.n_mag
+    n_eeg = options.n_eeg
+    l_freq = options.l_freq
+    h_freq = options.h_freq
+    ecg_l_freq = options.ecg_l_freq
+    ecg_h_freq = options.ecg_h_freq
+    average = options.average
+    preload = options.preload
+    filter_length = options.filter_length
+    n_jobs = options.n_jobs
+    ch_name = options.ch_name
+    reject = dict(grad=1e-13 * float(options.rej_grad),
+                  mag=1e-15 * float(options.rej_mag),
+                  eeg=1e-6 * float(options.rej_eeg),
+                  eog=1e-6 * float(options.rej_eog))
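+    # The CLI takes rejection thresholds in fT/cm, fT, and uV for readability;
+    # the factors above (1e-13, 1e-15, 1e-6) convert them to the SI units
+    # (T/m, T, and V) used internally.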
+    avg_ref = options.avg_ref
+    no_proj = options.no_proj
+    bad_fname = options.bad_fname
+    event_id = options.event_id
+    proj_fname = options.proj
+    raw_event_fname = options.raw_event_fname
+    tstart = options.tstart
+    qrs_threshold = options.qrs_threshold
+    if qrs_threshold != 'auto':
+        try:
+            qrs_threshold = float(qrs_threshold)
+        except ValueError:
+            raise ValueError('qrsthr must be "auto" or a float')
+
+    if bad_fname is not None:
+        with open(bad_fname, 'r') as fid:
+            bads = [w.rstrip() for w in fid.readlines()]
+        print('Bad channels read : %s' % bads)
+    else:
+        bads = []
+
+    if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
+        prefix = raw_in[:-8]
+    else:
+        prefix = raw_in[:-4]
+
+    ecg_event_fname = prefix + '_ecg-eve.fif'
+
+    if average:
+        ecg_proj_fname = prefix + '_ecg_avg-proj.fif'
+    else:
+        ecg_proj_fname = prefix + '_ecg-proj.fif'
+
+    raw = mne.io.Raw(raw_in, preload=preload)
+
+    if raw_event_fname is not None:
+        raw_event = mne.io.Raw(raw_event_fname)
+    else:
+        raw_event = raw
+
+    flat = None  # XXX : not exposed to the user
+    cpe = mne.preprocessing.compute_proj_ecg
+    projs, events = cpe(raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
+                        l_freq, h_freq, average, filter_length, n_jobs,
+                        ch_name, reject, flat, bads, avg_ref, no_proj,
+                        event_id, ecg_l_freq, ecg_h_freq, tstart,
+                        qrs_threshold, copy=False)
+
+    raw.close()
+
+    if raw_event_fname is not None:
+        raw_event.close()
+
+    if proj_fname is not None:
+        print('Including SSP projections from : %s' % proj_fname)
+        # append the ecg projs, so they are last in the list
+        projs = mne.read_proj(proj_fname) + projs
+
+    if isinstance(preload, string_types) and os.path.exists(preload):
+        os.remove(preload)
+
+    print("Writing ECG projections in %s" % ecg_proj_fname)
+    mne.write_proj(ecg_proj_fname, projs)
+
+    print("Writing ECG events in %s" % ecg_event_fname)
+    mne.write_events(ecg_event_fname, events)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_eog.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_eog.py
new file mode 100644
index 0000000..e48740b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_compute_proj_eog.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+"""Compute SSP/PCA projections for EOG artifacts
+
+You can do for example:
+
+$ mne compute_proj_eog -i sample_audvis_raw.fif \
+                       --l-freq 1 --h-freq 35 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+
+or
+
+$ mne compute_proj_eog -i sample_audvis_raw.fif \
+                       --l-freq 1 --h-freq 35 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 \
+                       --proj sample_audvis_ecg-proj.fif
+
+to exclude ECG artifacts from projection computation.
+"""
+from __future__ import print_function
+
+# Authors : Alexandre Gramfort, Ph.D.
+#           Martin Luessi, Ph.D.
+
+from mne.externals.six import string_types
+import os
+import sys
+import mne
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-i", "--in", dest="raw_in",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("--tmin", dest="tmin", type="float",
+                      help="Time before event in seconds", default=-0.2)
+    parser.add_option("--tmax", dest="tmax", type="float",
+                      help="Time after event in seconds", default=0.2)
+    parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
+                      help="Number of SSP vectors for gradiometers",
+                      default=2)
+    parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
+                      help="Number of SSP vectors for magnetometers",
+                      default=2)
+    parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
+                      help="Number of SSP vectors for EEG", default=2)
+    parser.add_option("--l-freq", dest="l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz",
+                      default=1)
+    parser.add_option("--h-freq", dest="h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz",
+                      default=35)
+    parser.add_option("--eog-l-freq", dest="eog_l_freq", type="float",
+                      help="Filter low cut-off frequency in Hz used for "
+                      "EOG event detection", default=1)
+    parser.add_option("--eog-h-freq", dest="eog_h_freq", type="float",
+                      help="Filter high cut-off frequency in Hz used for "
+                      "EOG event detection", default=10)
+    parser.add_option("-p", "--preload", dest="preload",
+                      help="Temporary file used during computation (to "
+                      "save memory)", default=True)
+    parser.add_option("-a", "--average", dest="average", action="store_true",
+                      help="Compute SSP after averaging",
+                      default=False)
+    parser.add_option("--proj", dest="proj",
+                      help="Use SSP projections from a fif file.",
+                      default=None)
+    parser.add_option("--filtersize", dest="filter_length", type="int",
+                      help="Number of taps to use for filtering",
+                      default=2048)
+    parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
+                      help="Number of jobs to run in parallel", default=1)
+    parser.add_option("--rej-grad", dest="rej_grad", type="float",
+                      help="Gradiometers rejection parameter in fT/cm (peak "
+                      "to peak amplitude)", default=2000)
+    parser.add_option("--rej-mag", dest="rej_mag", type="float",
+                      help="Magnetometers rejection parameter in fT (peak to "
+                      "peak amplitude)", default=3000)
+    parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
+                      help="EEG rejection parameter in uV (peak to peak "
+                      "amplitude)", default=50)
+    parser.add_option("--rej-eog", dest="rej_eog", type="float",
+                      help="EOG rejection parameter in uV (peak to peak "
+                      "amplitude)", default=1e9)
+    parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
+                      help="Add EEG average reference proj",
+                      default=False)
+    parser.add_option("--no-proj", dest="no_proj", action="store_true",
+                      help="Exclude the SSP projectors currently in the "
+                      "fiff file",  default=False)
+    parser.add_option("--bad", dest="bad_fname",
+                      help="Text file containing bad channels list "
+                      "(one per line)", default=None)
+    parser.add_option("--event-id", dest="event_id", type="int",
+                      help="ID to use for events", default=998)
+    parser.add_option("--event-raw", dest="raw_event_fname",
+                      help="raw file to use for event detection", default=None)
+    parser.add_option("--tstart", dest="tstart", type="float",
+                      help="Start artifact detection after tstart seconds",
+                      default=0.)
+    parser.add_option("-c", "--channel", dest="ch_name", type="string",
+                      help="Custom EOG channel(s), comma separated",
+                      default=None)
+
+    options, args = parser.parse_args()
+
+    raw_in = options.raw_in
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(1)
+
+    tmin = options.tmin
+    tmax = options.tmax
+    n_grad = options.n_grad
+    n_mag = options.n_mag
+    n_eeg = options.n_eeg
+    l_freq = options.l_freq
+    h_freq = options.h_freq
+    eog_l_freq = options.eog_l_freq
+    eog_h_freq = options.eog_h_freq
+    average = options.average
+    preload = options.preload
+    filter_length = options.filter_length
+    n_jobs = options.n_jobs
+    reject = dict(grad=1e-13 * float(options.rej_grad),
+                  mag=1e-15 * float(options.rej_mag),
+                  eeg=1e-6 * float(options.rej_eeg),
+                  eog=1e-6 * float(options.rej_eog))
+    avg_ref = options.avg_ref
+    no_proj = options.no_proj
+    bad_fname = options.bad_fname
+    event_id = options.event_id
+    proj_fname = options.proj
+    raw_event_fname = options.raw_event_fname
+    tstart = options.tstart
+    ch_name = options.ch_name
+
+    if bad_fname is not None:
+        with open(bad_fname, 'r') as fid:
+            bads = [w.rstrip() for w in fid.readlines()]
+        print('Bad channels read : %s' % bads)
+    else:
+        bads = []
+
+    if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
+        prefix = raw_in[:-8]
+    else:
+        prefix = raw_in[:-4]
+
+    eog_event_fname = prefix + '_eog-eve.fif'
+
+    if average:
+        eog_proj_fname = prefix + '_eog_avg-proj.fif'
+    else:
+        eog_proj_fname = prefix + '_eog-proj.fif'
+
+    raw = mne.io.Raw(raw_in, preload=preload)
+
+    if raw_event_fname is not None:
+        raw_event = mne.io.Raw(raw_event_fname)
+    else:
+        raw_event = raw
+
+    flat = None  # XXX : not exposed to the user
+    projs, events = mne.preprocessing.compute_proj_eog(
+        raw=raw, raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad,
+        n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq,
+        average=average, filter_length=filter_length,
+        n_jobs=n_jobs, reject=reject, flat=flat, bads=bads,
+        avg_ref=avg_ref, no_proj=no_proj, event_id=event_id,
+        eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq,
+        tstart=tstart, ch_name=ch_name, copy=False)
+
+    raw.close()
+
+    if raw_event_fname is not None:
+        raw_event.close()
+
+    if proj_fname is not None:
+        print('Including SSP projections from : %s' % proj_fname)
+        # append the eog projs, so they are last in the list
+        projs = mne.read_proj(proj_fname) + projs
+
+    if isinstance(preload, string_types) and os.path.exists(preload):
+        os.remove(preload)
+
+    print("Writing EOG projections in %s" % eog_proj_fname)
+    mne.write_proj(eog_proj_fname, projs)
+
+    print("Writing EOG events in %s" % eog_event_fname)
+    mne.write_events(eog_event_fname, events)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_coreg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_coreg.py
new file mode 100644
index 0000000..42b58d8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_coreg.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Authors: Christian Brodbeck  <christianbrodbeck at nyu.edu>
+
+""" Open the coregistration GUI.
+
+example usage:  $ mne coreg
+
+"""
+
+import os
+import sys
+
+import mne
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+    options, args = parser.parse_args()
+
+    os.environ['ETS_TOOLKIT'] = 'qt4'
+    mne.gui.coregistration()
+    if is_main:
+        sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem.py
new file mode 100644
index 0000000..f46f0a2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+"""Create 3-Layers BEM model from Flash MRI images
+
+This program assumes that FreeSurfer and MNE are installed and
+sourced properly.
+
+This function extracts the BEM surfaces (outer skull, inner skull, and
+outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30
+degrees. The multiecho FLASH data are inputted in DICOM format.
+This function assumes that the Freesurfer segmentation of the subject
+has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+should be, as usual, in the subject's mri directory.
+
+Before running this script do the following:
+(unless the --noconvert option is specified)
+
+    1. Copy all of your FLASH images in a single directory <source> and
+       create a directory <dest> to hold the output of mne_organize_dicom
+    2. cd to <dest> and run
+       $ mne_organize_dicom <source>
+       to create an appropriate directory structure
+    3. Create symbolic links to make flash05 and flash30 point to the
+       appropriate series:
+       $ ln -s <FLASH 5 series dir> flash05
+       $ ln -s <FLASH 30 series dir> flash30
+    4. cd to the directory where flash05 and flash30 links are
+    5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately
+    6. Run this script
+
+Example usage:
+
+$ mne flash_bem --subject sample
+
+"""
+from __future__ import print_function
+
+# Authors: Lorenzo De Santis
+
+from mne.bem import convert_flash_mris, make_flash_bem
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name", default=None)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=None)
+    parser.add_option("-3", "--noflash30", dest="noflash30",
+                      action="store_true", default=False,
+                      help=("Skip the 30-degree flip angle data"),)
+    parser.add_option("-n", "--noconvert", dest="noconvert",
+                      action="store_true", default=False,
+                      help=("Assume that the Flash MRI images have already "
+                            "been converted to mgz files"))
+    parser.add_option("-u", "--unwarp", dest="unwarp",
+                      action="store_true", default=False,
+                      help=("Run grad_unwarp with -unwarp <type> option on "
+                            "each of the converted data sets"))
+    parser.add_option("-o", "--overwrite", dest="overwrite",
+                      action="store_true", default=False,
+                      help="Write over existing .surf files in bem folder")
+    parser.add_option("-v", "--view", dest="show", action="store_true",
+                      help="Show BEM model in 3D for visual inspection",
+                      default=False)
+
+    options, args = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    flash30 = not options.noflash30
+    convert = not options.noconvert
+    unwarp = options.unwarp
+    overwrite = options.overwrite
+    show = options.show
+
+    if options.subject is None:
+        parser.print_help()
+        raise RuntimeError('The subject argument must be set')
+
+    convert_flash_mris(subject=subject, subjects_dir=subjects_dir,
+                       flash30=flash30, convert=convert, unwarp=unwarp)
+    make_flash_bem(subject=subject, subjects_dir=subjects_dir,
+                   overwrite=overwrite, show=show)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem_model.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem_model.py
new file mode 100644
index 0000000..2cd6580
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_flash_bem_model.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+"""Create 3-Layers BEM model from Flash MRI images
+
+This function extracts the BEM surfaces (outer skull, inner skull, and
+outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30
+degrees. The multiecho FLASH data are inputted in NIFTI format.
+It was developed to work with Philips MRI data, but could probably be
+used for data from other scanners that have been converted to NIFTI format
+(e.g., using MRIcron's dcm2nii). However, it has been tested only for
+data from the Achieva scanner. This function assumes that the Freesurfer
+segmentation of the subject has been completed. In particular, the T1.mgz
+and brain.mgz MRI volumes should be, as usual, in the subject's mri
+directory.
+
+"""
+from __future__ import print_function
+
+# Authors:  Rey Rene Ramirez, Ph.D.   e-mail: rrramir at uw.edu
+#           Alexandre Gramfort, Ph.D.
+
+import sys
+import math
+import os
+
+import mne
+from mne.utils import deprecated
+
+
+@deprecated("This function is deprecated, use mne_flash_bem instead")
+def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
+    """Create 3-Layers BEM model from Flash MRI images
+
+    Parameters
+    ----------
+    subject : string
+        Subject name
+    subjects_dir : string
+        Directory containing subjects data (Freesurfer SUBJECTS_DIR)
+    flash05 : string
+        Full path of the NIFTI file for the
+        FLASH sequence with a spin angle of 5 degrees
+    flash30 : string
+        Full path of the NIFTI file for the
+        FLASH sequence with a spin angle of 30 degrees
+    show : bool
+        Show surfaces in 3D to visually inspect all three BEM
+        surfaces (recommended)
+
+    Notes
+    -----
+    This program assumes that both Freesurfer/FSL, and MNE,
+    including MNE's Matlab Toolbox, are installed properly.
+    For reference please read the MNE manual and wiki, and Freesurfer's wiki:
+    http://www.nmr.mgh.harvard.edu/meg/manuals/
+    http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/sofMNE.php
+    http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php
+    http://surfer.nmr.mgh.harvard.edu/
+    http://surfer.nmr.mgh.harvard.edu/fswiki
+
+    References:
+    B. Fischl, D. H. Salat, A. J. van der Kouwe, N. Makris, F. Segonne,
+    B. T. Quinn, and A. M. Dale, "Sequence-independent segmentation of magnetic
+    resonance images," Neuroimage, vol. 23 Suppl 1, pp. S69-84, 2004.
+    J. Jovicich, S. Czanner, D. Greve, E. Haley, A. van der Kouwe, R. Gollub,
+    D. Kennedy, F. Schmitt, G. Brown, J. Macfall, B. Fischl, and A. Dale,
+    "Reliability in multi-site structural MRI studies: effects of gradient
+    non-linearity correction on phantom and human data," Neuroimage,
+    vol. 30, pp. 436-43, 2006.
+    """
+    os.environ['SUBJECT'] = subject
+    os.chdir(os.path.join(subjects_dir, subject, "mri"))
+    if not os.path.exists('flash'):
+        os.mkdir("flash")
+    os.chdir("flash")
+    # flash_dir = os.getcwd()
+    if not os.path.exists('parameter_maps'):
+        os.mkdir("parameter_maps")
+    print("--- Converting Flash 5")
+    os.system('mri_convert -flip_angle %s -tr 25 %s mef05.mgz' %
+              (5 * math.pi / 180, flash05))
+    print("--- Converting Flash 30")
+    os.system('mri_convert -flip_angle %s -tr 25 %s mef30.mgz' %
+              (30 * math.pi / 180, flash30))
+    print("--- Running mne_flash_bem")
+    os.system('mne_flash_bem --noconvert')
+    os.chdir(os.path.join(subjects_dir, subject, 'bem'))
+    if not os.path.exists('flash'):
+        os.mkdir("flash")
+    os.chdir("flash")
+    print("[done]")
+
+    if show:
+        fnames = ['outer_skin.surf', 'outer_skull.surf', 'inner_skull.surf']
+        head_col = (0.95, 0.83, 0.83)  # light pink
+        skull_col = (0.91, 0.89, 0.67)
+        brain_col = (0.67, 0.89, 0.91)  # light blue
+        colors = [head_col, skull_col, brain_col]
+        from mayavi import mlab
+        mlab.clf()
+        for fname, c in zip(fnames, colors):
+            points, faces = mne.read_surface(fname)
+            mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                                 faces, color=c, opacity=0.3)
+        mlab.show()
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    subject = os.environ.get('SUBJECT')
+    subjects_dir = os.environ.get('SUBJECTS_DIR')
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name", default=subject)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=subjects_dir)
+    parser.add_option("-5", "--flash05", dest="flash05",
+                      help=("Path to FLASH sequence with a spin angle of 5 "
+                            "degrees in Nifti format"), metavar="FILE")
+    parser.add_option("-3", "--flash30", dest="flash30",
+                      help=("Path to FLASH sequence with a spin angle of 30 "
+                            "degrees in Nifti format"), metavar="FILE")
+    parser.add_option("-v", "--view", dest="show", action="store_true",
+                      help="Show BEM model in 3D for visual inspection",
+                      default=False)
+
+    options, args = parser.parse_args()
+
+    if options.flash05 is None or options.flash30 is None:
+        parser.print_help()
+        sys.exit(1)
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    flash05 = os.path.abspath(options.flash05)
+    flash30 = os.path.abspath(options.flash30)
+    show = options.show
+
+    make_flash_bem(subject, subjects_dir, flash05, flash30, show=show)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_freeview_bem_surfaces.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_freeview_bem_surfaces.py
new file mode 100644
index 0000000..16607e8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_freeview_bem_surfaces.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+"""View the 3-Layers BEM model using Freeview
+
+"""
+from __future__ import print_function
+
+# Authors:  Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+
+import sys
+import os
+import os.path as op
+
+from mne.utils import run_subprocess, get_subjects_dir
+
+
+def freeview_bem_surfaces(subject, subjects_dir, method):
+    """View 3-Layers BEM model with Freeview
+
+    Parameters
+    ----------
+    subject : string
+        Subject name
+    subjects_dir : string
+        Directory containing subjects data (Freesurfer SUBJECTS_DIR)
+    method : string
+        Can be 'flash' or 'watershed'.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    env = os.environ.copy()
+    env['SUBJECT'] = subject
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    if 'FREESURFER_HOME' not in env:
+        raise RuntimeError('The FreeSurfer environment needs to be set up.')
+
+    mri_dir = op.join(subjects_dir, subject, 'mri')
+    bem_dir = op.join(subjects_dir, subject, 'bem')
+    mri = op.join(mri_dir, 'T1.mgz')
+
+    if method == 'watershed':
+        bem_dir = op.join(bem_dir, 'watershed')
+        outer_skin = op.join(bem_dir, '%s_outer_skin_surface' % subject)
+        outer_skull = op.join(bem_dir, '%s_outer_skull_surface' % subject)
+        inner_skull = op.join(bem_dir, '%s_inner_skull_surface' % subject)
+    else:
+        if method == 'flash':
+            bem_dir = op.join(bem_dir, 'flash')
+        outer_skin = op.join(bem_dir, 'outer_skin.surf')
+        outer_skull = op.join(bem_dir, 'outer_skull.surf')
+        inner_skull = op.join(bem_dir, 'inner_skull.surf')
+
+    # put together the command
+    cmd = ['freeview']
+    cmd += ["--volume", mri]
+    cmd += ["--surface", "%s:color=red:edgecolor=red" % inner_skull]
+    cmd += ["--surface", "%s:color=yellow:edgecolor=yellow" % outer_skull]
+    cmd += ["--surface",
+            "%s:color=255,170,127:edgecolor=255,170,127" % outer_skin]
+
+    run_subprocess(cmd, env=env, stdout=sys.stdout)
+    print("[done]")
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    subject = os.environ.get('SUBJECT')
+    subjects_dir = get_subjects_dir()
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name", default=subject)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=subjects_dir)
+    parser.add_option("-m", "--method", dest="method",
+                      help=("Method used to generate the BEM model. "
+                            "Can be flash or watershed."), metavar="FILE")
+
+    options, args = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    method = options.method
+
+    freeview_bem_surfaces(subject, subjects_dir, method)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_kit2fiff.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_kit2fiff.py
new file mode 100644
index 0000000..c013deb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_kit2fiff.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# Authors: Teon Brooks  <teon.brooks at gmail.com>
+
+""" Import KIT / NYU data to fif file.
+
+example usage:  $ mne kit2fiff --input input.sqd --output output.fif
+Use without arguments to invoke GUI:  $ mne kit2fiff
+
+"""
+
+import os
+import sys
+
+import mne
+from mne.io import read_raw_kit
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option('--input', dest='input_fname',
+                      help='Input data file name', metavar='filename')
+    parser.add_option('--mrk', dest='mrk_fname',
+                      help='MEG Marker file name', metavar='filename')
+    parser.add_option('--elp', dest='elp_fname',
+                      help='Headshape points file name', metavar='filename')
+    parser.add_option('--hsp', dest='hsp_fname',
+                      help='Headshape file name', metavar='filename')
+    parser.add_option('--stim', dest='stim',
+                      help='Colon-separated stimulus trigger channels',
+                      metavar='chs')
+    parser.add_option('--slope', dest='slope', help='Slope direction',
+                      metavar='slope')
+    parser.add_option('--stimthresh', dest='stimthresh', default=1,
+                      help='Threshold value for trigger channels',
+                      metavar='value')
+    parser.add_option('--output', dest='out_fname',
+                      help='Name of the resulting fiff file',
+                      metavar='filename')
+
+    options, args = parser.parse_args()
+
+    input_fname = options.input_fname
+    if input_fname is None:
+        os.environ['ETS_TOOLKIT'] = 'qt4'
+        mne.gui.kit2fiff()
+        sys.exit(0)
+
+    hsp_fname = options.hsp_fname
+    elp_fname = options.elp_fname
+    mrk_fname = options.mrk_fname
+    stim = options.stim
+    slope = options.slope
+    stimthresh = options.stimthresh
+    out_fname = options.out_fname
+
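+    # '--stim' takes colon-separated channel numbers; e.g. a (hypothetical)
+    # '--stim 161:162' becomes [161, 162] here.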
+    if isinstance(stim, str):
+        stim = map(int, stim.split(':'))
+
+    raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname,
+                       hsp=hsp_fname, stim=stim, slope=slope,
+                       stimthresh=stimthresh)
+
+    raw.save(out_fname)
+    raw.close()
+    sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_make_scalp_surfaces.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_make_scalp_surfaces.py
new file mode 100644
index 0000000..af1bae7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_make_scalp_surfaces.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+#          simplified bsd-3 license
+
+"""
+Create high-resolution head surfaces for coordinate alignment.
+
+example usage: mne make_scalp_surfaces --overwrite --subject sample
+"""
+from __future__ import print_function
+
+import os
+import copy
+import os.path as op
+import sys
+import mne
+from mne.utils import run_subprocess, _TempDir, verbose, logger
+
+
+def _check_file(fname, overwrite):
+    """Helper to prevent overwrites"""
+    if op.isfile(fname) and not overwrite:
+        raise IOError('File %s exists, use --overwrite to overwrite it'
+                      % fname)
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+    subjects_dir = mne.get_config('SUBJECTS_DIR')
+
+    parser.add_option('-o', '--overwrite', dest='overwrite',
+                      action='store_true',
+                      help='Overwrite previously computed surface')
+    parser.add_option('-s', '--subject', dest='subject',
+                      help='The name of the subject', type='str')
+    parser.add_option('-f', '--force', dest='force', action='store_true',
+                      help='Force transformation of surface into bem.')
+    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
+                      help='Print the debug messages.')
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=subjects_dir)
+
+    options, args = parser.parse_args()
+
+    subject = vars(options).get('subject', os.getenv('SUBJECT'))
+    subjects_dir = options.subjects_dir
+    if subject is None or subjects_dir is None:
+        parser.print_help()
+        sys.exit(1)
+    _run(subjects_dir, subject, options.force, options.overwrite,
+         options.verbose)
+
+
+@verbose
+def _run(subjects_dir, subject, force, overwrite, verbose=None):
+    this_env = copy.copy(os.environ)
+    this_env['SUBJECTS_DIR'] = subjects_dir
+    this_env['SUBJECT'] = subject
+
+    if 'SUBJECTS_DIR' not in this_env:
+        raise RuntimeError('The environment variable SUBJECTS_DIR should '
+                           'be set')
+
+    if not op.isdir(subjects_dir):
+        raise RuntimeError('subjects directory %s not found, specify using '
+                           'the environment variable SUBJECTS_DIR or '
+                           'the command line option --subjects-dir'
+                           % subjects_dir)
+
+    if 'MNE_ROOT' not in this_env:
+        raise RuntimeError('MNE_ROOT environment variable is not set')
+
+    if 'FREESURFER_HOME' not in this_env:
+        raise RuntimeError('The FreeSurfer environment needs to be set up '
+                           'for this script')
+    force = '--force' if force else '--check'
+    subj_path = op.join(subjects_dir, subject)
+    if not op.exists(subj_path):
+        raise RuntimeError('%s does not exist. Please check your subject '
+                           'directory path.' % subj_path)
+
+    if op.exists(op.join(subj_path, 'mri', 'T1.mgz')):
+        mri = 'T1.mgz'
+    else:
+        mri = 'T1'
+
+    logger.info('1. Creating a dense scalp tessellation with mkheadsurf...')
+
+    def check_seghead(surf_path=op.join(subj_path, 'surf')):
+        for k in ['/lh.seghead', '/lh.smseghead']:
+            surf = surf_path + k if op.exists(surf_path + k) else None
+            if surf is not None:
+                break
+        return surf
+
+    my_seghead = check_seghead()
+    if my_seghead is None:
+        run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri],
+                       env=this_env)
+
+    surf = check_seghead()
+    if surf is None:
+        raise RuntimeError('mkheadsurf did not produce the standard output '
+                           'file.')
+
+    dense_fname = '{0}/{1}/bem/{1}-head-dense.fif'.format(subjects_dir,
+                                                          subject)
+    logger.info('2. Creating %s ...' % dense_fname)
+    _check_file(dense_fname, overwrite)
+    run_subprocess(['mne_surf2bem', '--surf', surf, '--id', '4', force,
+                    '--fif', dense_fname], env=this_env)
+    levels = 'medium', 'sparse'
+    my_surf = mne.read_bem_surfaces(dense_fname)[0]
+    tris = [30000, 2500]
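+    # Each decimation pass pairs a target triangle count with a level name:
+    # 30000 triangles for the 'medium' surface and 2500 for 'sparse'.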
+    if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true':
+        tris = [len(my_surf['tris'])]  # don't actually decimate
+    for ii, (n_tri, level) in enumerate(zip(tris, levels), 3):
+        logger.info('%i. Creating %s tessellation...' % (ii, level))
+        logger.info('%i.1 Decimating the dense tessellation...' % ii)
+        points, tris = mne.decimate_surface(points=my_surf['rr'],
+                                            triangles=my_surf['tris'],
+                                            n_triangles=n_tri)
+        other_fname = dense_fname.replace('dense', level)
+        logger.info('%i.2 Creating %s' % (ii, other_fname))
+        _check_file(other_fname, overwrite)
+        tempdir = _TempDir()
+        surf_fname = tempdir + '/tmp-surf.surf'
+        # convert points to meters, make mne_analyze happy
+        mne.write_surface(surf_fname, points * 1e3, tris)
+        # XXX for some reason --check does not work here.
+        try:
+            run_subprocess(['mne_surf2bem', '--surf', surf_fname, '--id', '4',
+                            '--force', '--fif', other_fname], env=this_env)
+        finally:
+            del tempdir
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_maxfilter.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_maxfilter.py
new file mode 100644
index 0000000..dd5607c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_maxfilter.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+""" Apply MaxFilter
+
+Example usage:
+
+$ mne maxfilter -i sample_audvis_raw.fif --st
+
+This will apply MaxFilter with the MaxSt extension. The origin used
+by MaxFilter is computed by mne-python by fitting a sphere to the
+headshape points.
+"""
+
+# Authors : Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+
+import sys
+import os
+import mne
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-i", "--in", dest="in_fname",
+                      help="Input raw FIF file", metavar="FILE")
+    parser.add_option("-o", dest="out_fname",
+                      help="Output FIF file (if not set, suffix '_sss' will "
+                      "be used)", metavar="FILE", default=None)
+    parser.add_option("--origin", dest="origin",
+                      help="Head origin in mm, or a filename to read the "
+                      "origin from. If not set it will be estimated from "
+                      "headshape points", default=None)
+    parser.add_option("--origin-out", dest="origin_out",
+                      help="Filename to use for computed origin", default=None)
+    parser.add_option("--frame", dest="frame", type="string",
+                      help="Coordinate frame for head center ('device' or "
+                      "'head')", default="device")
+    parser.add_option("--bad", dest="bad", type="string",
+                      help="List of static bad channels",
+                      default=None)
+    parser.add_option("--autobad", dest="autobad", type="string",
+                      help="Set automated bad channel detection ('on', 'off', "
+                      "'n')", default="off")
+    parser.add_option("--skip", dest="skip",
+                      help="Skips raw data sequences, time intervals pairs in "
+                      "sec, e.g.: 0 30 120 150", default=None)
+    parser.add_option("--force", dest="force", action="store_true",
+                      help="Ignore program warnings",
+                      default=False)
+    parser.add_option("--st", dest="st", action="store_true",
+                      help="Apply the time-domain MaxST extension",
+                      default=False)
+    parser.add_option("--buflen", dest="st_buflen", type="float",
+                      help="MaxSt buffer length in sec",
+                      default=16.0)
+    parser.add_option("--corr", dest="st_corr", type="float",
+                      help="MaxSt subspace correlation",
+                      default=0.96)
+    parser.add_option("--trans", dest="mv_trans",
+                      help="Transforms the data into the coil definitions of "
+                      "in_fname, or into the default frame", default=None)
+    parser.add_option("--movecomp", dest="mv_comp", action="store_true",
+                      help="Estimates and compensates head movements in "
+                      "continuous raw data", default=False)
+    parser.add_option("--headpos", dest="mv_headpos", action="store_true",
+                      help="Estimates and stores head position parameters, "
+                      "but does not compensate movements", default=False)
+    parser.add_option("--hp", dest="mv_hp", type="string",
+                      help="Stores head position data in an ascii file",
+                      default=None)
+    parser.add_option("--hpistep", dest="mv_hpistep", type="float",
+                      help="Sets head position update interval in ms",
+                      default=None)
+    parser.add_option("--hpisubt", dest="mv_hpisubt", type="string",
+                      help="Subtracts hpi signals: sine amplitudes, amp + "
+                      "baseline, or switch off", default=None)
+    parser.add_option("--nohpicons", dest="mv_hpicons", action="store_false",
+                      help="Do not check initial consistency isotrak vs "
+                      "hpifit", default=True)
+    parser.add_option("--linefreq", dest="linefreq", type="float",
+                      help="Sets the basic line interference frequency (50 or "
+                      "60 Hz)", default=None)
+    parser.add_option("--nooverwrite", dest="overwrite", action="store_false",
+                      help="Do not overwrite output file if it already exists",
+                      default=True)
+    parser.add_option("--args", dest="mx_args", type="string",
+                      help="Additional command line arguments to pass to "
+                      "MaxFilter", default="")
+
+    options, args = parser.parse_args()
+
+    in_fname = options.in_fname
+
+    if in_fname is None:
+        parser.print_help()
+        sys.exit(1)
+
+    out_fname = options.out_fname
+    origin = options.origin
+    origin_out = options.origin_out
+    frame = options.frame
+    bad = options.bad
+    autobad = options.autobad
+    skip = options.skip
+    force = options.force
+    st = options.st
+    st_buflen = options.st_buflen
+    st_corr = options.st_corr
+    mv_trans = options.mv_trans
+    mv_comp = options.mv_comp
+    mv_headpos = options.mv_headpos
+    mv_hp = options.mv_hp
+    mv_hpistep = options.mv_hpistep
+    mv_hpisubt = options.mv_hpisubt
+    mv_hpicons = options.mv_hpicons
+    linefreq = options.linefreq
+    overwrite = options.overwrite
+    mx_args = options.mx_args
+
+    if in_fname.endswith('_raw.fif') or in_fname.endswith('-raw.fif'):
+        prefix = in_fname[:-8]
+    else:
+        prefix = in_fname[:-4]
+
+    if out_fname is None:
+        if st:
+            out_fname = prefix + '_tsss.fif'
+        else:
+            out_fname = prefix + '_sss.fif'
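+    # e.g. an in_fname of 'sub01_raw.fif' (name invented for illustration)
+    # gives prefix 'sub01', so the output defaults to 'sub01_sss.fif'
+    # ('sub01_tsss.fif' when --st is given)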
+
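+    # --origin may also name a text file; in that case its first line is
+    # used as the origin specification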
+    if origin is not None and os.path.exists(origin):
+        with open(origin, 'r') as fid:
+            origin = fid.readlines()[0].strip()
+
+    origin = mne.preprocessing.apply_maxfilter(
+        in_fname, out_fname, origin, frame,
+        bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans,
+        mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons,
+        linefreq, mx_args, overwrite)
+
+    if origin_out is not None:
+        with open(origin_out, 'w') as fid:
+            fid.write(origin + '\n')
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_report.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_report.py
new file mode 100644
index 0000000..417730e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_report.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""Create mne report for a folder
+
+Example usage
+
+mne report -p MNE-sample-data/ -i \
+MNE-sample-data/MEG/sample/sample_audvis-ave.fif -d MNE-sample-data/subjects/ \
+-s sample
+
+"""
+
+import sys
+import time
+
+from mne.report import Report
+from mne.utils import verbose, logger
+
+
+@verbose
+def log_elapsed(t, verbose=None):
+    logger.info('Report complete in %s seconds' % round(t, 1))
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-p", "--path", dest="path",
+                      help="Path to folder who MNE-Report must be created")
+    parser.add_option("-i", "--info", dest="info_fname",
+                      help="File from which info dictionary is to be read",
+                      metavar="FILE")
+    parser.add_option("-c", "--cov", dest="cov_fname",
+                      help="File from which noise covariance is to be read",
+                      metavar="FILE")
+    parser.add_option("--bmin", dest="bmin",
+                      help="Time at which baseline correction starts for "
+                      "evokeds", default=None)
+    parser.add_option("--bmax", dest="bmax",
+                      help="Time at which baseline correction stops for "
+                      "evokeds", default=None)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="The subjects directory")
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="The subject name")
+    parser.add_option("-v", "--verbose", dest="verbose",
+                      action='store_true', help="run in verbose mode")
+    parser.add_option("--no-browser", dest="no_browser", action='store_false',
+                      help="Do not open MNE-Report in browser")
+    parser.add_option("--overwrite", dest="overwrite", action='store_false',
+                      help="Overwrite html report if it already exists")
+    parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to"
+                      " run in parallel")
+    parser.add_option("-m", "--mri-decim", type="int", dest="mri_decim",
+                      default=2, help="Integer factor used to decimate "
+                      "BEM plots")
+
+    options, args = parser.parse_args()
+    path = options.path
+    if path is None:
+        parser.print_help()
+        sys.exit(1)
+    info_fname = options.info_fname
+    cov_fname = options.cov_fname
+    subjects_dir = options.subjects_dir
+    subject = options.subject
+    mri_decim = int(options.mri_decim)
+    verbose = True if options.verbose is not None else False
+    open_browser = False if options.no_browser is not None else True
+    overwrite = True if options.overwrite is not None else False
+    n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1
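+    # the store_true/store_false options above default to None, so a non-None
+    # value means the flag was given on the command line; presence is mapped
+    # to the actual boolean setting here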
+
+    bmin = float(options.bmin) if options.bmin is not None else None
+    bmax = float(options.bmax) if options.bmax is not None else None
+    # XXX: this means (None, None) cannot be specified through command line
+    if bmin is None and bmax is None:
+        baseline = None
+    else:
+        baseline = (bmin, bmax)
+
+    t0 = time.time()
+    report = Report(info_fname, subjects_dir=subjects_dir,
+                    subject=subject, baseline=baseline,
+                    cov_fname=cov_fname, verbose=verbose)
+    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs,
+                        mri_decim=mri_decim)
+    log_elapsed(time.time() - t0, verbose=verbose)
+    report.save(open_browser=open_browser, overwrite=overwrite)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_surf2bem.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_surf2bem.py
new file mode 100644
index 0000000..dd822b0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_surf2bem.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""Convert surface to BEM FIF file
+
+Example usage
+
+mne surf2bem --surf ${SUBJECTS_DIR}/${SUBJECT}/surf/lh.seghead --fif \
+${SUBJECTS_DIR}/${SUBJECT}/bem/${SUBJECT}-head.fif --id=4
+
+"""
+from __future__ import print_function
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import sys
+
+import mne
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-s", "--surf", dest="surf",
+                      help="Surface in Freesurfer format", metavar="FILE")
+    parser.add_option("-f", "--fif", dest="fif",
+                      help="FIF file produced", metavar="FILE")
+    parser.add_option("-i", "--id", dest="id", default=4,
+                      help=("Surface Id (e.g. 4 sur head surface)"))
+
+    options, args = parser.parse_args()
+
+    if options.surf is None:
+        parser.print_help()
+        sys.exit(1)
+
+    print("Converting %s to BEM FIF file." % options.surf)
+    points, tris = mne.read_surface(options.surf)
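+    # read_surface returns vertex positions in mm while FIF surfaces are
+    # stored in meters, hence the scaling below; coord_frame=5 corresponds
+    # to FIFF.FIFFV_COORD_MRI (surface RAS)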
+    points *= 1e-3
+    surf = dict(coord_frame=5, id=int(options.id), nn=None, np=len(points),
+                ntri=len(tris), rr=points, sigma=1, tris=tris)
+    mne.write_bem_surface(options.fif, surf)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_watershed_bem.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_watershed_bem.py
new file mode 100644
index 0000000..8efe423
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/mne_watershed_bem.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Authors: Lorenzo De Santis
+"""
+
+    Create BEM surfaces using the watershed algorithm included with
+        FreeSurfer
+
+"""
+
+from __future__ import print_function
+import sys
+
+from mne.bem import make_watershed_bem
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name (required)", default=None)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=None)
+    parser.add_option("-o", "--overwrite", dest="overwrite",
+                      help="Write over existing files", action="store_true")
+    parser.add_option("-v", "--volume", dest="volume",
+                      help="Defaults to T1", default='T1')
+    parser.add_option("-a", "--atlas", dest="atlas",
+                      help="Specify the --atlas option for mri_watershed",
+                      default=False, action="store_true")
+    parser.add_option("-g", "--gcaatlas", dest="gcaatlas",
+                      help="Use the subcortical atlas", default=False,
+                      action="store_true")
+    parser.add_option("-p", "--preflood", dest="preflood",
+                      help="Change the preflood height", default=None)
+    parser.add_option("--verbose", dest="verbose",
+                      help="If not None, override default verbose level",
+                      default=None)
+
+    options, args = parser.parse_args()
+
+    if options.subject is None:
+        parser.print_help()
+        sys.exit(1)
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    overwrite = options.overwrite
+    volume = options.volume
+    atlas = options.atlas
+    gcaatlas = options.gcaatlas
+    preflood = options.preflood
+    verbose = options.verbose
+
+    make_watershed_bem(subject=subject, subjects_dir=subjects_dir,
+                       overwrite=overwrite, volume=volume, atlas=atlas,
+                       gcaatlas=gcaatlas, preflood=preflood, verbose=verbose)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/tests/test_commands.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/tests/test_commands.py
new file mode 100644
index 0000000..89574e1
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/tests/test_commands.py
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+import os
+from os import path as op
+import shutil
+import glob
+import warnings
+from nose.tools import assert_true, assert_raises
+
+from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg,
+                          mne_compute_proj_ecg, mne_compute_proj_eog,
+                          mne_coreg, mne_flash_bem_model, mne_kit2fiff,
+                          mne_make_scalp_surfaces, mne_maxfilter,
+                          mne_report, mne_surf2bem, mne_watershed_bem,
+                          mne_compare_fiff, mne_flash_bem)
+from mne.utils import (run_tests_if_main, _TempDir, requires_mne, requires_PIL,
+                       requires_mayavi, requires_tvtk, requires_freesurfer,
+                       ArgvSetter, slow_test, ultra_slow_test)
+from mne.io import Raw
+from mne.datasets import testing, sample
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+subjects_dir = op.join(testing.data_path(download=False), 'subjects')
+
+warnings.simplefilter('always')
+
+
+def check_usage(module, force_help=False):
+    """Helper to ensure we print usage"""
+    args = ('--help',) if force_help else ()
+    with ArgvSetter(args) as out:
+        try:
+            module.run()
+        except SystemExit:
+            pass
+        assert_true('Usage: ' in out.stdout.getvalue())
+
+
+@slow_test
+def test_browse_raw():
+    """Test mne browse_raw"""
+    check_usage(mne_browse_raw)
+
+
+def test_bti2fiff():
+    """Test mne bti2fiff"""
+    check_usage(mne_bti2fiff)
+
+
+def test_compare_fiff():
+    """Test mne compare_fiff"""
+    check_usage(mne_compare_fiff)
+
+
+@requires_mne
+def test_clean_eog_ecg():
+    """Test mne clean_eog_ecg"""
+    check_usage(mne_clean_eog_ecg)
+    tempdir = _TempDir()
+    raw = Raw([raw_fname, raw_fname, raw_fname])
+    raw.info['bads'] = ['MEG 2443']
+    use_fname = op.join(tempdir, op.basename(raw_fname))
+    raw.save(use_fname)
+    with ArgvSetter(('-i', use_fname, '--quiet')):
+        mne_clean_eog_ecg.run()
+    fnames = glob.glob(op.join(tempdir, '*proj.fif'))
+    assert_true(len(fnames) == 2)  # two projs
+    fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
+    assert_true(len(fnames) == 3)  # raw plus two projs
+
+
+@slow_test
+def test_compute_proj_ecg_eog():
+    """Test mne compute_proj_ecg/eog"""
+    for fun in (mne_compute_proj_ecg, mne_compute_proj_eog):
+        check_usage(fun)
+        tempdir = _TempDir()
+        use_fname = op.join(tempdir, op.basename(raw_fname))
+        bad_fname = op.join(tempdir, 'bads.txt')
+        with open(bad_fname, 'w') as fid:
+            fid.write('MEG 2443\n')
+        shutil.copyfile(raw_fname, use_fname)
+        with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname,
+                         '--rej-eeg', '150')):
+            fun.run()
+        fnames = glob.glob(op.join(tempdir, '*proj.fif'))
+        assert_true(len(fnames) == 1)
+        fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
+        assert_true(len(fnames) == 1)
+
+
+def test_coreg():
+    """Test mne coreg"""
+    assert_true(hasattr(mne_coreg, 'run'))
+
+
+def test_flash_bem_model():
+    """Test mne flash_bem_model"""
+    assert_true(hasattr(mne_flash_bem_model, 'run'))
+    check_usage(mne_flash_bem_model)
+
+
+def test_kit2fiff():
+    """Test mne kit2fiff"""
+    # Can't check plain usage (no args launches the GUI); force --help
+    check_usage(mne_kit2fiff, force_help=True)
+
+
+@requires_tvtk
+@requires_mne
+@testing.requires_testing_data
+def test_make_scalp_surfaces():
+    """Test mne make_scalp_surfaces"""
+    check_usage(mne_make_scalp_surfaces)
+    # Copy necessary files to avoid FreeSurfer call
+    tempdir = _TempDir()
+    surf_path = op.join(subjects_dir, 'sample', 'surf')
+    surf_path_new = op.join(tempdir, 'sample', 'surf')
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(surf_path_new)
+    os.mkdir(op.join(tempdir, 'sample', 'bem'))
+    shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new)
+
+    orig_fs = os.getenv('FREESURFER_HOME', None)
+    orig_mne = os.getenv('MNE_ROOT')
+    if orig_fs is not None:
+        del os.environ['FREESURFER_HOME']
+    cmd = ('-s', 'sample', '--subjects-dir', tempdir)
+    os.environ['_MNE_TESTING_SCALP'] = 'true'
+    try:
+        with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False):
+            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
+            os.environ['FREESURFER_HOME'] = tempdir  # don't need it
+            del os.environ['MNE_ROOT']
+            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
+            os.environ['MNE_ROOT'] = orig_mne
+            mne_make_scalp_surfaces.run()
+            assert_raises(IOError, mne_make_scalp_surfaces.run)  # no overwrite
+    finally:
+        if orig_fs is not None:
+            os.environ['FREESURFER_HOME'] = orig_fs
+        os.environ['MNE_ROOT'] = orig_mne
+        del os.environ['_MNE_TESTING_SCALP']
+
+
+def test_maxfilter():
+    """Test mne maxfilter"""
+    check_usage(mne_maxfilter)
+    with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60',
+                     '--trans', raw_fname)) as out:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            os.environ['_MNE_MAXFILTER_TEST'] = 'true'
+            try:
+                mne_maxfilter.run()
+            finally:
+                del os.environ['_MNE_MAXFILTER_TEST']
+        assert_true(len(w) == 1)
+        for check in ('maxfilter', '-trans', '-movecomp'):
+            assert_true(check in out.stdout.getvalue(), check)
+
+
+@slow_test
+@requires_mayavi
+@requires_PIL
+@testing.requires_testing_data
+def test_report():
+    """Test mne report"""
+    check_usage(mne_report)
+    tempdir = _TempDir()
+    use_fname = op.join(tempdir, op.basename(raw_fname))
+    shutil.copyfile(raw_fname, use_fname)
+    with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir,
+                     '-s', 'sample', '--no-browser', '-m', '30')):
+        mne_report.run()
+    fnames = glob.glob(op.join(tempdir, '*.html'))
+    assert_true(len(fnames) == 1)
+
+
+def test_surf2bem():
+    """Test mne surf2bem"""
+    check_usage(mne_surf2bem)
+
+
+@ultra_slow_test
+@requires_freesurfer
+@testing.requires_testing_data
+def test_watershed_bem():
+    """Test mne watershed bem"""
+    check_usage(mne_watershed_bem)
+    # Copy necessary files to tempdir
+    tempdir = _TempDir()
+    mridata_path = op.join(subjects_dir, 'sample', 'mri')
+    mridata_path_new = op.join(tempdir, 'sample', 'mri')
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(mridata_path_new)
+    if op.exists(op.join(mridata_path, 'T1')):
+        shutil.copytree(op.join(mridata_path, 'T1'), op.join(mridata_path_new,
+                        'T1'))
+    if op.exists(op.join(mridata_path, 'T1.mgz')):
+        shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
+                        op.join(mridata_path_new, 'T1.mgz'))
+
+    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-o'),
+                    disable_stdout=False, disable_stderr=False):
+        mne_watershed_bem.run()
+
+
+@slow_test
+@requires_mne
+@requires_freesurfer
+@sample.requires_sample_data
+def test_flash_bem():
+    """Test mne flash_bem"""
+    check_usage(mne_flash_bem, force_help=True)
+    # Using the sample dataset
+    subjects_dir = op.join(sample.data_path(download=False), 'subjects')
+    # Copy necessary files to tempdir
+    tempdir = _TempDir()
+    mridata_path = op.join(subjects_dir, 'sample', 'mri')
+    mridata_path_new = op.join(tempdir, 'sample', 'mri')
+    os.makedirs(op.join(mridata_path_new, 'flash'))
+    os.makedirs(op.join(tempdir, 'sample', 'bem'))
+    shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
+                    op.join(mridata_path_new, 'T1.mgz'))
+    shutil.copyfile(op.join(mridata_path, 'brain.mgz'),
+                    op.join(mridata_path_new, 'brain.mgz'))
+    # Copy the available mri/flash/mef*.mgz files from the dataset
+    files = glob.glob(op.join(mridata_path, 'flash', 'mef*.mgz'))
+    for infile in files:
+        shutil.copyfile(infile, op.join(mridata_path_new, 'flash',
+                                        op.basename(infile)))
+    # Test mne flash_bem with --noconvert option
+    # (since there are no DICOM Flash images in dataset)
+    currdir = os.getcwd()
+    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'),
+                    disable_stdout=False, disable_stderr=False):
+        mne_flash_bem.run()
+    os.chdir(currdir)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/utils.py
new file mode 100644
index 0000000..2957300
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/commands/utils.py
@@ -0,0 +1,45 @@
+"""Some utility functions for commands (e.g. for cmdline handling)
+"""
+
+# Authors: Yaroslav Halchenko <debian at onerussian.com>
+#
+# License: BSD (3-clause)
+
+import imp
+import os
+import re
+from optparse import OptionParser
+
+import mne
+
+
+def get_optparser(cmdpath, usage=None):
+    """Create OptionParser with cmd source specific settings (e.g. prog value)
+    """
+    command = os.path.basename(cmdpath)
+    if re.match('mne_(.*).pyc$', command):
+        command = command[4:-4]
+    elif re.match('mne_(.*).py$', command):
+        command = command[4:-3]
+
+    # Fetch description
+    if cmdpath.endswith('.pyc'):
+        mod = imp.load_compiled('__temp', cmdpath)
+    else:
+        mod = imp.load_source('__temp', cmdpath)
+    doc, description, epilog = mod.__doc__, None, None
+    if doc:
+        doc_lines = doc.split('\n')
+        description = doc_lines[0]
+        if len(doc_lines) > 1:
+            epilog = '\n'.join(doc_lines[1:])
+
+    # monkey patch OptionParser to not wrap epilog
+    OptionParser.format_epilog = lambda self, formatter: self.epilog
+    parser = OptionParser(prog="mne %s" % command,
+                          version=mne.__version__,
+                          description=description,
+                          epilog=epilog, usage=usage)
+
+    return parser
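+
+
+if __name__ == '__main__':
+    # Hedged self-test sketch (not part of the upstream module): build a
+    # parser for this very file and show the auto-derived program name and
+    # description taken from the module docstring.
+    parser = get_optparser(__file__)
+    parser.print_usage()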
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/__init__.py
new file mode 100644
index 0000000..1495fb9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/__init__.py
@@ -0,0 +1,6 @@
+""" Connectivity Analysis Tools
+"""
+
+from .utils import seed_target_indices
+from .spectral import spectral_connectivity
+from .effective import phase_slope_index
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/effective.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/effective.py
new file mode 100644
index 0000000..636661b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/effective.py
@@ -0,0 +1,162 @@
+# Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six.moves import zip
+import copy
+
+import numpy as np
+
+from ..utils import logger, verbose
+from .spectral import spectral_connectivity
+
+
+@verbose
+def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
+                      mode='multitaper', fmin=None, fmax=np.inf,
+                      tmin=None, tmax=None, mt_bandwidth=None,
+                      mt_adaptive=False, mt_low_bias=True,
+                      cwt_frequencies=None, cwt_n_cycles=7, block_size=1000,
+                      n_jobs=1, verbose=None):
+    """
+    Compute the Phase Slope Index (PSI) connectivity measure
+
+    The PSI is an effective connectivity measure, i.e., a measure which can
+    give an indication of the direction of the information flow (causality).
+    For two time series, the PSI between the first and the second time
+    series is computed as follows
+
+    indices = (np.array([0]), np.array([1]))
+    psi = phase_slope_index(data, indices=indices, ...)
+
+    A positive value means that time series 0 is ahead of time series 1 and
+    a negative value means the opposite.
+
+    The PSI is computed from the coherency (see spectral_connectivity);
+    details can be found in [1].
+
+    References
+    ----------
+    [1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
+    Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
+    pp. 1-4, Jun. 2008.
+
+    Parameters
+    ----------
+    data : array-like, shape=(n_epochs, n_signals, n_times)
+        Can also be a list/generator of arrays, shape (n_signals, n_times);
+        a list/generator of SourceEstimate; or Epochs.
+        The data from which to compute connectivity. Note that it is also
+        possible to combine multiple signals by providing a list of tuples,
+        e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
+        corresponds to 3 epochs, and arr_* could be an array with the same
+        number of time points as stc_*.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which to compute
+        connectivity. If None, all connections are computed.
+    sfreq : float
+        The sampling frequency.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper', 'fourier', or
+        'cwt_morlet'.
+    fmin : float | tuple of floats
+        The lower frequency of interest. Multiple bands are defined using
+        a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
+        If None the frequency corresponding to an epoch length of 5 cycles
+        is used.
+    fmax : float | tuple of floats
+        The upper frequency of interest. Multiple bands are defined using
+        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
+    tmin : float | None
+        Time to start connectivity estimation.
+    tmax : float | None
+        Time to end connectivity estimation.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'multitaper' mode.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD.
+        Only used in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    cwt_frequencies : array
+        Array of frequencies of interest. Only used in 'cwt_morlet' mode.
+    cwt_n_cycles: float | array of float
+        Number of cycles. Fixed number or one per frequency. Only used in
+        'cwt_morlet' mode.
+    block_size : int
+        How many connections to compute at once (higher numbers are faster
+        but require more memory).
+    n_jobs : int
+        How many epochs to process in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psi : array
+        Computed connectivity measure(s). The shape of each array is either
+        (n_signals, n_signals, n_bands) mode: 'multitaper' or 'fourier'
+        (n_signals, n_signals, n_bands, n_times) mode: 'cwt_morlet'
+        when "indices" is None, or
+        (n_con, n_bands) mode: 'multitaper' or 'fourier'
+        (n_con, n_bands, n_times) mode: 'cwt_morlet'
+        when "indices" is specified and "n_con = len(indices[0])".
+    freqs : array
+        Frequency points at which the connectivity was computed.
+    times : array
+        Time points for which the connectivity was computed.
+    n_epochs : int
+        Number of epochs used for computation.
+    n_tapers : int
+        The number of DPSS tapers used. Only defined in 'multitaper' mode.
+        Otherwise None is returned.
+    """
+    logger.info('Estimating phase slope index (PSI)')
+    # estimate the coherency
+    cohy, freqs_, times, n_epochs, n_tapers = spectral_connectivity(
+        data, method='cohy', indices=indices, sfreq=sfreq, mode=mode,
+        fmin=fmin, fmax=fmax, fskip=0, faverage=False, tmin=tmin, tmax=tmax,
+        mt_bandwidth=mt_bandwidth, mt_adaptive=mt_adaptive,
+        mt_low_bias=mt_low_bias, cwt_frequencies=cwt_frequencies,
+        cwt_n_cycles=cwt_n_cycles, block_size=block_size, n_jobs=n_jobs,
+        verbose=verbose)
+
+    logger.info('Computing PSI from estimated Coherency')
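+    # i.e., within each band, PSI ~ Im(sum_f conj(C(f)) * C(f + df)): the
+    # slope of the coherency phase across neighboring frequencies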
+    # compute PSI in the requested bands
+    if fmin is None:
+        fmin = -np.inf  # set it to -inf, so we can adjust it later
+
+    bands = list(zip(np.asarray((fmin,)).ravel(), np.asarray((fmax,)).ravel()))
+    n_bands = len(bands)
+
+    freq_dim = -2 if mode == 'cwt_morlet' else -1
+
+    # allocate space for output
+    out_shape = list(cohy.shape)
+    out_shape[freq_dim] = n_bands
+    psi = np.zeros(out_shape, dtype=np.float)
+
+    # allocate accumulator
+    acc_shape = copy.copy(out_shape)
+    acc_shape.pop(freq_dim)
+    acc = np.empty(acc_shape, dtype=np.complex128)
+
+    freqs = list()
+    idx_fi = [slice(None)] * cohy.ndim
+    idx_fj = [slice(None)] * cohy.ndim
+    for band_idx, band in enumerate(bands):
+        freq_idx = np.where((freqs_ > band[0]) & (freqs_ < band[1]))[0]
+        freqs.append(freqs_[freq_idx])
+
+        acc.fill(0.)
+        for fi, fj in zip(freq_idx, freq_idx[1:]):
+            idx_fi[freq_dim] = fi
+            idx_fj[freq_dim] = fj
+            acc += np.conj(cohy[idx_fi]) * cohy[idx_fj]
+
+        idx_fi[freq_dim] = band_idx
+        psi[idx_fi] = np.imag(acc)
+    logger.info('[PSI Estimation Done]')
+
+    return psi, freqs, times, n_epochs, n_tapers
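+
+
+if __name__ == '__main__':
+    # Hedged smoke-test sketch, not part of the upstream module (run it with
+    # "python -m mne.connectivity.effective" so the relative imports work).
+    # Synthetic data: 5 epochs, 2 signals, 2 s at 200 Hz; one connection
+    # 0 -> 1 in an invented 8-13 Hz band.
+    rng = np.random.RandomState(0)
+    data = rng.randn(5, 2, 400)
+    indices = (np.array([0]), np.array([1]))
+    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, indices=indices, sfreq=200., fmin=8., fmax=13.)
+    print('PSI shape: %s' % (psi.shape,))  # (1, 1): one connection, one band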
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/spectral.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/spectral.py
new file mode 100644
index 0000000..264d25a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/spectral.py
@@ -0,0 +1,1062 @@
+# Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six import string_types
+from warnings import warn
+from inspect import getargspec, getmembers
+
+import numpy as np
+from scipy.fftpack import fftfreq
+
+from .utils import check_indices
+from ..fixes import tril_indices, partial
+from ..parallel import parallel_func
+from ..source_estimate import _BaseSourceEstimate
+from .. import Epochs
+from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
+                                         _psd_from_mt, _csd_from_mt,
+                                         _psd_from_mt_adaptive)
+from ..time_frequency.tfr import morlet, cwt
+from ..utils import logger, verbose, _time_mask
+
+########################################################################
+# Various connectivity estimators
+
+
+class _AbstractConEstBase(object):
+    """Abstract base class for all connectivity estimators, specifies
+       the interface but doesn't do anything"""
+
+    def start_epoch(self):
+        raise RuntimeError('start_epoch method not implemented')
+
+    def accumulate(self, con_idx, csd_xy):
+        raise RuntimeError('accumulate method not implemented')
+
+    def combine(self, other):
+        raise RuntimeError('combine method not implemented')
+
+    def compute_con(self, con_idx, n_epochs):
+        raise RuntimeError('compute_con method not implemented')
+
+
+class _EpochMeanConEstBase(_AbstractConEstBase):
+    """Base class for methods that estimate connectivity as mean over epochs"""
+    def __init__(self, n_cons, n_freqs, n_times):
+        self.n_cons = n_cons
+        self.n_freqs = n_freqs
+        self.n_times = n_times
+
+        if n_times == 0:
+            self.csd_shape = (n_cons, n_freqs)
+        else:
+            self.csd_shape = (n_cons, n_freqs, n_times)
+
+        self.con_scores = None
+
+    def start_epoch(self):
+        """This method is called at the start of each epoch"""
+        pass  # for this type of con. method we don't do anything
+
+    def combine(self, other):
+        """Include con. accumated for some epochs in this estimate"""
+        self._acc += other._acc
+
+
+class _CohEstBase(_EpochMeanConEstBase):
+    """Base Estimator for Coherence, Coherency, Imag. Coherence"""
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_CohEstBase, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate space for accumulation of CSD
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate CSD for some connections"""
+        self._acc[con_idx] += csd_xy
+
+
+class _CohEst(_CohEstBase):
+    """Coherence Estimator"""
+    name = 'Coherence'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.abs(csd_mean) / np.sqrt(psd_xx * psd_yy)
+
+
+class _CohyEst(_CohEstBase):
+    """Coherency Estimator"""
+    name = 'Coherency'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape,
+                                       dtype=np.complex128)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = csd_mean / np.sqrt(psd_xx * psd_yy)
+
+
+class _ImCohEst(_CohEstBase):
+    """Imaginary Coherence Estimator"""
+    name = 'Imaginary Coherence'
+
+    def compute_con(self, con_idx, n_epochs, psd_xx, psd_yy):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        csd_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.imag(csd_mean) / np.sqrt(psd_xx * psd_yy)
+
+
+class _PLVEst(_EpochMeanConEstBase):
+    """PLV Estimator"""
+    name = 'PLV'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PLVEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate accumulator
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        self._acc[con_idx] += csd_xy / np.abs(csd_xy)
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        plv = np.abs(self._acc / n_epochs)
+        self.con_scores[con_idx] = plv
+
+
+class _PLIEst(_EpochMeanConEstBase):
+    """PLI Estimator"""
+    name = 'PLI'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PLIEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # allocate accumulator
+        self._acc = np.zeros(self.csd_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        self._acc[con_idx] += np.sign(np.imag(csd_xy))
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        pli_mean = self._acc[con_idx] / n_epochs
+        self.con_scores[con_idx] = np.abs(pli_mean)
+
+
+class _PLIUnbiasedEst(_PLIEst):
+    """Unbiased PLI Square Estimator"""
+    name = 'Unbiased PLI Square'
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        pli_mean = self._acc[con_idx] / n_epochs
+
+        # See Vinck paper Eq. (30)
+        con = (n_epochs * pli_mean ** 2 - 1) / (n_epochs - 1)
+
+        self.con_scores[con_idx] = con
+
+
+class _WPLIEst(_EpochMeanConEstBase):
+    """WPLI Estimator"""
+    name = 'WPLI'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_WPLIEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # store both imag(csd) and abs(imag(csd))
+        acc_shape = (2,) + self.csd_shape
+        self._acc = np.zeros(acc_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        im_csd = np.imag(csd_xy)
+        self._acc[0, con_idx] += im_csd
+        self._acc[1, con_idx] += np.abs(im_csd)
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        num = np.abs(self._acc[0, con_idx])
+        denom = self._acc[1, con_idx]
+
+        # handle zeros in denominator
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+
+        con = num / denom
+
+        # where we had zeros in denominator, we set con to zero
+        con[z_denom] = 0.
+
+        self.con_scores[con_idx] = con
+
+
+class _WPLIDebiasedEst(_EpochMeanConEstBase):
+    """Debiased WPLI Square Estimator"""
+    name = 'Debiased WPLI Square'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_WPLIDebiasedEst, self).__init__(n_cons, n_freqs, n_times)
+        # store imag(csd), abs(imag(csd)), imag(csd)^2
+        acc_shape = (3,) + self.csd_shape
+        self._acc = np.zeros(acc_shape)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        im_csd = np.imag(csd_xy)
+        self._acc[0, con_idx] += im_csd
+        self._acc[1, con_idx] += np.abs(im_csd)
+        self._acc[2, con_idx] += im_csd ** 2
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        # note: we use the trick from fieldtrip to compute the
+        # estimate over all pairwise epoch combinations
+        sum_im_csd = self._acc[0, con_idx]
+        sum_abs_im_csd = self._acc[1, con_idx]
+        sum_sq_im_csd = self._acc[2, con_idx]
+
+        denom = sum_abs_im_csd ** 2 - sum_sq_im_csd
+
+        # handle zeros in denominator
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+
+        con = (sum_im_csd ** 2 - sum_sq_im_csd) / denom
+
+        # where we had zeros in denominator, we set con to zero
+        con[z_denom] = 0.
+
+        self.con_scores[con_idx] = con
+
+
+class _PPCEst(_EpochMeanConEstBase):
+    """Pairwise Phase Consistency (PPC) Estimator"""
+    name = 'PPC'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_PPCEst, self).__init__(n_cons, n_freqs, n_times)
+
+        # store csd / abs(csd)
+        self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate some connections"""
+        denom = np.abs(csd_xy)
+        z_denom = np.where(denom == 0.)
+        denom[z_denom] = 1.
+        this_acc = csd_xy / denom
+        this_acc[z_denom] = 0.  # handle division by zero
+
+        self._acc[con_idx] += this_acc
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final con. score for some connections"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+
+        # note: we use the trick from fieldtrip to compute the
+        # estimate over all pairwise epoch combinations
+        con = ((self._acc[con_idx] * np.conj(self._acc[con_idx]) - n_epochs) /
+               (n_epochs * (n_epochs - 1.)))
+
+        self.con_scores[con_idx] = np.real(con)
+
+
+###############################################################################
+def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
+                                 mode, window_fun, eigvals, wavelets,
+                                 freq_mask, mt_adaptive, idx_map, block_size,
+                                 psd, accumulate_psd, con_method_types,
+                                 con_methods, n_signals, n_times,
+                                 accumulate_inplace=True):
+    """Connectivity estimation for one epoch see spectral_connectivity"""
+
+    n_cons = len(idx_map[0])
+
+    if wavelets is not None:
+        n_times_spectrum = n_times
+        n_freqs = len(wavelets)
+    else:
+        n_times_spectrum = 0
+        n_freqs = np.sum(freq_mask)
+
+    if not accumulate_inplace:
+        # instantiate methods only for this epoch (used in parallel mode)
+        con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
+                       for mtype in con_method_types]
+
+    if len(sig_idx) == n_signals:
+        # we use all signals: use a slice for faster indexing
+        sig_idx = slice(None, None)
+
+    # compute tapered spectra
+    if mode in ['multitaper', 'fourier']:
+        x_mt = list()
+        this_psd = list()
+        sig_pos_start = 0
+        for this_data in data:
+            this_n_sig = this_data.shape[0]
+            sig_pos_end = sig_pos_start + this_n_sig
+            if not isinstance(sig_idx, slice):
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
+                                       (sig_idx < sig_pos_end)] - sig_pos_start
+            else:
+                this_sig_idx = sig_idx
+            if isinstance(this_data, _BaseSourceEstimate):
+                _mt_spectra_partial = partial(_mt_spectra, dpss=window_fun,
+                                              sfreq=sfreq)
+                this_x_mt = this_data.transform_data(
+                    _mt_spectra_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
+                    tmax_idx=tmax_idx)
+            else:
+                this_x_mt, _ = _mt_spectra(this_data[this_sig_idx,
+                                                     tmin_idx:tmax_idx],
+                                           window_fun, sfreq)
+
+            if mt_adaptive:
+                # compute PSD and adaptive weights
+                _this_psd, weights = _psd_from_mt_adaptive(
+                    this_x_mt, eigvals, freq_mask, return_weights=True)
+
+                # only keep freqs of interest
+                this_x_mt = this_x_mt[:, :, freq_mask]
+            else:
+                # do not use adaptive weights
+                this_x_mt = this_x_mt[:, :, freq_mask]
+                if mode == 'multitaper':
+                    weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+                else:
+                    # hack so we can sum over axis=-2
+                    weights = np.array([1.])[:, None, None]
+
+                if accumulate_psd:
+                    _this_psd = _psd_from_mt(this_x_mt, weights)
+
+            x_mt.append(this_x_mt)
+            if accumulate_psd:
+                this_psd.append(_this_psd)
+
+            # advance position (inside the loop, as in the cwt branch below)
+            sig_pos_start = sig_pos_end
+
+        x_mt = np.concatenate(x_mt, axis=0)
+        if accumulate_psd:
+            this_psd = np.concatenate(this_psd, axis=0)
+
+    elif mode == 'cwt_morlet':
+        # estimate spectra using CWT
+        x_cwt = list()
+        this_psd = list()
+        sig_pos_start = 0
+        for this_data in data:
+            this_n_sig = this_data.shape[0]
+            sig_pos_end = sig_pos_start + this_n_sig
+            if not isinstance(sig_idx, slice):
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
+                                       (sig_idx < sig_pos_end)] - sig_pos_start
+            else:
+                this_sig_idx = sig_idx
+            if isinstance(this_data, _BaseSourceEstimate):
+                cwt_partial = partial(cwt, Ws=wavelets, use_fft=True,
+                                      mode='same')
+                this_x_cwt = this_data.transform_data(
+                    cwt_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
+                    tmax_idx=tmax_idx)
+            else:
+                this_x_cwt = cwt(this_data[this_sig_idx, tmin_idx:tmax_idx],
+                                 wavelets, use_fft=True, mode='same')
+
+            if accumulate_psd:
+                this_psd.append((this_x_cwt * this_x_cwt.conj()).real)
+
+            x_cwt.append(this_x_cwt)
+
+            # advance position
+            sig_pos_start = sig_pos_end
+
+        x_cwt = np.concatenate(x_cwt, axis=0)
+        if accumulate_psd:
+            this_psd = np.concatenate(this_psd, axis=0)
+    else:
+        raise RuntimeError('invalid mode')
+
+    # accumulate or return psd
+    if accumulate_psd:
+        if accumulate_inplace:
+            psd += this_psd
+        else:
+            psd = this_psd
+    else:
+        psd = None
+
+    # tell the methods that a new epoch starts
+    for method in con_methods:
+        method.start_epoch()
+
+    # accumulate connectivity scores
+    if mode in ['multitaper', 'fourier']:
+        for i in range(0, n_cons, block_size):
+            con_idx = slice(i, i + block_size)
+            if mt_adaptive:
+                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
+                                   x_mt[idx_map[1][con_idx]],
+                                   weights[idx_map[0][con_idx]],
+                                   weights[idx_map[1][con_idx]])
+            else:
+                csd = _csd_from_mt(x_mt[idx_map[0][con_idx]],
+                                   x_mt[idx_map[1][con_idx]],
+                                   weights, weights)
+
+            for method in con_methods:
+                method.accumulate(con_idx, csd)
+    else:
+        # cwt_morlet mode
+        for i in range(0, n_cons, block_size):
+            con_idx = slice(i, i + block_size)
+
+            csd = x_cwt[idx_map[0][con_idx]] * \
+                np.conjugate(x_cwt[idx_map[1][con_idx]])
+            for method in con_methods:
+                method.accumulate(con_idx, csd)
+
+    return con_methods, psd
+
+
+def _get_n_epochs(epochs, n):
+    """Generator that returns lists with at most n epochs"""
+    epochs_out = []
+    for e in epochs:
+        if not isinstance(e, (list, tuple)):
+            e = (e,)
+        epochs_out.append(e)
+        if len(epochs_out) >= n:
+            yield epochs_out
+            epochs_out = []
+    yield epochs_out
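+
+# For illustration: list(_get_n_epochs(range(5), 2)) yields
+# [[(0,), (1,)], [(2,), (3,)], [(4,)]], and a trailing empty list is yielded
+# when the number of epochs is an exact multiple of n.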
+
+
+def _check_method(method):
+    """Test if a method implements the required interface"""
+    interface_members = [m[0] for m in getmembers(_AbstractConEstBase)
+                         if not m[0].startswith('_')]
+    method_members = [m[0] for m in getmembers(method)
+                      if not m[0].startswith('_')]
+
+    for member in interface_members:
+        if member not in method_members:
+            return False, member
+    return True, None
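+
+
+# A hedged sketch (not part of the upstream module) of what the duck-typed
+# interface checked above buys us: a custom estimator passed to
+# spectral_connectivity(method=[...]) only needs the four public methods of
+# _AbstractConEstBase. The class name and measure are invented for
+# illustration; the plumbing comes from _EpochMeanConEstBase.
+class _DemoAbsCSDEst(_EpochMeanConEstBase):
+    """Toy estimator: epoch-mean magnitude of the CSD (illustration only)"""
+    name = 'Demo |CSD|'
+
+    def __init__(self, n_cons, n_freqs, n_times):
+        super(_DemoAbsCSDEst, self).__init__(n_cons, n_freqs, n_times)
+        self._acc = np.zeros(self.csd_shape)  # accumulates |CSD| over epochs
+
+    def accumulate(self, con_idx, csd_xy):
+        """Accumulate |CSD| for some connections"""
+        self._acc[con_idx] += np.abs(csd_xy)
+
+    def compute_con(self, con_idx, n_epochs):
+        """Compute final scores as the epoch mean of |CSD|"""
+        if self.con_scores is None:
+            self.con_scores = np.zeros(self.csd_shape)
+        self.con_scores[con_idx] = self._acc[con_idx] / n_epochs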
+
+
+def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None):
+    """Helper function to get and/or verify the data sizes and time scales"""
+    if not isinstance(data, (list, tuple)):
+        raise ValueError('data has to be a list or tuple')
+    n_signals_tot = 0
+    for this_data in data:
+        this_n_signals, this_n_times = this_data.shape
+        if n_times is not None:
+            if this_n_times != n_times:
+                raise ValueError('all input time series must have the same '
+                                 'number of time points')
+        else:
+            n_times = this_n_times
+        n_signals_tot += this_n_signals
+
+        if hasattr(this_data, 'times'):
+            this_times = this_data.times
+            if times is not None:
+                if np.any(times != this_times):
+                    warn('time scales of input time series do not match')
+            else:
+                times = this_times
+
+    if n_signals is not None:
+        if n_signals != n_signals_tot:
+            raise ValueError('the number of time series has to be the same in '
+                             'each epoch')
+    n_signals = n_signals_tot
+
+    return n_signals, n_times, times
+
+
+# map names to estimator types
+_CON_METHOD_MAP = {'coh': _CohEst, 'cohy': _CohyEst, 'imcoh': _ImCohEst,
+                   'plv': _PLVEst, 'ppc': _PPCEst, 'pli': _PLIEst,
+                   'pli2_unbiased': _PLIUnbiasedEst, 'wpli': _WPLIEst,
+                   'wpli2_debiased': _WPLIDebiasedEst}
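+
+# For example, method='coh' resolves to _CohEst above, and a list such as
+# method=['coh', 'pli'] computes several measures in a single pass over the
+# epochs.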
+
+
+@verbose
+def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
+                          mode='multitaper', fmin=None, fmax=np.inf,
+                          fskip=0, faverage=False, tmin=None, tmax=None,
+                          mt_bandwidth=None, mt_adaptive=False,
+                          mt_low_bias=True, cwt_frequencies=None,
+                          cwt_n_cycles=7, block_size=1000, n_jobs=1,
+                          verbose=None):
+    """Compute frequency-domain and time-frequency domain connectivity measures
+
+    The connectivity method(s) are specified using the "method" parameter.
+    All methods are based on estimates of the cross- and power spectral
+    densities (CSD/PSD) Sxy and Sxx, Syy.
+
+    The spectral densities can be estimated using a multitaper method with
+    discrete prolate spheroidal sequence (DPSS) windows, a discrete Fourier
+    transform with Hanning windows, or a continuous wavelet transform using
+    Morlet wavelets. The spectral estimation mode is specified using the
+    "mode" parameter.
+
+    By default, the connectivity between all signals is computed (only
+    connections corresponding to the lower-triangular part of the
+    connectivity matrix). If one is only interested in the connectivity
+    between some signals, the "indices" parameter can be used. For example,
+    to compute the connectivity between the signal with index 0 and signals
+    "2, 3, 4" (a total of 3 connections) one can use the following::
+
+        indices = (np.array([0, 0, 0]),    # row indices
+                   np.array([2, 3, 4]))    # col indices
+
+        con_flat = spectral_connectivity(data, method='coh',
+                                         indices=indices, ...)
+
+    In this case con_flat.shape = (3, n_freqs). The connectivity scores are
+    in the same order as the defined indices.
+
+    **Supported Connectivity Measures**
+
+    The connectivity method(s) is specified using the "method" parameter. The
+    following methods are supported (note: ``E[]`` denotes average over
+    epochs). Multiple measures can be computed at once by using a list/tuple,
+    e.g., ``['coh', 'pli']`` to compute coherence and PLI.
+
+        'coh' : Coherence given by::
+
+                     | E[Sxy] |
+            C = ---------------------
+                sqrt(E[Sxx] * E[Syy])
+
+        'cohy' : Coherency given by::
+
+                       E[Sxy]
+            C = ---------------------
+                sqrt(E[Sxx] * E[Syy])
+
+        'imcoh' : Imaginary coherence [1]_ given by::
+
+                      Im(E[Sxy])
+            C = ----------------------
+                sqrt(E[Sxx] * E[Syy])
+
+        'plv' : Phase-Locking Value (PLV) [2]_ given by::
+
+            PLV = |E[Sxy/|Sxy|]|
+
+        'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator
+        of squared PLV [3]_.
+
+        'pli' : Phase Lag Index (PLI) [4]_ given by::
+
+            PLI = |E[sign(Im(Sxy))]|
+
+        'pli2_unbiased' : Unbiased estimator of squared PLI [5]_.
+
+        'wpli' : Weighted Phase Lag Index (WPLI) [5]_ given by::
+
+                      |E[Im(Sxy)]|
+            WPLI = ------------------
+                      E[|Im(Sxy)|]
+
+        'wpli2_debiased' : Debiased estimator of squared WPLI [5]_.
+
+
+    References
+    ----------
+
+    .. [1] Nolte et al. "Identifying true brain interaction from EEG data using
+           the imaginary part of coherency" Clinical neurophysiology, vol. 115,
+           no. 10, pp. 2292-2307, Oct. 2004.
+
+    .. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human
+           brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
+
+    .. [3] Vinck et al. "The pairwise phase consistency: a bias-free measure of
+           rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
+           pp. 112-122, May 2010.
+
+    .. [4] Stam et al. "Phase lag index: assessment of functional connectivity
+           from multi channel EEG and MEG with diminished bias from common
+           sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
+           Nov. 2007.
+
+    .. [5] Vinck et al. "An improved index of phase-synchronization for
+           electro-physiological data in the presence of volume-conduction,
+           noise and sample-size bias" NeuroImage, vol. 55, no. 4,
+           pp. 1548-1565, Apr. 2011.
+
+    Parameters
+    ----------
+    data : array-like, shape=(n_epochs, n_signals, n_times) | Epochs
+        The data from which to compute connectivity. Note that it is also
+        possible to combine multiple signals by providing a list of tuples,
+        e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
+        corresponds to 3 epochs, and arr_* could be an array with the same
+        number of time points as stc_*. The array-like object can also
+        be a list/generator of arrays, shape (n_signals, n_times),
+        or a list/generator of SourceEstimate or VolSourceEstimate objects.
+    method : string | list of string
+        Connectivity measure(s) to compute.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which to compute
+        connectivity. If None, all connections are computed.
+    sfreq : float
+        The sampling frequency.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper', 'fourier', or
+        'cwt_morlet'.
+    fmin : float | tuple of floats
+        The lower frequency of interest. Multiple bands are defined using
+        a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
+        If None the frequency corresponding to an epoch length of 5 cycles
+        is used.
+    fmax : float | tuple of floats
+        The upper frequency of interest. Multiple bands are defined using
+        a tuple, e.g. (13., 30.) for two bands with 13Hz and 30Hz upper freq.
+    fskip : int
+        Omit every "(fskip + 1)-th" frequency bin to decimate in frequency
+        domain.
+    faverage : boolean
+        Average connectivity scores for each frequency band. If True,
+        the output freqs will be a list with arrays of the frequencies
+        that were averaged.
+    tmin : float | None
+        Time to start connectivity estimation. Note: when "data" is an array,
+        the first sample is assumed to be at time 0. For other types
+        (Epochs, etc.), the time information contained in the object is used
+        to compute the time indices.
+    tmax : float | None
+        Time to end connectivity estimation. Note: when "data" is an array,
+        the first sample is assumed to be at time 0. For other types
+        (Epochs, etc.), the time information contained in the object is used
+        to compute the time indices.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'multitaper' mode.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD.
+        Only used in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    cwt_frequencies : array
+        Array of frequencies of interest. Only used in 'cwt_morlet' mode.
+    cwt_n_cycles: float | array of float
+        Number of cycles. Fixed number or one per frequency. Only used in
+        'cwt_morlet' mode.
+    block_size : int
+        How many connections to compute at once (higher numbers are faster
+        but require more memory).
+    n_jobs : int
+        How many epochs to process in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    con : array | list of arrays
+        Computed connectivity measure(s). The shape of each array is either
+        (n_signals, n_signals, n_frequencies) mode: 'multitaper' or 'fourier'
+        (n_signals, n_signals, n_frequencies, n_times) mode: 'cwt_morlet'
+        when "indices" is None, or
+        (n_con, n_frequencies) mode: 'multitaper' or 'fourier'
+        (n_con, n_frequencies, n_times) mode: 'cwt_morlet'
+        when "indices" is specified and "n_con = len(indices[0])".
+    freqs : array
+        Frequency points at which the connectivity was computed.
+    times : array
+        Time points for which the connectivity was computed.
+    n_epochs : int
+        Number of epochs used for computation.
+    n_tapers : int
+        The number of DPSS tapers used. Only defined in 'multitaper' mode.
+        Otherwise None is returned.
+    """
+    if n_jobs > 1:
+        parallel, my_epoch_spectral_connectivity, _ = \
+            parallel_func(_epoch_spectral_connectivity, n_jobs,
+                          verbose=verbose)
+
+    # format fmin and fmax and check inputs
+    if fmin is None:
+        fmin = -np.inf  # set it to -inf, so we can adjust it later
+
+    fmin = np.asarray((fmin,)).ravel()
+    fmax = np.asarray((fmax,)).ravel()
+    if len(fmin) != len(fmax):
+        raise ValueError('fmin and fmax must have the same length')
+    if np.any(fmin > fmax):
+        raise ValueError('fmax must be larger than fmin')
+
+    n_bands = len(fmin)
+
+    # assign names to connectivity methods
+    if not isinstance(method, (list, tuple)):
+        method = [method]  # make it a list so we can iterate over it
+
+    n_methods = len(method)
+    con_method_types = []
+    for m in method:
+        if m in _CON_METHOD_MAP:
+            method = _CON_METHOD_MAP[m]
+            con_method_types.append(method)
+        elif isinstance(m, string_types):
+            raise ValueError('%s is not a valid connectivity method' % m)
+        else:
+            # add custom method
+            method_valid, msg = _check_method(m)
+            if not method_valid:
+                raise ValueError('The supplied connectivity method does '
+                                 'not have the method %s' % msg)
+            con_method_types.append(m)
+
+    # determine how many arguments each compute_con function needs
+    n_comp_args = [len(getargspec(mtype.compute_con).args)
+                   for mtype in con_method_types]
+
+    # we only support 3 or 5 arguments
+    if any(n not in (3, 5) for n in n_comp_args):
+        raise ValueError('The compute_con function needs to have either '
+                         '3 or 5 arguments')
+
+    # only accumulate the PSD if at least one compute_con function needs it
+    accumulate_psd = any(n == 5 for n in n_comp_args)
+
+    if isinstance(data, Epochs):
+        times_in = data.times  # input times for Epochs input type
+        sfreq = data.info['sfreq']
+
+    # loop over data; it could be a generator that returns
+    # (n_signals x n_times) arrays or SourceEstimates
+    epoch_idx = 0
+    logger.info('Connectivity computation...')
+    for epoch_block in _get_n_epochs(data, n_jobs):
+
+        if epoch_idx == 0:
+            # initialize everything
+            first_epoch = epoch_block[0]
+
+            # get the data size and time scale
+            n_signals, n_times_in, times_in = \
+                _get_and_verify_data_sizes(first_epoch)
+
+            if times_in is None:
+                # we are not using Epochs or SourceEstimate(s) as input
+                times_in = np.linspace(0.0, n_times_in / sfreq, n_times_in,
+                                       endpoint=False)
+
+            n_times_in = len(times_in)
+            mask = _time_mask(times_in, tmin, tmax)
+            tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]]
+            tmax_idx += 1
+            tmin_true = times_in[tmin_idx]
+            tmax_true = times_in[tmax_idx - 1]  # time of last point used
+
+            times = times_in[tmin_idx:tmax_idx]
+            n_times = len(times)
+
+            if indices is None:
+                # only compute connectivity for the lower-triangular region
+                indices_use = tril_indices(n_signals, -1)
+            else:
+                indices_use = check_indices(indices)
+
+            # number of connectivities to compute
+            n_cons = len(indices_use[0])
+
+            logger.info('    computing connectivity for %d connections'
+                        % n_cons)
+
+            logger.info('    using t=%0.3fs..%0.3fs for estimation (%d points)'
+                        % (tmin_true, tmax_true, n_times))
+
+            # get frequencies of interest for the different modes
+            if mode in ['multitaper', 'fourier']:
+                # fmin fmax etc is only supported for these modes
+                # decide which frequencies to keep
+                freqs_all = fftfreq(n_times, 1. / sfreq)
+                freqs_all = freqs_all[freqs_all >= 0]
+            elif mode == 'cwt_morlet':
+                # cwt_morlet mode
+                if cwt_frequencies is None:
+                    raise ValueError('define frequencies of interest using '
+                                     'cwt_frequencies')
+                else:
+                    cwt_frequencies = cwt_frequencies.astype(np.float)
+                if any(cwt_frequencies > (sfreq / 2.)):
+                    raise ValueError('entries in cwt_frequencies cannot be '
+                                     'larger than Nyquist (sfreq / 2)')
+                freqs_all = cwt_frequencies
+            else:
+                raise ValueError('mode has an invalid value')
+
+            # check that fmin corresponds to at least 5 cycles
+            five_cycle_freq = 5. * sfreq / float(n_times)
+
+            if len(fmin) == 1 and fmin[0] == -np.inf:
+                # we use the 5 cycle freq. as default
+                fmin = [five_cycle_freq]
+            else:
+                if any(fmin < five_cycle_freq):
+                    warn('fmin corresponds to less than 5 cycles, '
+                         'spectrum estimate will be unreliable')
+
+            # create a frequency mask for all bands
+            freq_mask = np.zeros(len(freqs_all), dtype=np.bool)
+            for f_lower, f_upper in zip(fmin, fmax):
+                freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper))
+
+            # possibly skip frequency points
+            for pos in range(fskip):
+                freq_mask[pos + 1::fskip + 1] = False
+
+            # the frequency points where we compute connectivity
+            freqs = freqs_all[freq_mask]
+            n_freqs = len(freqs)
+
+            # get the freq. indices and points for each band
+            freq_idx_bands = [np.where((freqs >= fl) & (freqs <= fu))[0]
+                              for fl, fu in zip(fmin, fmax)]
+            freqs_bands = [freqs[freq_idx] for freq_idx in freq_idx_bands]
+
+            # make sure we don't have empty bands
+            for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
+                if n_f_band == 0:
+                    raise ValueError('There are no frequency points between '
+                                     '%0.1fHz and %0.1fHz. Change the band '
+                                     'specification (fmin, fmax) or the '
+                                     'frequency resolution.'
+                                     % (fmin[i], fmax[i]))
+
+            if n_bands == 1:
+                logger.info('    frequencies: %0.1fHz..%0.1fHz (%d points)'
+                            % (freqs_bands[0][0], freqs_bands[0][-1],
+                               n_freqs))
+            else:
+                logger.info('    computing connectivity for the bands:')
+                for i, bfreqs in enumerate(freqs_bands):
+                    logger.info('     band %d: %0.1fHz..%0.1fHz '
+                                '(%d points)' % (i + 1, bfreqs[0],
+                                                 bfreqs[-1], len(bfreqs)))
+
+            if faverage:
+                logger.info('    connectivity scores will be averaged for '
+                            'each band')
+
+            # get the window function, wavelets, etc for different modes
+            if mode == 'multitaper':
+                # compute standardized half-bandwidth
+                if mt_bandwidth is not None:
+                    half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+                else:
+                    half_nbw = 4
+
+                # compute dpss windows
+                n_tapers_max = int(2 * half_nbw)
+                window_fun, eigvals = dpss_windows(n_times, half_nbw,
+                                                   n_tapers_max,
+                                                   low_bias=mt_low_bias)
+                n_tapers = len(eigvals)
+                logger.info('    using multitaper spectrum estimation with '
+                            '%d DPSS windows' % n_tapers)
+
+                if mt_adaptive and len(eigvals) < 3:
+                    warn('Not adaptively combining the spectral estimators '
+                         'due to a low number of tapers.')
+                    mt_adaptive = False
+
+                n_times_spectrum = 0  # this method only uses the freq. domain
+                wavelets = None
+            elif mode == 'fourier':
+                logger.info('    using FFT with a Hanning window to estimate '
+                            'spectra')
+
+                window_fun = np.hanning(n_times)
+                mt_adaptive = False
+                eigvals = 1.
+                n_tapers = None
+                n_times_spectrum = 0  # this method only uses the freq. domain
+                wavelets = None
+            elif mode == 'cwt_morlet':
+                logger.info('    using CWT with Morlet wavelets to estimate '
+                            'spectra')
+
+                # reformat cwt_n_cycles if we have removed some frequencies
+                # using fmin, fmax, fskip
+                cwt_n_cycles = np.asarray((cwt_n_cycles,)).ravel()
+                if len(cwt_n_cycles) > 1:
+                    if len(cwt_n_cycles) != len(cwt_frequencies):
+                        raise ValueError('cwt_n_cycles must be float or an '
+                                         'array with the same size as '
+                                         'cwt_frequencies')
+                    cwt_n_cycles = cwt_n_cycles[freq_mask]
+
+                # get the Morlet wavelets
+                wavelets = morlet(sfreq, freqs,
+                                  n_cycles=cwt_n_cycles, zero_mean=True)
+                eigvals = None
+                n_tapers = None
+                window_fun = None
+                n_times_spectrum = n_times
+            else:
+                raise ValueError('mode has an invalid value')
+
+            # unique signals for which we actually need to compute PSD etc.
+            sig_idx = np.unique(np.r_[indices_use[0], indices_use[1]])
+
+            # map indices to unique indices
+            idx_map = [np.searchsorted(sig_idx, ind) for ind in indices_use]
+
+            # allocate space to accumulate PSD
+            if accumulate_psd:
+                if n_times_spectrum == 0:
+                    psd_shape = (len(sig_idx), n_freqs)
+                else:
+                    psd_shape = (len(sig_idx), n_freqs, n_times_spectrum)
+                psd = np.zeros(psd_shape)
+            else:
+                psd = None
+
+            # create instances of the connectivity estimators
+            con_methods = [mtype(n_cons, n_freqs, n_times_spectrum)
+                           for mtype in con_method_types]
+
+            sep = ', '
+            metrics_str = sep.join([meth.name for meth in con_methods])
+            logger.info('    the following metrics will be computed: %s'
+                        % metrics_str)
+
+        # check dimensions and time scale
+        for this_epoch in epoch_block:
+            _get_and_verify_data_sizes(this_epoch, n_signals, n_times_in,
+                                       times_in)
+
+        if n_jobs == 1:
+            # no parallel processing
+            for this_epoch in epoch_block:
+                logger.info('    computing connectivity for epoch %d'
+                            % (epoch_idx + 1))
+
+                # con methods and psd are updated inplace
+                _epoch_spectral_connectivity(
+                    this_epoch, sig_idx, tmin_idx,
+                    tmax_idx, sfreq, mode, window_fun, eigvals, wavelets,
+                    freq_mask, mt_adaptive, idx_map, block_size, psd,
+                    accumulate_psd, con_method_types, con_methods,
+                    n_signals, n_times, accumulate_inplace=True)
+                epoch_idx += 1
+        else:
+            # process epochs in parallel
+            logger.info('    computing connectivity for epochs %d..%d'
+                        % (epoch_idx + 1, epoch_idx + len(epoch_block)))
+
+            out = parallel(my_epoch_spectral_connectivity(
+                this_epoch, sig_idx,
+                tmin_idx, tmax_idx, sfreq, mode, window_fun, eigvals,
+                wavelets, freq_mask, mt_adaptive, idx_map, block_size, psd,
+                accumulate_psd, con_method_types, None, n_signals, n_times,
+                accumulate_inplace=False) for this_epoch in epoch_block)
+
+            # do the accumulation
+            for this_out in out:
+                for method, parallel_method in zip(con_methods, this_out[0]):
+                    method.combine(parallel_method)
+                if accumulate_psd:
+                    psd += this_out[1]
+
+            epoch_idx += len(epoch_block)
+
+    # normalize
+    n_epochs = epoch_idx
+    if accumulate_psd:
+        psd /= n_epochs
+
+    # compute final connectivity scores
+    con = []
+    for method, n_args in zip(con_methods, n_comp_args):
+        if n_args == 3:
+            # compute all scores at once
+            method.compute_con(slice(0, n_cons), n_epochs)
+        else:
+            # compute scores block-wise to save memory
+            for i in range(0, n_cons, block_size):
+                con_idx = slice(i, i + block_size)
+                psd_xx = psd[idx_map[0][con_idx]]
+                psd_yy = psd[idx_map[1][con_idx]]
+                method.compute_con(con_idx, n_epochs, psd_xx, psd_yy)
+
+        # get the connectivity scores
+        this_con = method.con_scores
+
+        if this_con.shape[0] != n_cons:
+            raise ValueError('First dimension of connectivity scores must be '
+                             'the same as the number of connections')
+        if faverage:
+            if this_con.shape[1] != n_freqs:
+                raise ValueError('2nd dimension of connectivity scores must '
+                                 'be the same as the number of frequencies')
+            con_shape = (n_cons, n_bands) + this_con.shape[2:]
+            this_con_bands = np.empty(con_shape, dtype=this_con.dtype)
+            for band_idx in range(n_bands):
+                this_con_bands[:, band_idx] =\
+                    np.mean(this_con[:, freq_idx_bands[band_idx]], axis=1)
+            this_con = this_con_bands
+
+        con.append(this_con)
+
+    if indices is None:
+        # return all-to-all connectivity matrices
+        logger.info('    assembling connectivity matrix')
+        con_flat = con
+        con = []
+        for this_con_flat in con_flat:
+            this_con = np.zeros((n_signals, n_signals) +
+                                this_con_flat.shape[1:],
+                                dtype=this_con_flat.dtype)
+            this_con[indices_use] = this_con_flat
+            con.append(this_con)
+
+    logger.info('[Connectivity computation done]')
+
+    if n_methods == 1:
+        # for a single method return connectivity directly
+        con = con[0]
+
+    if faverage:
+        # for each band we return the frequencies that were averaged
+        freqs = freqs_bands
+
+    return con, freqs, times, n_epochs, n_tapers
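
For orientation, a minimal usage sketch on a plain array (editor's
illustration with hypothetical sizes; it mirrors the calls in the tests
further below in this commit):

    import numpy as np
    from mne.connectivity import spectral_connectivity

    sfreq = 100.
    data = np.random.randn(20, 4, 512)  # 20 epochs, 4 signals, 512 samples
    # all-to-all coherence, averaged over a single 8-13 Hz band
    con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
        data, method='coh', mode='multitaper', sfreq=sfreq,
        fmin=8., fmax=13., faverage=True)
    # with indices=None, con has shape (n_signals, n_signals, n_bands)
    # and only the lower triangle is filled
    print(con.shape)  # (4, 4, 1)
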
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_effective.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_effective.py
new file mode 100644
index 0000000..2615f53
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_effective.py
@@ -0,0 +1,40 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.connectivity import phase_slope_index
+
+
+def test_psi():
+    """Test Phase Slope Index (PSI) estimation"""
+    sfreq = 50.
+    n_signals = 3
+    n_epochs = 10
+    n_times = 500
+    rng = np.random.RandomState(42)
+    data = rng.randn(n_epochs, n_signals, n_times)
+
+    # simulate time shifts
+    for i in range(n_epochs):
+        data[i, 1, 10:] = data[i, 0, :-10]  # signal 0 is ahead
+        data[i, 2, :-10] = data[i, 0, 10:]  # signal 2 is ahead
+
+    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='fourier', sfreq=sfreq)
+    assert_true(psi[1, 0, 0] < 0)
+    assert_true(psi[2, 0, 0] > 0)
+
+    indices = (np.array([0]), np.array([1]))
+    psi_2, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='fourier', sfreq=sfreq, indices=indices)
+
+    # the measure is symmetric (sign flip)
+    assert_array_almost_equal(psi_2[0, 0], -psi[1, 0, 0])
+
+    cwt_freqs = np.arange(5., 20, 0.5)
+    psi_cwt, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='cwt_morlet', sfreq=sfreq, cwt_frequencies=cwt_freqs,
+        indices=indices)
+
+    assert_true(np.all(psi_cwt > 0))
+    assert_true(psi_cwt.shape[-1] == n_times)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_spectral.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_spectral.py
new file mode 100644
index 0000000..8678f5b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_spectral.py
@@ -0,0 +1,227 @@
+import os
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+from nose.plugins.skip import SkipTest
+import warnings
+
+from mne.fixes import tril_indices
+from mne.connectivity import spectral_connectivity
+from mne.connectivity.spectral import _CohEst
+
+from mne import SourceEstimate
+from mne.utils import run_tests_if_main, slow_test
+from mne.filter import band_pass_filter
+
+warnings.simplefilter('always')
+
+
+def _stc_gen(data, sfreq, tmin, combo=False):
+    """Simulate a SourceEstimate generator"""
+    vertices = [np.arange(data.shape[1]), np.empty(0)]
+    for d in data:
+        if not combo:
+            stc = SourceEstimate(data=d, vertices=vertices,
+                                 tmin=tmin, tstep=1 / float(sfreq))
+            yield stc
+        else:
+            # simulate a combination of array and source estimate
+            arr = d[0]
+            stc = SourceEstimate(data=d[1:], vertices=vertices,
+                                 tmin=tmin, tstep=1 / float(sfreq))
+            yield (arr, stc)
+
+
+@slow_test
+def test_spectral_connectivity():
+    """Test frequency-domain connectivity methods"""
+    # XXX For some reason on 14 Oct 2015 Travis started timing out on this
+    # test, so for a quick workaround we will skip it:
+    if os.getenv('TRAVIS', 'false') == 'true':
+        raise SkipTest('Travis is broken')
+    # Use a case known to have no spurious correlations (it would be bad if
+    # nosetests could randomly fail):
+    np.random.seed(0)
+
+    sfreq = 50.
+    n_signals = 3
+    n_epochs = 8
+    n_times = 256
+
+    tmin = 0.
+    tmax = (n_times - 1) / sfreq
+    data = np.random.randn(n_epochs, n_signals, n_times)
+    times_data = np.linspace(tmin, tmax, n_times)
+    # simulate connectivity from 5Hz..15Hz
+    fstart, fend = 5.0, 15.0
+    for i in range(n_epochs):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            data[i, 1, :] = band_pass_filter(data[i, 0, :],
+                                             sfreq, fstart, fend)
+        # add some noise, so the spectrum is not exactly zero
+        data[i, 1, :] += 1e-2 * np.random.randn(n_times)
+
+    # First we test some invalid parameters:
+    assert_raises(ValueError, spectral_connectivity, data, method='notamethod')
+    assert_raises(ValueError, spectral_connectivity, data,
+                  mode='notamode')
+
+    # test invalid fmin fmax settings
+    assert_raises(ValueError, spectral_connectivity, data, fmin=10,
+                  fmax=10 + 0.5 * (sfreq / float(n_times)))
+    assert_raises(ValueError, spectral_connectivity, data, fmin=10, fmax=5)
+    assert_raises(ValueError, spectral_connectivity, data, fmin=(0, 11),
+                  fmax=(5, 10))
+    assert_raises(ValueError, spectral_connectivity, data, fmin=(11,),
+                  fmax=(12, 15))
+
+    methods = ['coh', 'cohy', 'imcoh', ['plv', 'ppc', 'pli', 'pli2_unbiased',
+               'wpli', 'wpli2_debiased', 'coh']]
+
+    modes = ['multitaper', 'fourier', 'cwt_morlet']
+
+    # define some frequencies for cwt
+    cwt_frequencies = np.arange(3, 24.5, 1)
+
+    for mode in modes:
+        for method in methods:
+            if method == 'coh' and mode == 'multitaper':
+                # only check adaptive estimation for coh to reduce test time
+                check_adaptive = [False, True]
+            else:
+                check_adaptive = [False]
+
+            if method == 'coh' and mode == 'cwt_morlet':
+                # so we also test using an array for num cycles
+                cwt_n_cycles = 7. * np.ones(len(cwt_frequencies))
+            else:
+                cwt_n_cycles = 7.
+
+            for adaptive in check_adaptive:
+
+                if adaptive:
+                    mt_bandwidth = 1.
+                else:
+                    mt_bandwidth = None
+
+                con, freqs, times, n, _ = spectral_connectivity(
+                    data, method=method, mode=mode, indices=None, sfreq=sfreq,
+                    mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles)
+
+                assert_true(n == n_epochs)
+                assert_array_almost_equal(times_data, times)
+
+                if mode == 'multitaper':
+                    upper_t = 0.95
+                    lower_t = 0.5
+                else:
+                    # other estimates have higher variance
+                    upper_t = 0.8
+                    lower_t = 0.75
+
+                # test the simulated signal
+                if method == 'coh':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # we see something for zero-lag
+                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] > upper_t))
+
+                    if mode != 'cwt_morlet':
+                        idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                        assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
+                        assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
+                elif method == 'cohy':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # imaginary coh will be zero
+                    assert_true(np.all(np.imag(con[1, 0, idx[0]:idx[1]]) <
+                                lower_t))
+                    # we see something for zero-lag
+                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]]) >
+                                upper_t))
+
+                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                    if mode != 'cwt_morlet':
+                        assert_true(np.all(np.abs(con[1, 0, :idx[0]]) <
+                                    lower_t))
+                        assert_true(np.all(np.abs(con[1, 0, idx[1]:]) <
+                                    lower_t))
+                elif method == 'imcoh':
+                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
+                    # imaginary coh will be zero
+                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] < lower_t))
+                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
+                    assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
+                    assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
+
+                # compute same connections using indices and 2 jobs
+                indices = tril_indices(n_signals, -1)
+
+                if not isinstance(method, list):
+                    test_methods = (method, _CohEst)
+                else:
+                    test_methods = method
+
+                stc_data = _stc_gen(data, sfreq, tmin)
+                con2, freqs2, times2, n2, _ = spectral_connectivity(
+                    stc_data, method=test_methods, mode=mode, indices=indices,
+                    sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
+                    cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles, n_jobs=2)
+
+                assert_true(isinstance(con2, list))
+                assert_true(len(con2) == len(test_methods))
+
+                if method == 'coh':
+                    assert_array_almost_equal(con2[0], con2[1])
+
+                if not isinstance(method, list):
+                    con2 = con2[0]  # only keep the first method
+
+                    # we get the same result for the probed connections
+                    assert_array_almost_equal(freqs, freqs2)
+                    assert_array_almost_equal(con[indices], con2)
+                    assert_true(n == n2)
+                    assert_array_almost_equal(times_data, times2)
+                else:
+                    # we get the same result for the probed connections
+                    assert_true(len(con) == len(con2))
+                    for c, c2 in zip(con, con2):
+                        assert_array_almost_equal(freqs, freqs2)
+                        assert_array_almost_equal(c[indices], c2)
+                        assert_true(n == n2)
+                        assert_array_almost_equal(times_data, times2)
+
+                # compute same connections for two bands, fskip=1, and f. avg.
+                fmin = (5., 15.)
+                fmax = (15., 30.)
+                con3, freqs3, times3, n3, _ = spectral_connectivity(
+                    data, method=method, mode=mode, indices=indices,
+                    sfreq=sfreq, fmin=fmin, fmax=fmax, fskip=1, faverage=True,
+                    mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles)
+
+                assert_true(isinstance(freqs3, list))
+                assert_true(len(freqs3) == len(fmin))
+                for i in range(len(freqs3)):
+                    assert_true(np.all((freqs3[i] >= fmin[i]) &
+                                       (freqs3[i] <= fmax[i])))
+
+                # average con2 "manually" and we get the same result
+                if not isinstance(method, list):
+                    for i in range(len(freqs3)):
+                        freq_idx = np.searchsorted(freqs2, freqs3[i])
+                        con2_avg = np.mean(con2[:, freq_idx], axis=1)
+                        assert_array_almost_equal(con2_avg, con3[:, i])
+                else:
+                    for j in range(len(con2)):
+                        for i in range(len(freqs3)):
+                            freq_idx = np.searchsorted(freqs2, freqs3[i])
+                            con2_avg = np.mean(con2[j][:, freq_idx], axis=1)
+                            assert_array_almost_equal(con2_avg, con3[j][:, i])
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_utils.py
new file mode 100644
index 0000000..2736b1f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/tests/test_utils.py
@@ -0,0 +1,23 @@
+import numpy as np
+from nose.tools import assert_true
+
+from mne.connectivity import seed_target_indices
+
+
+def test_indices():
+    """Test connectivity indexing methods"""
+    n_seeds_test = [1, 3, 4]
+    n_targets_test = [2, 3, 200]
+    for n_seeds in n_seeds_test:
+        for n_targets in n_targets_test:
+            idx = np.random.permutation(np.arange(n_seeds + n_targets))
+            seeds = idx[:n_seeds]
+            targets = idx[n_seeds:]
+            indices = seed_target_indices(seeds, targets)
+            assert_true(len(indices) == 2)
+            assert_true(len(indices[0]) == len(indices[1]))
+            assert_true(len(indices[0]) == n_seeds * n_targets)
+            for seed in seeds:
+                assert_true(np.sum(indices[0] == seed) == n_targets)
+            for target in targets:
+                assert_true(np.sum(indices[1] == target) == n_seeds)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/utils.py
new file mode 100644
index 0000000..14025b4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/connectivity/utils.py
@@ -0,0 +1,45 @@
+# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+import numpy as np
+
+
+def check_indices(indices):
+    """Check indices parameter"""
+
+    if not isinstance(indices, tuple) or len(indices) != 2:
+        raise ValueError('indices must be a tuple of length 2')
+
+    if len(indices[0]) != len(indices[1]):
+        raise ValueError('Index arrays indices[0] and indices[1] must '
+                         'have the same length')
+
+    return indices
+
+
+def seed_target_indices(seeds, targets):
+    """Generate indices parameter for seed based connectivity analysis.
+
+    Parameters
+    ----------
+    seeds : array of int | int
+        Seed indices.
+    targets : array of int | int
+        Indices of signals for which to compute connectivity.
+
+    Returns
+    -------
+    indices : tuple of arrays
+        The indices parameter used for connectivity computation.
+    """
+    # make them arrays
+    seeds = np.asarray((seeds,)).ravel()
+    targets = np.asarray((targets,)).ravel()
+
+    n_seeds = len(seeds)
+    n_targets = len(targets)
+
+    indices = (np.concatenate([np.tile(i, n_targets) for i in seeds]),
+               np.tile(targets, n_seeds))
+
+    return indices
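
The index layout this helper produces, for reference (hypothetical values):

    from mne.connectivity import seed_target_indices

    # connectivity from seed signal 0 to target signals 2 and 3
    indices = seed_target_indices(0, [2, 3])
    # indices == (array([0, 0]), array([2, 3])), i.e. one (seed, target)
    # pair per connection; pass as spectral_connectivity(..., indices=indices)
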
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/coreg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/coreg.py
new file mode 100644
index 0000000..d3df150
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/coreg.py
@@ -0,0 +1,1088 @@
+"""Coregistration between different coordinate frames"""
+
+# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+from .externals.six.moves import configparser
+import fnmatch
+from glob import glob, iglob
+import os
+import stat
+import sys
+import re
+import shutil
+from warnings import warn
+
+import numpy as np
+from numpy import dot
+
+from .io.meas_info import read_fiducials, write_fiducials
+from .label import read_label, Label
+from .source_space import (add_source_space_distances, read_source_spaces,
+                           write_source_spaces)
+from .surface import read_surface, write_surface
+from .bem import read_bem_surfaces, write_bem_surfaces
+from .transforms import rotation, rotation3d, scaling, translation
+from .utils import get_config, get_subjects_dir, logger, pformat
+from functools import reduce
+from .externals.six.moves import zip
+
+
+# some path templates
+trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
+subject_dirname = os.path.join('{subjects_dir}', '{subject}')
+bem_dirname = os.path.join(subject_dirname, 'bem')
+surf_dirname = os.path.join(subject_dirname, 'surf')
+bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
+head_bem_fname = pformat(bem_fname, name='head')
+fid_fname = pformat(bem_fname, name='fiducials')
+fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
+src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
+
+
+def _make_writable(fname):
+    os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128)  # write
+
+
+def _make_writable_recursive(path):
+    """Recursively set writable"""
+    if sys.platform.startswith('win'):
+        return  # can't safely set perms
+    for root, dirs, files in os.walk(path, topdown=False):
+        for f in dirs + files:
+            _make_writable(os.path.join(root, f))
+
+
+def create_default_subject(mne_root=None, fs_home=None, update=False,
+                           subjects_dir=None):
+    """Create an average brain subject for subjects without structural MRI
+
+    Create a copy of fsaverage from the Freesurfer directory in subjects_dir
+    and add auxiliary files from the mne package.
+
+    Parameters
+    ----------
+    mne_root : None | str
+        The mne root directory (only needed if MNE_ROOT is not specified as
+        environment variable).
+    fs_home : None | str
+        The freesurfer home directory (only needed if FREESURFER_HOME is not
+        specified as environment variable).
+    update : bool
+        In cases where a copy of the fsaverage brain already exists in the
+        subjects_dir, this option allows copying only the files that do not
+        already exist in the fsaverage directory.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']) as destination for the new subject.
+
+    Notes
+    -----
+    When no structural MRI is available for a subject, an average brain can be
+    substituted. Freesurfer comes with such an average brain model, and MNE
+    comes with some auxiliary files which make coregistration easier.
+    :py:func:`create_default_subject` copies the relevant files from Freesurfer
+    into the current subjects_dir, and also adds the auxiliary files provided
+    by MNE.
+
+    The files provided by MNE are listed below and can be found under
+    ``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
+    section 7.19 Working with the average brain):
+
+    fsaverage_head.fif:
+        The approximate head surface triangulation for fsaverage.
+    fsaverage_inner_skull-bem.fif:
+        The approximate inner skull surface for fsaverage.
+    fsaverage-fiducials.fif:
+        The locations of the fiducial points (LPA, RPA, and nasion).
+    fsaverage-trans.fif:
+        Contains a default MEG-MRI coordinate transformation suitable for
+        fsaverage.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if fs_home is None:
+        fs_home = get_config('FREESURFER_HOME', fs_home)
+        if fs_home is None:
+            raise ValueError(
+                "FREESURFER_HOME environment variable not found. Please "
+                "specify the fs_home parameter in your call to "
+                "create_default_subject().")
+    if mne_root is None:
+        mne_root = get_config('MNE_ROOT', mne_root)
+        if mne_root is None:
+            raise ValueError("MNE_ROOT environment variable not found. Please "
+                             "specify the mne_root parameter in your call to "
+                             "create_default_subject().")
+
+    # make sure freesurfer files exist
+    fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
+    if not os.path.exists(fs_src):
+        raise IOError('fsaverage not found at %r. Is fs_home specified '
+                      'correctly?' % fs_src)
+    for name in ('label', 'mri', 'surf'):
+        dirname = os.path.join(fs_src, name)
+        if not os.path.isdir(dirname):
+            raise IOError("Freesurfer fsaverage seems to be incomplete: No "
+                          "directory named %s found in %s" % (name, fs_src))
+
+    # make sure destination does not already exist
+    dest = os.path.join(subjects_dir, 'fsaverage')
+    if dest == fs_src:
+        raise IOError(
+            "Your subjects_dir points to the freesurfer subjects_dir (%r). "
+            "The default subject can not be created in the freesurfer "
+            "installation directory; please specify a different "
+            "subjects_dir." % subjects_dir)
+    elif (not update) and os.path.exists(dest):
+        raise IOError(
+            "Can not create fsaverage because %r already exists in "
+            "subjects_dir %r. Delete or rename the existing fsaverage "
+            "subject folder." % ('fsaverage', subjects_dir))
+
+    # make sure mne files exist
+    mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
+                             'fsaverage', 'fsaverage-%s.fif')
+    mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
+    for name in mne_files:
+        fname = mne_fname % name
+        if not os.path.isfile(fname):
+            raise IOError("MNE fsaverage incomplete: %s file not found at "
+                          "%s" % (name, fname))
+
+    # copy fsaverage from freesurfer
+    logger.info("Copying fsaverage subject from freesurfer directory...")
+    if (not update) or not os.path.exists(dest):
+        shutil.copytree(fs_src, dest)
+        _make_writable_recursive(dest)
+
+    # add files from mne
+    dest_bem = os.path.join(dest, 'bem')
+    if not os.path.exists(dest_bem):
+        os.mkdir(dest_bem)
+    logger.info("Copying auxiliary fsaverage files from mne directory...")
+    dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
+    _make_writable_recursive(dest_bem)
+    for name in mne_files:
+        if not os.path.exists(dest_fname % name):
+            shutil.copy(mne_fname % name, dest_bem)
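
A hedged call sketch (assumes the FREESURFER_HOME and MNE_ROOT environment
variables are set; the subjects_dir path is hypothetical):

    from mne.coreg import create_default_subject

    # copy fsaverage plus the MNE auxiliary files into subjects_dir
    create_default_subject(subjects_dir='/data/subjects')
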
+
+
+def _decimate_points(pts, res=10):
+    """Decimate the number of points using a voxel grid
+
+    Create a voxel grid with a specified resolution and retain at most one
+    point per voxel. For each voxel, the point closest to its center is
+    retained.
+
+    Parameters
+    ----------
+    pts : array, shape (n_points, 3)
+        The points making up the head shape.
+    res : scalar
+        The resolution of the voxel space (side length of each voxel).
+
+    Returns
+    -------
+    pts : array, shape = (n_points, 3)
+        The decimated points.
+    """
+    from scipy.spatial.distance import cdist
+    pts = np.asarray(pts)
+
+    # find the bin edges for the voxel space
+    xmin, ymin, zmin = pts.min(0) - res / 2.
+    xmax, ymax, zmax = pts.max(0) + res
+    xax = np.arange(xmin, xmax, res)
+    yax = np.arange(ymin, ymax, res)
+    zax = np.arange(zmin, zmax, res)
+
+    # find voxels containing one or more point
+    H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
+
+    # for each voxel, select one point
+    X, Y, Z = pts.T
+    out = np.empty((np.sum(H > 0), 3))
+    for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
+        x = xax[xbin]
+        y = yax[ybin]
+        z = zax[zbin]
+        xi = np.logical_and(X >= x, X < x + res)
+        yi = np.logical_and(Y >= y, Y < y + res)
+        zi = np.logical_and(Z >= z, Z < z + res)
+        idx = np.logical_and(zi, np.logical_and(yi, xi))
+        ipts = pts[idx]
+
+        mid = np.array([x, y, z]) + res / 2.
+        dist = cdist(ipts, [mid])
+        i_min = np.argmin(dist)
+        ipt = ipts[i_min]
+        out[i] = ipt
+
+    return out
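
A small sanity-check sketch for the voxel-grid decimation (the function is
private, so this is illustrative only; the point cloud is made up):

    import numpy as np
    from mne.coreg import _decimate_points

    pts = np.random.uniform(0, 100, (1000, 3))  # dense head-shape cloud
    dec = _decimate_points(pts, res=10)
    # at most one point survives per 10 x 10 x 10 voxel
    assert len(dec) <= len(pts)
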
+
+
+def _trans_from_params(param_info, params):
+    """Convert transformation parameters into a transformation matrix
+
+    Parameters
+    ----------
+    param_info : tuple,  len = 3
+        Tuple describing the parameters in x (do_translate, do_rotate,
+        do_scale).
+    params : tuple
+        The transformation parameters.
+
+    Returns
+    -------
+    trans : array, shape = (4, 4)
+        Transformation matrix.
+    """
+    do_rotate, do_translate, do_scale = param_info
+    i = 0
+    trans = []
+
+    if do_rotate:
+        x, y, z = params[:3]
+        trans.append(rotation(x, y, z))
+        i += 3
+
+    if do_translate:
+        x, y, z = params[i:i + 3]
+        trans.insert(0, translation(x, y, z))
+        i += 3
+
+    if do_scale == 1:
+        s = params[i]
+        trans.append(scaling(s, s, s))
+    elif do_scale == 3:
+        x, y, z = params[i:i + 3]
+        trans.append(scaling(x, y, z))
+
+    trans = reduce(dot, trans)
+    return trans
+
+
+def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
+                       scale=False, tol=None, x0=None, out='trans'):
+    """Find a transform that minimizes the squared distance between two
+    matching sets of points.
+
+    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
+    a combination of rotation, translation, and scaling (in that order).
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Points to which the transform should be applied.
+    tgt_pts : array, shape = (n, 3)
+        Points to which src_pts should be fitted. Each point in tgt_pts should
+        correspond to the point in src_pts with the same index.
+    rotate : bool
+        Allow rotation of the ``src_pts``.
+    translate : bool
+        Allow translation of the ``src_pts``.
+    scale : bool
+        Whether to scale the points. With False, points are not scaled. With
+        True, points are scaled by the same factor along all axes.
+    tol : scalar | None
+        The error tolerance. If the distance between any of the matched points
+        exceeds this value in the solution, a RuntimeError is raised. With
+        None, no error check is performed.
+    x0 : None | tuple
+        Initial values for the fit parameters.
+    out : 'params' | 'trans'
+        In what format to return the estimate: 'params' returns a tuple with
+        the fit parameters; 'trans' returns a transformation matrix of shape
+        (4, 4).
+
+
+    Returns
+    -------
+    One of the following, depending on the ``out`` parameter:
+
+    trans : array, shape = (4, 4)
+        Transformation that, if applied to src_pts, minimizes the squared
+        distance to tgt_pts.
+    params : array, shape = (n_params, )
+        An array containing the rotation, translation, and scaling parameters
+        in that order.
+    """
+    from scipy.optimize import leastsq
+    src_pts = np.atleast_2d(src_pts)
+    tgt_pts = np.atleast_2d(tgt_pts)
+    if src_pts.shape != tgt_pts.shape:
+        raise ValueError("src_pts and tgt_pts must have same shape (got "
+                         "{0}, {1})".format(src_pts.shape, tgt_pts.shape))
+
+    rotate = bool(rotate)
+    translate = bool(translate)
+    scale = int(scale)
+    if translate:
+        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
+
+    param_info = (rotate, translate, scale)
+    if param_info == (True, False, 0):
+        def error(x):
+            rx, ry, rz = x
+            trans = rotation3d(rx, ry, rz)
+            est = dot(src_pts, trans.T)
+            return (tgt_pts - est).ravel()
+        if x0 is None:
+            x0 = (0, 0, 0)
+    elif param_info == (True, False, 1):
+        def error(x):
+            rx, ry, rz, s = x
+            trans = rotation3d(rx, ry, rz) * s
+            est = dot(src_pts, trans.T)
+            return (tgt_pts - est).ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 1)
+    elif param_info == (True, True, 0):
+        def error(x):
+            rx, ry, rz, tx, ty, tz = x
+            trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+            est = dot(src_pts, trans.T)
+            return (tgt_pts - est[:, :3]).ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0)
+    elif param_info == (True, True, 1):
+        def error(x):
+            rx, ry, rz, tx, ty, tz, s = x
+            trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
+                                 scaling(s, s, s)))
+            est = dot(src_pts, trans.T)
+            return (tgt_pts - est[:, :3]).ravel()
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0, 1)
+    else:
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate=%r, translate=%r, scale=%r" % param_info)
+
+    x, _, _, _, _ = leastsq(error, x0, full_output=True)
+
+    # re-create the final transformation matrix
+    if (tol is not None) or (out == 'trans'):
+        trans = _trans_from_params(param_info, x)
+
+    # assess the error of the solution
+    if tol is not None:
+        if not translate:
+            src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
+        est_pts = dot(src_pts, trans.T)[:, :3]
+        err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
+        if np.any(err > tol):
+            raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
+
+    if out == 'params':
+        return x
+    elif out == 'trans':
+        return trans
+    else:
+        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
+                         "'trans'." % out)
+
+
+def _point_cloud_error(src_pts, tgt_pts):
+    """Find the distance from each source point to its closest target point
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Source points.
+    tgt_pts : array, shape = (m, 3)
+        Target points.
+
+    Returns
+    -------
+    dist : array, shape = (n, )
+        For each point in ``src_pts``, the distance to the closest point in
+        ``tgt_pts``.
+    """
+    from scipy.spatial.distance import cdist
+    Y = cdist(src_pts, tgt_pts, 'euclidean')
+    dist = Y.min(axis=1)
+    return dist
+
+
+def _point_cloud_error_balltree(src_pts, tgt_tree):
+    """Find the distance from each source point to its closest target point
+
+    Uses sklearn.neighbors.BallTree for greater efficiency
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Source points.
+    tgt_tree : sklearn.neighbors.BallTree
+        BallTree of the target points.
+
+    Returns
+    -------
+    dist : array, shape = (n, )
+        For each point in ``src_pts``, the distance to the closest point in
+        ``tgt_pts``.
+    """
+    dist, _ = tgt_tree.query(src_pts)
+    return dist.ravel()
+
+
+def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
+                    scale=0, x0=None, leastsq_args={}, out='params'):
+    """Find a transform that minimizes the squared distance from each source
+    point to its closest target point
+
+    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
+    a combination of rotation, translation, and scaling (in that order).
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Points to which the transform should be applied.
+    tgt_pts : array, shape = (m, 3)
+        Points to which src_pts should be fitted. No index correspondence
+        is assumed; each source point is matched to its closest target point.
+    rotate : bool
+        Allow rotation of the ``src_pts``.
+    translate : bool
+        Allow translation of the ``src_pts``.
+    scale : 0 | 1 | 3
+        Number of scaling parameters. With 0, points are not scaled. With 1,
+        points are scaled by the same factor along all axes. With 3, points are
+        scaled by a separate factor along each axis.
+    x0 : None | tuple
+        Initial values for the fit parameters.
+    leastsq_args : dict
+        Additional parameters to submit to :func:`scipy.optimize.leastsq`.
+    out : 'params' | 'trans'
+        In what format to return the estimate: 'params' returns a tuple with
+        the fit parameters; 'trans' returns a transformation matrix of shape
+        (4, 4).
+
+    Returns
+    -------
+    x : array, shape = (n_params, )
+        Estimated parameters for the transformation.
+
+    Notes
+    -----
+    Assumes that the target points form a dense enough point cloud so that
+    the distance of each src_pt to the closest tgt_pt can be used as an
+    estimate of the distance of src_pt to tgt_pts.
+    """
+    from scipy.optimize import leastsq
+    kwargs = {'epsfcn': 0.01}
+    kwargs.update(leastsq_args)
+
+    # assert correct argument types
+    src_pts = np.atleast_2d(src_pts)
+    tgt_pts = np.atleast_2d(tgt_pts)
+    translate = bool(translate)
+    rotate = bool(rotate)
+    scale = int(scale)
+
+    if translate:
+        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
+
+    try:
+        from sklearn.neighbors import BallTree
+        tgt_pts = BallTree(tgt_pts)
+        errfunc = _point_cloud_error_balltree
+    except ImportError:
+        warn("Sklearn could not be imported. Fitting points will be slower. "
+             "To improve performance, install the sklearn module.")
+        errfunc = _point_cloud_error
+
+    # for efficiency, define parameter specific error function
+    param_info = (rotate, translate, scale)
+    if param_info == (True, False, 0):
+        x0 = x0 or (0, 0, 0)
+
+        def error(x):
+            rx, ry, rz = x
+            trans = rotation3d(rx, ry, rz)
+            est = dot(src_pts, trans.T)
+            err = errfunc(est, tgt_pts)
+            return err
+    elif param_info == (True, False, 1):
+        x0 = x0 or (0, 0, 0, 1)
+
+        def error(x):
+            rx, ry, rz, s = x
+            trans = rotation3d(rx, ry, rz) * s
+            est = dot(src_pts, trans.T)
+            err = errfunc(est, tgt_pts)
+            return err
+    elif param_info == (True, False, 3):
+        x0 = x0 or (0, 0, 0, 1, 1, 1)
+
+        def error(x):
+            rx, ry, rz, sx, sy, sz = x
+            trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
+            est = dot(src_pts, trans.T)
+            err = errfunc(est, tgt_pts)
+            return err
+    elif param_info == (True, True, 0):
+        x0 = x0 or (0, 0, 0, 0, 0, 0)
+
+        def error(x):
+            rx, ry, rz, tx, ty, tz = x
+            trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+            est = dot(src_pts, trans.T)
+            err = errfunc(est[:, :3], tgt_pts)
+            return err
+    else:
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate=%r, translate=%r, scale=%r" % param_info)
+
+    est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
+    logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
+                 msg)
+
+    if out == 'params':
+        return est
+    elif out == 'trans':
+        return _trans_from_params(param_info, est)
+    else:
+        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
+                         "'trans'." % out)
+
+
+def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
+    """Find paths to label files in a subject's label directory
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    pattern : str | None
+        Pattern for finding the labels relative to the label directory in the
+        MRI subject directory (e.g., "aparc/*.label" will find all labels
+        in the "subject/label/aparc" directory). With None, find all labels.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR'])
+
+    Returns
+    -------
+    paths : list
+        List of paths relative to the subject's label directory
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = os.path.join(subjects_dir, subject)
+    lbl_dir = os.path.join(subject_dir, 'label')
+
+    if pattern is None:
+        paths = []
+        for dirpath, _, filenames in os.walk(lbl_dir):
+            rel_dir = os.path.relpath(dirpath, lbl_dir)
+            for filename in fnmatch.filter(filenames, '*.label'):
+                path = os.path.join(rel_dir, filename)
+                paths.append(path)
+    else:
+        paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
+
+    return paths
+
+
+def _find_mri_paths(subject='fsaverage', subjects_dir=None):
+    """Find all files of an mri relevant for source transformation
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR'])
+
+    Returns
+    -------
+    paths : dict
+        Dictionary whose keys are relevant file type names (str), and whose
+        values are lists of paths.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    paths = {}
+
+    # directories to create
+    paths['dirs'] = [bem_dirname, surf_dirname]
+
+    # surf/ files
+    paths['surf'] = surf = []
+    surf_fname = os.path.join(surf_dirname, '{name}')
+    surf_names = ('inflated', 'sphere', 'sphere.reg', 'white')
+    if os.getenv('_MNE_FEW_SURFACES', '') != 'true':  # for testing
+        surf_names = surf_names + (
+            'orig', 'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
+            'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
+    for name in surf_names:
+        for hemi in ('lh.', 'rh.'):
+            fname = pformat(surf_fname, name=hemi + name)
+            surf.append(fname)
+
+    # BEM files
+    paths['bem'] = bem = []
+    path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
+    if os.path.exists(path):
+        bem.append('head')
+    bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
+                          subject=subject, name='*-bem')
+    re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
+                         name='(.+)')
+    for path in iglob(bem_pattern):
+        match = re.match(re_pattern, path)
+        name = match.group(1)
+        bem.append(name)
+
+    # fiducials
+    paths['fid'] = [fid_fname]
+
+    # duplicate curvature files
+    paths['duplicate'] = dup = []
+    path = os.path.join(surf_dirname, '{name}')
+    for name in ['lh.curv', 'rh.curv']:
+        fname = pformat(path, name=name)
+        dup.append(fname)
+
+    # check presence of required files
+    for ftype in ['surf', 'fid', 'duplicate']:
+        for fname in paths[ftype]:
+            path = fname.format(subjects_dir=subjects_dir, subject=subject)
+            path = os.path.realpath(path)
+            if not os.path.exists(path):
+                raise IOError("Required file not found: %r" % path)
+
+    # find source space files
+    paths['src'] = src = []
+    bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
+    fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
+    prefix = subject + '-'
+    for fname in fnames:
+        if fname.startswith(prefix):
+            fname = "{subject}-%s" % fname[len(prefix):]
+        path = os.path.join(bem_dirname, fname)
+        src.append(path)
+
+    return paths
+
+
+def _is_mri_subject(subject, subjects_dir=None):
+    """Check whether a directory in subjects_dir is an mri subject directory
+
+    Parameters
+    ----------
+    subject : str
+        Name of the potential subject/directory.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    is_mri_subject : bool
+        Whether ``subject`` is an mri subject.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    fname = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
+    if not os.path.exists(fname):
+        return False
+
+    return True
+
+
+def _mri_subject_has_bem(subject, subjects_dir=None):
+    """Check whether an mri subject has a file matching the bem pattern
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    has_bem_file : bool
+        Whether ``subject`` has a bem file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
+                               name='*-bem')
+    fnames = glob(pattern)
+    return bool(len(fnames))
+
+
+def read_mri_cfg(subject, subjects_dir=None):
+    """Read information from the cfg file of a scaled MRI brain
+
+    Parameters
+    ----------
+    subject : str
+        Name of the scaled MRI subject.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Returns
+    -------
+    cfg : dict
+        Dictionary with entries from the MRI's cfg file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
+
+    if not os.path.exists(fname):
+        raise IOError("%r does not seem to be a scaled mri subject: %r does "
+                      "not exist." % (subject, fname))
+
+    logger.info("Reading MRI cfg file %s" % fname)
+    config = configparser.RawConfigParser()
+    config.read(fname)
+    n_params = config.getint("MRI Scaling", 'n_params')
+    if n_params == 1:
+        scale = config.getfloat("MRI Scaling", 'scale')
+    elif n_params == 3:
+        scale_str = config.get("MRI Scaling", 'scale')
+        scale = np.array([float(s) for s in scale_str.split()])
+    else:
+        raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
+
+    out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
+           'n_params': n_params, 'scale': scale}
+    return out
+
+
+def _write_mri_config(fname, subject_from, subject_to, scale):
+    """Write the cfg file describing a scaled MRI subject
+
+    Parameters
+    ----------
+    fname : str
+        Target file.
+    subject_from : str
+        Name of the source MRI subject.
+    subject_to : str
+        Name of the scaled MRI subject.
+    scale : float | array_like, shape = (3,)
+        The scaling parameter.
+    """
+    scale = np.asarray(scale)
+    if np.isscalar(scale) or scale.shape == ():
+        n_params = 1
+    else:
+        n_params = 3
+
+    config = configparser.RawConfigParser()
+    config.add_section("MRI Scaling")
+    config.set("MRI Scaling", 'subject_from', subject_from)
+    config.set("MRI Scaling", 'subject_to', subject_to)
+    config.set("MRI Scaling", 'n_params', str(n_params))
+    if n_params == 1:
+        config.set("MRI Scaling", 'scale', str(scale))
+    else:
+        config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
+    config.set("MRI Scaling", 'version', '1')
+    with open(fname, 'w') as fid:
+        config.write(fid)
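+
+# For reference, the cfg file written above (and parsed by read_mri_cfg)
+# looks roughly like this for a uniform scaling factor -- an illustrative
+# sketch with hypothetical subject names:
+#
+#     [MRI Scaling]
+#     subject_from = fsaverage
+#     subject_to = fsaverage_scaled
+#     n_params = 1
+#     scale = 0.9
+#     version = 1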
+
+
+def _scale_params(subject_to, subject_from, scale, subjects_dir):
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if (subject_from is None) != (scale is None):
+        raise TypeError("Need to provide either both subject_from and scale "
+                        "parameters, or neither.")
+
+    if subject_from is None:
+        cfg = read_mri_cfg(subject_to, subjects_dir)
+        subject_from = cfg['subject_from']
+        n_params = cfg['n_params']
+        scale = cfg['scale']
+    else:
+        scale = np.asarray(scale)
+        if scale.ndim == 0:
+            n_params = 1
+        elif scale.shape == (3,):
+            n_params = 3
+        else:
+            raise ValueError("Invalid shape for scale parameer. Need scalar "
+                             "or array of length 3. Got %s." % str(scale))
+
+    return subjects_dir, subject_from, n_params, scale
+
+
+def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
+              subjects_dir=None):
+    """Scale a bem file
+
+    Parameters
+    ----------
+    subject_to : str
+        Name of the scaled MRI subject (the destination mri subject).
+    bem_name : str
+        Name of the bem file. For example, to scale
+        ``fsaverage-inner_skull-bem.fif``, the bem_name would be
+        "inner_skull-bem".
+    subject_from : None | str
+        The subject from which to read the BEM surfaces. If None,
+        subject_from is read from subject_to's config file.
+    scale : None | float | array, shape = (3,)
+        Scaling factor. Has to be specified if subject_from is specified,
+        otherwise it is read from subject_to's config file.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    """
+    subjects_dir, subject_from, _, scale = _scale_params(subject_to,
+                                                         subject_from, scale,
+                                                         subjects_dir)
+
+    src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
+                           name=bem_name)
+    dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
+                           name=bem_name)
+
+    if os.path.exists(dst):
+        raise IOError("File alredy exists: %s" % dst)
+
+    surfs = read_bem_surfaces(src)
+    if len(surfs) != 1:
+        raise NotImplementedError("BEM file with more than one surface: %r"
+                                  % src)
+    surf0 = surfs[0]
+    surf0['rr'] = surf0['rr'] * scale
+    write_bem_surfaces(dst, surf0)
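+
+# Minimal usage sketch (illustrative; assumes a scaled subject whose cfg
+# file exists and an fsaverage BEM named fsaverage-inner_skull-bem.fif):
+#
+#     >>> scale_bem('fsaverage_scaled', 'inner_skull-bem')  # doctest: +SKIP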
+
+
+def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
+                 scale=None, subjects_dir=None):
+    """Scale labels to match a brain that was previously created by scaling
+
+    Parameters
+    ----------
+    subject_to : str
+        Name of the scaled MRI subject (the destination brain).
+    pattern : str | None
+        Pattern for finding the labels relative to the label directory in the
+        MRI subject directory (e.g., "lh.BA3a.label" will scale
+        "fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
+        in the "fsaverage/label/aparc" directory). With None, scale all labels.
+    overwrite : bool
+        Overwrite any label file that already exists for subject_to (otherwise
+        existing labels are skipped).
+    subject_from : None | str
+        Name of the original MRI subject (the brain that was scaled to create
+        subject_to). If None, the value is read from subject_to's cfg file.
+    scale : None | float | array_like, shape = (3,)
+        Scaling parameter. If None, the value is read from subject_to's cfg
+        file.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    """
+    # read parameters from cfg
+    if scale is None or subject_from is None:
+        cfg = read_mri_cfg(subject_to, subjects_dir)
+        if subject_from is None:
+            subject_from = cfg['subject_from']
+        if scale is None:
+            scale = cfg['scale']
+
+    # find labels
+    paths = _find_label_paths(subject_from, pattern, subjects_dir)
+    if not paths:
+        return
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    src_root = os.path.join(subjects_dir, subject_from, 'label')
+    dst_root = os.path.join(subjects_dir, subject_to, 'label')
+
+    # scale labels
+    for fname in paths:
+        dst = os.path.join(dst_root, fname)
+        if not overwrite and os.path.exists(dst):
+            continue
+
+        dirname = os.path.dirname(dst)
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+        src = os.path.join(src_root, fname)
+        l_old = read_label(src)
+        pos = l_old.pos * scale
+        l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
+                      l_old.comment, subject=subject_to)
+        l_new.save(dst)
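+
+# Minimal usage sketch (illustrative; scales all aparc labels for a
+# hypothetical scaled subject, reading the factor from its cfg file):
+#
+#     >>> scale_labels('fsaverage_scaled',
+#     ...              pattern='aparc/*.label')  # doctest: +SKIP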
+
+
+def scale_mri(subject_from, subject_to, scale, overwrite=False,
+              subjects_dir=None):
+    """Create a scaled copy of an MRI subject
+
+    Parameters
+    ----------
+    subject_from : str
+        Name of the subject providing the MRI.
+    subject_to : str
+        New subject name for which to save the scaled MRI.
+    scale : float | array_like, shape = (3,)
+        The scaling factor (one or 3 parameters).
+    overwrite : bool
+        If an MRI already exists for subject_to, overwrite it.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    See Also
+    --------
+    scale_labels : add labels to a scaled MRI
+    scale_source_space : add a source space to a scaled MRI
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
+    scale = np.asarray(scale)
+
+    # make sure we have an empty target directory
+    dest = subject_dirname.format(subject=subject_to,
+                                  subjects_dir=subjects_dir)
+    if os.path.exists(dest):
+        if overwrite:
+            shutil.rmtree(dest)
+        else:
+            raise IOError("Subject directory for %s already exists: %r"
+                          % (subject_to, dest))
+
+    for dirname in paths['dirs']:
+        dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
+        os.makedirs(dir_)
+
+    # save MRI scaling parameters
+    fname = os.path.join(dest, 'MRI scaling parameters.cfg')
+    _write_mri_config(fname, subject_from, subject_to, scale)
+
+    # surf files [in mm]
+    for fname in paths['surf']:
+        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
+        src = os.path.realpath(src)
+        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
+        pts, tri = read_surface(src)
+        write_surface(dest, pts * scale, tri)
+
+    # BEM files [in m]
+    for bem_name in paths['bem']:
+        scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
+
+    # fiducials [in m]
+    for fname in paths['fid']:
+        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
+        src = os.path.realpath(src)
+        pts, cframe = read_fiducials(src)
+        for pt in pts:
+            pt['r'] = pt['r'] * scale
+        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
+        write_fiducials(dest, pts, cframe)
+
+    # duplicate files
+    for fname in paths['duplicate']:
+        src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
+        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
+        shutil.copyfile(src, dest)
+
+    # source spaces
+    for fname in paths['src']:
+        src_name = os.path.basename(fname)
+        scale_source_space(subject_to, src_name, subject_from, scale,
+                           subjects_dir)
+
+    # labels [in m]
+    scale_labels(subject_to, subject_from=subject_from, scale=scale,
+                 subjects_dir=subjects_dir)
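+
+# Minimal usage sketch (illustrative; creates a uniformly scaled copy of a
+# hypothetical 'fsaverage' subject, including surfaces, BEM, fiducials,
+# source spaces and labels, using the SUBJECTS_DIR environment variable):
+#
+#     >>> scale_mri('fsaverage', 'fsaverage_scaled',
+#     ...           scale=0.9)  # doctest: +SKIP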
+
+
+def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
+                       subjects_dir=None, n_jobs=1):
+    """Scale a source space for an mri created with scale_mri()
+
+    Parameters
+    ----------
+    subject_to : str
+        Name of the scaled MRI subject (the destination mri subject).
+    src_name : str
+        Source space name. Can be a spacing parameter (e.g., ``'7'``,
+        ``'ico4'``, ``'oct6'``) or a file name of a source space file relative
+        to the bem directory; if the file name contains the subject name, it
+        should be indicated as "{subject}" in ``src_name`` (e.g.,
+        ``"{subject}-my_source_space-src.fif"``).
+    subject_from : None | str
+        The subject from which to read the source space. If None, subject_from
+        is read from subject_to's config file.
+    scale : None | float | array, shape = (3,)
+        Scaling factor. Has to be specified if subject_from is specified,
+        otherwise it is read from subject_to's config file.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    n_jobs : int
+        Number of jobs to run in parallel if recomputing distances (only
+        applies if scale is an array of length 3, and will not use more cores
+        than there are source spaces).
+    """
+    subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
+                                                                subject_from,
+                                                                scale,
+                                                                subjects_dir)
+
+    # find the source space file names
+    if src_name.isdigit():
+        spacing = src_name  # spacing in mm
+        src_pattern = src_fname
+    else:
+        match = re.match("(oct|ico)-?(\d+)$", src_name)
+        if match:
+            spacing = '-'.join(match.groups())
+            src_pattern = src_fname
+        else:
+            spacing = None
+            src_pattern = os.path.join(bem_dirname, src_name)
+
+    src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
+                             spacing=spacing)
+    dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
+                             spacing=spacing)
+
+    # prepare scaling parameters
+    if n_params == 1:
+        norm_scale = None
+    elif n_params == 3:
+        norm_scale = 1. / scale
+    else:
+        raise RuntimeError("Invalid n_params entry in MRI cfg file: %s"
+                           % str(n_params))
+
+    # read and scale the source space [in m]
+    sss = read_source_spaces(src)
+    logger.info("scaling source space %s:  %s -> %s", spacing, subject_from,
+                subject_to)
+    logger.info("Scale factor: %s", scale)
+    add_dist = False
+    for ss in sss:
+        ss['subject_his_id'] = subject_to
+        ss['rr'] *= scale
+
+        # distances and patch info
+        if norm_scale is None:
+            if ss['dist'] is not None:
+                ss['dist'] *= scale
+                ss['nearest_dist'] *= scale
+                ss['dist_limit'] *= scale
+        else:
+            nn = ss['nn']
+            nn *= norm_scale
+            norm = np.sqrt(np.sum(nn ** 2, 1))
+            nn /= norm[:, np.newaxis]
+            if ss['dist'] is not None:
+                add_dist = True
+
+    if add_dist:
+        logger.info("Recomputing distances, this might take a while")
+        dist_limit = np.asscalar(sss[0]['dist_limit'])
+        add_source_space_distances(sss, dist_limit, n_jobs)
+
+    write_source_spaces(dst, sss)
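+
+# Minimal usage sketch (illustrative; 'oct6' is expanded to the
+# '*-oct-6-src.fif' naming pattern by the code above, and the scaling
+# parameters are read from the scaled subject's cfg file):
+#
+#     >>> scale_source_space('fsaverage_scaled', 'oct6')  # doctest: +SKIP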
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cov.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cov.py
new file mode 100644
index 0000000..5fb6f17
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cov.py
@@ -0,0 +1,1915 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#          Denis A. Engemann <denis.engemann@gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+import os
+from math import floor, ceil, log
+import itertools as itt
+import warnings
+
+from copy import deepcopy
+
+import six
+from distutils.version import LooseVersion
+
+import numpy as np
+from scipy import linalg
+
+from .io.write import start_file, end_file
+from .io.proj import (make_projector, _proj_equal, activate_proj,
+                      _has_eeg_average_ref_proj)
+from .io import fiff_open
+from .io.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
+                      pick_channels, pick_info, _picks_by_type)
+
+from .io.constants import FIFF
+from .io.meas_info import read_bad_channels
+from .io.proj import _read_proj, _write_proj
+from .io.tag import find_tag
+from .io.tree import dir_tree_find
+from .io.write import (start_block, end_block, write_int, write_name_list,
+                       write_double, write_float_matrix, write_string)
+from .defaults import _handle_default
+from .epochs import _is_good
+from .utils import (check_fname, logger, verbose, estimate_rank,
+                    _compute_row_norms, check_version, _time_mask)
+from .utils import deprecated
+
+from .externals.six.moves import zip
+from .externals.six import string_types
+
+
+def _check_covs_algebra(cov1, cov2):
+    if cov1.ch_names != cov2.ch_names:
+        raise ValueError('Both Covariance objects must have the same list '
+                         'of channels.')
+    projs1 = [str(c) for c in cov1['projs']]
+    projs2 = [str(c) for c in cov2['projs']]
+    if projs1 != projs2:
+        raise ValueError('Both Covariance objects must have the same list '
+                         'of SSP projections.')
+
+
+def _get_tslice(epochs, tmin, tmax):
+    """get the slice."""
+    tstart, tend = None, None
+    mask = _time_mask(epochs.times, tmin, tmax)
+    tstart = np.where(mask)[0][0] if tmin is not None else None
+    tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
+    tslice = slice(tstart, tend, None)
+    return tslice
+
+
+class Covariance(dict):
+
+    """Noise covariance matrix.
+
+    .. warning:: This class should not be instantiated directly, but
+                 instead should be created using a covariance reading or
+                 computation function.
+
+    Parameters
+    ----------
+    data : array-like
+        The data.
+    names : list of str
+        Channel names.
+    bads : list of str
+        Bad channels.
+    projs : list
+        Projection vectors.
+    nfree : int
+        Degrees of freedom.
+    eig : array-like | None
+        Eigenvalues.
+    eigvec : array-like | None
+        Eigenvectors.
+    method : str | None
+        The method used to compute the covariance.
+    loglik : float
+        The log likelihood.
+
+    Attributes
+    ----------
+    data : array of shape (n_channels, n_channels)
+        The covariance.
+    ch_names : list of string
+        List of channels' names.
+    nfree : int
+        Number of degrees of freedom i.e. number of time points used.
+
+    See Also
+    --------
+    compute_covariance
+    compute_raw_covariance
+    make_ad_hoc_cov
+    read_cov
+    """
+
+    def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
+                 method=None, loglik=None):
+        """Init of covariance."""
+        diag = data.ndim == 1
+        self.update(data=data, dim=len(data), names=names, bads=bads,
+                    nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
+                    projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
+        if method is not None:
+            self['method'] = method
+        if loglik is not None:
+            self['loglik'] = loglik
+
+    @property
+    def data(self):
+        """Numpy array of Noise covariance matrix."""
+        return self['data']
+
+    @property
+    def ch_names(self):
+        """Channel names."""
+        return self['names']
+
+    @property
+    def nfree(self):
+        """Number of degrees of freedom."""
+        return self['nfree']
+
+    def save(self, fname):
+        """Save covariance matrix in a FIF file.
+
+        Parameters
+        ----------
+        fname : str
+            Output filename.
+        """
+        check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
+
+        fid = start_file(fname)
+
+        try:
+            _write_cov(fid, self)
+        except Exception as inst:
+            fid.close()
+            os.remove(fname)
+            raise inst
+
+        end_file(fid)
+
+    def copy(self):
+        """Copy the Covariance object
+
+        Returns
+        -------
+        cov : instance of Covariance
+            The copied object.
+        """
+        return deepcopy(self)
+
+    def as_diag(self, copy=True):
+        """Set covariance to be processed as being diagonal.
+
+        Parameters
+        ----------
+        copy : bool
+            If True, return a modified copy of the covariance. If False,
+            the covariance is modified in place.
+
+        Returns
+        -------
+        cov : dict
+            The covariance.
+
+        Notes
+        -----
+        This function allows creation of inverse operators
+        equivalent to using the old "--diagnoise" mne option.
+        """
+        if self['diag'] is True:
+            return self.copy() if copy is True else self
+        if copy is True:
+            cov = cp.deepcopy(self)
+        else:
+            cov = self
+        cov['diag'] = True
+        cov['data'] = np.diag(cov['data'])
+        cov['eig'] = None
+        cov['eigvec'] = None
+        return cov
+
+    def __repr__(self):
+        if self.data.ndim == 2:
+            s = 'size : %s x %s' % self.data.shape
+        else:  # ndim == 1
+            s = 'diagonal : %s' % self.data.size
+        s += ", n_samples : %s" % self.nfree
+        s += ", data : %s" % self.data
+        return "<Covariance  |  %s>" % s
+
+    def __add__(self, cov):
+        """Add Covariance taking into account number of degrees of freedom."""
+        _check_covs_algebra(self, cov)
+        this_cov = cp.deepcopy(cov)
+        this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
+                             (self['data'] * self['nfree'])) /
+                            (self['nfree'] + this_cov['nfree']))
+        this_cov['nfree'] += self['nfree']
+
+        this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
+
+        return this_cov
+
+    def __iadd__(self, cov):
+        """Add Covariance taking into account number of degrees of freedom."""
+        _check_covs_algebra(self, cov)
+        self['data'][:] = (((self['data'] * self['nfree']) +
+                            (cov['data'] * cov['nfree'])) /
+                           (self['nfree'] + cov['nfree']))
+        self['nfree'] += cov['nfree']
+
+        self['bads'] = list(set(self['bads']).union(cov['bads']))
+
+        return self
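+
+    # Note on the arithmetic above (illustrative): combining a covariance
+    # with nfree=100 and one with nfree=300 yields
+    #     data = (data1 * 100 + data2 * 300) / 400 and nfree = 400,
+    # i.e. a pooled estimate weighted by the degrees of freedom.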
+
+    @verbose
+    def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
+             show=True, verbose=None):
+        """Plot Covariance data.
+
+        Parameters
+        ----------
+        info : dict
+            Measurement info.
+        exclude : list of string | str
+            List of channels to exclude. If empty do not exclude any channel.
+            If 'bads', exclude info['bads'].
+        colorbar : bool
+            Show colorbar or not.
+        proj : bool
+            Apply projections or not.
+        show_svd : bool
+            Also plot the singular values of the noise covariance for each
+            sensor type (shown as square roots, i.e. standard deviations).
+        show : bool
+            Call pyplot.show() at the end or not.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig_cov : instance of matplotlib.pyplot.Figure
+            The covariance plot.
+        fig_svd : instance of matplotlib.pyplot.Figure | None
+            The SVD spectra plot of the covariance.
+        """
+        from .viz.misc import plot_cov
+        return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
+
+
+###############################################################################
+# IO
+
+@verbose
+def read_cov(fname, verbose=None):
+    """Read a noise covariance from a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the covariance matrix. It should end with
+        -cov.fif or -cov.fif.gz.
+    verbose : bool, str, int, or None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : Covariance
+        The noise covariance matrix.
+
+    See Also
+    --------
+    write_cov, compute_covariance, compute_raw_covariance
+    """
+    check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
+    f, tree = fiff_open(fname)[:2]
+    with f as fid:
+        return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
+                                      limited=True))
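+
+# Minimal usage sketch (illustrative; assumes a covariance file saved with
+# the '-cov.fif' suffix exists):
+#
+#     >>> cov = read_cov('sample_audvis-cov.fif')  # doctest: +SKIP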
+
+
+###############################################################################
+# Estimate from data
+
+@verbose
+def make_ad_hoc_cov(info, verbose=None):
+    """Create an ad hoc noise covariance.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    verbose : bool, str, int, or None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance
+        The ad hoc diagonal noise covariance for the M/EEG data channels.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=[]))
+    info._check_consistency()
+
+    # Standard deviations to be used
+    grad_std = 5e-13
+    mag_std = 20e-15
+    eeg_std = 0.2e-6
+    logger.info('Using standard noise values '
+                '(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
+                % (1e13 * grad_std, 1e15 * mag_std, 1e6 * eeg_std))
+
+    data = np.zeros(len(info['ch_names']))
+    for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
+                             (grad_std, mag_std, eeg_std)):
+        data[pick_types(info, meg=meg, eeg=eeg)] = val * val
+    return Covariance(data, info['ch_names'], info['bads'], info['projs'],
+                      nfree=0)
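+
+# Minimal usage sketch (illustrative; 'raw' is a hypothetical Raw instance).
+# The resulting diagonal holds squared standard deviations, e.g.
+# (5e-13) ** 2 = 2.5e-25 for gradiometer channels:
+#
+#     >>> cov = make_ad_hoc_cov(raw.info)  # doctest: +SKIP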
+
+
+def _check_n_samples(n_samples, n_chan):
+    """Check to see if there are enough samples for reliable cov calc."""
+    n_samples_min = 10 * (n_chan + 1) // 2
+    if n_samples <= 0:
+        raise ValueError('No samples found to compute the covariance matrix')
+    if n_samples < n_samples_min:
+        text = ('Too few samples (required : %d got : %d), covariance '
+                'estimate may be unreliable' % (n_samples_min, n_samples))
+        warnings.warn(text)
+        logger.warning(text)
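+
+# Illustrative arithmetic: for n_chan = 102 channels the heuristic above
+# requires at least 10 * (102 + 1) // 2 = 515 samples before the estimate
+# is considered reliable.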
+
+
+@deprecated('"compute_raw_data_covariance" is deprecated and will be '
+            'removed in MNE-0.11. Please use compute_raw_covariance instead')
+@verbose
+def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
+                                reject=None, flat=None, picks=None,
+                                verbose=None):
+    return compute_raw_covariance(raw, tmin, tmax, tstep,
+                                  reject, flat, picks, verbose)
+
+
+@verbose
+def compute_raw_covariance(raw, tmin=None, tmax=None, tstep=0.2,
+                           reject=None, flat=None, picks=None,
+                           verbose=None):
+    """Estimate noise covariance matrix from a continuous segment of raw data.
+
+    It is typically useful to estimate a noise covariance
+    from empty room data or time intervals before starting
+    the stimulation.
+
+    Note: To speed up the computation you should consider preloading raw data
+    by setting preload=True when reading the Raw data.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data
+    tmin : float | None (default None)
+        Beginning of time interval in seconds
+    tmax : float | None (default None)
+        End of time interval in seconds
+    tstep : float (default 0.2)
+        Length of data chunks for artefact rejection in seconds.
+    reject : dict | None (default None)
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict | None (default None)
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    picks : array-like of int | None (default None)
+        Indices of channels to include (if None, all channels
+        except bad channels are used).
+    verbose : bool | str | int | None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance
+        Noise covariance matrix.
+
+    See Also
+    --------
+    compute_covariance : Estimate noise covariance matrix from epochs
+    """
+    sfreq = raw.info['sfreq']
+
+    # Convert to samples
+    start = 0 if tmin is None else int(floor(tmin * sfreq))
+    if tmax is None:
+        stop = int(raw.last_samp - raw.first_samp)
+    else:
+        stop = int(ceil(tmax * sfreq))
+    step = int(ceil(tstep * raw.info['sfreq']))
+
+    # don't exclude any bad channels, inverses expect all channels present
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
+                           ref_meg=False, exclude=[])
+
+    data = 0
+    n_samples = 0
+    mu = 0
+
+    info = pick_info(raw.info, picks)
+    idx_by_type = channel_indices_by_type(info)
+
+    # Read data in chunks
+    for first in range(start, stop, step):
+        last = first + step
+        if last >= stop:
+            last = stop
+        raw_segment, times = raw[picks, first:last]
+        if _is_good(raw_segment, info['ch_names'], idx_by_type, reject, flat,
+                    ignore_chs=info['bads']):
+            mu += raw_segment.sum(axis=1)
+            data += np.dot(raw_segment, raw_segment.T)
+            n_samples += raw_segment.shape[1]
+        else:
+            logger.info("Artefact detected in [%d, %d]" % (first, last))
+
+    _check_n_samples(n_samples, len(picks))
+    mu /= n_samples
+    data -= n_samples * mu[:, None] * mu[None, :]
+    data /= (n_samples - 1.0)
+    logger.info("Number of samples used : %d" % n_samples)
+    logger.info('[done]')
+
+    ch_names = [raw.info['ch_names'][k] for k in picks]
+    bads = [b for b in raw.info['bads'] if b in ch_names]
+    projs = cp.deepcopy(raw.info['projs'])
+    # XXX : do not compute eig and eigvec now (think it's better...)
+    return Covariance(data, ch_names, bads, projs, nfree=n_samples)
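+
+# Minimal usage sketch (illustrative; 'raw' is a hypothetical Raw instance,
+# e.g. empty-room data, with rejection thresholds as in the docstring):
+#
+#     >>> cov = compute_raw_covariance(raw, tmin=0, tmax=30,
+#     ...                              reject=dict(grad=4000e-13,
+#     ...                                          mag=4e-12))  # doctest: +SKIP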
+
+
+@verbose
+def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
+                       projs=None, method='empirical', method_params=None,
+                       cv=3, scalings=None, n_jobs=1, return_estimators=False,
+                       verbose=None):
+    """Estimate noise covariance matrix from epochs.
+
+    The noise covariance is typically estimated on pre-stim periods
+    when the stim onset is defined from events.
+
+    If the covariance is computed for multiple event types (events
+    with different IDs), the following two options can be used and combined.
+    A) either an Epochs object for each event type is created and
+    a list of Epochs is passed to this function.
+    B) an Epochs object is created for multiple events and passed
+    to this function.
+
+    Note: Baseline correction should be used when creating the Epochs.
+          Otherwise the computed covariance matrix will be inaccurate.
+
+    Note: For multiple event types, it is also possible to create a
+          single Epochs object with events obtained using
+          merge_events(). However, the resulting covariance matrix
+          will only be correct if keep_sample_mean is True.
+
+    Note: The covariance can be unstable if the number of samples is not
+          sufficient. In that case it is common to regularize the covariance
+          estimate. The ``method`` parameter of this function allows the
+          covariance to be regularized in an automated way. It also allows
+          selecting between alternative estimation algorithms which
+          themselves achieve regularization. Details are described in [1].
+
+    Parameters
+    ----------
+    epochs : instance of Epochs, or a list of Epochs objects
+        The epochs.
+    keep_sample_mean : bool (default True)
+        If False, the average response over epochs is computed for
+        each event type and subtracted during the covariance
+        computation. This is useful if the evoked response from a
+        previous stimulus extends into the baseline period of the next.
+        Note. This option is only implemented for method='empirical'.
+    tmin : float | None (default None)
+        Start time for baseline. If None start at first sample.
+    tmax : float | None (default None)
+        End time for baseline. If None end at last sample.
+    projs : list of Projection | None (default None)
+        List of projectors to use in covariance calculation, or None
+        to indicate that the projectors from the epochs should be
+        inherited. If None, then projectors from all epochs must match.
+    method : str | list | None (default 'empirical')
+        The method used for covariance estimation. If 'empirical' (default),
+        the sample covariance will be computed. A list can be passed to run
+        a set of different methods.
+        If 'auto' or a list of methods, the best estimator will be determined
+        based on log-likelihood and cross-validation on unseen data as
+        described in ref. [1]. Valid methods are:
+        'empirical', the empirical or sample covariance,
+        'diagonal_fixed', a diagonal regularization as in mne.cov.regularize
+        (see MNE manual), 'ledoit_wolf', the Ledoit-Wolf estimator (see [2]),
+        'shrunk' like 'ledoit_wolf' with cross-validation for optimal alpha
+        (see scikit-learn documentation on covariance estimation), 'pca',
+        probabilistic PCA with low rank
+        (see [3]), and, 'factor_analysis', Factor Analysis with low rank
+        (see [4]). If 'auto', expands to::
+
+             ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
+
+        Note. 'ledoit_wolf' and 'pca' are similar to 'shrunk' and
+        'factor_analysis', respectively. They are not included to avoid
+        redundancy. In most cases 'shrunk' and 'factor_analysis' represent
+        more appropriate default choices.
+
+        .. versionadded:: 0.9.0
+
+    method_params : dict | None (default None)
+        Additional parameters to the estimation procedure. Only considered if
+        method is not None. Keys must correspond to the value(s) of `method`.
+        If None (default), expands to::
+
+            'empirical': {'store_precision': False, 'assume_centered': True},
+            'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
+                               'store_precision': False,
+                               'assume_centered': True},
+            'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
+            'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
+                       'store_precision': False, 'assume_centered': True},
+            'pca': {'iter_n_components': None},
+            'factor_analysis': {'iter_n_components': None}
+
+    cv : int | sklearn cross_validation object (default 3)
+        The cross validation method. Defaults to 3, which will
+        internally trigger a default 3-fold shuffle split.
+    scalings : dict | None (default None)
+        Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
+        These defaults will scale magnetometers and gradiometers
+        at the same unit.
+    n_jobs : int (default 1)
+        Number of jobs to run in parallel.
+    return_estimators : bool (default False)
+        Whether to return all estimators or only the best. Only considered
+        if method equals 'auto' or is a list of str. Defaults to False.
+    verbose : bool | str | int | None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance | list
+        The computed covariance. If method equals 'auto' or is a list of str
+        and return_estimators equals True, a list of covariance estimators is
+        returned (sorted by log-likelihood, from high to low, i.e. from best
+        to worst).
+
+    See Also
+    --------
+    compute_raw_covariance : Estimate noise covariance from raw data
+
+    References
+    ----------
+    [1] Engemann D. and Gramfort A. (2015) Automated model selection in
+        covariance estimation and spatial whitening of MEG and EEG signals,
+        vol. 108, 328-342, NeuroImage.
+    [2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
+        large-dimensional covariance matrices. Journal of Multivariate
+        Analysis 88 (2), 365 - 411.
+    [3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
+        component analysis. Journal of the Royal Statistical Society: Series
+        B (Statistical Methodology) 61 (3), 611 - 622.
+    [4] Barber, D., (2012). Bayesian reasoning and machine learning.
+        Cambridge University Press., Algorithm 21.1
+    """
+    accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
+                        'shrunk', 'pca', 'factor_analysis',)
+    msg = ('Invalid method ({method}). Accepted values (individually or '
+           'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
+
+    # scale to natural unit for best stability with MEG/EEG
+    if isinstance(scalings, dict):
+        for k, v in scalings.items():
+            if k not in ('mag', 'grad', 'eeg'):
+                raise ValueError('The keys in `scalings` must be "mag", '
+                                 '"grad" or "eeg". You gave me: %s' % k)
+    scalings = _handle_default('scalings', scalings)
+
+    _method_params = {
+        'empirical': {'store_precision': False, 'assume_centered': True},
+        'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
+                           'store_precision': False, 'assume_centered': True},
+        'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
+        'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
+                   'store_precision': False, 'assume_centered': True},
+        'pca': {'iter_n_components': None},
+        'factor_analysis': {'iter_n_components': None}
+    }
+    if isinstance(method_params, dict):
+        for key, values in method_params.items():
+            if key not in _method_params:
+                raise ValueError('key (%s) must be "%s"' %
+                                 (key, '" or "'.join(_method_params)))
+
+            _method_params[key].update(method_params[key])
+
+    # for multi condition support epochs is required to refer to a list of
+    # epochs objects
+
+    def _unpack_epochs(epochs):
+        if len(epochs.event_id) > 1:
+            epochs = [epochs[k] for k in epochs.event_id]
+        else:
+            epochs = [epochs]
+        return epochs
+
+    if not isinstance(epochs, list):
+        epochs = _unpack_epochs(epochs)
+    else:
+        epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
+
+    # check for baseline correction
+    for epochs_t in epochs:
+        if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5:
+            warnings.warn('Epochs are not baseline corrected, covariance '
+                          'matrix may be inaccurate')
+
+    for epoch in epochs:
+        epoch.info._check_consistency()
+    bads = epochs[0].info['bads']
+    if projs is None:
+        projs = cp.deepcopy(epochs[0].info['projs'])
+        # make sure Epochs are compatible
+        for epochs_t in epochs[1:]:
+            if epochs_t.proj != epochs[0].proj:
+                raise ValueError('Epochs must agree on the use of projections')
+            for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
+                if not _proj_equal(proj_a, proj_b):
+                    raise ValueError('Epochs must have same projectors')
+    else:
+        projs = cp.deepcopy(projs)
+    ch_names = epochs[0].ch_names
+
+    # make sure Epochs are compatible
+    for epochs_t in epochs[1:]:
+        if epochs_t.info['bads'] != bads:
+            raise ValueError('Epochs must have same bad channels')
+        if epochs_t.ch_names != ch_names:
+            raise ValueError('Epochs must have same channel names')
+    picks_list = _picks_by_type(epochs[0].info)
+    picks_meeg = np.concatenate([b for _, b in picks_list])
+    picks_meeg = np.sort(picks_meeg)
+    ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
+    info = epochs[0].info  # we will overwrite 'epochs'
+
+    if method == 'auto':
+        method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
+
+    if not isinstance(method, (list, tuple)):
+        method = [method]
+
+    ok_sklearn = check_version('sklearn', '0.15') is True
+    if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
+        raise ValueError('scikit-learn is not installed, `method` must be '
+                         '`empirical`')
+
+    if keep_sample_mean is False:
+        if len(method) != 1 or 'empirical' not in method:
+            raise ValueError('`keep_sample_mean=False` is only supported '
+                             'with `method="empirical"`')
+        for p, v in _method_params.items():
+            if v.get('assume_centered', None) is False:
+                raise ValueError('`assume_centered` must be True'
+                                 ' if `keep_sample_mean` is False')
+        # prepare mean covs
+        n_epoch_types = len(epochs)
+        data_mean = list(np.zeros(n_epoch_types))
+        n_samples = np.zeros(n_epoch_types, dtype=np.int)
+        n_epochs = np.zeros(n_epoch_types, dtype=np.int)
+
+        for ii, epochs_t in enumerate(epochs):
+
+            tslice = _get_tslice(epochs_t, tmin, tmax)
+            for e in epochs_t:
+                e = e[picks_meeg, tslice]
+                if not keep_sample_mean:
+                    data_mean[ii] += e
+                n_samples[ii] += e.shape[1]
+                n_epochs[ii] += 1
+
+        n_samples_epoch = n_samples // n_epochs
+        norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
+        data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
+                     in zip(n_epochs, data_mean)]
+
+    if not all(k in accepted_methods for k in method):
+        raise ValueError(msg.format(method=method))
+
+    info = pick_info(info, picks_meeg)
+    tslice = _get_tslice(epochs[0], tmin, tmax)
+    epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
+    picks_meeg = np.arange(len(picks_meeg))
+    picks_list = _picks_by_type(info)
+
+    if len(epochs) > 1:
+        epochs = np.concatenate(epochs, 0)
+    else:
+        epochs = epochs[0]
+
+    epochs = np.hstack(epochs)
+    n_samples_tot = epochs.shape[-1]
+    _check_n_samples(n_samples_tot, len(picks_meeg))
+
+    epochs = epochs.T  # sklearn | C-order
+    if ok_sklearn:
+        cov_data = _compute_covariance_auto(epochs, method=method,
+                                            method_params=_method_params,
+                                            info=info,
+                                            verbose=verbose,
+                                            cv=cv,
+                                            n_jobs=n_jobs,
+                                            # XXX expose later
+                                            stop_early=True,  # if needed.
+                                            picks_list=picks_list,
+                                            scalings=scalings)
+    else:
+        if _method_params['empirical']['assume_centered'] is True:
+            cov = epochs.T.dot(epochs) / n_samples_tot
+        else:
+            cov = np.cov(epochs.T, bias=1)
+        cov_data = {'empirical': {'data': cov}}
+
+    if keep_sample_mean is False:
+        cov = cov_data['empirical']['data']
+        # undo scaling
+        cov *= n_samples_tot
+        # ... apply pre-computed class-wise normalization
+        for mean_cov in data_mean:
+            cov -= mean_cov
+        cov /= norm_const
+
+    covs = list()
+    for this_method, data in cov_data.items():
+        cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
+                         nfree=n_samples_tot)
+        logger.info('Number of samples used : %d' % n_samples_tot)
+        logger.info('[done]')
+
+        # add extra info
+        cov.update(method=this_method, **data)
+        covs.append(cov)
+
+    if ok_sklearn:
+        msg = ['log-likelihood on unseen data (descending order):']
+        logliks = [(c['method'], c['loglik']) for c in covs]
+        logliks.sort(reverse=True, key=lambda c: c[1])
+        for k, v in logliks:
+            msg.append('%s: %0.3f' % (k, v))
+        logger.info('\n   '.join(msg))
+
+    if ok_sklearn and not return_estimators:
+        keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
+        out = covs[np.argmax(scores)]
+        logger.info('selecting best estimator: {0}'.format(out['method']))
+    elif ok_sklearn:
+        out = covs
+        out.sort(key=lambda c: c['loglik'], reverse=True)
+    else:
+        out = covs[0]
+
+    return out
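+
+# Minimal usage sketch (illustrative; 'epochs' is a hypothetical baseline-
+# corrected Epochs instance; method='auto' cross-validates the estimators
+# listed in the docstring and returns the best one):
+#
+#     >>> cov = compute_covariance(epochs, tmin=None, tmax=0,
+#     ...                          method='auto')  # doctest: +SKIP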
+
+
+def _compute_covariance_auto(data, method, info, method_params, cv,
+                             scalings, n_jobs, stop_early, picks_list,
+                             verbose):
+    """docstring for _compute_covariance_auto."""
+    from sklearn.grid_search import GridSearchCV
+    from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
+                                    EmpiricalCovariance)
+
+    # rescale to improve numerical stability
+    _apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
+    estimator_cov_info = list()
+    msg = 'Estimating covariance using %s'
+    _RegCovariance, _ShrunkCovariance = _get_covariance_classes()
+    for this_method in method:
+        data_ = data.copy()
+        name = this_method.__name__ if callable(this_method) else this_method
+        logger.info(msg % name.upper())
+
+        if this_method == 'empirical':
+            est = EmpiricalCovariance(**method_params[this_method])
+            est.fit(data_)
+            _info = None
+            estimator_cov_info.append((est, est.covariance_, _info))
+
+        elif this_method == 'diagonal_fixed':
+            est = _RegCovariance(info=info, **method_params[this_method])
+            est.fit(data_)
+            _info = None
+            estimator_cov_info.append((est, est.covariance_, _info))
+
+        elif this_method == 'ledoit_wolf':
+            shrinkages = []
+            lw = LedoitWolf(**method_params[this_method])
+
+            for ch_type, picks in picks_list:
+                lw.fit(data_[:, picks])
+                shrinkages.append((
+                    ch_type,
+                    lw.shrinkage_,
+                    picks
+                ))
+            sc = _ShrunkCovariance(shrinkage=shrinkages,
+                                   **method_params[this_method])
+            sc.fit(data_)
+            _info = None
+            estimator_cov_info.append((sc, sc.covariance_, _info))
+
+        elif this_method == 'shrunk':
+            shrinkage = method_params[this_method].pop('shrinkage')
+            tuned_parameters = [{'shrinkage': shrinkage}]
+            shrinkages = []
+            gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
+                              tuned_parameters, cv=cv)
+            for ch_type, picks in picks_list:
+                gs.fit(data_[:, picks])
+                shrinkages.append((
+                    ch_type,
+                    gs.best_estimator_.shrinkage,
+                    picks
+                ))
+            sc = _ShrunkCovariance(shrinkage=shrinkages,
+                                   **method_params[this_method])
+            sc.fit(data_)
+            _info = None
+            estimator_cov_info.append((sc, sc.covariance_, _info))
+
+        elif this_method == 'pca':
+            mp = method_params[this_method]
+            pca, _info = _auto_low_rank_model(data_, this_method,
+                                              n_jobs=n_jobs,
+                                              method_params=mp, cv=cv,
+                                              stop_early=stop_early)
+            pca.fit(data_)
+            estimator_cov_info.append((pca, pca.get_covariance(), _info))
+
+        elif this_method == 'factor_analysis':
+            mp = method_params[this_method]
+            fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
+                                             method_params=mp, cv=cv,
+                                             stop_early=stop_early)
+            fa.fit(data_)
+            estimator_cov_info.append((fa, fa.get_covariance(), _info))
+        else:
+            raise ValueError('Unsupported covariance estimation method: %s'
+                             % name)
+        logger.info('Done.')
+
+    logger.info('Using cross-validation to select the best estimator.')
+    estimators, _, _ = zip(*estimator_cov_info)
+    logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])
+
+    # undo scaling
+    for c in estimator_cov_info:
+        _undo_scaling_cov(c[1], picks_list, scalings)
+
+    out = dict()
+    estimators, covs, runtime_infos = zip(*estimator_cov_info)
+    cov_methods = [c.__name__ if callable(c) else c for c in method]
+    runtime_infos, covs = list(runtime_infos), list(covs)
+    my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
+    for this_method, runtime_info, loglik, data, est in my_zip:
+        out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
+        if runtime_info is not None:
+            out[this_method].update(runtime_info)
+
+    return out
+
+
+def _logdet(A):
+    """Compute the log det of a symmetric matrix."""
+    vals = linalg.eigh(A)[0]
+    vals = np.abs(vals)  # avoid negative values (numerical errors)
+    return np.sum(np.log(vals))
+
+
+def _gaussian_loglik_scorer(est, X, y=None):
+    """Compute the Gaussian log likelihood of X under the model in est."""
+    # mean Gaussian log density of the samples under the fitted model
+    precision = est.get_precision()
+    n_features = X.shape[1]
+    log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
+    log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
+    out = np.mean(log_like)
+    return out
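+
+# The scorer above evaluates the mean Gaussian log density
+#     mean_i(-0.5 * (x_i' P x_i + d * log(2 * pi) - log det P))
+# with P = est.get_precision() and d = n_features; higher values mean the
+# fitted covariance explains the held-out data better.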
+
+
+def _cross_val(data, est, cv, n_jobs):
+    """Helper to compute cross validation."""
+    from sklearn.cross_validation import cross_val_score
+    return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
+                                   scoring=_gaussian_loglik_scorer))
+
+
+def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
+                         stop_early=True, verbose=None):
+    """compute latent variable models."""
+    method_params = cp.deepcopy(method_params)
+    iter_n_components = method_params.pop('iter_n_components')
+    if iter_n_components is None:
+        iter_n_components = np.arange(5, data.shape[1], 5)
+    from sklearn.decomposition import PCA, FactorAnalysis
+    if mode == 'factor_analysis':
+        est = FactorAnalysis
+    elif mode == 'pca':
+        est = PCA
+    else:
+        raise ValueError('Invalid low-rank estimator mode: %s' % mode)
+    est = est(**method_params)
+    est.n_components = 1
+    scores = np.empty_like(iter_n_components, dtype=np.float64)
+    scores.fill(np.nan)
+
+    # make sure we don't empty the thing if it's a generator
+    max_n = max(list(cp.deepcopy(iter_n_components)))
+    if max_n > data.shape[1]:
+        warnings.warn('You are trying to estimate %i components on matrix '
+                      'with %i features.' % (max_n, data.shape[1]))
+
+    for ii, n in enumerate(iter_n_components):
+        est.n_components = n
+        try:  # this may fail depending on rank and split
+            score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
+        except ValueError:
+            score = np.inf
+        if np.isinf(score) or score > 0:
+            logger.info('... infinite values encountered. stopping estimation')
+            break
+        logger.info('... rank: %i - loglik: %0.3f' % (n, score))
+        if score != -np.inf:
+            scores[ii] = score
+
+        if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
+           stop_early is True):
+            # early stop search when loglik has been going down 3 times
+            logger.info('early stopping parameter search.')
+            break
+
+    # happens if rank is too low right from the beginning
+    if np.isnan(scores).all():
+        raise RuntimeError('Oh no! Could not estimate covariance because all '
+                           'scores were NaN. Please contact the MNE-Python '
+                           'developers.')
+
+    i_score = np.nanargmax(scores)
+    best = est.n_components = iter_n_components[i_score]
+    logger.info('... best model at rank = %i' % best)
+    runtime_info = {'ranks': np.array(iter_n_components),
+                    'scores': scores,
+                    'best': best,
+                    'cv': cv}
+    return est, runtime_info
+
+
+def _get_covariance_classes():
+    """Prepare special cov estimators."""
+    from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
+                                    ShrunkCovariance)
+
+    class _RegCovariance(EmpiricalCovariance):
+
+        """Aux class."""
+
+        def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
+                     store_precision=False, assume_centered=False):
+            self.info = info
+            self.grad = grad
+            self.mag = mag
+            self.eeg = eeg
+            self.store_precision = store_precision
+            self.assume_centered = assume_centered
+
+        def fit(self, X):
+            EmpiricalCovariance.fit(self, X)
+            self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
+            cov_ = Covariance(
+                data=self.covariance_, names=self.info['ch_names'],
+                bads=self.info['bads'], projs=self.info['projs'],
+                nfree=len(self.covariance_))
+            cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
+                              eeg=self.eeg, proj=False,
+                              exclude='bads')  # ~proj == important!!
+            self.covariance_ = cov_.data
+            return self
+
+    class _ShrunkCovariance(ShrunkCovariance):
+
+        """Aux class."""
+
+        def __init__(self, store_precision, assume_centered, shrinkage=0.1):
+            self.store_precision = store_precision
+            self.assume_centered = assume_centered
+            self.shrinkage = shrinkage
+
+        def fit(self, X):
+            EmpiricalCovariance.fit(self, X)
+            cov = self.covariance_
+
+            if not isinstance(self.shrinkage, (list, tuple)):
+                shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
+            else:
+                shrinkage = self.shrinkage
+
+            zero_cross_cov = np.zeros_like(cov, dtype=bool)
+            for a, b in itt.combinations(shrinkage, 2):
+                picks_i, picks_j = a[2], b[2]
+                ch_ = a[0], b[0]
+                if 'eeg' in ch_:
+                    zero_cross_cov[np.ix_(picks_i, picks_j)] = True
+                    zero_cross_cov[np.ix_(picks_j, picks_i)] = True
+
+            self.zero_cross_cov_ = zero_cross_cov
+
+            # Apply shrinkage to blocks
+            for ch_type, c, picks in shrinkage:
+                sub_cov = cov[np.ix_(picks, picks)]
+                cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
+                                                              shrinkage=c)
+
+            # Apply shrinkage to cross-cov
+            for a, b in itt.combinations(shrinkage, 2):
+                shrinkage_i, shrinkage_j = a[1], b[1]
+                picks_i, picks_j = a[2], b[2]
+                c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
+                cov[np.ix_(picks_i, picks_j)] *= c_ij
+                cov[np.ix_(picks_j, picks_i)] *= c_ij
+
+            # Set to zero the necessary cross-cov
+            if np.any(zero_cross_cov):
+                cov[zero_cross_cov] = 0.0
+
+            self.covariance_ = cov
+            return self
+
+        def score(self, X_test, y=None):
+            """Compute the log-likelihood of a Gaussian data set with
+            `self.covariance_` as an estimator of its covariance matrix.
+
+            Parameters
+            ----------
+            X_test : array-like, shape = [n_samples, n_features]
+                Test data of which we compute the likelihood, where n_samples
+                is the number of samples and n_features is the number of
+                features. X_test is assumed to be drawn from the same
+                distribution as the data used in fit (including centering).
+
+            y : not used, present for API consistency purposes.
+
+            Returns
+            -------
+            res : float
+                The likelihood of the data set with `self.covariance_` as an
+                estimator of its covariance matrix.
+            """
+            from sklearn.covariance import empirical_covariance, log_likelihood
+            # compute empirical covariance of the test set
+            test_cov = empirical_covariance(X_test - self.location_,
+                                            assume_centered=True)
+            if np.any(self.zero_cross_cov_):
+                test_cov[self.zero_cross_cov_] = 0.
+            res = log_likelihood(test_cov, self.get_precision())
+            return res
+
+    return _RegCovariance, _ShrunkCovariance
+
+
+###############################################################################
+# Writing
+
+def write_cov(fname, cov):
+    """Write a noise covariance matrix.
+
+    Parameters
+    ----------
+    fname : string
+        The name of the file. It should end with -cov.fif or -cov.fif.gz.
+    cov : Covariance
+        The noise covariance matrix
+
+    See Also
+    --------
+    read_cov
+    """
+    cov.save(fname)
+
+
+###############################################################################
+# Prepare for inverse modeling
+
+def _unpack_epochs(epochs):
+    """Aux Function."""
+    if len(epochs.event_id) > 1:
+        epochs = [epochs[k] for k in epochs.event_id]
+    else:
+        epochs = [epochs]
+
+    return epochs
+
+
+def _get_ch_whitener(A, pca, ch_type, rank):
+    """"Get whitener params for a set of channels."""
+    # whitening operator
+    eig, eigvec = linalg.eigh(A, overwrite_a=True)
+    eigvec = eigvec.T
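+    # linalg.eigh returns eigenvalues in ascending order, so zeroing
+    # eig[:-rank] keeps only the `rank` largest components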
+    eig[:-rank] = 0.0
+
+    logger.info('Setting small %s eigenvalues to zero.' % ch_type)
+    if not pca:  # No PCA case.
+        logger.info('Not doing PCA for %s.' % ch_type)
+    else:
+        logger.info('Doing PCA for %s.' % ch_type)
+        # This line will reduce the actual number of variables in data
+        # and leadfield to the true rank.
+        eigvec = eigvec[:-rank].copy()
+    return eig, eigvec
+
+
+@verbose
+def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
+                      scalings=None, verbose=None):
+    """Prepare noise covariance matrix.
+
+    Parameters
+    ----------
+    noise_cov : Covariance
+        The noise covariance to process.
+    info : dict
+        The measurement info (used to get channel types and bad channels).
+    ch_names : list
+        The channel names to be considered.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None
+        Data will be rescaled before rank estimation to improve accuracy.
+        If dict, it will override the following dict (default if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
+    if noise_cov['diag'] is False:
+        C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
+    else:
+        C = np.diag(noise_cov.data[C_ch_idx])
+
+    scalings = _handle_default('scalings_cov_rank', scalings)
+
+    # Create the projection operator
+    proj, ncomp, _ = make_projector(info['projs'], ch_names)
+    if ncomp > 0:
+        logger.info('    Created an SSP operator (subspace dimension = %d)'
+                    % ncomp)
+        C = np.dot(proj, np.dot(C, proj.T))
+
+    pick_meg = pick_types(info, meg=True, eeg=False, ref_meg=False,
+                          exclude='bads')
+    pick_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                          exclude='bads')
+    meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
+    C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
+    eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
+    C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
+
+    has_meg = len(C_meg_idx) > 0
+    has_eeg = len(C_eeg_idx) > 0
+
+    # Get the specified noise covariance rank
+    if rank is not None:
+        if isinstance(rank, dict):
+            rank_meg = rank.get('meg', None)
+            rank_eeg = rank.get('eeg', None)
+        else:
+            rank_meg = int(rank)
+            rank_eeg = None
+    else:
+        rank_meg, rank_eeg = None, None
+
+    if has_meg:
+        C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
+        this_info = pick_info(info, pick_meg)
+        if rank_meg is None:
+            if len(C_meg_idx) < len(pick_meg):
+                this_info = pick_info(info, C_meg_idx)
+            rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
+        C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
+                                                   rank_meg)
+    if has_eeg:
+        C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
+        this_info = pick_info(info, pick_eeg)
+        if rank_eeg is None:
+            if len(C_eeg_idx) < len(pick_eeg):
+                this_info = pick_info(info, C_eeg_idx)
+            rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
+        C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
+                                                   rank_eeg)
+        if not _has_eeg_average_ref_proj(info['projs']):
+            warnings.warn('No average EEG reference present in info["projs"], '
+                          'covariance may be adversely affected. Consider '
+                          'recomputing covariance using a raw file with an '
+                          'average eeg reference projector added.')
+
+    n_chan = len(ch_names)
+    eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
+    eig = np.zeros(n_chan, dtype=np.float)
+
+    if has_meg:
+        eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
+        eig[C_meg_idx] = C_meg_eig
+    if has_eeg:
+        eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
+        eig[C_eeg_idx] = C_eeg_eig
+
+    assert len(C_meg_idx) + len(C_eeg_idx) == n_chan
+
+    noise_cov = cp.deepcopy(noise_cov)
+    noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
+                     diag=False, names=ch_names)
+
+    return noise_cov
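+
+# Usage sketch (hypothetical; assumes `noise_cov` and an Evoked `evoked`
+# are in scope). The channel subset excludes bads, matching how the
+# inverse-modeling routines call this function:
+#
+#     ch_names = [c for c in evoked.ch_names
+#                 if c not in evoked.info['bads']]
+#     cov_prep = prepare_noise_cov(noise_cov, evoked.info, ch_names)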
+
+
+def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
+               proj=True, verbose=None):
+    """Regularize noise covariance matrix.
+
+    This method works by adding a constant, proportional to the average
+    variance of each channel type, to the diagonal of the corresponding
+    block. Special care is taken to keep the rank of the data constant.
+
+    **Note:** This function is kept for backward compatibility. Please
+    consider explicitly using the ``method`` parameter in
+    `compute_covariance` to directly combine estimation with
+    regularization in a data-driven fashion; see the
+    `faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
+    for more information.
+
+    Parameters
+    ----------
+    cov : Covariance
+        The noise covariance matrix.
+    info : dict
+        The measurement info (used to get channel types and bad channels).
+    mag : float (default 0.1)
+        Regularization factor for MEG magnetometers.
+    grad : float (default 0.1)
+        Regularization factor for MEG gradiometers.
+    eeg : float (default 0.1)
+        Regularization factor for EEG.
+    exclude : list | 'bads' (default 'bads')
+        List of channels to mark as bad. If 'bads', bad channels
+        are extracted from both info['bads'] and cov['bads'].
+    proj : bool (default True)
+        Whether to apply projections to keep the rank of the data constant.
+    verbose : bool | str | int | None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    reg_cov : Covariance
+        The regularized covariance matrix.
+
+    See Also
+    --------
+    compute_covariance
+    """  # noqa
+    cov = cp.deepcopy(cov)
+    info._check_consistency()
+
+    if exclude is None:
+        raise ValueError('exclude must be a list of strings or "bads"')
+
+    if exclude == 'bads':
+        exclude = info['bads'] + cov['bads']
+
+    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude=exclude)
+    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
+                         exclude=exclude)
+    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
+                          exclude=exclude)
+
+    info_ch_names = info['ch_names']
+    ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
+    ch_names_mag = [info_ch_names[i] for i in sel_mag]
+    ch_names_grad = [info_ch_names[i] for i in sel_grad]
+
+    # This actually removes bad channels from the cov, which is not backward
+    # compatible, so let's leave all channels in
+    cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
+    ch_names = cov_good.ch_names
+
+    idx_eeg, idx_mag, idx_grad = [], [], []
+    for i, ch in enumerate(ch_names):
+        if ch in ch_names_eeg:
+            idx_eeg.append(i)
+        elif ch in ch_names_mag:
+            idx_mag.append(i)
+        elif ch in ch_names_grad:
+            idx_grad.append(i)
+        else:
+            raise Exception('channel %s is of unknown type' % ch)
+
+    C = cov_good['data']
+
+    assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))
+
+    if proj:
+        projs = info['projs'] + cov_good['projs']
+        projs = activate_proj(projs)
+
+    for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
+                           ('GRAD', idx_grad, grad)]:
+        if len(idx) == 0 or reg == 0.0:
+            logger.info("    %s regularization : None" % desc)
+            continue
+
+        logger.info("    %s regularization : %s" % (desc, reg))
+
+        this_C = C[np.ix_(idx, idx)]
+        if proj:
+            this_ch_names = [ch_names[k] for k in idx]
+            P, ncomp, _ = make_projector(projs, this_ch_names)
+            U = linalg.svd(P)[0][:, :-ncomp]
+            if ncomp > 0:
+                logger.info('    Created an SSP operator for %s '
+                            '(dimension = %d)' % (desc, ncomp))
+                this_C = np.dot(U.T, np.dot(this_C, U))
+
+        sigma = np.mean(np.diag(this_C))
+        this_C.flat[::len(this_C) + 1] += reg * sigma  # modify diag inplace
+        if proj and ncomp > 0:
+            this_C = np.dot(U, np.dot(this_C, U.T))
+
+        C[np.ix_(idx, idx)] = this_C
+
+    # Put data back in correct locations
+    idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
+    cov['data'][np.ix_(idx, idx)] = C
+
+    return cov
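+
+# Usage sketch (hypothetical values; assumes `cov` and a measurement
+# `info` are in scope):
+#
+#     reg_cov = regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1)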
+
+
+def _regularized_covariance(data, reg=None):
+    """Compute a regularized covariance from data using sklearn.
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_channels, n_times)
+        Data for covariance estimation.
+    reg : float | str | None (default None)
+        If not None, allow regularization for covariance estimation.
+        If float, shrinkage covariance is used (0 <= shrinkage <= 1).
+        If str, optimal shrinkage is used via Ledoit-Wolf shrinkage
+        ('ledoit_wolf') or Oracle Approximating Shrinkage ('oas').
+
+    Returns
+    -------
+    cov : ndarray, shape (n_channels, n_channels)
+        The covariance matrix.
+    """
+    if reg is None:
+        # compute empirical covariance
+        cov = np.cov(data)
+    else:
+        no_sklearn_err = ('the scikit-learn package is missing and '
+                          'required for covariance regularization.')
+        # use sklearn covariance estimators
+        if isinstance(reg, float):
+            if (reg < 0) or (reg > 1):
+                raise ValueError('0 <= shrinkage <= 1 for '
+                                 'covariance regularization.')
+            try:
+                import sklearn
+                sklearn_version = LooseVersion(sklearn.__version__)
+                from sklearn.covariance import ShrunkCovariance
+            except ImportError:
+                raise Exception(no_sklearn_err)
+            if sklearn_version < '0.12':
+                skl_cov = ShrunkCovariance(shrinkage=reg,
+                                           store_precision=False)
+            else:
+                # init sklearn.covariance.ShrunkCovariance estimator
+                skl_cov = ShrunkCovariance(shrinkage=reg,
+                                           store_precision=False,
+                                           assume_centered=True)
+        elif isinstance(reg, six.string_types):
+            if reg == 'ledoit_wolf':
+                try:
+                    from sklearn.covariance import LedoitWolf
+                except ImportError:
+                    raise Exception(no_sklearn_err)
+                # init sklearn.covariance.LedoitWolf estimator
+                skl_cov = LedoitWolf(store_precision=False,
+                                     assume_centered=True)
+            elif reg == 'oas':
+                try:
+                    from sklearn.covariance import OAS
+                except ImportError:
+                    raise Exception(no_sklearn_err)
+                # init sklearn.covariance.OAS estimator
+                skl_cov = OAS(store_precision=False,
+                              assume_centered=True)
+            else:
+                raise ValueError("regularization parameter should be "
+                                 "'lwf' or 'oas'")
+        else:
+            raise ValueError("regularization parameter should be "
+                             "of type str or int (got %s)." % type(reg))
+
+        # compute regularized covariance using sklearn
+        cov = skl_cov.fit(data.T).covariance_
+
+    return cov
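+
+# Usage sketch (hypothetical; `data` is an (n_channels, n_times) ndarray):
+#
+#     cov_oas = _regularized_covariance(data, reg='oas')  # needs sklearn
+#     cov_emp = _regularized_covariance(data, reg=None)   # plain np.cov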
+
+
+def compute_whitener(noise_cov, info, picks=None, rank=None,
+                     scalings=None, verbose=None):
+    """Compute whitening matrix.
+
+    Parameters
+    ----------
+    noise_cov : Covariance
+        The noise covariance.
+    info : dict
+        The measurement info.
+    picks : array-like of int | None
+        The channels indices to include. If None the data
+        channels in info, except bad channels, are used.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None
+        The rescaling method to be applied. See documentation of
+        ``prepare_noise_cov`` for details.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    W : 2d array
+        The whitening matrix.
+    ch_names : list
+        The channel names.
+    """
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
+
+    noise_cov = cp.deepcopy(noise_cov)
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
+                                  rank=rank, scalings=scalings)
+    n_chan = len(ch_names)
+
+    W = np.zeros((n_chan, n_chan), dtype=np.float)
+    #
+    #   Omit the zeroes due to projection
+    #
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+    #
+    #   Rows of eigvec are the eigenvectors
+    #
+    W = np.dot(W, noise_cov['eigvec'])
+    W = np.dot(noise_cov['eigvec'].T, W)
+    return W, ch_names
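+
+# Usage sketch (hypothetical `data` array; `W` only covers the channels
+# returned in `ch_names`, so index the data accordingly):
+#
+#     W, ch_names = compute_whitener(noise_cov, info)
+#     picks = [info['ch_names'].index(c) for c in ch_names]
+#     data_white = np.dot(W, data[picks])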
+
+
+@verbose
+def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
+                  scalings=None, verbose=None):
+    """Whiten evoked data using given noise covariance.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    picks : array-like of int | None
+        The channel indices to whiten. Can be None to whiten MEG and EEG
+        data.
+    diag : bool (default False)
+        If True, whiten using only the diagonal of the covariance.
+    rank : None | int | dict (default None)
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None (default None)
+        To achieve reliable rank estimation on multiple sensors,
+        sensors have to be rescaled. This parameter controls the
+        rescaling. If dict, it will override the
+        following default dict (default if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked_white : instance of Evoked
+        The whitened evoked data.
+    """
+    evoked = cp.deepcopy(evoked)
+    if picks is None:
+        picks = pick_types(evoked.info, meg=True, eeg=True)
+    W = _get_whitener_data(evoked.info, noise_cov, picks,
+                           diag=diag, rank=rank, scalings=scalings)
+    evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
+    return evoked
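+
+# Usage sketch (hypothetical; after whitening, amplitudes are in units of
+# noise standard deviations, so baseline samples should scatter around
+# +/- 1):
+#
+#     evoked_white = whiten_evoked(evoked, noise_cov, diag=False)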
+
+
+@verbose
+def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
+                       scalings=None, verbose=None):
+    """Get whitening matrix for a set of data."""
+    ch_names = [info['ch_names'][k] for k in picks]
+    noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
+    info = pick_info(info, picks)
+    if diag:
+        noise_cov = cp.deepcopy(noise_cov)
+        noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
+
+    scalings = _handle_default('scalings_cov_rank', scalings)
+    W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
+    return W
+
+
+@verbose
+def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
+    """Read a noise covariance matrix."""
+    #   Find all covariance matrices
+    covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
+    if len(covs) == 0:
+        raise ValueError('No covariance matrices found')
+
+    #   Is any of the covariance matrices a noise covariance
+    for p in range(len(covs)):
+        tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
+
+        if tag is not None and int(tag.data) == cov_kind:
+            this = covs[p]
+
+            #   Find all the necessary data
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
+            if tag is None:
+                raise ValueError('Covariance matrix dimension not found')
+            dim = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
+            if tag is None:
+                nfree = -1
+            else:
+                nfree = int(tag.data)
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
+            if tag is None:
+                method = None
+            else:
+                method = tag.data
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
+            if tag is None:
+                score = None
+            else:
+                score = tag.data[0]
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
+            if tag is None:
+                names = []
+            else:
+                names = tag.data.split(':')
+                if len(names) != dim:
+                    raise ValueError('Number of names does not match '
+                                     'covariance matrix dimension')
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
+            if tag is None:
+                tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
+                if tag is None:
+                    raise ValueError('No covariance matrix data found')
+                else:
+                    #   Diagonal is stored
+                    data = tag.data
+                    diag = True
+                    logger.info('    %d x %d diagonal covariance (kind = '
+                                '%d) found.' % (dim, dim, cov_kind))
+
+            else:
+                from scipy import sparse
+                if not sparse.issparse(tag.data):
+                    #   Lower diagonal is stored
+                    vals = tag.data
+                    data = np.zeros((dim, dim))
+                    data[np.tril(np.ones((dim, dim))) > 0] = vals
+                    data = data + data.T
+                    data.flat[::dim + 1] /= 2.0
+                    diag = False
+                    logger.info('    %d x %d full covariance (kind = %d) '
+                                'found.' % (dim, dim, cov_kind))
+                else:
+                    diag = False
+                    data = tag.data
+                    logger.info('    %d x %d sparse covariance (kind = %d)'
+                                ' found.' % (dim, dim, cov_kind))
+
+            #   Read the possibly precomputed decomposition
+            tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
+            tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
+            if tag1 is not None and tag2 is not None:
+                eig = tag1.data
+                eigvec = tag2.data
+            else:
+                eig = None
+                eigvec = None
+
+            #   Read the projection operator
+            projs = _read_proj(fid, this)
+
+            #   Read the bad channel list
+            bads = read_bad_channels(fid, this)
+
+            #   Put it together
+            assert dim == len(data)
+            assert data.ndim == (1 if diag else 2)
+            cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
+                       data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
+                       eigvec=eigvec)
+            if score is not None:
+                cov['loglik'] = score
+            if method is not None:
+                cov['method'] = method
+            if limited:
+                del cov['kind'], cov['dim'], cov['diag']
+
+            return cov
+
+    logger.info('    Did not find the desired covariance matrix (kind = %d)'
+                % cov_kind)
+
+    return None
+
+
+def _write_cov(fid, cov):
+    """Write a noise covariance matrix."""
+    start_block(fid, FIFF.FIFFB_MNE_COV)
+
+    #   Dimensions etc.
+    write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
+    write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
+    if cov['nfree'] > 0:
+        write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
+
+    #   Channel names
+    if cov['names'] is not None and len(cov['names']) > 0:
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
+
+    #   Data
+    if cov['diag']:
+        write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
+    else:
+        # Store only lower part of covariance matrix
+        dim = cov['dim']
+        mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
+        vals = cov['data'][mask].ravel()
+        write_double(fid, FIFF.FIFF_MNE_COV, vals)
+
+    #   Eigenvalues and vectors if present
+    if cov['eig'] is not None and cov['eigvec'] is not None:
+        write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
+        write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
+
+    #   Projection operator
+    if cov['projs'] is not None and len(cov['projs']) > 0:
+        _write_proj(fid, cov['projs'])
+
+    #   Bad channels
+    if cov['bads'] is not None and len(cov['bads']) > 0:
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    # estimator method
+    if 'method' in cov:
+        write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
+
+    # negative log-likelihood score
+    if 'loglik' in cov:
+        write_double(
+            fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
+
+    #   Done!
+    end_block(fid, FIFF.FIFFB_MNE_COV)
+
+
+def _apply_scaling_array(data, picks_list, scalings):
+    """Scale data type-dependently for estimation."""
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        picks_dict = dict(picks_list)
+        scalings = [(picks_dict[k], v) for k, v in scalings.items()
+                    if k in picks_dict]
+        for idx, scaling in scalings:
+            data[idx, :] *= scaling  # F - order
+    else:
+        data *= scalings[:, np.newaxis]  # F - order
+
+
+def _undo_scaling_array(data, picks_list, scalings):
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        scalings = dict((k, 1. / v) for k, v in scalings.items())
+    elif isinstance(scalings, np.ndarray):
+        scalings = 1. / scalings
+    return _apply_scaling_array(data, picks_list, scalings)
+
+
+def _apply_scaling_cov(data, picks_list, scalings):
+    """Scale resulting data after estimation."""
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    scales = None
+    if isinstance(scalings, dict):
+        n_channels = len(data)
+        covinds = list(zip(*picks_list))[1]
+        assert len(data) == sum(len(k) for k in covinds)
+        assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
+        scales = np.zeros(n_channels)
+        for ch_t, idx in picks_list:
+            scales[idx] = scalings[ch_t]
+    elif isinstance(scalings, np.ndarray):
+        if len(scalings) != len(data):
+            raise ValueError('Scaling factors and data are of incompatible '
+                             'shape')
+        scales = scalings
+    elif scalings is None:
+        pass
+    else:
+        raise RuntimeError('scalings must be dict, ndarray, or None; '
+                           'got %s' % type(scalings))
+    if scales is not None:
+        assert np.sum(scales == 0.) == 0
+        data *= (scales[None, :] * scales[:, None])
+
+
+def _undo_scaling_cov(data, picks_list, scalings):
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        scalings = dict((k, 1. / v) for k, v in scalings.items())
+    elif isinstance(scalings, np.ndarray):
+        scalings = 1. / scalings
+    return _apply_scaling_cov(data, picks_list, scalings)
+
+
+def _check_scaling_inputs(data, picks_list, scalings):
+    """Aux function."""
+    rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
+
+    scalings_ = None
+    if isinstance(scalings, string_types) and scalings == 'norm':
+        scalings_ = 1. / _compute_row_norms(data)
+    elif isinstance(scalings, dict):
+        rescale_dict_.update(scalings)
+        scalings_ = rescale_dict_
+    elif isinstance(scalings, np.ndarray):
+        scalings_ = scalings
+    elif scalings is None:
+        pass
+    else:
+        raise NotImplementedError("No way! That's not a rescaling "
+                                  'option: %s' % scalings)
+    return scalings_
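+
+# The _apply_*/_undo_* helpers above are exact inverses of each other; the
+# intended round-trip (hypothetical `data` and `picks_list`) looks like:
+#
+#     _apply_scaling_array(data, picks_list, dict(mag=1e15))
+#     ...  # rank estimation on the rescaled data
+#     _undo_scaling_array(data, picks_list, dict(mag=1e15))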
+
+
+def _estimate_rank_meeg_signals(data, info, scalings, tol=1e-4,
+                                return_singular=False, copy=True):
+    """Estimate rank for M/EEG data.
+
+    Parameters
+    ----------
+    data : np.ndarray of float, shape (n_channels, n_samples)
+        The M/EEG signals.
+    info : mne.io.measurement_info.Info
+        The measurement info.
+    scalings : dict | 'norm' | np.ndarray | None
+        The rescaling method to be applied. If dict, it will override the
+        following default dict:
+
+            dict(mag=1e15, grad=1e13, eeg=1e6)
+
+        If 'norm', data will be scaled by channel-wise norms. If array,
+        pre-specified norms will be used. If None, no scaling will be applied.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    picks_list = _picks_by_type(info)
+    _apply_scaling_array(data, picks_list, scalings)
+    if data.shape[1] < data.shape[0]:
+        logger.warning("You've got fewer samples than channels, your "
+                       "rank estimate might be inaccurate.")
+    out = estimate_rank(data, tol=tol, norm=False,
+                        return_singular=return_singular, copy=copy)
+    rank = out[0] if isinstance(out, tuple) else out
+    ch_type = ' + '.join(list(zip(*picks_list))[0])
+    logger.info('estimated rank (%s): %d' % (ch_type, rank))
+    _undo_scaling_array(data, picks_list, scalings)
+    return out
+
+
+def _estimate_rank_meeg_cov(data, info, scalings, tol=1e-4,
+                            return_singular=False, copy=True):
+    """Estimate rank for M/EEG data.
+
+    Parameters
+    ----------
+    data : np.ndarray of float, shape (n_channels, n_channels)
+        The M/EEG covariance.
+    info : mne.io.measurement_info.Info
+        The measurement info.
+    scalings : dict | 'norm' | np.ndarray | None
+        The rescaling method to be applied. If dict, it will override the
+        following default dict:
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+        If 'norm', data will be scaled by channel-wise norms. If array,
+        pre-specified norms will be used. If None, no scaling will be applied.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    picks_list = _picks_by_type(info)
+    scalings = _handle_default('scalings_cov_rank', scalings)
+    _apply_scaling_cov(data, picks_list, scalings)
+    if data.shape[1] < data.shape[0]:
+        logger.warning("You've got fewer samples than channels, your "
+                       "rank estimate might be inaccurate.")
+    out = estimate_rank(data, tol=tol, norm=False,
+                        return_singular=return_singular, copy=copy)
+    rank = out[0] if isinstance(out, tuple) else out
+    ch_type = ' + '.join(list(zip(*picks_list))[0])
+    logger.info('estimated rank (%s): %d' % (ch_type, rank))
+    _undo_scaling_cov(data, picks_list, scalings)
+    return out
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cuda.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cuda.py
new file mode 100644
index 0000000..e17b0be
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/cuda.py
@@ -0,0 +1,384 @@
+# Authors: Eric Larson <larson.eric.d@gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.fftpack import fft, ifft
+
+from .utils import sizeof_fmt, logger, get_config
+
+
+# Support CUDA for FFTs; requires scikits.cuda and pycuda
+_cuda_capable = False
+_multiply_inplace_c128 = _halve_c128 = _real_c128 = None
+
+
+def _get_cudafft():
+    """Helper to deal with scikit-cuda namespace change"""
+    try:
+        from skcuda import fft
+    except ImportError:
+        try:
+            from scikits.cuda import fft
+        except ImportError:
+            fft = None
+    return fft
+
+
+def get_cuda_memory():
+    """Get the amount of free memory for CUDA operations
+
+    Returns
+    -------
+    memory : str
+        The amount of available memory as a human-readable string.
+    """
+    if not _cuda_capable:
+        logger.warning('CUDA not enabled, returning zero for memory')
+        mem = 0
+    else:
+        from pycuda.driver import mem_get_info
+        mem = mem_get_info()[0]
+    return sizeof_fmt(mem)
+
+
+def init_cuda(ignore_config=False):
+    """Initialize CUDA functionality
+
+    This function attempts to load the necessary interfaces
+    (hardware connectivity) to run CUDA-based filtering. This
+    function should only need to be run once per session.
+
+    If the configuration variable MNE_USE_CUDA (set via mne.set_config
+    or in the environment) is 'true', this function will be executed
+    automatically when the first CUDA setup is performed. If this
+    variable is not set, this function can be executed manually.
+    """
+    global _cuda_capable, _multiply_inplace_c128, _halve_c128, _real_c128
+    if _cuda_capable:
+        return
+    if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
+                              'true'):
+        logger.info('CUDA not enabled in config, skipping initialization')
+        return
+    # Triage possible errors for informative messaging
+    _cuda_capable = False
+    try:
+        from pycuda import gpuarray, driver  # noqa
+        from pycuda.elementwise import ElementwiseKernel
+    except ImportError:
+        logger.warning('module pycuda not found, CUDA not enabled')
+        return
+    try:
+        # Initialize CUDA; happens with importing autoinit
+        import pycuda.autoinit  # noqa
+    except ImportError:
+        logger.warning('pycuda.autoinit could not be imported, likely '
+                       'a hardware error, CUDA not enabled')
+        return
+    # Make sure scikit-cuda is installed
+    cudafft = _get_cudafft()
+    if cudafft is None:
+        logger.warning('module scikit-cuda not found, CUDA not '
+                       'enabled')
+        return
+
+    # let's construct our own CUDA multiply in-place function
+    _multiply_inplace_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a, pycuda::complex<double> *b',
+        'b[i] *= a[i]', 'multiply_inplace')
+    _halve_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a', 'a[i] /= 2.0', 'halve_value')
+    _real_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a', 'a[i] = real(a[i])', 'real_value')
+
+    # Make sure we can use 64-bit FFTs
+    try:
+        cudafft.Plan(16, np.float64, np.complex128)  # will get auto-GC'ed
+    except Exception:
+        logger.warning('Device does not support 64-bit FFTs, '
+                       'CUDA not enabled')
+        return
+    _cuda_capable = True
+    # Report the available GPU memory for CUDA FFT calculations
+    logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
+
+
+###############################################################################
+# Repeated FFT multiplication
+
+def setup_cuda_fft_multiply_repeated(n_jobs, h_fft):
+    """Set up repeated CUDA FFT multiplication with a given filter
+
+    Parameters
+    ----------
+    n_jobs : int | str
+        If n_jobs == 'cuda', the function will attempt to set up for CUDA
+        FFT multiplication.
+    h_fft : array
+        The filtering function that will be used repeatedly.
+        If n_jobs='cuda', this function will be shortened (since CUDA
+        assumes FFTs of real signals are half the length of the signal)
+        and turned into a gpuarray.
+
+    Returns
+    -------
+    n_jobs : int
+        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
+        original n_jobs is passed.
+    cuda_dict : dict
+        Dictionary with the following CUDA-related variables:
+            use_cuda : bool
+                Whether CUDA should be used.
+            fft_plan : instance of FFTPlan
+                FFT plan to use in calculating the FFT.
+            ifft_plan : instance of FFTPlan
+                FFT plan to use in calculating the IFFT.
+            x_fft : instance of gpuarray
+                Empty allocated GPU space for storing the result of the
+                frequency-domain multiplication.
+            x : instance of gpuarray
+                Empty allocated GPU space for the data to filter.
+    h_fft : array | instance of gpuarray
+        This will either be a gpuarray (if CUDA enabled) or np.ndarray.
+        If CUDA is enabled, h_fft will be modified appropriately for use
+        with fft_multiply_repeated().
+
+    Notes
+    -----
+    This function is designed to be used with fft_multiply_repeated().
+    """
+    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
+                     x_fft=None, x=None)
+    n_fft = len(h_fft)
+    cuda_fft_len = int((n_fft - (n_fft % 2)) / 2 + 1)
+    if n_jobs == 'cuda':
+        n_jobs = 1
+        init_cuda()
+        if _cuda_capable:
+            from pycuda import gpuarray
+            cudafft = _get_cudafft()
+            # set up all arrays necessary for CUDA
+            # try setting up for float64
+            try:
+                # do the IFFT normalization now so we don't have to later
+                h_fft = gpuarray.to_gpu(h_fft[:cuda_fft_len]
+                                        .astype('complex_') / len(h_fft))
+                cuda_dict.update(
+                    use_cuda=True,
+                    fft_plan=cudafft.Plan(n_fft, np.float64, np.complex128),
+                    ifft_plan=cudafft.Plan(n_fft, np.complex128, np.float64),
+                    x_fft=gpuarray.empty(cuda_fft_len, np.complex128),
+                    x=gpuarray.empty(int(n_fft), np.float64))
+                logger.info('Using CUDA for FFT FIR filtering')
+            except Exception:
+                logger.info('CUDA not used, could not instantiate memory '
+                            '(arrays may be too large), falling back to '
+                            'n_jobs=1')
+        else:
+            logger.info('CUDA not used, CUDA could not be initialized, '
+                        'falling back to n_jobs=1')
+    return n_jobs, cuda_dict, h_fft
+
+
+def fft_multiply_repeated(h_fft, x, cuda_dict=dict(use_cuda=False)):
+    """Do FFT multiplication by a filter function (possibly using CUDA)
+
+    Parameters
+    ----------
+    h_fft : 1-d array or gpuarray
+        The filtering array to apply.
+    x : 1-d array
+        The array to filter.
+    cuda_dict : dict
+        Dictionary constructed using setup_cuda_fft_multiply_repeated().
+
+    Returns
+    -------
+    x : 1-d array
+        Filtered version of x.
+    """
+    if not cuda_dict['use_cuda']:
+        # do the fourier-domain operations
+        x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
+    else:
+        cudafft = _get_cudafft()
+        # do the fourier-domain operations, results in second param
+        cuda_dict['x'].set(x.astype(np.float64))
+        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
+        _multiply_inplace_c128(h_fft, cuda_dict['x_fft'])
+        # If we wanted to do it locally instead of using our own kernel:
+        # cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
+        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
+                     cuda_dict['ifft_plan'], False)
+        x = np.array(cuda_dict['x'].get(), dtype=x.dtype, subok=True,
+                     copy=False)
+    return x
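+
+# Usage sketch tying the two functions together (hypothetical filter
+# spectrum `h_fft` and signal `x` of matching length):
+#
+#     n_jobs, cuda_dict, h_fft = setup_cuda_fft_multiply_repeated(
+#         'cuda', h_fft)
+#     x_filt = fft_multiply_repeated(h_fft, x, cuda_dict)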
+
+
+###############################################################################
+# FFT Resampling
+
+def setup_cuda_fft_resample(n_jobs, W, new_len):
+    """Set up CUDA FFT resampling
+
+    Parameters
+    ----------
+    n_jobs : int | str
+        If n_jobs == 'cuda', the function will attempt to set up for CUDA
+        FFT resampling.
+    W : array
+        The filtering function to be used during resampling.
+        If n_jobs='cuda', this function will be shortened (since CUDA
+        assumes FFTs of real signals are half the length of the signal)
+        and turned into a gpuarray.
+    new_len : int
+        The size of the array following resampling.
+
+    Returns
+    -------
+    n_jobs : int
+        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
+        original n_jobs is passed.
+    cuda_dict : dict
+        Dictionary with the following CUDA-related variables:
+            use_cuda : bool
+                Whether CUDA should be used.
+            fft_plan : instance of FFTPlan
+                FFT plan to use in calculating the FFT.
+            ifft_plan : instance of FFTPlan
+                FFT plan to use in calculating the IFFT.
+            x_fft : instance of gpuarray
+                Empty allocated GPU space for storing the result of the
+                frequency-domain multiplication.
+            x : instance of gpuarray
+                Empty allocated GPU space for the data to resample.
+    W : array | instance of gpuarray
+        This will either be a gpuarray (if CUDA enabled) or np.ndarray.
+        If CUDA is enabled, W will be modified appropriately for use
+        with fft_resample().
+
+    Notes
+    -----
+    This function is designed to be used with fft_resample().
+    """
+    cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
+                     x_fft=None, x=None, y_fft=None, y=None)
+    n_fft_x, n_fft_y = len(W), new_len
+    cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
+    cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
+    if n_jobs == 'cuda':
+        n_jobs = 1
+        init_cuda()
+        if _cuda_capable:
+            # try setting up for float64
+            from pycuda import gpuarray
+            cudafft = _get_cudafft()
+            try:
+                # do the IFFT normalization now so we don't have to later
+                W = gpuarray.to_gpu(W[:cuda_fft_len_x]
+                                    .astype('complex_') / n_fft_y)
+                cuda_dict.update(
+                    use_cuda=True,
+                    fft_plan=cudafft.Plan(n_fft_x, np.float64, np.complex128),
+                    ifft_plan=cudafft.Plan(n_fft_y, np.complex128, np.float64),
+                    x_fft=gpuarray.zeros(max(cuda_fft_len_x,
+                                             cuda_fft_len_y), np.complex128),
+                    x=gpuarray.empty(max(int(n_fft_x),
+                                     int(n_fft_y)), np.float64))
+                logger.info('Using CUDA for FFT resampling')
+            except Exception:
+                logger.info('CUDA not used, could not instantiate memory '
+                            '(arrays may be too large), falling back to '
+                            'n_jobs=1')
+        else:
+            logger.info('CUDA not used, CUDA could not be initialized, '
+                        'falling back to n_jobs=1')
+    return n_jobs, cuda_dict, W
+
+
+def fft_resample(x, W, new_len, npad, to_remove,
+                 cuda_dict=dict(use_cuda=False)):
+    """Do FFT resampling with a filter function (possibly using CUDA)
+
+    Parameters
+    ----------
+    x : 1-d array
+        The array to resample. Will be converted to float64 if necessary.
+    W : 1-d array or gpuarray
+        The filtering function to apply.
+    new_len : int
+        The size of the output array (before removing padding).
+    npad : int
+        Amount of padding to apply before resampling.
+    to_remove : int
+        Number of samples to remove after resampling.
+    cuda_dict : dict
+        Dictionary constructed using setup_cuda_fft_resample().
+
+    Returns
+    -------
+    x : 1-d array
+        Filtered version of x.
+    """
+    # add some padding at beginning and end to make this work a little cleaner
+    if x.dtype != np.float64:
+        x = x.astype(np.float64)
+    x = _smart_pad(x, npad)
+    old_len = len(x)
+    shorter = new_len < old_len
+    if not cuda_dict['use_cuda']:
+        N = int(min(new_len, old_len))
+        sl_1 = slice((N + 1) // 2)
+        y_fft = np.zeros(new_len, np.complex128)
+        x_fft = fft(x).ravel() * W
+        y_fft[sl_1] = x_fft[sl_1]
+        sl_2 = slice(-(N - 1) // 2, None)
+        y_fft[sl_2] = x_fft[sl_2]
+        y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
+    else:
+        cudafft = _get_cudafft()
+        cuda_dict['x'].set(np.concatenate((x, np.zeros(max(new_len - old_len,
+                                                           0), x.dtype))))
+        # do the fourier-domain operations, results put in second param
+        cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
+        _multiply_inplace_c128(W, cuda_dict['x_fft'])
+        # This is not straightforward, but because x_fft and y_fft share
+        # the same data (and only one half of the full DFT is stored), we
+        # don't have to transfer the slice like we do in scipy. All we
+        # need to worry about is the Nyquist component, either halving it
+        # or taking just the real component...
+        use_len = new_len if shorter else old_len
+        func = _real_c128 if shorter else _halve_c128
+        if use_len % 2 == 0:
+            nyq = int((use_len - (use_len % 2)) // 2)
+            func(cuda_dict['x_fft'], slice=slice(nyq, nyq + 1))
+        cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
+                     cuda_dict['ifft_plan'], scale=False)
+        y = cuda_dict['x'].get()[:new_len if shorter else None]
+
+    # now let's trim it back to the correct size (if there was padding)
+    if to_remove > 0:
+        keep = np.ones((new_len), dtype='bool')
+        keep[:to_remove] = False
+        keep[-to_remove:] = False
+        y = np.compress(keep, y)
+
+    return y
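+
+# Usage sketch (hypothetical `x` and filter `W`; pairs with
+# setup_cuda_fft_resample above):
+#
+#     n_jobs, cuda_dict, W = setup_cuda_fft_resample('cuda', W, new_len)
+#     y = fft_resample(x, W, new_len, npad=100, to_remove=0,
+#                      cuda_dict=cuda_dict)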
+
+
+###############################################################################
+# Misc
+
+# this has to go in mne.cuda instead of mne.filter to avoid import errors
+def _smart_pad(x, n_pad):
+    """Pad vector x
+    """
+    if n_pad == 0:
+        return x
+    elif n_pad < 0:
+        raise RuntimeError('n_pad must be non-negative')
+    # need to pad with zeros if len(x) <= n_pad
+    z_pad = np.zeros(max(n_pad - len(x) + 1, 0), dtype=x.dtype)
+    return np.concatenate([z_pad, 2 * x[0] - x[n_pad:0:-1], x,
+                           2 * x[-1] - x[-2:-n_pad - 2:-1], z_pad])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/FreeSurferColorLUT.txt b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/FreeSurferColorLUT.txt
new file mode 100644
index 0000000..2b85ef3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/FreeSurferColorLUT.txt
@@ -0,0 +1,1397 @@
+#$Id: FreeSurferColorLUT.txt,v 1.70.2.7 2012/08/27 17:20:08 nicks Exp $
+
+#No. Label Name:                            R   G   B   A
+
+0   Unknown                                 0   0   0   0
+1   Left-Cerebral-Exterior                  70  130 180 0
+2   Left-Cerebral-White-Matter              245 245 245 0
+3   Left-Cerebral-Cortex                    205 62  78  0
+4   Left-Lateral-Ventricle                  120 18  134 0
+5   Left-Inf-Lat-Vent                       196 58  250 0
+6   Left-Cerebellum-Exterior                0   148 0   0
+7   Left-Cerebellum-White-Matter            220 248 164 0
+8   Left-Cerebellum-Cortex                  230 148 34  0
+9   Left-Thalamus                           0   118 14  0
+10  Left-Thalamus-Proper                    0   118 14  0
+11  Left-Caudate                            122 186 220 0
+12  Left-Putamen                            236 13  176 0
+13  Left-Pallidum                           12  48  255 0
+14  3rd-Ventricle                           204 182 142 0
+15  4th-Ventricle                           42  204 164 0
+16  Brain-Stem                              119 159 176 0
+17  Left-Hippocampus                        220 216 20  0
+18  Left-Amygdala                           103 255 255 0
+19  Left-Insula                             80  196 98  0
+20  Left-Operculum                          60  58  210 0
+21  Line-1                                  60  58  210 0
+22  Line-2                                  60  58  210 0
+23  Line-3                                  60  58  210 0
+24  CSF                                     60  60  60  0
+25  Left-Lesion                             255 165 0   0
+26  Left-Accumbens-area                     255 165 0   0
+27  Left-Substancia-Nigra                   0   255 127 0
+28  Left-VentralDC                          165 42  42  0
+29  Left-undetermined                       135 206 235 0
+30  Left-vessel                             160 32  240 0
+31  Left-choroid-plexus                     0   200 200 0
+32  Left-F3orb                              100 50  100 0
+33  Left-lOg                                135 50  74  0
+34  Left-aOg                                122 135 50  0
+35  Left-mOg                                51  50  135 0
+36  Left-pOg                                74  155 60  0
+37  Left-Stellate                           120 62  43  0
+38  Left-Porg                               74  155 60  0
+39  Left-Aorg                               122 135 50  0
+40  Right-Cerebral-Exterior                 70  130 180 0
+41  Right-Cerebral-White-Matter             0   225 0   0
+42  Right-Cerebral-Cortex                   205 62  78  0
+43  Right-Lateral-Ventricle                 120 18  134 0
+44  Right-Inf-Lat-Vent                      196 58  250 0
+45  Right-Cerebellum-Exterior               0   148 0   0
+46  Right-Cerebellum-White-Matter           220 248 164 0
+47  Right-Cerebellum-Cortex                 230 148 34  0
+48  Right-Thalamus                          0   118 14  0
+49  Right-Thalamus-Proper                   0   118 14  0
+50  Right-Caudate                           122 186 220 0
+51  Right-Putamen                           236 13  176 0
+52  Right-Pallidum                          13  48  255 0
+53  Right-Hippocampus                       220 216 20  0
+54  Right-Amygdala                          103 255 255 0
+55  Right-Insula                            80  196 98  0
+56  Right-Operculum                         60  58  210 0
+57  Right-Lesion                            255 165 0   0
+58  Right-Accumbens-area                    255 165 0   0
+59  Right-Substancia-Nigra                  0   255 127 0
+60  Right-VentralDC                         165 42  42  0
+61  Right-undetermined                      135 206 235 0
+62  Right-vessel                            160 32  240 0
+63  Right-choroid-plexus                    0   200 221 0
+64  Right-F3orb                             100 50  100 0
+65  Right-lOg                               135 50  74  0
+66  Right-aOg                               122 135 50  0
+67  Right-mOg                               51  50  135 0
+68  Right-pOg                               74  155 60  0
+69  Right-Stellate                          120 62  43  0
+70  Right-Porg                              74  155 60  0
+71  Right-Aorg                              122 135 50  0
+72  5th-Ventricle                           120 190 150 0
+73  Left-Interior                           122 135 50  0
+74  Right-Interior                          122 135 50  0
+# 75/76 removed. duplicates of 4/43
+77  WM-hypointensities                      200 70  255 0
+78  Left-WM-hypointensities                 255 148 10  0
+79  Right-WM-hypointensities                255 148 10  0
+80  non-WM-hypointensities                  164 108 226 0
+81  Left-non-WM-hypointensities             164 108 226 0
+82  Right-non-WM-hypointensities            164 108 226 0
+83  Left-F1                                 255 218 185 0
+84  Right-F1                                255 218 185 0
+85  Optic-Chiasm                            234 169 30  0
+192 Corpus_Callosum                         250 255 50  0
+
+86  Left_future_WMSA                        200 120  255 0
+87  Right_future_WMSA                       200 121  255 0
+88  future_WMSA                             200 122  255 0
+
+
+96  Left-Amygdala-Anterior                  205 10  125 0
+97  Right-Amygdala-Anterior                 205 10  125 0
+98  Dura                                    160 32  240 0
+
+100 Left-wm-intensity-abnormality           124 140 178 0
+101 Left-caudate-intensity-abnormality      125 140 178 0
+102 Left-putamen-intensity-abnormality      126 140 178 0
+103 Left-accumbens-intensity-abnormality    127 140 178 0
+104 Left-pallidum-intensity-abnormality     124 141 178 0
+105 Left-amygdala-intensity-abnormality     124 142 178 0
+106 Left-hippocampus-intensity-abnormality  124 143 178 0
+107 Left-thalamus-intensity-abnormality     124 144 178 0
+108 Left-VDC-intensity-abnormality          124 140 179 0
+109 Right-wm-intensity-abnormality          124 140 178 0
+110 Right-caudate-intensity-abnormality     125 140 178 0
+111 Right-putamen-intensity-abnormality     126 140 178 0
+112 Right-accumbens-intensity-abnormality   127 140 178 0
+113 Right-pallidum-intensity-abnormality    124 141 178 0
+114 Right-amygdala-intensity-abnormality    124 142 178 0
+115 Right-hippocampus-intensity-abnormality 124 143 178 0
+116 Right-thalamus-intensity-abnormality    124 144 178 0
+117 Right-VDC-intensity-abnormality         124 140 179 0
+
+118 Epidermis                               255 20  147 0
+119 Conn-Tissue                             205 179 139 0
+120 SC-Fat-Muscle                           238 238 209 0
+121 Cranium                                 200 200 200 0
+122 CSF-SA                                  74  255 74  0
+123 Muscle                                  238 0   0   0
+124 Ear                                     0   0   139 0
+125 Adipose                                 173 255 47  0
+126 Spinal-Cord                             133 203 229 0
+127 Soft-Tissue                             26  237 57  0
+128 Nerve                                   34  139 34  0
+129 Bone                                    30  144 255 0
+130 Air                                     147 19  173 0
+131 Orbital-Fat                             238 59  59  0
+132 Tongue                                  221 39  200 0
+133 Nasal-Structures                        238 174 238 0
+134 Globe                                   255 0   0   0
+135 Teeth                                   72  61  139 0
+136 Left-Caudate-Putamen                    21  39  132 0
+137 Right-Caudate-Putamen                   21  39  132 0
+138 Left-Claustrum                          65  135 20  0
+139 Right-Claustrum                         65  135 20  0
+140 Cornea                                  134 4   160 0
+142 Diploe                                  221 226 68  0
+143 Vitreous-Humor                          255 255 254 0
+144 Lens                                    52  209 226 0
+145 Aqueous-Humor                           239 160 223 0
+146 Outer-Table                             70  130 180 0
+147 Inner-Table                             70  130 181 0
+148 Periosteum                              139 121 94  0
+149 Endosteum                               224 224 224 0
+150 R-C-S                                   255 0   0   0
+151 Iris                                    205 205 0   0
+152 SC-Adipose-Muscle                       238 238 209 0
+153 SC-Tissue                               139 121 94  0
+154 Orbital-Adipose                         238 59  59  0
+
+155 Left-IntCapsule-Ant                     238 59  59  0
+156 Right-IntCapsule-Ant                    238 59  59  0
+157 Left-IntCapsule-Pos                     62  10  205 0
+158 Right-IntCapsule-Pos                    62  10  205 0
+
+# These labels are for babies/children
+159 Left-Cerebral-WM-unmyelinated           0   118 14  0
+160 Right-Cerebral-WM-unmyelinated          0   118 14  0
+161 Left-Cerebral-WM-myelinated             220 216 21  0
+162 Right-Cerebral-WM-myelinated            220 216 21  0
+163 Left-Subcortical-Gray-Matter            122 186 220 0
+164 Right-Subcortical-Gray-Matter           122 186 220 0
+165 Skull                                   255 165 0   0
+166 Posterior-fossa                         14  48  255 0
+167 Scalp                                   166 42  42  0
+168 Hematoma                                121 18  134 0
+169 Left-Basal-Ganglia                      236 13  127 0
+176 Right-Basal-Ganglia                     236 13  126 0
+
+# Label names and colors for Brainstem constituents
+# No.  Label Name:                          R   G   B   A
+170 brainstem                               119 159 176 0
+171 DCG                                     119 0   176 0
+172 Vermis                                  119 100 176 0
+173 Midbrain                                119 200 176 0
+174 Pons                                    119 159 100 0
+175 Medulla                                 119 159 200 0
+
+#176 Right-Basal-Ganglia   found in babies/children section above
+
+180 Left-Cortical-Dysplasia                 73  61  139 0
+181 Right-Cortical-Dysplasia                73  62  139 0
+
+#192 Corpus_Callosum  listed after #85 above
+193 Left-hippocampal_fissure                0   196 255 0
+194 Left-CADG-head                          255 164 164 0
+195 Left-subiculum                          196 196 0   0
+196 Left-fimbria                            0   100 255 0
+197 Right-hippocampal_fissure               128 196 164 0
+198 Right-CADG-head                         0   126 75  0
+199 Right-subiculum                         128 96  64  0
+200 Right-fimbria                           0   50  128 0
+201 alveus                                  255 204 153 0
+202 perforant_pathway                       255 128 128 0
+203 parasubiculum                           255 255 0   0
+204 presubiculum                            64  0   64  0
+205 subiculum                               0   0   255 0
+206 CA1                                     255 0   0   0
+207 CA2                                     128 128 255 0
+208 CA3                                     0   128 0   0
+209 CA4                                     196 160 128 0
+210 GC-ML-DG                                32  200 255 0
+211 HATA                                    128 255 128 0
+212 fimbria                                 204 153 204 0
+213 lateral_ventricle                       121 17  136 0
+214 molecular_layer_HP                      128 0   0   0
+215 hippocampal_fissure                     128 32  255 0
+216 entorhinal_cortex                       255 204 102 0
+217 molecular_layer_subiculum               128 128 128 0
+218 Amygdala                                104 255 255 0
+219 Cerebral_White_Matter                   0   226 0   0
+220 Cerebral_Cortex                         205 63  78  0
+221 Inf_Lat_Vent                            197 58  250 0
+222 Perirhinal                              33  150 250 0
+223 Cerebral_White_Matter_Edge              226 0   0   0
+224 Background                              100 100 100 0
+225 Ectorhinal                              197 150 250 0
+226 HP_tail                                 170 170 255 0
+
+250 Fornix                                  255 0   0   0
+251 CC_Posterior                            0   0   64  0
+252 CC_Mid_Posterior                        0   0   112 0
+253 CC_Central                              0   0   160 0
+254 CC_Mid_Anterior                         0   0   208 0
+255 CC_Anterior                             0   0   255 0
+
+# This is for keeping track of voxel changes
+256 Voxel-Unchanged                         0   0   0   0
+
+# lymph node and vascular labels
+331 Aorta                                   255 0   0   0
+332 Left-Common-IliacA                      255 80  0   0
+333 Right-Common-IliacA                     255 160 0   0
+334 Left-External-IliacA                    255 255 0   0
+335 Right-External-IliacA                   0   255 0   0
+336 Left-Internal-IliacA                    255 0   160 0
+337 Right-Internal-IliacA                   255 0   255 0
+338 Left-Lateral-SacralA                    255 50  80  0
+339 Right-Lateral-SacralA                   80  255 50  0
+340 Left-ObturatorA                         160 255 50  0
+341 Right-ObturatorA                        160 200 255 0
+342 Left-Internal-PudendalA                 0   255 160 0
+343 Right-Internal-PudendalA                0   0   255 0
+344 Left-UmbilicalA                         80  50  255 0
+345 Right-UmbilicalA                        160 0   255 0
+346 Left-Inf-RectalA                        255 210 0   0
+347 Right-Inf-RectalA                       0   160 255 0
+348 Left-Common-IliacV                      255 200 80  0
+349 Right-Common-IliacV                     255 200 160 0
+350 Left-External-IliacV                    255 80  200 0
+351 Right-External-IliacV                   255 160 200 0
+352 Left-Internal-IliacV                    30  255 80  0
+353 Right-Internal-IliacV                   80  200 255 0
+354 Left-ObturatorV                         80  255 200 0
+355 Right-ObturatorV                        195 255 200 0
+356 Left-Internal-PudendalV                 120 200 20  0
+357 Right-Internal-PudendalV                170 10  200 0
+358 Pos-Lymph                               20  130 180 0
+359 Neg-Lymph                               20  180 130 0
+
+400 V1                                      206 62  78  0
+401 V2                                      121 18  134 0
+402 BA44                                    199 58  250 0
+403 BA45                                    1   148 0   0
+404 BA4a                                    221 248 164 0
+405 BA4p                                    231 148 34  0
+406 BA6                                     1   118 14  0
+407 BA2                                     120 118 14  0
+408 BA1_old                                 123 186 221 0
+409 BAun2                                   238 13  177 0
+410 BA1                                     123 186 220 0
+411 BA2b                                    138 13  206 0
+412 BA3a                                    238 130 176 0
+413 BA3b                                    218 230 76  0
+414 MT                                      38  213 176 0
+415 AIPS_AIP_l                              1   225 176 0
+416 AIPS_AIP_r                              1   225 176 0
+417 AIPS_VIP_l                              200 2   100 0
+418 AIPS_VIP_r                              200 2   100 0
+419 IPL_PFcm_l                              5   200 90  0
+420 IPL_PFcm_r                              5   200 90  0
+421 IPL_PF_l                                100 5   200 0
+422 IPL_PFm_l                               25  255 100 0
+423 IPL_PFm_r                               25  255 100 0
+424 IPL_PFop_l                              230 7   100 0
+425 IPL_PFop_r                              230 7   100 0
+426 IPL_PF_r                                100 5   200 0
+427 IPL_PFt_l                               150 10  200 0
+428 IPL_PFt_r                               150 10  200 0
+429 IPL_PGa_l                               175 10  176 0
+430 IPL_PGa_r                               175 10  176 0
+431 IPL_PGp_l                               10  100 255 0
+432 IPL_PGp_r                               10  100 255 0
+433 Visual_V3d_l                            150 45  70  0
+434 Visual_V3d_r                            150 45  70  0
+435 Visual_V4_l                             45  200 15  0
+436 Visual_V4_r                             45  200 15  0
+437 Visual_V5_b                             227 45  100 0
+438 Visual_VP_l                             227 45  100 0
+439 Visual_VP_r                             227 45  100 0
+
+# wm lesions
+498 wmsa                                    143 188 143 0
+499 other_wmsa                              255 248 220 0
+
+# HiRes Hippocampus labeling
+500 right_CA2_3                             17  85  136 0
+501 right_alveus                            119 187 102 0
+502 right_CA1                               204 68  34  0
+503 right_fimbria                           204 0   255 0
+504 right_presubiculum                      221 187 17  0
+505 right_hippocampal_fissure               153 221 238 0
+506 right_CA4_DG                            51  17  17  0
+507 right_subiculum                         0   119 85  0
+508 right_fornix                            20  100 200 0
+
+550 left_CA2_3                              17  85  137 0
+551 left_alveus                             119 187 103 0
+552 left_CA1                                204 68  35  0
+553 left_fimbria                            204 0   254 0
+554 left_presubiculum                       221 187 16  0
+555 left_hippocampal_fissure                153 221 239 0
+556 left_CA4_DG                             51  17  18  0
+557 left_subiculum                          0   119 86  0
+558 left_fornix                             20  100 201 0
+
+600 Tumor                                   254 254 254 0
+
+
+# Cerebellar parcellation labels from SUIT (matches labels in cma.h)
+#No. Label Name:                            R   G   B   A
+601  Cbm_Left_I_IV                          70  130 180 0
+602  Cbm_Right_I_IV                         245 245 245 0
+603  Cbm_Left_V                             205 62  78  0
+604  Cbm_Right_V                            120 18  134 0
+605  Cbm_Left_VI                            196 58  250 0
+606  Cbm_Vermis_VI                          0   148 0   0
+607  Cbm_Right_VI                           220 248 164 0
+608  Cbm_Left_CrusI                         230 148 34  0
+609  Cbm_Vermis_CrusI                       0   118 14  0
+610  Cbm_Right_CrusI                        0   118 14  0
+611  Cbm_Left_CrusII                        122 186 220 0
+612  Cbm_Vermis_CrusII                      236 13  176 0
+613  Cbm_Right_CrusII                       12  48  255 0
+614  Cbm_Left_VIIb                          204 182 142 0
+615  Cbm_Vermis_VIIb                        42  204 164 0
+616  Cbm_Right_VIIb                         119 159 176 0
+617  Cbm_Left_VIIIa                         220 216 20  0
+618  Cbm_Vermis_VIIIa                       103 255 255 0
+619  Cbm_Right_VIIIa                        80  196 98  0
+620  Cbm_Left_VIIIb                         60  58  210 0
+621  Cbm_Vermis_VIIIb                       60  58  210 0
+622  Cbm_Right_VIIIb                        60  58  210 0
+623  Cbm_Left_IX                            60  58  210 0
+624  Cbm_Vermis_IX                          60  60  60  0
+625  Cbm_Right_IX                           255 165 0   0
+626  Cbm_Left_X                             255 165 0   0
+627  Cbm_Vermis_X                           0   255 127 0
+628  Cbm_Right_X                            165 42  42  0
+
+# Cerebellar lobule parcellations
+640  Cbm_Right_I_V_med                      204  0  0   0
+641  Cbm_Right_I_V_mid                      255  0  0   0
+642  Cbm_Right_VI_med                       0    0  255 0
+643  Cbm_Right_VI_mid                       30  144 255 0
+644  Cbm_Right_VI_lat                       100 212 237 0
+645  Cbm_Right_CrusI_med                    218 165 32  0
+646  Cbm_Right_CrusI_mid                    255 215 0   0
+647  Cbm_Right_CrusI_lat                    255 255 166 0
+648  Cbm_Right_CrusII_med                   153 0   204 0
+649  Cbm_Right_CrusII_mid                   153 141 209 0
+650  Cbm_Right_CrusII_lat                   204 204 255 0
+651  Cbm_Right_7med                         31  212 194 0
+652  Cbm_Right_7mid                         3   255 237 0
+653  Cbm_Right_7lat                         204 255 255 0
+654  Cbm_Right_8med                         86  74  147 0
+655  Cbm_Right_8mid                         114 114 190 0
+656  Cbm_Right_8lat                         184 178 255 0
+657  Cbm_Right_PUNs                         126 138 37  0
+658  Cbm_Right_TONs                         189 197 117 0
+659  Cbm_Right_FLOs                         240 230 140 0
+660  Cbm_Left_I_V_med                       204  0  0   0
+661  Cbm_Left_I_V_mid                       255  0  0   0
+662  Cbm_Left_VI_med                        0    0  255 0
+663  Cbm_Left_VI_mid                        30  144 255 0
+664  Cbm_Left_VI_lat                        100 212 237 0
+665  Cbm_Left_CrusI_med                     218 165 32  0
+666  Cbm_Left_CrusI_mid                     255 215 0   0
+667  Cbm_Left_CrusI_lat                     255 255 166 0
+668  Cbm_Left_CrusII_med                    153 0   204 0
+669  Cbm_Left_CrusII_mid                    153 141 209 0
+670  Cbm_Left_CrusII_lat                    204 204 255 0
+671  Cbm_Left_7med                          31  212 194 0
+672  Cbm_Left_7mid                          3   255 237 0
+673  Cbm_Left_7lat                          204 255 255 0
+674  Cbm_Left_8med                          86  74  147 0
+675  Cbm_Left_8mid                          114 114 190 0
+676  Cbm_Left_8lat                          184 178 255 0
+677  Cbm_Left_PUNs                          126 138 37  0
+678  Cbm_Left_TONs                          189 197 117 0
+679  Cbm_Left_FLOs                          240 230 140 0
+
+701 CSF-FSL-FAST                            120 18  134 0
+702 GrayMatter-FSL-FAST                     205 62  78  0
+703 WhiteMatter-FSL-FAST                    0   225 0   0
+
+999 SUSPICIOUS                              255 100 100 0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg in which the aseg cortex label is replaced
+# by the labels in the aparc. It also supports wm labels that will
+# eventually be created by mri_aparc2aseg. Otherwise, the aseg labels
+# do not change from above. The cortical labels are the same as in
+# colortable_desikan_killiany.txt, except that left hemisphere has
+# 1000 added to the index and the right has 2000 added.  The label
+# names are also prepended with ctx-lh or ctx-rh. The white matter
+# labels are the same as in colortable_desikan_killiany.txt, except
+# that left hemisphere has 3000 added to the index and the right has
+# 4000 added. The label names are also prepended with wm-lh or wm-rh.
+# Centrum semiovale is also labeled with 5001 (left) and 5002 (right).
+# Even further below are the color tables for aparc.a2005s and aparc.a2009s.
+
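The offset scheme in the comment above is straightforward to apply in code. Below is a minimal Python sketch, assuming a FreeSurfer-style LUT file on disk; the 'FreeSurferColorLUT.txt' path and the read_lut helper are illustrative, not part of MNE or FreeSurfer:

# Minimal sketch: parse rows of a FreeSurfer-style color LUT and apply the
# offsets documented above (ctx-lh +1000, ctx-rh +2000, wm-lh +3000,
# wm-rh +4000). The path below is an assumed local copy.
def read_lut(path='FreeSurferColorLUT.txt'):
    lut = {}
    with open(path) as f:
        for line in f:
            fields = line.split()
            # Skip blank lines and '#' comment lines like the ones in this table.
            if not fields or fields[0].startswith('#'):
                continue
            idx, name = int(fields[0]), fields[1]
            lut[idx] = (name, tuple(int(v) for v in fields[2:6]))
    return lut

lut = read_lut()
# Desikan-Killiany index 24 (precentral) maps to both hemispheres and tissues:
for offset in (1000, 2000, 3000, 4000):
    print('%d %s' % (offset + 24, lut[offset + 24][0]))
# -> 1024 ctx-lh-precentral, 2024 ctx-rh-precentral,
#    3024 wm-lh-precentral,  4024 wm-rh-precentral
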
+#No.    Label Name:                         R   G   B   A
+1000    ctx-lh-unknown                      25  5   25  0
+1001    ctx-lh-bankssts                     25  100 40  0
+1002    ctx-lh-caudalanteriorcingulate      125 100 160 0
+1003    ctx-lh-caudalmiddlefrontal          100 25  0   0
+1004    ctx-lh-corpuscallosum               120 70  50  0
+1005    ctx-lh-cuneus                       220 20  100 0
+1006    ctx-lh-entorhinal                   220 20  10  0
+1007    ctx-lh-fusiform                     180 220 140 0
+1008    ctx-lh-inferiorparietal             220 60  220 0
+1009    ctx-lh-inferiortemporal             180 40  120 0
+1010    ctx-lh-isthmuscingulate             140 20  140 0
+1011    ctx-lh-lateraloccipital             20  30  140 0
+1012    ctx-lh-lateralorbitofrontal         35  75  50  0
+1013    ctx-lh-lingual                      225 140 140 0
+1014    ctx-lh-medialorbitofrontal          200 35  75  0
+1015    ctx-lh-middletemporal               160 100 50  0
+1016    ctx-lh-parahippocampal              20  220 60  0
+1017    ctx-lh-paracentral                  60  220 60  0
+1018    ctx-lh-parsopercularis              220 180 140 0
+1019    ctx-lh-parsorbitalis                20  100 50  0
+1020    ctx-lh-parstriangularis             220 60  20  0
+1021    ctx-lh-pericalcarine                120 100 60  0
+1022    ctx-lh-postcentral                  220 20  20  0
+1023    ctx-lh-posteriorcingulate           220 180 220 0
+1024    ctx-lh-precentral                   60  20  220 0
+1025    ctx-lh-precuneus                    160 140 180 0
+1026    ctx-lh-rostralanteriorcingulate     80  20  140 0
+1027    ctx-lh-rostralmiddlefrontal         75  50  125 0
+1028    ctx-lh-superiorfrontal              20  220 160 0
+1029    ctx-lh-superiorparietal             20  180 140 0
+1030    ctx-lh-superiortemporal             140 220 220 0
+1031    ctx-lh-supramarginal                80  160 20  0
+1032    ctx-lh-frontalpole                  100 0   100 0
+1033    ctx-lh-temporalpole                 70  70  70  0
+1034    ctx-lh-transversetemporal           150 150 200 0
+1035    ctx-lh-insula                       255 192 32  0
+
+2000    ctx-rh-unknown                      25  5   25  0
+2001    ctx-rh-bankssts                     25  100 40  0
+2002    ctx-rh-caudalanteriorcingulate      125 100 160 0
+2003    ctx-rh-caudalmiddlefrontal          100 25  0   0
+2004    ctx-rh-corpuscallosum               120 70  50  0
+2005    ctx-rh-cuneus                       220 20  100 0
+2006    ctx-rh-entorhinal                   220 20  10  0
+2007    ctx-rh-fusiform                     180 220 140 0
+2008    ctx-rh-inferiorparietal             220 60  220 0
+2009    ctx-rh-inferiortemporal             180 40  120 0
+2010    ctx-rh-isthmuscingulate             140 20  140 0
+2011    ctx-rh-lateraloccipital             20  30  140 0
+2012    ctx-rh-lateralorbitofrontal         35  75  50  0
+2013    ctx-rh-lingual                      225 140 140 0
+2014    ctx-rh-medialorbitofrontal          200 35  75  0
+2015    ctx-rh-middletemporal               160 100 50  0
+2016    ctx-rh-parahippocampal              20  220 60  0
+2017    ctx-rh-paracentral                  60  220 60  0
+2018    ctx-rh-parsopercularis              220 180 140 0
+2019    ctx-rh-parsorbitalis                20  100 50  0
+2020    ctx-rh-parstriangularis             220 60  20  0
+2021    ctx-rh-pericalcarine                120 100 60  0
+2022    ctx-rh-postcentral                  220 20  20  0
+2023    ctx-rh-posteriorcingulate           220 180 220 0
+2024    ctx-rh-precentral                   60  20  220 0
+2025    ctx-rh-precuneus                    160 140 180 0
+2026    ctx-rh-rostralanteriorcingulate     80  20  140 0
+2027    ctx-rh-rostralmiddlefrontal         75  50  125 0
+2028    ctx-rh-superiorfrontal              20  220 160 0
+2029    ctx-rh-superiorparietal             20  180 140 0
+2030    ctx-rh-superiortemporal             140 220 220 0
+2031    ctx-rh-supramarginal                80  160 20  0
+2032    ctx-rh-frontalpole                  100 0   100 0
+2033    ctx-rh-temporalpole                 70  70  70  0
+2034    ctx-rh-transversetemporal           150 150 200 0
+2035    ctx-rh-insula                       255 192 32  0
+
+3000    wm-lh-unknown                       230 250 230 0
+3001    wm-lh-bankssts                      230 155 215 0
+3002    wm-lh-caudalanteriorcingulate       130 155 95  0
+3003    wm-lh-caudalmiddlefrontal           155 230 255 0
+3004    wm-lh-corpuscallosum                135 185 205 0
+3005    wm-lh-cuneus                        35  235 155 0
+3006    wm-lh-entorhinal                    35  235 245 0
+3007    wm-lh-fusiform                      75  35  115 0
+3008    wm-lh-inferiorparietal              35  195 35  0
+3009    wm-lh-inferiortemporal              75  215 135 0
+3010    wm-lh-isthmuscingulate              115 235 115 0
+3011    wm-lh-lateraloccipital              235 225 115 0
+3012    wm-lh-lateralorbitofrontal          220 180 205 0
+3013    wm-lh-lingual                       30  115 115 0
+3014    wm-lh-medialorbitofrontal           55  220 180 0
+3015    wm-lh-middletemporal                95  155 205 0
+3016    wm-lh-parahippocampal               235 35  195 0
+3017    wm-lh-paracentral                   195 35  195 0
+3018    wm-lh-parsopercularis               35  75  115 0
+3019    wm-lh-parsorbitalis                 235 155 205 0
+3020    wm-lh-parstriangularis              35  195 235 0
+3021    wm-lh-pericalcarine                 135 155 195 0
+3022    wm-lh-postcentral                   35  235 235 0
+3023    wm-lh-posteriorcingulate            35  75  35  0
+3024    wm-lh-precentral                    195 235 35  0
+3025    wm-lh-precuneus                     95  115 75  0
+3026    wm-lh-rostralanteriorcingulate      175 235 115 0
+3027    wm-lh-rostralmiddlefrontal          180 205 130 0
+3028    wm-lh-superiorfrontal               235 35  95  0
+3029    wm-lh-superiorparietal              235 75  115 0
+3030    wm-lh-superiortemporal              115 35  35  0
+3031    wm-lh-supramarginal                 175 95  235 0
+3032    wm-lh-frontalpole                   155 255 155 0
+3033    wm-lh-temporalpole                  185 185 185 0
+3034    wm-lh-transversetemporal            105 105 55  0
+3035    wm-lh-insula                        254 191 31  0
+
+4000    wm-rh-unknown                       230 250 230 0
+4001    wm-rh-bankssts                      230 155 215 0
+4002    wm-rh-caudalanteriorcingulate       130 155 95  0
+4003    wm-rh-caudalmiddlefrontal           155 230 255 0
+4004    wm-rh-corpuscallosum                135 185 205 0
+4005    wm-rh-cuneus                        35  235 155 0
+4006    wm-rh-entorhinal                    35  235 245 0
+4007    wm-rh-fusiform                      75  35  115 0
+4008    wm-rh-inferiorparietal              35  195 35  0
+4009    wm-rh-inferiortemporal              75  215 135 0
+4010    wm-rh-isthmuscingulate              115 235 115 0
+4011    wm-rh-lateraloccipital              235 225 115 0
+4012    wm-rh-lateralorbitofrontal          220 180 205 0
+4013    wm-rh-lingual                       30  115 115 0
+4014    wm-rh-medialorbitofrontal           55  220 180 0
+4015    wm-rh-middletemporal                95  155 205 0
+4016    wm-rh-parahippocampal               235 35  195 0
+4017    wm-rh-paracentral                   195 35  195 0
+4018    wm-rh-parsopercularis               35  75  115 0
+4019    wm-rh-parsorbitalis                 235 155 205 0
+4020    wm-rh-parstriangularis              35  195 235 0
+4021    wm-rh-pericalcarine                 135 155 195 0
+4022    wm-rh-postcentral                   35  235 235 0
+4023    wm-rh-posteriorcingulate            35  75  35  0
+4024    wm-rh-precentral                    195 235 35  0
+4025    wm-rh-precuneus                     95  115 75  0
+4026    wm-rh-rostralanteriorcingulate      175 235 115 0
+4027    wm-rh-rostralmiddlefrontal          180 205 130 0
+4028    wm-rh-superiorfrontal               235 35  95  0
+4029    wm-rh-superiorparietal              235 75  115 0
+4030    wm-rh-superiortemporal              115 35  35  0
+4031    wm-rh-supramarginal                 175 95  235 0
+4032    wm-rh-frontalpole                   155 255 155 0
+4033    wm-rh-temporalpole                  185 185 185 0
+4034    wm-rh-transversetemporal            105 105 55  0
+4035    wm-rh-insula                        254 191 31  0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg (with --a2005s flag) in which the aseg
+# cortex label is replaced by the labels in the aparc.a2005s. The
+# cortical labels are the same as in Simple_surface_labels2005.txt,
+# except that left hemisphere has 1100 added to the index and the
+# right has 2100 added.  The label names are also prepended with
+# ctx-lh or ctx-rh.  The aparc.a2009s labels are further below
+
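The a2005s variant uses the same arithmetic with offsets of 1100 (left) and 2100 (right); a small sketch of hemisphere classification under that assumption:

# a2005s offsets documented above: lh cortex +1100, rh cortex +2100.
A2005S_LH, A2005S_RH = 1100, 2100

def hemi_of_a2005s(idx):
    # Classify an a2005s cortical index by its hemisphere offset.
    return 'lh' if A2005S_LH <= idx < A2005S_RH else 'rh'

print(hemi_of_a2005s(1105))  # 'lh' (ctx-lh-G_cuneus below)
print(hemi_of_a2005s(2105))  # 'rh' (ctx-rh-G_cuneus below)
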
+#No.    Label Name:                                     R   G   B   A
+1100    ctx-lh-Unknown                                  0   0   0   0
+1101    ctx-lh-Corpus_callosum                          50  50  50  0
+1102    ctx-lh-G_and_S_Insula_ONLY_AVERAGE              180 20  30  0
+1103    ctx-lh-G_cingulate-Isthmus                      60  25  25  0
+1104    ctx-lh-G_cingulate-Main_part                    25  60  60  0
+
+1200    ctx-lh-G_cingulate-caudal_ACC                   25  60  61  0
+1201    ctx-lh-G_cingulate-rostral_ACC                  25  90  60  0
+1202    ctx-lh-G_cingulate-posterior                    25  120 60  0
+
+1205    ctx-lh-S_cingulate-caudal_ACC                   25  150 60  0
+1206    ctx-lh-S_cingulate-rostral_ACC                  25  180 60  0
+1207    ctx-lh-S_cingulate-posterior                    25  210 60  0
+
+1210    ctx-lh-S_pericallosal-caudal                    25  150 90  0
+1211    ctx-lh-S_pericallosal-rostral                   25  180 90  0
+1212    ctx-lh-S_pericallosal-posterior                 25  210 90  0
+
+1105    ctx-lh-G_cuneus                                 180 20  20  0
+1106    ctx-lh-G_frontal_inf-Opercular_part             220 20  100 0
+1107    ctx-lh-G_frontal_inf-Orbital_part               140 60  60  0
+1108    ctx-lh-G_frontal_inf-Triangular_part            180 220 140 0
+1109    ctx-lh-G_frontal_middle                         140 100 180 0
+1110    ctx-lh-G_frontal_superior                       180 20  140 0
+1111    ctx-lh-G_frontomarginal                         140 20  140 0
+1112    ctx-lh-G_insular_long                           21  10  10  0
+1113    ctx-lh-G_insular_short                          225 140 140 0
+1114    ctx-lh-G_and_S_occipital_inferior               23  60  180 0
+1115    ctx-lh-G_occipital_middle                       180 60  180 0
+1116    ctx-lh-G_occipital_superior                     20  220 60  0
+1117    ctx-lh-G_occipit-temp_lat-Or_fusiform           60  20  140 0
+1118    ctx-lh-G_occipit-temp_med-Lingual_part          220 180 140 0
+1119    ctx-lh-G_occipit-temp_med-Parahippocampal_part  65  100 20  0
+1120    ctx-lh-G_orbital                                220 60  20  0
+1121    ctx-lh-G_paracentral                            60  100 60  0
+1122    ctx-lh-G_parietal_inferior-Angular_part         20  60  220 0
+1123    ctx-lh-G_parietal_inferior-Supramarginal_part   100 100 60  0
+1124    ctx-lh-G_parietal_superior                      220 180 220 0
+1125    ctx-lh-G_postcentral                            20  180 140 0
+1126    ctx-lh-G_precentral                             60  140 180 0
+1127    ctx-lh-G_precuneus                              25  20  140 0
+1128    ctx-lh-G_rectus                                 20  60  100 0
+1129    ctx-lh-G_subcallosal                            60  220 20  0
+1130    ctx-lh-G_subcentral                             60  20  220 0
+1131    ctx-lh-G_temporal_inferior                      220 220 100 0
+1132    ctx-lh-G_temporal_middle                        180 60  60  0
+1133    ctx-lh-G_temp_sup-G_temp_transv_and_interm_S    60  60  220 0
+1134    ctx-lh-G_temp_sup-Lateral_aspect                220 60  220 0
+1135    ctx-lh-G_temp_sup-Planum_polare                 65  220 60  0
+1136    ctx-lh-G_temp_sup-Planum_tempolare              25  140 20  0
+1137    ctx-lh-G_and_S_transverse_frontopolar           13  0   250 0
+1138    ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal     61  20  220 0
+1139    ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical       61  20  60  0
+1140    ctx-lh-Lat_Fissure-post_sgt                     61  60  100 0
+1141    ctx-lh-Medial_wall                              25  25  25  0
+1142    ctx-lh-Pole_occipital                           140 20  60  0
+1143    ctx-lh-Pole_temporal                            220 180 20  0
+1144    ctx-lh-S_calcarine                              63  180 180 0
+1145    ctx-lh-S_central                                221 20  10  0
+1146    ctx-lh-S_central_insula                         21  220 20  0
+1147    ctx-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20  0
+1148    ctx-lh-S_cingulate-Marginalis_part              221 20  100 0
+1149    ctx-lh-S_circular_insula_anterior               221 60  140 0
+1150    ctx-lh-S_circular_insula_inferior               221 20  220 0
+1151    ctx-lh-S_circular_insula_superior               61  220 220 0
+1152    ctx-lh-S_collateral_transverse_ant              100 200 200 0
+1153    ctx-lh-S_collateral_transverse_post             10  200 200 0
+1154    ctx-lh-S_frontal_inferior                       221 220 20  0
+1155    ctx-lh-S_frontal_middle                         141 20  100 0
+1156    ctx-lh-S_frontal_superior                       61  220 100 0
+1157    ctx-lh-S_frontomarginal                         21  220 60  0
+1158    ctx-lh-S_intermedius_primus-Jensen              141 60  20  0
+1159    ctx-lh-S_intraparietal-and_Parietal_transverse  143 20  220 0
+1160    ctx-lh-S_occipital_anterior                     61  20  180 0
+1161    ctx-lh-S_occipital_middle_and_Lunatus           101 60  220 0
+1162    ctx-lh-S_occipital_superior_and_transversalis   21  20  140 0
+1163    ctx-lh-S_occipito-temporal_lateral              221 140 20  0
+1164    ctx-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0
+1165    ctx-lh-S_orbital-H_shapped                      101 20  20  0
+1166    ctx-lh-S_orbital_lateral                        221 100 20  0
+1167    ctx-lh-S_orbital_medial-Or_olfactory            181 200 20  0
+1168    ctx-lh-S_paracentral                            21  180 140 0
+1169    ctx-lh-S_parieto_occipital                      101 100 180 0
+1170    ctx-lh-S_pericallosal                           181 220 20  0
+1171    ctx-lh-S_postcentral                            21  140 200 0
+1172    ctx-lh-S_precentral-Inferior-part               21  20  240 0
+1173    ctx-lh-S_precentral-Superior-part               21  20  200 0
+1174    ctx-lh-S_subcentral_ant                         61  180 60  0
+1175    ctx-lh-S_subcentral_post                        61  180 250 0
+1176    ctx-lh-S_suborbital                             21  20  60  0
+1177    ctx-lh-S_subparietal                            101 60  60  0
+1178    ctx-lh-S_supracingulate                         21  220 220 0
+1179    ctx-lh-S_temporal_inferior                      21  180 180 0
+1180    ctx-lh-S_temporal_superior                      223 220 60  0
+1181    ctx-lh-S_temporal_transverse                    221 60  60  0
+
+2100    ctx-rh-Unknown                                  0   0   0   0
+2101    ctx-rh-Corpus_callosum                          50  50  50  0
+2102    ctx-rh-G_and_S_Insula_ONLY_AVERAGE              180 20  30  0
+2103    ctx-rh-G_cingulate-Isthmus                      60  25  25  0
+2104    ctx-rh-G_cingulate-Main_part                    25  60  60  0
+
+2105    ctx-rh-G_cuneus                                 180 20  20  0
+2106    ctx-rh-G_frontal_inf-Opercular_part             220 20  100 0
+2107    ctx-rh-G_frontal_inf-Orbital_part               140 60  60  0
+2108    ctx-rh-G_frontal_inf-Triangular_part            180 220 140 0
+2109    ctx-rh-G_frontal_middle                         140 100 180 0
+2110    ctx-rh-G_frontal_superior                       180 20  140 0
+2111    ctx-rh-G_frontomarginal                         140 20  140 0
+2112    ctx-rh-G_insular_long                           21  10  10  0
+2113    ctx-rh-G_insular_short                          225 140 140 0
+2114    ctx-rh-G_and_S_occipital_inferior               23  60  180 0
+2115    ctx-rh-G_occipital_middle                       180 60  180 0
+2116    ctx-rh-G_occipital_superior                     20  220 60  0
+2117    ctx-rh-G_occipit-temp_lat-Or_fusiform           60  20  140 0
+2118    ctx-rh-G_occipit-temp_med-Lingual_part          220 180 140 0
+2119    ctx-rh-G_occipit-temp_med-Parahippocampal_part  65  100 20  0
+2120    ctx-rh-G_orbital                                220 60  20  0
+2121    ctx-rh-G_paracentral                            60  100 60  0
+2122    ctx-rh-G_parietal_inferior-Angular_part         20  60  220 0
+2123    ctx-rh-G_parietal_inferior-Supramarginal_part   100 100 60  0
+2124    ctx-rh-G_parietal_superior                      220 180 220 0
+2125    ctx-rh-G_postcentral                            20  180 140 0
+2126    ctx-rh-G_precentral                             60  140 180 0
+2127    ctx-rh-G_precuneus                              25  20  140 0
+2128    ctx-rh-G_rectus                                 20  60  100 0
+2129    ctx-rh-G_subcallosal                            60  220 20  0
+2130    ctx-rh-G_subcentral                             60  20  220 0
+2131    ctx-rh-G_temporal_inferior                      220 220 100 0
+2132    ctx-rh-G_temporal_middle                        180 60  60  0
+2133    ctx-rh-G_temp_sup-G_temp_transv_and_interm_S    60  60  220 0
+2134    ctx-rh-G_temp_sup-Lateral_aspect                220 60  220 0
+2135    ctx-rh-G_temp_sup-Planum_polare                 65  220 60  0
+2136    ctx-rh-G_temp_sup-Planum_tempolare              25  140 20  0
+2137    ctx-rh-G_and_S_transverse_frontopolar           13  0   250 0
+2138    ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal     61  20  220 0
+2139    ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical       61  20  60  0
+2140    ctx-rh-Lat_Fissure-post_sgt                     61  60  100 0
+2141    ctx-rh-Medial_wall                              25  25  25  0
+2142    ctx-rh-Pole_occipital                           140 20  60  0
+2143    ctx-rh-Pole_temporal                            220 180 20  0
+2144    ctx-rh-S_calcarine                              63  180 180 0
+2145    ctx-rh-S_central                                221 20  10  0
+2146    ctx-rh-S_central_insula                         21  220 20  0
+2147    ctx-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20  0
+2148    ctx-rh-S_cingulate-Marginalis_part              221 20  100 0
+2149    ctx-rh-S_circular_insula_anterior               221 60  140 0
+2150    ctx-rh-S_circular_insula_inferior               221 20  220 0
+2151    ctx-rh-S_circular_insula_superior               61  220 220 0
+2152    ctx-rh-S_collateral_transverse_ant              100 200 200 0
+2153    ctx-rh-S_collateral_transverse_post             10  200 200 0
+2154    ctx-rh-S_frontal_inferior                       221 220 20  0
+2155    ctx-rh-S_frontal_middle                         141 20  100 0
+2156    ctx-rh-S_frontal_superior                       61  220 100 0
+2157    ctx-rh-S_frontomarginal                         21  220 60  0
+2158    ctx-rh-S_intermedius_primus-Jensen              141 60  20  0
+2159    ctx-rh-S_intraparietal-and_Parietal_transverse  143 20  220 0
+2160    ctx-rh-S_occipital_anterior                     61  20  180 0
+2161    ctx-rh-S_occipital_middle_and_Lunatus           101 60  220 0
+2162    ctx-rh-S_occipital_superior_and_transversalis   21  20  140 0
+2163    ctx-rh-S_occipito-temporal_lateral              221 140 20  0
+2164    ctx-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0
+2165    ctx-rh-S_orbital-H_shapped                      101 20  20  0
+2166    ctx-rh-S_orbital_lateral                        221 100 20  0
+2167    ctx-rh-S_orbital_medial-Or_olfactory            181 200 20  0
+2168    ctx-rh-S_paracentral                            21  180 140 0
+2169    ctx-rh-S_parieto_occipital                      101 100 180 0
+2170    ctx-rh-S_pericallosal                           181 220 20  0
+2171    ctx-rh-S_postcentral                            21  140 200 0
+2172    ctx-rh-S_precentral-Inferior-part               21  20  240 0
+2173    ctx-rh-S_precentral-Superior-part               21  20  200 0
+2174    ctx-rh-S_subcentral_ant                         61  180 60  0
+2175    ctx-rh-S_subcentral_post                        61  180 250 0
+2176    ctx-rh-S_suborbital                             21  20  60  0
+2177    ctx-rh-S_subparietal                            101 60  60  0
+2178    ctx-rh-S_supracingulate                         21  220 220 0
+2179    ctx-rh-S_temporal_inferior                      21  180 180 0
+2180    ctx-rh-S_temporal_superior                      223 220 60  0
+2181    ctx-rh-S_temporal_transverse                    221 60  60  0
+
+
+2200    ctx-rh-G_cingulate-caudal_ACC                   25  60  61  0
+2201    ctx-rh-G_cingulate-rostral_ACC                  25  90  60  0
+2202    ctx-rh-G_cingulate-posterior                    25  120 60  0
+
+2205    ctx-rh-S_cingulate-caudal_ACC                   25  150 60  0
+2206    ctx-rh-S_cingulate-rostral_ACC                  25  180 60  0
+2207    ctx-rh-S_cingulate-posterior                    25  210 60  0
+
+2210    ctx-rh-S_pericallosal-caudal                    25  150 90  0
+2211    ctx-rh-S_pericallosal-rostral                   25  180 90  0
+2212    ctx-rh-S_pericallosal-posterior                 25  210 90  0
+
+3100    wm-lh-Unknown                                   0   0   0   0
+3101    wm-lh-Corpus_callosum                           50  50  50  0
+3102    wm-lh-G_and_S_Insula_ONLY_AVERAGE               180 20  30  0
+3103    wm-lh-G_cingulate-Isthmus                       60  25  25  0
+3104    wm-lh-G_cingulate-Main_part                     25  60  60  0
+3105    wm-lh-G_cuneus                                  180 20  20  0
+3106    wm-lh-G_frontal_inf-Opercular_part              220 20  100 0
+3107    wm-lh-G_frontal_inf-Orbital_part                140 60  60  0
+3108    wm-lh-G_frontal_inf-Triangular_part             180 220 140 0
+3109    wm-lh-G_frontal_middle                          140 100 180 0
+3110    wm-lh-G_frontal_superior                        180 20  140 0
+3111    wm-lh-G_frontomarginal                          140 20  140 0
+3112    wm-lh-G_insular_long                            21  10  10  0
+3113    wm-lh-G_insular_short                           225 140 140 0
+3114    wm-lh-G_and_S_occipital_inferior                23  60  180 0
+3115    wm-lh-G_occipital_middle                        180 60  180 0
+3116    wm-lh-G_occipital_superior                      20  220 60  0
+3117    wm-lh-G_occipit-temp_lat-Or_fusiform            60  20  140 0
+3118    wm-lh-G_occipit-temp_med-Lingual_part           220 180 140 0
+3119    wm-lh-G_occipit-temp_med-Parahippocampal_part   65  100 20  0
+3120    wm-lh-G_orbital                                 220 60  20  0
+3121    wm-lh-G_paracentral                             60  100 60  0
+3122    wm-lh-G_parietal_inferior-Angular_part          20  60  220 0
+3123    wm-lh-G_parietal_inferior-Supramarginal_part    100 100 60  0
+3124    wm-lh-G_parietal_superior                       220 180 220 0
+3125    wm-lh-G_postcentral                             20  180 140 0
+3126    wm-lh-G_precentral                              60  140 180 0
+3127    wm-lh-G_precuneus                               25  20  140 0
+3128    wm-lh-G_rectus                                  20  60  100 0
+3129    wm-lh-G_subcallosal                             60  220 20  0
+3130    wm-lh-G_subcentral                              60  20  220 0
+3131    wm-lh-G_temporal_inferior                       220 220 100 0
+3132    wm-lh-G_temporal_middle                         180 60  60  0
+3133    wm-lh-G_temp_sup-G_temp_transv_and_interm_S     60  60  220 0
+3134    wm-lh-G_temp_sup-Lateral_aspect                 220 60  220 0
+3135    wm-lh-G_temp_sup-Planum_polare                  65  220 60  0
+3136    wm-lh-G_temp_sup-Planum_tempolare               25  140 20  0
+3137    wm-lh-G_and_S_transverse_frontopolar            13  0   250 0
+3138    wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal      61  20  220 0
+3139    wm-lh-Lat_Fissure-ant_sgt-ramus_vertical        61  20  60  0
+3140    wm-lh-Lat_Fissure-post_sgt                      61  60  100 0
+3141    wm-lh-Medial_wall                               25  25  25  0
+3142    wm-lh-Pole_occipital                            140 20  60  0
+3143    wm-lh-Pole_temporal                             220 180 20  0
+3144    wm-lh-S_calcarine                               63  180 180 0
+3145    wm-lh-S_central                                 221 20  10  0
+3146    wm-lh-S_central_insula                          21  220 20  0
+3147    wm-lh-S_cingulate-Main_part_and_Intracingulate  183 100 20  0
+3148    wm-lh-S_cingulate-Marginalis_part               221 20  100 0
+3149    wm-lh-S_circular_insula_anterior                221 60  140 0
+3150    wm-lh-S_circular_insula_inferior                221 20  220 0
+3151    wm-lh-S_circular_insula_superior                61  220 220 0
+3152    wm-lh-S_collateral_transverse_ant               100 200 200 0
+3153    wm-lh-S_collateral_transverse_post              10  200 200 0
+3154    wm-lh-S_frontal_inferior                        221 220 20  0
+3155    wm-lh-S_frontal_middle                          141 20  100 0
+3156    wm-lh-S_frontal_superior                        61  220 100 0
+3157    wm-lh-S_frontomarginal                          21  220 60  0
+3158    wm-lh-S_intermedius_primus-Jensen               141 60  20  0
+3159    wm-lh-S_intraparietal-and_Parietal_transverse   143 20  220 0
+3160    wm-lh-S_occipital_anterior                      61  20  180 0
+3161    wm-lh-S_occipital_middle_and_Lunatus            101 60  220 0
+3162    wm-lh-S_occipital_superior_and_transversalis    21  20  140 0
+3163    wm-lh-S_occipito-temporal_lateral               221 140 20  0
+3164    wm-lh-S_occipito-temporal_medial_and_S_Lingual  141 100 220 0
+3165    wm-lh-S_orbital-H_shapped                       101 20  20  0
+3166    wm-lh-S_orbital_lateral                         221 100 20  0
+3167    wm-lh-S_orbital_medial-Or_olfactory             181 200 20  0
+3168    wm-lh-S_paracentral                             21  180 140 0
+3169    wm-lh-S_parieto_occipital                       101 100 180 0
+3170    wm-lh-S_pericallosal                            181 220 20  0
+3171    wm-lh-S_postcentral                             21  140 200 0
+3172    wm-lh-S_precentral-Inferior-part                21  20  240 0
+3173    wm-lh-S_precentral-Superior-part                21  20  200 0
+3174    wm-lh-S_subcentral_ant                          61  180 60  0
+3175    wm-lh-S_subcentral_post                         61  180 250 0
+3176    wm-lh-S_suborbital                              21  20  60  0
+3177    wm-lh-S_subparietal                             101 60  60  0
+3178    wm-lh-S_supracingulate                          21  220 220 0
+3179    wm-lh-S_temporal_inferior                       21  180 180 0
+3180    wm-lh-S_temporal_superior                       223 220 60  0
+3181    wm-lh-S_temporal_transverse                     221 60  60  0
+
+4100    wm-rh-Unknown                                   0   0   0   0
+4101    wm-rh-Corpus_callosum                           50  50  50  0
+4102    wm-rh-G_and_S_Insula_ONLY_AVERAGE               180 20  30  0
+4103    wm-rh-G_cingulate-Isthmus                       60  25  25  0
+4104    wm-rh-G_cingulate-Main_part                     25  60  60  0
+4105    wm-rh-G_cuneus                                  180 20  20  0
+4106    wm-rh-G_frontal_inf-Opercular_part              220 20  100 0
+4107    wm-rh-G_frontal_inf-Orbital_part                140 60  60  0
+4108    wm-rh-G_frontal_inf-Triangular_part             180 220 140 0
+4109    wm-rh-G_frontal_middle                          140 100 180 0
+4110    wm-rh-G_frontal_superior                        180 20  140 0
+4111    wm-rh-G_frontomarginal                          140 20  140 0
+4112    wm-rh-G_insular_long                            21  10  10  0
+4113    wm-rh-G_insular_short                           225 140 140 0
+4114    wm-rh-G_and_S_occipital_inferior                23  60  180 0
+4115    wm-rh-G_occipital_middle                        180 60  180 0
+4116    wm-rh-G_occipital_superior                      20  220 60  0
+4117    wm-rh-G_occipit-temp_lat-Or_fusiform            60  20  140 0
+4118    wm-rh-G_occipit-temp_med-Lingual_part           220 180 140 0
+4119    wm-rh-G_occipit-temp_med-Parahippocampal_part   65  100 20  0
+4120    wm-rh-G_orbital                                 220 60  20  0
+4121    wm-rh-G_paracentral                             60  100 60  0
+4122    wm-rh-G_parietal_inferior-Angular_part          20  60  220 0
+4123    wm-rh-G_parietal_inferior-Supramarginal_part    100 100 60  0
+4124    wm-rh-G_parietal_superior                       220 180 220 0
+4125    wm-rh-G_postcentral                             20  180 140 0
+4126    wm-rh-G_precentral                              60  140 180 0
+4127    wm-rh-G_precuneus                               25  20  140 0
+4128    wm-rh-G_rectus                                  20  60  100 0
+4129    wm-rh-G_subcallosal                             60  220 20  0
+4130    wm-rh-G_subcentral                              60  20  220 0
+4131    wm-rh-G_temporal_inferior                       220 220 100 0
+4132    wm-rh-G_temporal_middle                         180 60  60  0
+4133    wm-rh-G_temp_sup-G_temp_transv_and_interm_S     60  60  220 0
+4134    wm-rh-G_temp_sup-Lateral_aspect                 220 60  220 0
+4135    wm-rh-G_temp_sup-Planum_polare                  65  220 60  0
+4136    wm-rh-G_temp_sup-Planum_tempolare               25  140 20  0
+4137    wm-rh-G_and_S_transverse_frontopolar            13  0   250 0
+4138    wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal      61  20  220 0
+4139    wm-rh-Lat_Fissure-ant_sgt-ramus_vertical        61  20  60  0
+4140    wm-rh-Lat_Fissure-post_sgt                      61  60  100 0
+4141    wm-rh-Medial_wall                               25  25  25  0
+4142    wm-rh-Pole_occipital                            140 20  60  0
+4143    wm-rh-Pole_temporal                             220 180 20  0
+4144    wm-rh-S_calcarine                               63  180 180 0
+4145    wm-rh-S_central                                 221 20  10  0
+4146    wm-rh-S_central_insula                          21  220 20  0
+4147    wm-rh-S_cingulate-Main_part_and_Intracingulate  183 100 20  0
+4148    wm-rh-S_cingulate-Marginalis_part               221 20  100 0
+4149    wm-rh-S_circular_insula_anterior                221 60  140 0
+4150    wm-rh-S_circular_insula_inferior                221 20  220 0
+4151    wm-rh-S_circular_insula_superior                61  220 220 0
+4152    wm-rh-S_collateral_transverse_ant               100 200 200 0
+4153    wm-rh-S_collateral_transverse_post              10  200 200 0
+4154    wm-rh-S_frontal_inferior                        221 220 20  0
+4155    wm-rh-S_frontal_middle                          141 20  100 0
+4156    wm-rh-S_frontal_superior                        61  220 100 0
+4157    wm-rh-S_frontomarginal                          21  220 60  0
+4158    wm-rh-S_intermedius_primus-Jensen               141 60  20  0
+4159    wm-rh-S_intraparietal-and_Parietal_transverse   143 20  220 0
+4160    wm-rh-S_occipital_anterior                      61  20  180 0
+4161    wm-rh-S_occipital_middle_and_Lunatus            101 60  220 0
+4162    wm-rh-S_occipital_superior_and_transversalis    21  20  140 0
+4163    wm-rh-S_occipito-temporal_lateral               221 140 20  0
+4164    wm-rh-S_occipito-temporal_medial_and_S_Lingual  141 100 220 0
+4165    wm-rh-S_orbital-H_shapped                       101 20  20  0
+4166    wm-rh-S_orbital_lateral                         221 100 20  0
+4167    wm-rh-S_orbital_medial-Or_olfactory             181 200 20  0
+4168    wm-rh-S_paracentral                             21  180 140 0
+4169    wm-rh-S_parieto_occipital                       101 100 180 0
+4170    wm-rh-S_pericallosal                            181 220 20  0
+4171    wm-rh-S_postcentral                             21  140 200 0
+4172    wm-rh-S_precentral-Inferior-part                21  20  240 0
+4173    wm-rh-S_precentral-Superior-part                21  20  200 0
+4174    wm-rh-S_subcentral_ant                          61  180 60  0
+4175    wm-rh-S_subcentral_post                         61  180 250 0
+4176    wm-rh-S_suborbital                              21  20  60  0
+4177    wm-rh-S_subparietal                             101 60  60  0
+4178    wm-rh-S_supracingulate                          21  220 220 0
+4179    wm-rh-S_temporal_inferior                       21  180 180 0
+4180    wm-rh-S_temporal_superior                       223 220 60  0
+4181    wm-rh-S_temporal_transverse                     221 60  60  0
+
+5001    Left-UnsegmentedWhiteMatter                     20  30  40  0
+5002    Right-UnsegmentedWhiteMatter                    20  30  40  0
+
+# Below is the color table for white-matter pathways produced by dmri_paths
+
+#No.   Label Name:                                      R   G   B   A
+#
+5100   fmajor                                           204 102 102 0
+5101   fminor                                           204 102 102 0
+#
+5102   lh.atr                                           255 255 102 0
+5103   lh.cab                                           153 204 0   0
+5104   lh.ccg                                           0   153 153 0
+5105   lh.cst                                           204 153 255 0
+5106   lh.ilf                                           255 153 51  0
+5107   lh.slfp                                          204 204 204 0
+5108   lh.slft                                          153 255 255 0
+5109   lh.unc                                           102 153 255 0
+#
+5110   rh.atr                                           255 255 102 0
+5111   rh.cab                                           153 204 0   0
+5112   rh.ccg                                           0   153 153 0
+5113   rh.cst                                           204 153 255 0
+5114   rh.ilf                                           255 153 51  0
+5115   rh.slfp                                          204 204 204 0
+5116   rh.slft                                          153 255 255 0
+5117   rh.unc                                           102 153 255 0
+
+# These are the same tracula labels as above in human-readable form
+5200   CC-ForcepsMajor                                  204 102 102 0
+5201   CC-ForcepsMinor                                  204 102 102 0
+5202   LAntThalRadiation                                255 255 102 0
+5203   LCingulumAngBundle                               153 204 0   0
+5204   LCingulumCingGyrus                               0   153 153 0
+5205   LCorticospinalTract                              204 153 255 0
+5206   LInfLongFas                                      255 153 51  0
+5207   LSupLongFasParietal                              204 204 204 0
+5208   LSupLongFasTemporal                              153 255 255 0
+5209   LUncinateFas                                     102 153 255 0
+5210   RAntThalRadiation                                255 255 102 0
+5211   RCingulumAngBundle                               153 204 0   0
+5212   RCingulumCingGyrus                               0   153 153 0
+5213   RCorticospinalTract                              204 153 255 0
+5214   RInfLongFas                                      255 153 51  0
+5215   RSupLongFasParietal                              204 204 204 0
+5216   RSupLongFasTemporal                              153 255 255 0
+5217   RUncinateFas                                     102 153 255 0
+
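Since the readable aliases sit exactly 100 above the short forms, the pairing can be expressed directly; a sketch with two pairs pulled from the tables above:

# Sketch: tracula short names (5100-5117) pair with readable aliases
# (5200-5217) at short index + 100.
aliases = {
    5102: 'LAntThalRadiation',    # lh.atr -> 5202
    5105: 'LCorticospinalTract',  # lh.cst -> 5205
}
for short_idx, readable in sorted(aliases.items()):
    print('%d %s' % (short_idx + 100, readable))
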
+########################################
+
+6000   CST-orig                                         0   255 0   0
+6001   CST-hammer                                       255 255 0   0
+6002   CST-CVS                                          0   255 255 0
+6003   CST-flirt                                        0   0   255 0
+
+6010   Left-SLF1                                        236 16  231 0
+6020   Right-SLF1                                       237 18  232 0
+
+6030   Left-SLF3                                        236 13  227 0
+6040   Right-SLF3                                       236 17  228 0
+
+6050   Left-CST                                         1   255 1   0
+6060   Right-CST                                        2   255 1   0
+
+6070   Left-SLF2                                        236 14  230 0
+6080   Right-SLF2                                       237 14  230 0
+
+#No.  Label Name:                                       R   G   B   A
+
+7001  Lateral-nucleus                                   72  132 181 0
+7002  Basolateral-nucleus                               243 243 243 0
+7003  Basal-nucleus                                     207 63  79  0
+7004  Centromedial-nucleus                              121 20  135 0
+7005  Central-nucleus                                   197 60  248 0
+7006  Medial-nucleus                                    2   149 2   0
+7007  Cortical-nucleus                                  221 249 166 0
+7008  Accessory-Basal-nucleus                           232 146 35  0
+7009  Corticoamygdaloid-transitio                       20  60  120 0
+7010  Anterior-amygdaloid-area-AAA                      250 250 0   0
+7011  Fusion-amygdala-HP-FAH                            122 187 222 0
+7012  Hippocampal-amygdala-transition-HATA              237 12  177 0
+7013  Endopiriform-nucleus                              10  49  255 0
+7014  Lateral-nucleus-olfactory-tract                   205 184 144 0
+7015  Paralaminar-nucleus                               45  205 165 0
+7016  Intercalated-nucleus                              117 160 175 0
+7017  Prepiriform-cortex                                221 217 21  0
+7018  Periamygdaloid-cortex                             20  60  120 0
+7019  Envelope-Amygdala                                 141 21  100 0
+7020  Extranuclear-Amydala                              225 140 141 0
+
+7100  Brainstem-inferior-colliculus                     42  201 168 0
+7101  Brainstem-cochlear-nucleus                        168 104 162 0
+
+8001  Thalamus-Anterior                                 74  130 181 0
+8002  Thalamus-Ventral-anterior                         242 241 240 0
+8003  Thalamus-Lateral-dorsal                           206 65  78  0
+8004  Thalamus-Lateral-posterior                        120 21  133 0
+8005  Thalamus-Ventral-lateral                          195 61  246 0
+8006  Thalamus-Ventral-posterior-medial                 3   147 6   0
+8007  Thalamus-Ventral-posterior-lateral                220 251 163 0
+8008  Thalamus-intralaminar                             232 146 33  0
+8009  Thalamus-centromedian                             4   114 14  0
+8010  Thalamus-mediodorsal                              121 184 220 0
+8011  Thalamus-medial                                   235 11  175 0
+8012  Thalamus-pulvinar                                 12  46  250 0
+8013  Thalamus-lateral-geniculate                       203 182 143 0
+8014  Thalamus-medial-geniculate                        42  204 167 0
+
+#
+# Labels for thalamus parcellation using probabilistic tractography. See:
+# Functional--Anatomical Validation and Individual Variation of Diffusion
+# Tractography-based Segmentation of the Human Thalamus; Cerebral Cortex
+# January 2005;15:31--39, doi:10.1093/cercor/bhh105, Advance Access
+# publication July 6, 2004
+#
+
+#No.    Label Name:                         R   G   B   A
+9000    ctx-lh-prefrontal                   30  5   30  0
+9001    ctx-lh-primary-motor                30  100 45  0
+9002    ctx-lh-premotor                     130 100 165 0
+9003    ctx-lh-temporal                     105 25  5   0
+9004    ctx-lh-posterior-parietal           125 70  55  0
+9005    ctx-lh-prim-sec-somatosensory       225 20  105 0
+9006    ctx-lh-occipital                    225 20  15  0
+
+9500    ctx-rh-prefrontal                   30  55  30  0
+9501    ctx-rh-primary-motor                30  150 45  0
+9502    ctx-rh-premotor                     130 150 165 0
+9503    ctx-rh-temporal                     105 75  5   0
+9504    ctx-rh-posterior-parietal           125 120 55  0
+9505    ctx-rh-prim-sec-somatosensory       225 70  105 0
+9506    ctx-rh-occipital                    225 70  15  0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg (with --a2009s flag) in which the aseg
+# cortex label is replaced by the labels in the aparc.a2009s. The
+# cortical labels are the same as in Simple_surface_labels2009.txt,
+# except that left hemisphere has 11100 added to the index and the
+# right has 12100 added.  The label names are also prepended with
+# ctx_lh_, ctx_rh_, wm_lh_ and wm_rh_ (note usage of _ instead of -
+# to differentiate from a2005s labels).
+
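For a2009s the offsets are 11100 (left) and 12100 (right), with underscores in the names; the same index arithmetic recovers the base label and the opposite-hemisphere index, as sketched here:

# a2009s offsets documented above: lh +11100, rh +12100.
idx = 11111                     # ctx_lh_G_cuneus in the table below
base = idx - 11100              # index in Simple_surface_labels2009.txt, per the rule above
rh_idx = idx + (12100 - 11100)  # 12111 -> ctx_rh_G_cuneus
print('%d %d' % (base, rh_idx))
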
+#No.   Label Name:                              R   G   B   A
+11100  ctx_lh_Unknown                           0   0   0   0
+11101  ctx_lh_G_and_S_frontomargin             23 220  60   0
+11102  ctx_lh_G_and_S_occipital_inf            23  60 180   0
+11103  ctx_lh_G_and_S_paracentral              63 100  60   0
+11104  ctx_lh_G_and_S_subcentral               63  20 220   0
+11105  ctx_lh_G_and_S_transv_frontopol         13   0 250   0
+11106  ctx_lh_G_and_S_cingul-Ant               26  60   0   0
+11107  ctx_lh_G_and_S_cingul-Mid-Ant           26  60  75   0
+11108  ctx_lh_G_and_S_cingul-Mid-Post          26  60 150   0
+11109  ctx_lh_G_cingul-Post-dorsal             25  60 250   0
+11110  ctx_lh_G_cingul-Post-ventral            60  25  25   0
+11111  ctx_lh_G_cuneus                        180  20  20   0
+11112  ctx_lh_G_front_inf-Opercular           220  20 100   0
+11113  ctx_lh_G_front_inf-Orbital             140  60  60   0
+11114  ctx_lh_G_front_inf-Triangul            180 220 140   0
+11115  ctx_lh_G_front_middle                  140 100 180   0
+11116  ctx_lh_G_front_sup                     180  20 140   0
+11117  ctx_lh_G_Ins_lg_and_S_cent_ins          23  10  10   0
+11118  ctx_lh_G_insular_short                 225 140 140   0
+11119  ctx_lh_G_occipital_middle              180  60 180   0
+11120  ctx_lh_G_occipital_sup                  20 220  60   0
+11121  ctx_lh_G_oc-temp_lat-fusifor            60  20 140   0
+11122  ctx_lh_G_oc-temp_med-Lingual           220 180 140   0
+11123  ctx_lh_G_oc-temp_med-Parahip            65 100  20   0
+11124  ctx_lh_G_orbital                       220  60  20   0
+11125  ctx_lh_G_pariet_inf-Angular             20  60 220   0
+11126  ctx_lh_G_pariet_inf-Supramar           100 100  60   0
+11127  ctx_lh_G_parietal_sup                  220 180 220   0
+11128  ctx_lh_G_postcentral                    20 180 140   0
+11129  ctx_lh_G_precentral                     60 140 180   0
+11130  ctx_lh_G_precuneus                      25  20 140   0
+11131  ctx_lh_G_rectus                         20  60 100   0
+11132  ctx_lh_G_subcallosal                    60 220  20   0
+11133  ctx_lh_G_temp_sup-G_T_transv            60  60 220   0
+11134  ctx_lh_G_temp_sup-Lateral              220  60 220   0
+11135  ctx_lh_G_temp_sup-Plan_polar            65 220  60   0
+11136  ctx_lh_G_temp_sup-Plan_tempo            25 140  20   0
+11137  ctx_lh_G_temporal_inf                  220 220 100   0
+11138  ctx_lh_G_temporal_middle               180  60  60   0
+11139  ctx_lh_Lat_Fis-ant-Horizont             61  20 220   0
+11140  ctx_lh_Lat_Fis-ant-Vertical             61  20  60   0
+11141  ctx_lh_Lat_Fis-post                     61  60 100   0
+11142  ctx_lh_Medial_wall                      25  25  25   0
+11143  ctx_lh_Pole_occipital                  140  20  60   0
+11144  ctx_lh_Pole_temporal                   220 180  20   0
+11145  ctx_lh_S_calcarine                      63 180 180   0
+11146  ctx_lh_S_central                       221  20  10   0
+11147  ctx_lh_S_cingul-Marginalis             221  20 100   0
+11148  ctx_lh_S_circular_insula_ant           221  60 140   0
+11149  ctx_lh_S_circular_insula_inf           221  20 220   0
+11150  ctx_lh_S_circular_insula_sup            61 220 220   0
+11151  ctx_lh_S_collat_transv_ant             100 200 200   0
+11152  ctx_lh_S_collat_transv_post             10 200 200   0
+11153  ctx_lh_S_front_inf                     221 220  20   0
+11154  ctx_lh_S_front_middle                  141  20 100   0
+11155  ctx_lh_S_front_sup                      61 220 100   0
+11156  ctx_lh_S_interm_prim-Jensen            141  60  20   0
+11157  ctx_lh_S_intrapariet_and_P_trans       143  20 220   0
+11158  ctx_lh_S_oc_middle_and_Lunatus         101  60 220   0
+11159  ctx_lh_S_oc_sup_and_transversal         21  20 140   0
+11160  ctx_lh_S_occipital_ant                  61  20 180   0
+11161  ctx_lh_S_oc-temp_lat                   221 140  20   0
+11162  ctx_lh_S_oc-temp_med_and_Lingual       141 100 220   0
+11163  ctx_lh_S_orbital_lateral               221 100  20   0
+11164  ctx_lh_S_orbital_med-olfact            181 200  20   0
+11165  ctx_lh_S_orbital-H_Shaped              101  20  20   0
+11166  ctx_lh_S_parieto_occipital             101 100 180   0
+11167  ctx_lh_S_pericallosal                  181 220  20   0
+11168  ctx_lh_S_postcentral                    21 140 200   0
+11169  ctx_lh_S_precentral-inf-part            21  20 240   0
+11170  ctx_lh_S_precentral-sup-part            21  20 200   0
+11171  ctx_lh_S_suborbital                     21  20  60   0
+11172  ctx_lh_S_subparietal                   101  60  60   0
+11173  ctx_lh_S_temporal_inf                   21 180 180   0
+11174  ctx_lh_S_temporal_sup                  223 220  60   0
+11175  ctx_lh_S_temporal_transverse           221  60  60   0
+
+12100  ctx_rh_Unknown                           0   0   0   0
+12101  ctx_rh_G_and_S_frontomargin             23 220  60   0
+12102  ctx_rh_G_and_S_occipital_inf            23  60 180   0
+12103  ctx_rh_G_and_S_paracentral              63 100  60   0
+12104  ctx_rh_G_and_S_subcentral               63  20 220   0
+12105  ctx_rh_G_and_S_transv_frontopol         13   0 250   0
+12106  ctx_rh_G_and_S_cingul-Ant               26  60   0   0
+12107  ctx_rh_G_and_S_cingul-Mid-Ant           26  60  75   0
+12108  ctx_rh_G_and_S_cingul-Mid-Post          26  60 150   0
+12109  ctx_rh_G_cingul-Post-dorsal             25  60 250   0
+12110  ctx_rh_G_cingul-Post-ventral            60  25  25   0
+12111  ctx_rh_G_cuneus                        180  20  20   0
+12112  ctx_rh_G_front_inf-Opercular           220  20 100   0
+12113  ctx_rh_G_front_inf-Orbital             140  60  60   0
+12114  ctx_rh_G_front_inf-Triangul            180 220 140   0
+12115  ctx_rh_G_front_middle                  140 100 180   0
+12116  ctx_rh_G_front_sup                     180  20 140   0
+12117  ctx_rh_G_Ins_lg_and_S_cent_ins          23  10  10   0
+12118  ctx_rh_G_insular_short                 225 140 140   0
+12119  ctx_rh_G_occipital_middle              180  60 180   0
+12120  ctx_rh_G_occipital_sup                  20 220  60   0
+12121  ctx_rh_G_oc-temp_lat-fusifor            60  20 140   0
+12122  ctx_rh_G_oc-temp_med-Lingual           220 180 140   0
+12123  ctx_rh_G_oc-temp_med-Parahip            65 100  20   0
+12124  ctx_rh_G_orbital                       220  60  20   0
+12125  ctx_rh_G_pariet_inf-Angular             20  60 220   0
+12126  ctx_rh_G_pariet_inf-Supramar           100 100  60   0
+12127  ctx_rh_G_parietal_sup                  220 180 220   0
+12128  ctx_rh_G_postcentral                    20 180 140   0
+12129  ctx_rh_G_precentral                     60 140 180   0
+12130  ctx_rh_G_precuneus                      25  20 140   0
+12131  ctx_rh_G_rectus                         20  60 100   0
+12132  ctx_rh_G_subcallosal                    60 220  20   0
+12133  ctx_rh_G_temp_sup-G_T_transv            60  60 220   0
+12134  ctx_rh_G_temp_sup-Lateral              220  60 220   0
+12135  ctx_rh_G_temp_sup-Plan_polar            65 220  60   0
+12136  ctx_rh_G_temp_sup-Plan_tempo            25 140  20   0
+12137  ctx_rh_G_temporal_inf                  220 220 100   0
+12138  ctx_rh_G_temporal_middle               180  60  60   0
+12139  ctx_rh_Lat_Fis-ant-Horizont             61  20 220   0
+12140  ctx_rh_Lat_Fis-ant-Vertical             61  20  60   0
+12141  ctx_rh_Lat_Fis-post                     61  60 100   0
+12142  ctx_rh_Medial_wall                      25  25  25   0
+12143  ctx_rh_Pole_occipital                  140  20  60   0
+12144  ctx_rh_Pole_temporal                   220 180  20   0
+12145  ctx_rh_S_calcarine                      63 180 180   0
+12146  ctx_rh_S_central                       221  20  10   0
+12147  ctx_rh_S_cingul-Marginalis             221  20 100   0
+12148  ctx_rh_S_circular_insula_ant           221  60 140   0
+12149  ctx_rh_S_circular_insula_inf           221  20 220   0
+12150  ctx_rh_S_circular_insula_sup            61 220 220   0
+12151  ctx_rh_S_collat_transv_ant             100 200 200   0
+12152  ctx_rh_S_collat_transv_post             10 200 200   0
+12153  ctx_rh_S_front_inf                     221 220  20   0
+12154  ctx_rh_S_front_middle                  141  20 100   0
+12155  ctx_rh_S_front_sup                      61 220 100   0
+12156  ctx_rh_S_interm_prim-Jensen            141  60  20   0
+12157  ctx_rh_S_intrapariet_and_P_trans       143  20 220   0
+12158  ctx_rh_S_oc_middle_and_Lunatus         101  60 220   0
+12159  ctx_rh_S_oc_sup_and_transversal         21  20 140   0
+12160  ctx_rh_S_occipital_ant                  61  20 180   0
+12161  ctx_rh_S_oc-temp_lat                   221 140  20   0
+12162  ctx_rh_S_oc-temp_med_and_Lingual       141 100 220   0
+12163  ctx_rh_S_orbital_lateral               221 100  20   0
+12164  ctx_rh_S_orbital_med-olfact            181 200  20   0
+12165  ctx_rh_S_orbital-H_Shaped              101  20  20   0
+12166  ctx_rh_S_parieto_occipital             101 100 180   0
+12167  ctx_rh_S_pericallosal                  181 220  20   0
+12168  ctx_rh_S_postcentral                    21 140 200   0
+12169  ctx_rh_S_precentral-inf-part            21  20 240   0
+12170  ctx_rh_S_precentral-sup-part            21  20 200   0
+12171  ctx_rh_S_suborbital                     21  20  60   0
+12172  ctx_rh_S_subparietal                   101  60  60   0
+12173  ctx_rh_S_temporal_inf                   21 180 180   0
+12174  ctx_rh_S_temporal_sup                  223 220  60   0
+12175  ctx_rh_S_temporal_transverse           221  60  60   0
+
+#No.   Label Name:                              R   G   B   A
+13100  wm_lh_Unknown                            0   0   0   0
+13101  wm_lh_G_and_S_frontomargin              23 220  60   0
+13102  wm_lh_G_and_S_occipital_inf             23  60 180   0
+13103  wm_lh_G_and_S_paracentral               63 100  60   0
+13104  wm_lh_G_and_S_subcentral                63  20 220   0
+13105  wm_lh_G_and_S_transv_frontopol          13   0 250   0
+13106  wm_lh_G_and_S_cingul-Ant                26  60   0   0
+13107  wm_lh_G_and_S_cingul-Mid-Ant            26  60  75   0
+13108  wm_lh_G_and_S_cingul-Mid-Post           26  60 150   0
+13109  wm_lh_G_cingul-Post-dorsal              25  60 250   0
+13110  wm_lh_G_cingul-Post-ventral             60  25  25   0
+13111  wm_lh_G_cuneus                         180  20  20   0
+13112  wm_lh_G_front_inf-Opercular            220  20 100   0
+13113  wm_lh_G_front_inf-Orbital              140  60  60   0
+13114  wm_lh_G_front_inf-Triangul             180 220 140   0
+13115  wm_lh_G_front_middle                   140 100 180   0
+13116  wm_lh_G_front_sup                      180  20 140   0
+13117  wm_lh_G_Ins_lg_and_S_cent_ins           23  10  10   0
+13118  wm_lh_G_insular_short                  225 140 140   0
+13119  wm_lh_G_occipital_middle               180  60 180   0
+13120  wm_lh_G_occipital_sup                   20 220  60   0
+13121  wm_lh_G_oc-temp_lat-fusifor             60  20 140   0
+13122  wm_lh_G_oc-temp_med-Lingual            220 180 140   0
+13123  wm_lh_G_oc-temp_med-Parahip             65 100  20   0
+13124  wm_lh_G_orbital                        220  60  20   0
+13125  wm_lh_G_pariet_inf-Angular              20  60 220   0
+13126  wm_lh_G_pariet_inf-Supramar            100 100  60   0
+13127  wm_lh_G_parietal_sup                   220 180 220   0
+13128  wm_lh_G_postcentral                     20 180 140   0
+13129  wm_lh_G_precentral                      60 140 180   0
+13130  wm_lh_G_precuneus                       25  20 140   0
+13131  wm_lh_G_rectus                          20  60 100   0
+13132  wm_lh_G_subcallosal                     60 220  20   0
+13133  wm_lh_G_temp_sup-G_T_transv             60  60 220   0
+13134  wm_lh_G_temp_sup-Lateral               220  60 220   0
+13135  wm_lh_G_temp_sup-Plan_polar             65 220  60   0
+13136  wm_lh_G_temp_sup-Plan_tempo             25 140  20   0
+13137  wm_lh_G_temporal_inf                   220 220 100   0
+13138  wm_lh_G_temporal_middle                180  60  60   0
+13139  wm_lh_Lat_Fis-ant-Horizont              61  20 220   0
+13140  wm_lh_Lat_Fis-ant-Vertical              61  20  60   0
+13141  wm_lh_Lat_Fis-post                      61  60 100   0
+13142  wm_lh_Medial_wall                       25  25  25   0
+13143  wm_lh_Pole_occipital                   140  20  60   0
+13144  wm_lh_Pole_temporal                    220 180  20   0
+13145  wm_lh_S_calcarine                       63 180 180   0
+13146  wm_lh_S_central                        221  20  10   0
+13147  wm_lh_S_cingul-Marginalis              221  20 100   0
+13148  wm_lh_S_circular_insula_ant            221  60 140   0
+13149  wm_lh_S_circular_insula_inf            221  20 220   0
+13150  wm_lh_S_circular_insula_sup             61 220 220   0
+13151  wm_lh_S_collat_transv_ant              100 200 200   0
+13152  wm_lh_S_collat_transv_post              10 200 200   0
+13153  wm_lh_S_front_inf                      221 220  20   0
+13154  wm_lh_S_front_middle                   141  20 100   0
+13155  wm_lh_S_front_sup                       61 220 100   0
+13156  wm_lh_S_interm_prim-Jensen             141  60  20   0
+13157  wm_lh_S_intrapariet_and_P_trans        143  20 220   0
+13158  wm_lh_S_oc_middle_and_Lunatus          101  60 220   0
+13159  wm_lh_S_oc_sup_and_transversal          21  20 140   0
+13160  wm_lh_S_occipital_ant                   61  20 180   0
+13161  wm_lh_S_oc-temp_lat                    221 140  20   0
+13162  wm_lh_S_oc-temp_med_and_Lingual        141 100 220   0
+13163  wm_lh_S_orbital_lateral                221 100  20   0
+13164  wm_lh_S_orbital_med-olfact             181 200  20   0
+13165  wm_lh_S_orbital-H_Shaped               101  20  20   0
+13166  wm_lh_S_parieto_occipital              101 100 180   0
+13167  wm_lh_S_pericallosal                   181 220  20   0
+13168  wm_lh_S_postcentral                     21 140 200   0
+13169  wm_lh_S_precentral-inf-part             21  20 240   0
+13170  wm_lh_S_precentral-sup-part             21  20 200   0
+13171  wm_lh_S_suborbital                      21  20  60   0
+13172  wm_lh_S_subparietal                    101  60  60   0
+13173  wm_lh_S_temporal_inf                    21 180 180   0
+13174  wm_lh_S_temporal_sup                   223 220  60   0
+13175  wm_lh_S_temporal_transverse            221  60  60   0
+
+14100  wm_rh_Unknown                            0   0   0   0
+14101  wm_rh_G_and_S_frontomargin              23 220  60   0
+14102  wm_rh_G_and_S_occipital_inf             23  60 180   0
+14103  wm_rh_G_and_S_paracentral               63 100  60   0
+14104  wm_rh_G_and_S_subcentral                63  20 220   0
+14105  wm_rh_G_and_S_transv_frontopol          13   0 250   0
+14106  wm_rh_G_and_S_cingul-Ant                26  60   0   0
+14107  wm_rh_G_and_S_cingul-Mid-Ant            26  60  75   0
+14108  wm_rh_G_and_S_cingul-Mid-Post           26  60 150   0
+14109  wm_rh_G_cingul-Post-dorsal              25  60 250   0
+14110  wm_rh_G_cingul-Post-ventral             60  25  25   0
+14111  wm_rh_G_cuneus                         180  20  20   0
+14112  wm_rh_G_front_inf-Opercular            220  20 100   0
+14113  wm_rh_G_front_inf-Orbital              140  60  60   0
+14114  wm_rh_G_front_inf-Triangul             180 220 140   0
+14115  wm_rh_G_front_middle                   140 100 180   0
+14116  wm_rh_G_front_sup                      180  20 140   0
+14117  wm_rh_G_Ins_lg_and_S_cent_ins           23  10  10   0
+14118  wm_rh_G_insular_short                  225 140 140   0
+14119  wm_rh_G_occipital_middle               180  60 180   0
+14120  wm_rh_G_occipital_sup                   20 220  60   0
+14121  wm_rh_G_oc-temp_lat-fusifor             60  20 140   0
+14122  wm_rh_G_oc-temp_med-Lingual            220 180 140   0
+14123  wm_rh_G_oc-temp_med-Parahip             65 100  20   0
+14124  wm_rh_G_orbital                        220  60  20   0
+14125  wm_rh_G_pariet_inf-Angular              20  60 220   0
+14126  wm_rh_G_pariet_inf-Supramar            100 100  60   0
+14127  wm_rh_G_parietal_sup                   220 180 220   0
+14128  wm_rh_G_postcentral                     20 180 140   0
+14129  wm_rh_G_precentral                      60 140 180   0
+14130  wm_rh_G_precuneus                       25  20 140   0
+14131  wm_rh_G_rectus                          20  60 100   0
+14132  wm_rh_G_subcallosal                     60 220  20   0
+14133  wm_rh_G_temp_sup-G_T_transv             60  60 220   0
+14134  wm_rh_G_temp_sup-Lateral               220  60 220   0
+14135  wm_rh_G_temp_sup-Plan_polar             65 220  60   0
+14136  wm_rh_G_temp_sup-Plan_tempo             25 140  20   0
+14137  wm_rh_G_temporal_inf                   220 220 100   0
+14138  wm_rh_G_temporal_middle                180  60  60   0
+14139  wm_rh_Lat_Fis-ant-Horizont              61  20 220   0
+14140  wm_rh_Lat_Fis-ant-Vertical              61  20  60   0
+14141  wm_rh_Lat_Fis-post                      61  60 100   0
+14142  wm_rh_Medial_wall                       25  25  25   0
+14143  wm_rh_Pole_occipital                   140  20  60   0
+14144  wm_rh_Pole_temporal                    220 180  20   0
+14145  wm_rh_S_calcarine                       63 180 180   0
+14146  wm_rh_S_central                        221  20  10   0
+14147  wm_rh_S_cingul-Marginalis              221  20 100   0
+14148  wm_rh_S_circular_insula_ant            221  60 140   0
+14149  wm_rh_S_circular_insula_inf            221  20 220   0
+14150  wm_rh_S_circular_insula_sup             61 220 220   0
+14151  wm_rh_S_collat_transv_ant              100 200 200   0
+14152  wm_rh_S_collat_transv_post              10 200 200   0
+14153  wm_rh_S_front_inf                      221 220  20   0
+14154  wm_rh_S_front_middle                   141  20 100   0
+14155  wm_rh_S_front_sup                       61 220 100   0
+14156  wm_rh_S_interm_prim-Jensen             141  60  20   0
+14157  wm_rh_S_intrapariet_and_P_trans        143  20 220   0
+14158  wm_rh_S_oc_middle_and_Lunatus          101  60 220   0
+14159  wm_rh_S_oc_sup_and_transversal          21  20 140   0
+14160  wm_rh_S_occipital_ant                   61  20 180   0
+14161  wm_rh_S_oc-temp_lat                    221 140  20   0
+14162  wm_rh_S_oc-temp_med_and_Lingual        141 100 220   0
+14163  wm_rh_S_orbital_lateral                221 100  20   0
+14164  wm_rh_S_orbital_med-olfact             181 200  20   0
+14165  wm_rh_S_orbital-H_Shaped               101  20  20   0
+14166  wm_rh_S_parieto_occipital              101 100 180   0
+14167  wm_rh_S_pericallosal                   181 220  20   0
+14168  wm_rh_S_postcentral                     21 140 200   0
+14169  wm_rh_S_precentral-inf-part             21  20 240   0
+14170  wm_rh_S_precentral-sup-part             21  20 200   0
+14171  wm_rh_S_suborbital                      21  20  60   0
+14172  wm_rh_S_subparietal                    101  60  60   0
+14173  wm_rh_S_temporal_inf                    21 180 180   0
+14174  wm_rh_S_temporal_sup                   223 220  60   0
+14175  wm_rh_S_temporal_transverse            221  60  60   0
+
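For reference, the index scheme the comments above describe can be applied
mechanically: ctx_lh_/ctx_rh_ labels live at 11100/12100 plus the a2009s
index, and the wm_lh_/wm_rh_ blocks at 13100/14100. A minimal sketch of that
mapping (the helper name is ours, purely illustrative):

A2009S_OFFSET = {('ctx', 'lh'): 11100, ('ctx', 'rh'): 12100,
                 ('wm', 'lh'): 13100, ('wm', 'rh'): 14100}

def a2009s_id(index, tissue='ctx', hemi='lh'):
    """Map a 0-based aparc.a2009s label index to its seg-volume id."""
    # e.g. index 1 (G_and_S_frontomargin) in the left cortex -> 11101
    return A2009S_OFFSET[(tissue, hemi)] + index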
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def.dat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def.dat
new file mode 100644
index 0000000..dc4eee6
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def.dat
@@ -0,0 +1,461 @@
+#
+#	MEG coil definition file
+#
+#       Copyright 2005 - 2009
+#
+#       Matti Hamalainen
+#       Athinoula A. Martinos Center for Biomedical Imaging
+#       Charlestown, MA, USA
+#
+#
+#	<class>	<id> <accuracy> <np> <size> <baseline> "<description>"
+#
+# struct class id accuracy num_points size baseline description
+# format '%d %d %d %d %e %e %s'
+#
+#	<w_1> <x_1/m> <y_1/m> <z_1/m> <nx_1> <ny_1> <nz_1>
+#
+# struct w     x       y       z       nx     ny     nz
+# format '%f %e %e %e %e %e %e'
+#
+#	....
+#
+#	<w_np> <x_np/m> <y_np/m> <z_np/m> <nx_np> <ny_np> <nz_np>
+#
+#	<class>		1	magnetometer
+#			2	axial gradiometer
+#			3	planar gradiometer
+#			4	axial second-order gradiometer
+#
+#	<accuracy>	0       point approximation
+#                       1	normal
+#			2	accurate
+#
+#       Produced with:
+#
+#	mne_list_coil_def version 1.12 compiled at Nov 19 2014 04:19:15
+#
+3   2       0   2  2.789e-02  1.620e-02	"Neuromag-122 planar gradiometer size = 27.89  mm base = 16.20  mm"
+ 61.7284  8.100e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+-61.7284 -8.100e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+3   2       1   2  2.789e-02  1.620e-02	"Neuromag-122 planar gradiometer size = 27.89  mm base = 16.20  mm"
+ 61.7284  8.100e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+-61.7284 -8.100e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+3   2       2   8  2.789e-02  1.620e-02	"Neuromag-122 planar gradiometer size = 27.89  mm base = 16.20  mm"
+ 15.1057  1.111e-02  7.680e-03  0.000e+00  0.000  0.000  1.000
+ 15.1057  5.440e-03  7.680e-03  0.000e+00  0.000  0.000  1.000
+ 15.1057  5.440e-03 -7.680e-03  0.000e+00  0.000  0.000  1.000
+ 15.1057  1.111e-02 -7.680e-03  0.000e+00  0.000  0.000  1.000
+-15.1057 -1.111e-02  7.680e-03  0.000e+00  0.000  0.000  1.000
+-15.1057 -5.440e-03  7.680e-03  0.000e+00  0.000  0.000  1.000
+-15.1057 -5.440e-03 -7.680e-03  0.000e+00  0.000  0.000  1.000
+-15.1057 -1.111e-02 -7.680e-03  0.000e+00  0.000  0.000  1.000
+1   2000    0   1  0.000e+00  0.000e+00	"Point magnetometer"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   2000    1   1  0.000e+00  0.000e+00	"Point magnetometer"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   2000    2   1  0.000e+00  0.000e+00	"Point magnetometer"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+3   3012    0   2  2.639e-02  1.680e-02	"Vectorview planar gradiometer T1 size = 26.39  mm base = 16.80  mm"
+ 59.5238  8.400e-03  0.000e+00  3.000e-04  0.000  0.000  1.000
+-59.5238 -8.400e-03  0.000e+00  3.000e-04  0.000  0.000  1.000
+3   3012    1   4  2.639e-02  1.680e-02	"Vectorview planar gradiometer T1 size = 26.39  mm base = 16.80  mm"
+ 29.7619  8.400e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 29.7619  8.400e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-29.7619 -8.400e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+-29.7619 -8.400e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+3   3012    2   8  2.639e-02  1.680e-02	"Vectorview planar gradiometer T1 size = 26.39  mm base = 16.80  mm"
+ 14.9858  1.079e-02  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  5.891e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  5.891e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  1.079e-02 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -1.079e-02  6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -5.891e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -5.891e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -1.079e-02 -6.713e-03  3.000e-04  0.000  0.000  1.000
+3   3013    0   2  2.639e-02  1.680e-02	"Vectorview planar gradiometer T2 size = 26.39  mm base = 16.80  mm"
+ 59.5238  8.400e-03  0.000e+00  3.000e-04  0.000  0.000  1.000
+-59.5238 -8.400e-03  0.000e+00  3.000e-04  0.000  0.000  1.000
+3   3013    1   4  2.639e-02  1.680e-02	"Vectorview planar gradiometer T2 size = 26.39  mm base = 16.80  mm"
+ 29.7619  8.400e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 29.7619  8.400e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-29.7619 -8.400e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+-29.7619 -8.400e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+3   3013    2   8  2.639e-02  1.680e-02	"Vectorview planar gradiometer T2 size = 26.39  mm base = 16.80  mm"
+ 14.9858  1.079e-02  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  5.891e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  5.891e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+ 14.9858  1.079e-02 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -1.079e-02  6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -5.891e-03  6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -5.891e-03 -6.713e-03  3.000e-04  0.000  0.000  1.000
+-14.9858 -1.079e-02 -6.713e-03  3.000e-04  0.000  0.000  1.000
+1   3022    0   1  2.580e-02  0.000e+00	"Vectorview magnetometer T1 size = 25.80  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   3022    1   4  2.580e-02  0.000e+00	"Vectorview magnetometer T1 size = 25.80  mm"
+  0.2500 -6.450e-03 -6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500 -6.450e-03  6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  6.450e-03 -6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  6.450e-03  6.450e-03  3.000e-04  0.000  0.000  1.000
+1   3022    2  16  2.580e-02  0.000e+00	"Vectorview magnetometer T1 size = 25.80  mm"
+  0.0625 -9.675e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+1   3023    0   1  2.580e-02  0.000e+00	"Vectorview magnetometer T2 size = 25.80  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   3023    1   4  2.580e-02  0.000e+00	"Vectorview magnetometer T2 size = 25.80  mm"
+  0.2500 -6.450e-03 -6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500 -6.450e-03  6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  6.450e-03 -6.450e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  6.450e-03  6.450e-03  3.000e-04  0.000  0.000  1.000
+1   3023    2  16  2.580e-02  0.000e+00	"Vectorview magnetometer T2 size = 25.80  mm"
+  0.0625 -9.675e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -9.675e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -3.225e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  3.225e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03 -9.675e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03 -3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03  3.225e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  9.675e-03  9.675e-03  3.000e-04  0.000  0.000  1.000
+1   3024    0   1  2.100e-02  0.000e+00	"Vectorview magnetometer T3 size = 21.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   3024    1   4  2.100e-02  0.000e+00	"Vectorview magnetometer T3 size = 21.00  mm"
+  0.2500 -5.250e-03 -5.250e-03  3.000e-04  0.000  0.000  1.000
+  0.2500 -5.250e-03  5.250e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  5.250e-03 -5.250e-03  3.000e-04  0.000  0.000  1.000
+  0.2500  5.250e-03  5.250e-03  3.000e-04  0.000  0.000  1.000
+1   3024    2  16  2.100e-02  0.000e+00	"Vectorview magnetometer T3 size = 21.00  mm"
+  0.0625 -7.875e-03 -7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -7.875e-03 -2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -7.875e-03  2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -7.875e-03  7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -2.625e-03 -7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -2.625e-03 -2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -2.625e-03  2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625 -2.625e-03  7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  2.625e-03 -7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  2.625e-03 -2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  2.625e-03  2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  2.625e-03  7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  7.875e-03 -7.875e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  7.875e-03 -2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  7.875e-03  2.625e-03  3.000e-04  0.000  0.000  1.000
+  0.0625  7.875e-03  7.875e-03  3.000e-04  0.000  0.000  1.000
+1   4001    0   1  2.300e-02  0.000e+00	"Magnes WH2500 magnetometer size = 23.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   4001    1   4  2.300e-02  0.000e+00	"Magnes WH2500 magnetometer size = 23.00  mm"
+  0.2500  5.750e-03  5.750e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.750e-03  5.750e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.750e-03 -5.750e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  5.750e-03 -5.750e-03  0.000e+00  0.000  0.000  1.000
+1   4001    2   7  2.300e-02  0.000e+00	"Magnes WH2500 magnetometer size = 23.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  9.390e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -9.390e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.695e-03  8.132e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  4.695e-03 -8.132e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.695e-03  8.132e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.695e-03 -8.132e-03  0.000e+00  0.000  0.000  1.000
+2   4002    0   2  1.800e-02  5.000e-02	"Magnes WH3600 gradiometer size = 18.00  mm base = 50.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+2   4002    1   8  1.800e-02  5.000e-02	"Magnes WH3600 gradiometer size = 18.00  mm base = 50.00  mm"
+  0.2500  4.500e-03  4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.500e-03  4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.500e-03 -4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.500e-03 -4.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  4.500e-03  4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -4.500e-03  4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -4.500e-03 -4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500  4.500e-03 -4.500e-03  5.000e-02  0.000  0.000  1.000
+2   4002    2  14  1.800e-02  5.000e-02	"Magnes WH3600 gradiometer size = 18.00  mm base = 50.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  7.348e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -7.348e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  3.674e-03  6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  3.674e-03 -6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.674e-03  6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.674e-03 -6.364e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  7.348e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250 -7.348e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.674e-03  6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.674e-03 -6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.674e-03  6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.674e-03 -6.364e-03  5.000e-02  0.000  0.000  1.000
+1   4003    0   1  3.000e-02  0.000e+00	"Magnes reference magnetometer size = 30.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   4003    1   4  3.000e-02  0.000e+00	"Magnes reference magnetometer size = 30.00  mm"
+  0.2500  7.500e-03  7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -7.500e-03  7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -7.500e-03 -7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  7.500e-03 -7.500e-03  0.000e+00  0.000  0.000  1.000
+1   4003    2   4  3.000e-02  0.000e+00	"Magnes reference magnetometer size = 30.00  mm"
+  0.2500  7.500e-03  7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -7.500e-03  7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -7.500e-03 -7.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  7.500e-03 -7.500e-03  0.000e+00  0.000  0.000  1.000
+2   4004    0   2  8.000e-02  1.350e-01	"Magnes reference gradiometer (diag) size = 80.00  mm base = 135.00 mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  1.350e-01  0.000  0.000  1.000
+2   4004    1   8  8.000e-02  1.350e-01	"Magnes reference gradiometer (diag) size = 80.00  mm base = 135.00 mm"
+  0.2500  2.000e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.000e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.000e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  2.000e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500  2.000e-02  2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500 -2.000e-02  2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500 -2.000e-02 -2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500  2.000e-02 -2.000e-02  1.350e-01  0.000  0.000  1.000
+2   4004    2   8  8.000e-02  1.350e-01	"Magnes reference gradiometer (diag) size = 80.00  mm base = 135.00 mm"
+  0.2500  2.000e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.000e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.000e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  2.000e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500  2.000e-02  2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500 -2.000e-02  2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500 -2.000e-02 -2.000e-02  1.350e-01  0.000  0.000  1.000
+ -0.2500  2.000e-02 -2.000e-02  1.350e-01  0.000  0.000  1.000
+2   4005    0   2  8.000e-02  1.350e-01	"Magnes reference gradiometer (offdiag) size = 80.00  mm base = 135.00 mm"
+  1.0000  6.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000 -6.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+2   4005    1   8  8.000e-02  1.350e-01	"Magnes reference gradiometer (offdiag) size = 80.00  mm base = 135.00 mm"
+  0.2500  8.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  4.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  4.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  8.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -8.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -8.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+2   4005    2   8  8.000e-02  1.350e-01	"Magnes reference gradiometer (offdiag) size = 80.00  mm base = 135.00 mm"
+  0.2500  8.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  4.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  4.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+  0.2500  8.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -8.750e-02  2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -8.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.750e-02 -2.000e-02  0.000e+00  0.000  0.000  1.000
+2   5001    0   2  1.800e-02  5.000e-02	"CTF axial gradiometer size = 18.00  mm base = 50.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+2   5001    1   8  1.800e-02  5.000e-02	"CTF axial gradiometer size = 18.00  mm base = 50.00  mm"
+  0.2500  4.500e-03  4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.500e-03  4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.500e-03 -4.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.500e-03 -4.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  4.500e-03  4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -4.500e-03  4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -4.500e-03 -4.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500  4.500e-03 -4.500e-03  5.000e-02  0.000  0.000  1.000
+2   5001    2  14  1.800e-02  5.000e-02	"CTF axial gradiometer size = 18.00  mm base = 50.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  7.348e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -7.348e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  3.674e-03  6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  3.674e-03 -6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.674e-03  6.364e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.674e-03 -6.364e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  7.348e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250 -7.348e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.674e-03  6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.674e-03 -6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.674e-03  6.364e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.674e-03 -6.364e-03  5.000e-02  0.000  0.000  1.000
+1   5002    0   1  1.600e-02  0.000e+00	"CTF reference magnetometer size = 16.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   5002    1   4  1.600e-02  0.000e+00	"CTF reference magnetometer size = 16.00  mm"
+  0.2500  4.000e-03  4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.000e-03  4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.000e-03 -4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.000e-03 -4.000e-03  0.000e+00  0.000  0.000  1.000
+1   5002    2   4  1.600e-02  0.000e+00	"CTF reference magnetometer size = 16.00  mm"
+  0.2500  4.000e-03  4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.000e-03  4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -4.000e-03 -4.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.000e-03 -4.000e-03  0.000e+00  0.000  0.000  1.000
+2   5003    0   2  3.440e-02  7.860e-02	"CTF reference gradiometer (diag) size = 34.40  mm base = 78.60  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  7.860e-02  0.000  0.000  1.000
+2   5003    1   8  3.440e-02  7.860e-02	"CTF reference gradiometer (diag) size = 34.40  mm base = 78.60  mm"
+  0.2500  8.600e-03  8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -8.600e-03  8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -8.600e-03 -8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  8.600e-03 -8.600e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  8.600e-03  8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500 -8.600e-03  8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500 -8.600e-03 -8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500  8.600e-03 -8.600e-03  7.860e-02  0.000  0.000  1.000
+2   5003    2   8  3.440e-02  7.860e-02	"CTF reference gradiometer (diag) size = 34.40  mm base = 78.60  mm"
+  0.2500  8.600e-03  8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -8.600e-03  8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -8.600e-03 -8.600e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  8.600e-03 -8.600e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  8.600e-03  8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500 -8.600e-03  8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500 -8.600e-03 -8.600e-03  7.860e-02  0.000  0.000  1.000
+ -0.2500  8.600e-03 -8.600e-03  7.860e-02  0.000  0.000  1.000
+2   5004    0   2  3.440e-02  7.860e-02	"CTF reference gradiometer (offdiag) size = 34.40  mm base = 78.60  mm"
+  1.0000  3.930e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000 -3.930e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+2   5004    1   8  3.440e-02  7.860e-02	"CTF reference gradiometer (offdiag) size = 34.40  mm base = 78.60  mm"
+  0.2500  4.780e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  3.080e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  3.080e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.780e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -3.080e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.780e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.780e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -3.080e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+2   5004    2   8  3.440e-02  7.860e-02	"CTF reference gradiometer (offdiag) size = 34.40  mm base = 78.60  mm"
+  0.2500  4.780e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  3.080e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  3.080e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  4.780e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -3.080e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.780e-02  8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -4.780e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500 -3.080e-02 -8.500e-03  0.000e+00  0.000  0.000  1.000
+2   6001    0   2  1.550e-02  5.000e-02	"MIT KIT system gradiometer size = 15.50  mm base = 50.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+2   6001    1   8  1.550e-02  5.000e-02	"MIT KIT system gradiometer size = 15.50  mm base = 50.00  mm"
+  0.2500  3.875e-03  3.875e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -3.875e-03  3.875e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -3.875e-03 -3.875e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  3.875e-03 -3.875e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  3.875e-03  3.875e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -3.875e-03  3.875e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -3.875e-03 -3.875e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500  3.875e-03 -3.875e-03  5.000e-02  0.000  0.000  1.000
+2   6001    2  14  1.550e-02  5.000e-02	"MIT KIT system gradiometer size = 15.50  mm base = 50.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  6.328e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -6.328e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  3.164e-03  5.480e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  3.164e-03 -5.480e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.164e-03  5.480e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -3.164e-03 -5.480e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  6.328e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250 -6.328e-03  0.000e+00  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.164e-03  5.480e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250  3.164e-03 -5.480e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.164e-03  5.480e-03  5.000e-02  0.000  0.000  1.000
+ -0.1250 -3.164e-03 -5.480e-03  5.000e-02  0.000  0.000  1.000
+2   7001    0   2  6.000e-03  5.000e-02	"BabySQUID system gradiometer size = 6.00   mm base = 50.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+2   7001    1   2  6.000e-03  5.000e-02	"BabySQUID system gradiometer size = 6.00   mm base = 50.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.0000  0.000e+00  0.000e+00  5.000e-02  0.000  0.000  1.000
+2   7001    2   8  6.000e-03  5.000e-02	"BabySQUID system gradiometer size = 6.00   mm base = 50.00  mm"
+  0.2500  1.500e-03  1.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -1.500e-03  1.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -1.500e-03 -1.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  1.500e-03 -1.500e-03  0.000e+00  0.000  0.000  1.000
+ -0.2500  1.500e-03  1.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -1.500e-03  1.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500 -1.500e-03 -1.500e-03  5.000e-02  0.000  0.000  1.000
+ -0.2500  1.500e-03 -1.500e-03  5.000e-02  0.000  0.000  1.000
+1   7002    0   1  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7002    1   4  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  0.2500  2.500e-03  2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.500e-03  2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.500e-03 -2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  2.500e-03 -2.500e-03  0.000e+00  0.000  0.000  1.000
+1   7002    2   7  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  2.041e-03  3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  2.041e-03 -3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -2.041e-03  3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -2.041e-03 -3.536e-03  0.000e+00  0.000  0.000  1.000
+1   7003    0   1  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7003    1   4  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  0.2500  5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+1   7003    2   7  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+1   7004    0   1  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7004    1   4  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  0.2500  5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+1   7004    2   7  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+3   8001    0   2  7.000e-02  7.500e-02	"Sample TMS figure-of-eight coil size = 70.00  mm base = 75.00  mm"
+ 13.3333  3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+-13.3333 -3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+3   8001    1  14  7.000e-02  7.500e-02	"Sample TMS figure-of-eight coil size = 70.00  mm base = 75.00  mm"
+  3.3333  3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  6.608e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  8.923e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  5.179e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  5.179e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  2.321e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  2.321e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+ -3.3333 -3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -8.923e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -6.608e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -2.321e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -2.321e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -5.179e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -5.179e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+3   8001    2  14  7.000e-02  7.500e-02	"Sample TMS figure-of-eight coil size = 70.00  mm base = 75.00  mm"
+  3.3333  3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  6.608e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  8.923e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  1.6667  5.179e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  5.179e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  2.321e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+  1.6667  2.321e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+ -3.3333 -3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -8.923e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -6.608e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
+ -1.6667 -2.321e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -2.321e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -5.179e-02  2.475e-02  0.000e+00  0.000  0.000  1.000
+ -1.6667 -5.179e-02 -2.475e-02  0.000e+00  0.000  0.000  1.000
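The records above follow the layout documented in the file header: one
definition line ('%d %d %d %d %e %e %s') followed by <np> integration-point
lines ('%f %e %e %e %e %e %e'). A minimal reader sketch under that assumption
(illustration only; MNE ships its own coil-definition reader, and this is not
it):

def read_coil_defs(fname):
    """Parse a coil_def.dat-style file into a list of dicts."""
    with open(fname) as fid:
        lines = [ln for ln in fid if ln.strip() and not ln.startswith('#')]
    coils, idx = [], 0
    while idx < len(lines):
        # <class> <id> <accuracy> <np> <size> <baseline> "<description>"
        head, desc = lines[idx].split('"')[:2]
        fields = head.split()
        coil_class, coil_id, accuracy, n_pts = [int(v) for v in fields[:4]]
        size, base = [float(v) for v in fields[4:6]]
        # <np> rows of: <w> <x/m> <y/m> <z/m> <nx> <ny> <nz>
        points = [[float(v) for v in lines[idx + 1 + k].split()]
                  for k in range(n_pts)]
        coils.append(dict(coil_class=coil_class, id=coil_id,
                          accuracy=accuracy, size=size, base=base,
                          desc=desc, points=points))
        idx += 1 + n_pts
    return coils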
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def_Elekta.dat b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def_Elekta.dat
new file mode 100644
index 0000000..a15e3db
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/coil_def_Elekta.dat
@@ -0,0 +1,70 @@
+#
+#	MEG coil definition file for Maxwell Filtering
+#	
+#	These coil definitions make use of integration points according to the last
+#	formula in section 25.4.62 in the "Handbook of Mathematical Functions:
+#	With Formulas, Graphs, and Mathematical Tables" edited by Abramowitz and Stegun.
+#
+#	These coil definitions were used by Samu Taulu in the Signal Space
+#	Separation (SSS) work, which Elekta subsequently used in MaxFilter. The only
+#	difference is that the local z-coordinate was set to zero in Taulu's original
+#	formulation.
+#
+#	Issues left to be sorted out:
+#	1) Discrepancy between gradiometer base sizes: 16.69 mm in Elekta, 16.80 mm in MNE.
+#	2) Source of the small z-coordinate offset (0.0003 m): not used in the original SSS
+#	   work, but present in both Elekta's and MNE's coil definitions.
+#
+#	<class>	<id> <accuracy> <np> <size> <baseline> "<description>"
+#
+# struct class id accuracy num_points size baseline description
+# format '%d %d %d %d %e %e %s'
+#
+#	<w_1> <x_1/m> <y_1/m> <z_1/m> <nx_1> <ny_1> <nz_1>
+#
+# struct w     x       y       z       nx     ny     nz
+# format '%f %e %e %e %e %e %e'
+#
+#	....
+#
+#	<w_np> <x_np/m> <y_np/m> <z_np/m> <nx_np> <ny_np> <nz_np>
+#
+#	<class>		1	magnetometer
+#			2	axial gradiometer
+#			3	planar gradiometer
+#			4	axial second-order gradiometer
+#
+#	<accuracy>	0       point approximation
+#                       1	normal
+#			2	accurate
+#
+#
+3   3012    2   8  2.639e-02  1.669e-02	"Vectorview planar gradiometer T1 size = 26.39  mm base = 16.69  mm"
+1.4979029359e+01  1.0800000000e-02  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  5.8900000000e-03  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  5.8900000000e-03  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  1.0800000000e-02  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -1.0800000000e-02  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -5.8900000000e-03  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -5.8900000000e-03  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -1.0800000000e-02  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1   3022    2  9  2.580e-02  0.000e+00	"Vectorview magnetometer T1 size = 25.80  mm"
+7.7160493800e-02  -9.9922970000e-03  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  9.9922970000e-03  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  -9.9922970000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.9753086420e-01  0.0000000000e+00  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  9.9922970000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  -9.9922970000e-03  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  9.9922970000e-03  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1   3024    2  9  2.100e-02  0.000e+00	"Vectorview magnetometer T3 size = 21.00  mm"
+7.7160493800e-02  -8.1332650000e-03  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  8.1332650000e-03  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  -8.1332650000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.9753086420e-01  0.0000000000e+00  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  8.1332650000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  -8.1332650000e-03  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  8.1332650000e-03  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
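The 9-point magnetometer entries above are a 3x3 Gauss-Legendre product rule
(the Abramowitz & Stegun formula cited in the header): 1-D nodes at 0 and
+/-sqrt(3/5) scaled by the coil half-width, 1-D weights 5/9, 8/9, 5/9, and
tensor-product weights divided by 4 so they sum to one. A short derived check
against the T1 magnetometer values (computed here, not read from the file):

import numpy as np

nodes_1d = np.sqrt(3. / 5.) * np.array([-1., 0., 1.])
weights_1d = np.array([5., 8., 5.]) / 9.

half = 25.80e-3 / 2.  # half-width of the 25.80 mm Vectorview T1 magnetometer
xs, ys = np.meshgrid(nodes_1d * half, nodes_1d * half)
ws = np.outer(weights_1d, weights_1d) / 4.  # normalized: ws.sum() == 1

assert np.isclose(ws.min(), 7.7160493800e-02)  # corner weight = 25/324
assert np.isclose(ws.max(), 1.9753086420e-01)  # center weight = 64/324
assert np.isclose(xs.max(), 9.9922970000e-03)  # sqrt(3/5) * 12.9 mm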
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/mne_analyze.sel b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/mne_analyze.sel
new file mode 100644
index 0000000..b0e9034
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/data/mne_analyze.sel
@@ -0,0 +1,13 @@
+#
+#	All channels
+#
+Vertex:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 0631|MEG 0421|MEG 0711|MEG 0431|MEG 0741|MEG 1821|MEG 1041|MEG 1111|MEG 0721|MEG 1141|MEG 0731|MEG 2211
+Left-temporal:MEG 0223|MEG 0222|MEG 0212|MEG 0213|MEG 0133|MEG 0132|MEG 0112|MEG 0113|MEG 0233|MEG 0232|MEG 0243|MEG 0242|MEG 1512|MEG 1513|MEG 0143|MEG 0142|MEG 1623|MEG 1622|MEG 1613|MEG 1612|MEG 1523|MEG 1522|MEG 1543|MEG 1542|MEG 1533|MEG 1532|MEG 0221|MEG 0211|MEG 0131|MEG 0111|MEG 0231|MEG 0241|MEG 1511|MEG 0141|MEG 1621|MEG 1611|MEG 1521|MEG 1541|MEG 1531
+Right-temporal:MEG 1312|MEG 1313|MEG 1323|MEG 1322|MEG 1442|MEG 1443|MEG 1423|MEG 1422|MEG 1342|MEG 1343|MEG 1333|MEG 1332|MEG 2612|MEG 2613|MEG 1433|MEG 1432|MEG 2413|MEG 2412|MEG 2422|MEG 2423|MEG 2642|MEG 2643|MEG 2623|MEG 2622|MEG 2633|MEG 2632|MEG 1311|MEG 1321|MEG 1441|MEG 1421|MEG 1341|MEG 1331|MEG 2611|MEG 1431|MEG 2411|MEG 2421|MEG 2641|MEG 2621|MEG 2631
+Left-parietal:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0412|MEG 0413|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0442|MEG 0443|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1813|MEG 1812|MEG 1832|MEG 1833|MEG 1843|MEG 1842|MEG 1632|MEG 1633|MEG 2013|MEG 2012|MEG 0631|MEG 0421|MEG 0411|MEG 0711|MEG 0431|MEG 0441|MEG 0741|MEG 1821|MEG 1811|MEG 1831|MEG 1841|MEG 1631|MEG 2011
+Right-parietal:MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 1123|MEG 1122|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 1133|MEG 1132|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 2223|MEG 2222|MEG 2242|MEG 2243|MEG 2232|MEG 2233|MEG 2442|MEG 2443|MEG 2023|MEG 2022|MEG 1041|MEG 1111|MEG 1121|MEG 0721|MEG 1141|MEG 1131|MEG 0731|MEG 2211|MEG 2221|MEG 2241|MEG 2231|MEG 2441|MEG 2021
+Left-occipital:MEG 2042|MEG 2043|MEG 1913|MEG 1912|MEG 2113|MEG 2112|MEG 1922|MEG 1923|MEG 1942|MEG 1943|MEG 1642|MEG 1643|MEG 1933|MEG 1932|MEG 1733|MEG 1732|MEG 1723|MEG 1722|MEG 2143|MEG 2142|MEG 1742|MEG 1743|MEG 1712|MEG 1713|MEG 2041|MEG 1911|MEG 2111|MEG 1921|MEG 1941|MEG 1641|MEG 1931|MEG 1731|MEG 1721|MEG 2141|MEG 1741|MEG 1711
+Right-occipital:MEG 2032|MEG 2033|MEG 2313|MEG 2312|MEG 2342|MEG 2343|MEG 2322|MEG 2323|MEG 2433|MEG 2432|MEG 2122|MEG 2123|MEG 2333|MEG 2332|MEG 2513|MEG 2512|MEG 2523|MEG 2522|MEG 2133|MEG 2132|MEG 2542|MEG 2543|MEG 2532|MEG 2533|MEG 2031|MEG 2311|MEG 2341|MEG 2321|MEG 2431|MEG 2121|MEG 2331|MEG 2511|MEG 2521|MEG 2131|MEG 2541|MEG 2531
+Left-frontal:MEG 0522|MEG 0523|MEG 0512|MEG 0513|MEG 0312|MEG 0313|MEG 0342|MEG 0343|MEG 0122|MEG 0123|MEG 0822|MEG 0823|MEG 0533|MEG 0532|MEG 0543|MEG 0542|MEG 0322|MEG 0323|MEG 0612|MEG 0613|MEG 0333|MEG 0332|MEG 0622|MEG 0623|MEG 0643|MEG 0642|MEG 0521|MEG 0511|MEG 0311|MEG 0341|MEG 0121|MEG 0821|MEG 0531|MEG 0541|MEG 0321|MEG 0611|MEG 0331|MEG 0621|MEG 0641
+Right-frontal:MEG 0813|MEG 0812|MEG 0912|MEG 0913|MEG 0922|MEG 0923|MEG 1212|MEG 1213|MEG 1223|MEG 1222|MEG 1412|MEG 1413|MEG 0943|MEG 0942|MEG 0933|MEG 0932|MEG 1232|MEG 1233|MEG 1012|MEG 1013|MEG 1022|MEG 1023|MEG 1243|MEG 1242|MEG 1033|MEG 1032|MEG 0811|MEG 0911|MEG 0921|MEG 1211|MEG 1221|MEG 1411|MEG 0941|MEG 0931|MEG 1231|MEG 1011|MEG 1021|MEG 1241|MEG 1031
+
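Each selection above is a single "Name:CHANNEL|CHANNEL|..." line. A minimal
parsing sketch, for illustration only (MNE's read_selection is the supported
reader for this file):

def read_sel(fname):
    """Parse an mne_analyze.sel-style file into {name: [channels]}."""
    selections = {}
    with open(fname) as fid:
        for line in fid:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            name, _, channels = line.partition(':')
            selections[name] = channels.split('|')
    return selections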
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/__init__.py
new file mode 100644
index 0000000..e0530bd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/__init__.py
@@ -0,0 +1,11 @@
+"""Demo datasets
+"""
+
+from . import sample
+from . import megsim
+from . import spm_face
+from . import brainstorm
+from . import eegbci
+from . import somato
+from . import testing
+from . import _fake
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/__init__.py
new file mode 100644
index 0000000..b807fc4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/__init__.py
@@ -0,0 +1,4 @@
+"""MNE sample dataset
+"""
+
+from ._fake import data_path, get_version
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/_fake.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/_fake.py
new file mode 100644
index 0000000..580253b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/_fake/_fake.py
@@ -0,0 +1,25 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+from ...utils import verbose
+from ..utils import (_data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=False,
+              download=True, verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='fake',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='fake',
+                                          conf='MNE_DATASETS_FAKE_PATH')
+
+
+def get_version():
+    return _get_version('fake')
+
+get_version.__doc__ = _version_doc.format(name='fake')
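
For context, _fake.py above shows the pattern every fetcher in this package
follows: wrap the shared _data_path/_get_version helpers with the dataset's
registered name and fill in the shared docstring templates. A hedged sketch
for a hypothetical dataset (the name 'mydata' and MNE_DATASETS_MYDATA_PATH
are placeholders; a real fetcher also needs entries in the lookup tables
inside mne/datasets/utils.py):

    from mne.datasets.utils import (_data_path, _data_path_doc,
                                    _get_version, _version_doc)

    def data_path(path=None, force_update=False, update_path=False,
                  download=True):
        # 'mydata' must be registered in _data_path's key/url/hash dicts
        return _data_path(path=path, force_update=force_update,
                          update_path=update_path, name='mydata',
                          download=download)

    data_path.__doc__ = _data_path_doc.format(
        name='mydata', conf='MNE_DATASETS_MYDATA_PATH')

    def get_version():
        return _get_version('mydata')

    get_version.__doc__ = _version_doc.format(name='mydata')
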
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/__init__.py
new file mode 100644
index 0000000..eb985dc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/__init__.py
@@ -0,0 +1,4 @@
+"""Brainstorm Dataset
+"""
+
+from . import bst_raw, bst_resting, bst_auditory
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_auditory.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_auditory.py
new file mode 100644
index 0000000..2cbe827
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_auditory.py
@@ -0,0 +1,60 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
+    - One subject, two acquisition runs of 6 minutes each
+    - Subject stimulated binaurally with intra-aural earphones
+      (air tubes+transducers)
+    - Each run contains:
+        - 200 regular beeps (440Hz)
+        - 40 easy deviant beeps (554.4Hz, 4 semitones higher)
+    - Random inter-stimulus interval: between 0.7 and 1.7 seconds, uniformly
+      distributed
+    - The subject presses a button when detecting a deviant with the right
+      index finger
+    - Auditory stimuli generated with the Matlab Psychophysics toolbox
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_auditory.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_auditory')
+    else:
+        return data_path
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_auditory) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_auditory) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
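
Usage sketch for the module above (illustrative; the first data_path() call
asks the user to accept the Brainstorm license and downloads the archive):

    from mne.datasets.brainstorm import bst_auditory

    bst_auditory.description()       # prints the summary text shown above
    path = bst_auditory.data_path()  # e.g. .../MNE-brainstorm-data/bst_auditory
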
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_raw.py
new file mode 100644
index 0000000..1033008
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_raw.py
@@ -0,0 +1,59 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf
+    - One subject, one acquisition run of 6 minutes
+    - Subject stimulated using Digitimer Constant Current Stimulator
+      (model DS7A)
+    - The run contains 200 electric stimulations randomly distributed between
+      left and right:
+        - 102 stimulations of the left hand
+        - 98 stimulations of the right hand
+    - Inter-stimulus interval: jittered between [1500, 2000]ms
+    - Stimuli generated using PsychToolBox on Windows PC (TTL pulse generated
+      with the parallel port connected to the Digitimer via the rear panel BNC)
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_raw.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_raw')
+    else:
+        return data_path
+
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_raw) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_raw) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_resting.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_resting.py
new file mode 100644
index 0000000..3d33652
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/brainstorm/bst_resting.py
@@ -0,0 +1,51 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
+    - One subject
+    - Two runs of 10 min of resting state recordings
+    - Eyes open
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_resting.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_resting')
+    else:
+        return data_path
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_resting) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_resting) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/__init__.py
new file mode 100644
index 0000000..4a47873
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/__init__.py
@@ -0,0 +1,4 @@
+"""EEG Motor Movement/Imagery Dataset
+"""
+
+from .eegbci import data_path, load_data
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/eegbci.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/eegbci.py
new file mode 100644
index 0000000..274b66e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/eegbci/eegbci.py
@@ -0,0 +1,163 @@
+# Author: Martin Billinger <martin.billinger at tugraz.at>
+# License: BSD Style.
+
+import os
+from os import path as op
+
+from ..utils import _get_path, _do_path_update
+from ...utils import _fetch_file, _url_to_local_path, verbose
+
+
+EEGMI_URL = 'http://www.physionet.org/physiobank/database/eegmmidb/'
+
+
+@verbose
+def data_path(url, path=None, force_update=False, update_path=None,
+              verbose=None):
+    """Get path to local copy of EEGMMI dataset URL
+
+    This is a low-level function useful for getting a local copy of a
+    remote EEGBCI dataset.
+
+    Parameters
+    ----------
+    url : str
+        The dataset to use.
+    path : None | str
+        Location of where to look for the EEGBCI data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_EEGBCI_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the EEGBCI dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MNE-eegbci-data"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    path : list of str
+        Local path to the given data file. This path is contained inside a list
+        of length one, for compatibility.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import eegbci
+        >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'
+        >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    This would download the given EEGBCI data file to the 'datasets' folder,
+    and prompt the user to save the 'datasets' path to the mne-python config,
+    if it isn't there already.
+
+    The EEGBCI dataset is documented in the following publication:
+        Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
+        Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
+        (BCI) System. IEEE TBME 51(6):1034-1043
+    The data set is available at PhysioNet:
+        Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
+        Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
+        PhysioToolkit, and PhysioNet: Components of a New Research Resource for
+        Complex Physiologic Signals. Circulation 101(23):e215-e220
+    """  # noqa
+
+    key = 'MNE_DATASETS_EEGBCI_PATH'
+    name = 'EEGBCI'
+    path = _get_path(path, key, name)
+    destination = _url_to_local_path(url, op.join(path, 'MNE-eegbci-data'))
+    destinations = [destination]
+
+    # Fetch the file
+    if not op.isfile(destination) or force_update:
+        if op.isfile(destination):
+            os.remove(destination)
+        if not op.isdir(op.dirname(destination)):
+            os.makedirs(op.dirname(destination))
+        _fetch_file(url, destination, print_destination=False)
+
+    # Offer to update the path
+    _do_path_update(path, update_path, key, name)
+    return destinations
+
+
+@verbose
+def load_data(subject, runs, path=None, force_update=False, update_path=None,
+              base_url=EEGMI_URL, verbose=None):
+    """Get paths to local copy of EEGBCI dataset files
+
+    Parameters
+    ----------
+    subject : int
+        The subject to use. Can be in the range of 1-109 (inclusive).
+    runs : int | list of ints
+        The runs to use. Can be a list or a single number. The runs correspond
+        to the following tasks:
+              run | task
+        ----------+-----------------------------------------
+                1 | Baseline, eyes open
+                2 | Baseline, eyes closed
+         3, 7, 11 | Motor execution: left vs right hand
+         4, 8, 12 | Motor imagery: left vs right hand
+         5, 9, 13 | Motor execution: hands vs feet
+        6, 10, 14 | Motor imagery: hands vs feet
+    path : None | str
+        Location of where to look for the EEGBCI data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_EEGBCI_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the EEGBCI dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import eegbci
+        >>> eegbci.load_data(1, [4, 10, 14],\
+                             os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    This would download runs 4, 10, and 14 (hand/foot motor imagery) from
+    subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the
+    user to save the 'datasets' path to the mne-python config, if it isn't
+    there already.
+
+    The EEGBCI dataset is documented in the following publication:
+        Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
+        Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
+        (BCI) System. IEEE TBME 51(6):1034-1043
+    The data set is available at PhysioNet:
+        Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
+        Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
+        PhysioToolkit, and PhysioNet: Components of a New Research Resource for
+        Complex Physiologic Signals. Circulation 101(23):e215-e220
+    """
+    if not hasattr(runs, '__iter__'):
+        runs = [runs]
+
+    data_paths = []
+    for r in runs:
+        url = '{u}S{s:03d}/S{s:03d}R{r:02d}.edf'.format(u=base_url,
+                                                        s=subject, r=r)
+        data_paths.extend(data_path(url, path, force_update, update_path))
+
+    return data_paths
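
A short end-to-end sketch for the module above (illustrative): fetch the
three left/right-hand motor imagery runs for subject 1 and read the first
one with mne.io.read_raw_edf:

    from mne.datasets import eegbci
    from mne.io import read_raw_edf

    # runs 4, 8 and 12 are 'motor imagery: left vs right hand' (see table)
    paths = eegbci.load_data(1, [4, 8, 12])
    raw = read_raw_edf(paths[0], preload=True)
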
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/__init__.py
new file mode 100644
index 0000000..24babeb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/__init__.py
@@ -0,0 +1,4 @@
+"""MEGSIM dataset
+"""
+
+from .megsim import data_path, load_data
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/megsim.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/megsim.py
new file mode 100644
index 0000000..44e77fb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/megsim.py
@@ -0,0 +1,166 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import os
+from os import path as op
+import zipfile
+from sys import stdout
+
+from ...utils import _fetch_file, _url_to_local_path, verbose
+from ..utils import _get_path, _do_path_update
+from .urls import (url_match, valid_data_types, valid_data_formats,
+                   valid_conditions)
+
+
+@verbose
+def data_path(url, path=None, force_update=False, update_path=None,
+              verbose=None):
+    """Get path to local copy of MEGSIM dataset URL
+
+    This is a low-level function useful for getting a local copy of a
+    remote MEGSIM dataset.
+
+    Parameters
+    ----------
+    url : str
+        The dataset to use.
+    path : None | str
+        Location of where to look for the MEGSIM data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the MEGSIM dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    path : list of str
+        Local paths to the given data files. If URL was a .fif file, this
+        will be a list of length 1. If it was a .zip file, it may potentially
+        be many files.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import megsim
+        >>> url = 'http://cobre.mrn.org/megsim/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif'
+        >>> megsim.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    And this would download the given MEGSIM data file to the 'datasets'
+    folder, and prompt the user to save the 'datasets' path to the mne-python
+    config, if it isn't there already.
+
+    The MEGSIM dataset is documented in the following publication:
+        Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+        Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+        (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+        Realistic Simulated and Empirical Data. Neuroinform 10:141-158
+    """  # noqa
+    key = 'MNE_DATASETS_MEGSIM_PATH'
+    name = 'MEGSIM'
+    path = _get_path(path, key, name)
+    destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
+    destinations = [destination]
+
+    split = op.splitext(destination)
+    is_zip = split[1].lower() == '.zip'
+    # Fetch the file
+    do_unzip = False
+    if not op.isfile(destination) or force_update:
+        if op.isfile(destination):
+            os.remove(destination)
+        if not op.isdir(op.dirname(destination)):
+            os.makedirs(op.dirname(destination))
+        _fetch_file(url, destination, print_destination=False)
+        do_unzip = True
+
+    if is_zip:
+        z = zipfile.ZipFile(destination)
+        decomp_dir, name = op.split(destination)
+        files = z.namelist()
+        # decompress if necessary (if download was re-done)
+        if do_unzip:
+            stdout.write('Decompressing %d files from\n'
+                         '"%s" ...' % (len(files), name))
+            z.extractall(decomp_dir)
+            stdout.write(' [done]\n')
+        z.close()
+        destinations = [op.join(decomp_dir, f) for f in files]
+
+    path = _do_path_update(path, update_path, key, name)
+    return destinations
+
+
+@verbose
+def load_data(condition='visual', data_format='raw', data_type='experimental',
+              path=None, force_update=False, update_path=None, verbose=None):
+    """Get path to local copy of MEGSIM dataset type
+
+    Parameters
+    ----------
+    condition : str
+        The condition to use. Either 'visual', 'auditory', or 'somatosensory'.
+    data_format : str
+        The data format. Either 'raw', 'evoked', or 'single-trial'.
+    data_type : str
+        The type of data. Either 'experimental' or 'simulation'.
+    path : None | str
+        Location of where to look for the MEGSIM data storing location.
+        If None, the environment variable or config parameter
+        MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the MEGSIM dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MEGSIM"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
+        config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import megsim
+        >>> megsim.load_data('visual', 'raw', 'experimental', os.getenv('HOME') + '/datasets') # doctest:+SKIP
+
+    And this would download the raw visual experimental MEGSIM dataset to the
+    'datasets' folder, and prompt the user to save the 'datasets' path to the
+    mne-python config, if it isn't there already.
+
+    The MEGSIM dataset is documented in the following publication:
+        Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
+        Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
+        (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
+        Realistic Simulated and Empirical Data. Neuroinform 10:141-158
+    """  # noqa
+
+    if condition.lower() not in valid_conditions:
+        raise ValueError('Unknown condition "%s"' % condition)
+    if data_format not in valid_data_formats:
+        raise ValueError('Unknown data_format "%s"' % data_format)
+    if data_type not in valid_data_types:
+        raise ValueError('Unknown data_type "%s"' % data_type)
+    urls = url_match(condition, data_format, data_type)
+
+    data_paths = list()
+    for url in urls:
+        data_paths.extend(data_path(url, path, force_update, update_path))
+    return data_paths
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/urls.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/urls.py
new file mode 100644
index 0000000..409e60f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/megsim/urls.py
@@ -0,0 +1,172 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+url_root = 'http://cobre.mrn.org/megsim'
+
+urls = ['/empdata/neuromag/visual/subject1_day1_vis_raw.fif',
+        '/empdata/neuromag/visual/subject1_day2_vis_raw.fif',
+        '/empdata/neuromag/visual/subject3_day1_vis_raw.fif',
+        '/empdata/neuromag/visual/subject3_day2_vis_raw.fif',
+        '/empdata/neuromag/aud/subject1_day1_aud_raw.fif',
+        '/empdata/neuromag/aud/subject1_day2_aud_raw.fif',
+        '/empdata/neuromag/aud/subject3_day1_aud_raw.fif',
+        '/empdata/neuromag/aud/subject3_day2_aud_raw.fif',
+        '/empdata/neuromag/somato/subject1_day1_median_raw.fif',
+        '/empdata/neuromag/somato/subject1_day2_median_raw.fif',
+        '/empdata/neuromag/somato/subject3_day1_median_raw.fif',
+        '/empdata/neuromag/somato/subject3_day2_median_raw.fif',
+
+        '/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim1B_20mm_50na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim2_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim3A_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim3B_20mm_50na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim4_4mm_30na_neuro_rn.fif',
+        '/simdata/neuromag/visual/M87174545_vis_sim5_4mm_30na_neuro_rn.fif',
+
+        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_fif.zip',
+        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_withOsc_fif.zip',  # noqa
+        '/simdata_singleTrials/4545_sim_oscOnly_v1_IPS_ILOG_30hzAdded.fif',
+
+        '/index.html',
+]
+
+data_formats = ['raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+                'raw',
+
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+                'evoked',
+
+                'single-trial',
+                'single-trial',
+                'single-trial',
+
+                'text']
+
+subjects = ['subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+            'subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+            'subject_1',
+            'subject_1',
+            'subject_3',
+            'subject_3',
+
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+            'subject_1',
+
+            'subject_1',
+            'subject_1',
+            'subject_1',
+
+            '']
+
+data_types = ['experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+              'experimental',
+
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+              'simulation',
+
+              'simulation',
+              'simulation',
+              'simulation',
+
+              'text']
+
+conditions = ['visual',
+              'visual',
+              'visual',
+              'visual',
+              'auditory',
+              'auditory',
+              'auditory',
+              'auditory',
+              'somatosensory',
+              'somatosensory',
+              'somatosensory',
+              'somatosensory',
+
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+              'visual',
+
+              'visual',
+              'visual',
+              'visual',
+
+              'index']
+
+valid_data_types = list(set(data_types))
+valid_data_formats = list(set(data_formats))
+valid_conditions = list(set(conditions))
+
+# turn them into arrays for ease of use
+urls = np.atleast_1d(urls)
+data_formats = np.atleast_1d(data_formats)
+subjects = np.atleast_1d(subjects)
+data_types = np.atleast_1d(data_types)
+conditions = np.atleast_1d(conditions)
+
+# Useful for testing
+# assert len(conditions) == len(data_types) == len(subjects) \
+#     == len(data_formats) == len(urls)
+
+
+def url_match(condition, data_format, data_type):
+    """Function to match MEGSIM data files"""
+    inds = np.logical_and(conditions == condition, data_formats == data_format)
+    inds = np.logical_and(inds, data_types == data_type)
+    good_urls = list(urls[inds])
+    for gi, g in enumerate(good_urls):
+        good_urls[gi] = url_root + g
+    if len(good_urls) == 0:
+        raise ValueError('No MEGSIM dataset found with condition="%s",\n'
+                         'data_format="%s", data_type="%s"'
+                         % (condition, data_format, data_type))
+    return good_urls
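
url_match() is this module's single entry point: it AND-combines boolean
masks over the parallel arrays above and returns the matching full URLs,
raising ValueError when nothing matches. A small sketch (illustrative):

    from mne.datasets.megsim.urls import url_match

    urls = url_match('visual', 'evoked', 'simulation')
    print(len(urls))  # the 7 simulated evoked files listed above
    # url_match('auditory', 'evoked', 'simulation') raises ValueError,
    # since only visual simulations are listed
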
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/__init__.py
new file mode 100644
index 0000000..6b1faf2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/__init__.py
@@ -0,0 +1,5 @@
+"""MNE sample dataset
+"""
+
+from .sample import (data_path, has_sample_data, get_version,
+                     requires_sample_data)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/sample.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/sample.py
new file mode 100644
index 0000000..46f40d9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/sample/sample.py
@@ -0,0 +1,42 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+from ...utils import verbose, get_config
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+has_sample_data = partial(has_dataset, name='sample')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='sample',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='sample',
+                                          conf='MNE_DATASETS_SAMPLE_PATH')
+
+
+def get_version():
+    return _get_version('sample')
+
+get_version.__doc__ = _version_doc.format(name='sample')
+
+
+# Allow forcing of sample dataset skip
+def _skip_sample_data():
+    skip_testing = (get_config('MNE_SKIP_SAMPLE_DATASET_TESTS', 'false') ==
+                    'true')
+    skip = skip_testing or not has_sample_data()
+    return skip
+
+requires_sample_data = np.testing.dec.skipif(_skip_sample_data,
+                                             'Requires sample dataset')
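
requires_sample_data is meant to be applied to test functions; the wrapped
test is skipped when the sample dataset is absent or when
MNE_SKIP_SAMPLE_DATASET_TESTS is set to 'true'. A sketch with a hypothetical
test (illustrative):

    from mne.datasets.sample import data_path, requires_sample_data

    @requires_sample_data
    def test_with_sample_data():
        # only runs when the sample dataset is installed locally
        path = data_path(download=False)
        assert path != ''
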
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/__init__.py
new file mode 100644
index 0000000..aa3f82d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/__init__.py
@@ -0,0 +1,4 @@
+"""Somatosensory dataset
+"""
+
+from .somato import data_path, has_somato_data, get_version
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/somato.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/somato.py
new file mode 100644
index 0000000..d0daf98
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/somato/somato.py
@@ -0,0 +1,29 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+has_somato_data = partial(has_dataset, name='somato')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='somato',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='somato',
+                                          conf='MNE_DATASETS_SOMATO_PATH')
+
+
+def get_version():
+    return _get_version('somato')
+
+get_version.__doc__ = _version_doc.format(name='somato')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/__init__.py
new file mode 100644
index 0000000..90f01c7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/__init__.py
@@ -0,0 +1,4 @@
+"""SPM face dataset
+"""
+
+from .spm_data import data_path, has_spm_data, get_version
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/spm_data.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/spm_data.py
new file mode 100644
index 0000000..19c6461
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/spm_face/spm_data.py
@@ -0,0 +1,28 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD Style.
+
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+has_spm_data = partial(has_dataset, name='spm')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='spm',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='spm',
+                                          conf='MNE_DATASETS_SPM_FACE_PATH')
+
+
+def get_version():
+    return _get_version('spm')
+
+get_version.__doc__ = _version_doc.format(name='spm')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/__init__.py
new file mode 100644
index 0000000..7fa74ee
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/__init__.py
@@ -0,0 +1,4 @@
+"""MNE sample dataset
+"""
+
+from ._testing import data_path, requires_testing_data, get_version
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/_testing.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/_testing.py
new file mode 100644
index 0000000..932bd2e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/testing/_testing.py
@@ -0,0 +1,47 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+from ...utils import verbose, get_config
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+has_testing_data = partial(has_dataset, name='testing')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, verbose=None):
+    # Make sure we don't do something stupid
+    if download and \
+            get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == 'true':
+        raise RuntimeError('Cannot download data if skipping is forced')
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='testing',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='testing',
+                                          conf='MNE_DATASETS_TESTING_PATH')
+
+
+def get_version():
+    return _get_version('testing')
+
+get_version.__doc__ = _version_doc.format(name='testing')
+
+
+# Allow forcing of testing dataset skip (for Debian tests) using:
+# `make test-no-testing-data`
+def _skip_testing_data():
+    skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') ==
+                    'true')
+    skip = skip_testing or not has_testing_data()
+    return skip
+
+requires_testing_data = np.testing.dec.skipif(_skip_testing_data,
+                                              'Requires testing dataset')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/tests/test_datasets.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/tests/test_datasets.py
new file mode 100644
index 0000000..34614ca
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/tests/test_datasets.py
@@ -0,0 +1,46 @@
+from os import path as op
+from nose.tools import assert_true, assert_equal
+
+from mne import datasets
+from mne.externals.six import string_types
+from mne.utils import _TempDir, run_tests_if_main, requires_good_network
+
+
+def test_datasets():
+    """Test simple dataset functions
+    """
+    for dname in ('sample', 'somato', 'spm_face', 'testing',
+                  'bst_raw', 'bst_auditory', 'bst_resting'):
+        if dname.startswith('bst'):
+            dataset = getattr(datasets.brainstorm, dname)
+        else:
+            dataset = getattr(datasets, dname)
+        if dataset.data_path(download=False) != '':
+            assert_true(isinstance(dataset.get_version(), string_types))
+        else:
+            assert_true(dataset.get_version() is None)
+
+
+@requires_good_network
+def test_megsim():
+    """Test MEGSIM URL handling
+    """
+    data_dir = _TempDir()
+    paths = datasets.megsim.load_data(
+        'index', 'text', 'text', path=data_dir, update_path=False)
+    assert_equal(len(paths), 1)
+    assert_true(paths[0].endswith('index.html'))
+
+
+@requires_good_network
+def test_downloads():
+    """Test dataset URL handling
+    """
+    # Try actually downloading a dataset
+    data_dir = _TempDir()
+    path = datasets._fake.data_path(path=data_dir, update_path=False)
+    assert_true(op.isfile(op.join(path, 'bar')))
+    assert_true(datasets._fake.get_version() is None)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/utils.py
new file mode 100644
index 0000000..b333b58
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/datasets/utils.py
@@ -0,0 +1,329 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Denis Engemann <denis.engemann at gmail.com>
+# License: BSD Style.
+
+import os
+import os.path as op
+import shutil
+import tarfile
+from warnings import warn
+import stat
+
+from .. import __version__ as mne_version
+from ..utils import get_config, set_config, _fetch_file, logger
+from ..externals.six import string_types
+from ..externals.six.moves import input
+
+
+_data_path_doc = """Get path to local copy of {name} dataset
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the {name} dataset.
+        If None, the environment variable or config parameter
+        {conf} is used. If it doesn't exist, the
+        "mne-python/examples" directory is used. If the {name} dataset
+        is not found under the given path (e.g., as
+        "mne-python/examples/MNE-{name}-data"), the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the {name} dataset even if a local copy exists.
+    update_path : bool | None
+        If True, set the {conf} in mne-python
+        config to the given path. If None, the user is prompted.
+    download : bool
+        If False and the {name} dataset has not been downloaded yet,
+        it will not be downloaded and the path will be returned as
+        '' (empty string). This is mostly used for debugging purposes
+        and can be safely ignored by most users.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    path : str
+        Path to {name} dataset directory.
+"""
+
+
+_version_doc = """Get version of the local {name} dataset
+
+    Returns
+    -------
+    version : str | None
+        Version of the {name} local dataset, or None if the dataset
+        does not exist locally.
+"""
+
+
+_bst_license_text = """
+License
+-------
+This tutorial dataset (EEG and MRI data) remains a property of the MEG Lab,
+McConnell Brain Imaging Center, Montreal Neurological Institute,
+McGill University, Canada. Its use and transfer outside the Brainstorm
+tutorial, e.g. for research purposes, is prohibited without written consent
+from the MEG Lab.
+
+If you reference this dataset in your publications, please:
+1) acknowledge its authors: Elizabeth Bock, Esther Florin, Francois Tadel and
+Sylvain Baillet
+2) cite Brainstorm as indicated on the website:
+http://neuroimage.usc.edu/brainstorm
+
+For questions, please contact Francois Tadel (francois.tadel at mcgill.ca).
+"""
+
+
+def _dataset_version(path, name):
+    """Get the version of the dataset"""
+    ver_fname = op.join(path, 'version.txt')
+    if op.exists(ver_fname):
+        with open(ver_fname, 'r') as fid:
+            version = fid.readline().strip()  # version is on first line
+    else:
+        # Sample dataset versioning was introduced after 0.3
+        # SPM dataset was introduced with 0.7
+        version = '0.3' if name == 'sample' else '0.7'
+
+    return version
+
+
+def _get_path(path, key, name):
+    """Helper to get a dataset path"""
+    if path is None:
+        # use an intelligent guess if it's not defined
+        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
+                                       '..', 'examples'))
+        if get_config(key) is None:
+            key = 'MNE_DATA'
+        path = get_config(key, def_path)
+
+        # use the same for all datasets
+        if not op.exists(path) or not os.access(path, os.W_OK):
+            try:
+                os.mkdir(path)
+            except OSError:
+                try:
+                    logger.info('Checking for %s data in '
+                                '"~/mne_data"...' % name)
+                    path = op.join(op.expanduser("~"), "mne_data")
+                    if not op.exists(path):
+                        logger.info("Trying to create "
+                                    "'~/mne_data' in home directory")
+                        os.mkdir(path)
+                except OSError:
+                    raise OSError("User does not have write permissions "
+                                  "at '%s'; try passing a path where the "
+                                  "user has write permissions to "
+                                  "data_path(), e.g. "
+                                  "data_path('/home/xyz/me2/')" % (path,))
+    if not isinstance(path, string_types):
+        raise ValueError('path must be a string or None')
+    return path
+
+
+def _do_path_update(path, update_path, key, name):
+    """Helper to update path"""
+    path = op.abspath(path)
+    if update_path is None:
+        if get_config(key, '') != path:
+            update_path = True
+            msg = ('Do you want to set the path:\n    %s\nas the default '
+                   '%s dataset path in the mne-python config [y]/n? '
+                   % (path, name))
+            answer = input(msg)
+            if answer.lower() == 'n':
+                update_path = False
+        else:
+            update_path = False
+
+    if update_path is True:
+        set_config(key, path)
+    return path
+
+
+def _data_path(path=None, force_update=False, update_path=True, download=True,
+               name=None, check_version=False, return_version=False,
+               archive_name=None):
+    """Aux function
+    """
+    key = {'sample': 'MNE_DATASETS_SAMPLE_PATH',
+           'spm': 'MNE_DATASETS_SPM_FACE_PATH',
+           'somato': 'MNE_DATASETS_SOMATO_PATH',
+           'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
+           'testing': 'MNE_DATASETS_TESTING_PATH',
+           'fake': 'MNE_DATASETS_FAKE_PATH',
+           }[name]
+
+    path = _get_path(path, key, name)
+    archive_names = dict(
+        sample='MNE-sample-data-processed.tar.gz',
+        spm='MNE-spm-face.tar.bz2',
+        somato='MNE-somato-data.tar.gz',
+        testing='mne-testing-data-master.tar.gz',
+        fake='foo.tgz',
+    )
+    if archive_name is not None:
+        archive_names.update(archive_name)
+    folder_names = dict(
+        sample='MNE-sample-data',
+        spm='MNE-spm-face',
+        somato='MNE-somato-data',
+        brainstorm='MNE-brainstorm-data',
+        testing='MNE-testing-data',
+        fake='foo',
+    )
+    urls = dict(
+        sample="https://s3.amazonaws.com/mne-python/datasets/%s",
+        spm='https://s3.amazonaws.com/mne-python/datasets/%s',
+        somato='https://s3.amazonaws.com/mne-python/datasets/%s',
+        brainstorm='https://copy.com/ZTHXXFcuIZycvRoA/brainstorm/%s',
+        testing='https://github.com/mne-tools/mne-testing-data/archive/'
+                'master.tar.gz',
+        fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
+             'datasets/%s',
+    )
+    hashes = dict(
+        sample='f73186795af820428e5e8e779ce5bfcf',
+        spm='3e9e83c642136e5b720e2ecc5dcc3244',
+        somato='f3e3a8441477bb5bacae1d0c6e0964fb',
+        brainstorm=None,
+        testing=None,
+        fake='3194e9f7b46039bb050a74f3e1ae9908',
+    )
+    folder_origs = dict(  # not listed means None
+        testing='mne-testing-data-master',
+    )
+    folder_name = folder_names[name]
+    archive_name = archive_names[name]
+    hash_ = hashes[name]
+    url = urls[name]
+    folder_orig = folder_origs.get(name, None)
+    if '%s' in url:
+        url = url % archive_name
+
+    folder_path = op.join(path, folder_name)
+    if name == 'brainstorm':
+        extract_path = folder_path
+        folder_path = op.join(folder_path, archive_names[name].split('.')[0])
+
+    rm_archive = False
+    martinos_path = '/cluster/fusion/sample_data/' + archive_name
+    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
+
+    if not op.exists(folder_path) and not download:
+        return ''
+    if not op.exists(folder_path) or force_update:
+        if name == 'brainstorm':
+            answer = input('%sAgree (y/[n])? ' % _bst_license_text)
+            if answer.lower() != 'y':
+                raise RuntimeError('You must agree to the license to use this '
+                                   'dataset')
+        logger.info('Downloading or reinstalling '
+                    'data archive %s at location %s' % (archive_name, path))
+
+        if op.exists(martinos_path):
+            archive_name = martinos_path
+        elif op.exists(neurospin_path):
+            archive_name = neurospin_path
+        else:
+            archive_name = op.join(path, archive_name)
+            rm_archive = True
+            fetch_archive = True
+            if op.exists(archive_name):
+                msg = ('Archive already exists. Overwrite it (y/[n])? ')
+                answer = input(msg)
+                if answer.lower() == 'y':
+                    os.remove(archive_name)
+                else:
+                    fetch_archive = False
+
+            if fetch_archive:
+                _fetch_file(url, archive_name, print_destination=False,
+                            hash_=hash_)
+
+        if op.exists(folder_path):
+            def onerror(func, path, exc_info):
+                """Deal with access errors (e.g. testing dataset read-only)"""
+                # Is the error an access error?
+                do = False
+                if not os.access(path, os.W_OK):
+                    perm = os.stat(path).st_mode | stat.S_IWUSR
+                    os.chmod(path, perm)
+                    do = True
+                if not os.access(op.dirname(path), os.W_OK):
+                    dir_perm = (os.stat(op.dirname(path)).st_mode |
+                                stat.S_IWUSR)
+                    os.chmod(op.dirname(path), dir_perm)
+                    do = True
+                if do:
+                    func(path)
+                else:
+                    raise
+            shutil.rmtree(folder_path, onerror=onerror)
+
+        logger.info('Decompressing the archive: %s' % archive_name)
+        logger.info('(please be patient, this can take some time)')
+        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
+            try:
+                if name != 'brainstorm':
+                    extract_path = path
+                tf = tarfile.open(archive_name, 'r:%s' % ext)
+                tf.extractall(path=extract_path)
+                tf.close()
+                break
+            except tarfile.ReadError as err:
+                logger.info('%s is %s; trying "bz2"' % (archive_name, err))
+        if folder_orig is not None:
+            shutil.move(op.join(path, folder_orig), folder_path)
+
+        if rm_archive:
+            os.remove(archive_name)
+
+    path = _do_path_update(path, update_path, key, name)
+    path = op.join(path, folder_name)
+
+    # compare the version of the dataset and mne
+    data_version = _dataset_version(path, name)
+    try:
+        from distutils.version import LooseVersion as LV
+    except ImportError:
+        warn('Could not determine %s dataset version; dataset could\n'
+             'be out of date. Please install the "distutils" package.'
+             % name)
+    else:  # 0.7 < 0.7.git should be False, therefore strip
+        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
+            warn('The {name} dataset (version {current}) is older than '
+                 'mne-python (version {newest}). If the examples fail, '
+                 'you may need to update the {name} dataset by using '
+                 'mne.datasets.{name}.data_path(force_update=True)'.format(
+                     name=name, current=data_version, newest=mne_version))
+    return (path, data_version) if return_version else path
+
+
+def _get_version(name):
+    """Helper to get a dataset version"""
+    if not has_dataset(name):
+        return None
+    return _data_path(name=name, return_version=True)[1]
+
+
+def has_dataset(name):
+    """Helper for dataset presence"""
+    endswith = {'sample': 'MNE-sample-data',
+                'spm': 'MNE-spm-face',
+                'somato': 'MNE-somato-data',
+                'testing': 'MNE-testing-data',
+                'fake': 'foo',
+                'brainstorm': 'MNE-brainstorm-data',
+                }[name]
+    archive_name = None
+    if name == 'brainstorm':
+        archive_name = dict(brainstorm='bst_raw')
+    dp = _data_path(download=False, name=name, check_version=False,
+                    archive_name=archive_name)
+    return dp.endswith(endswith)
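
Putting the helpers above together, probing for an installed dataset without
triggering a download might look like this (illustrative sketch):

    from mne.datasets.utils import has_dataset, _get_version

    if has_dataset('sample'):
        # reads version.txt inside the dataset folder, e.g. '0.7'
        print(_get_version('sample'))
    else:
        print('sample dataset not installed')
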
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/__init__.py
new file mode 100644
index 0000000..d0f4e47
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/__init__.py
@@ -0,0 +1,7 @@
+from .transformer import Scaler, FilterEstimator
+from .transformer import PSDEstimator, EpochsVectorizer, ConcatenateChannels
+from .mixin import TransformerMixin
+from .base import BaseEstimator, LinearModel
+from .csp import CSP
+from .ems import compute_ems
+from .time_gen import GeneralizationAcrossTime, TimeDecoding
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/base.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/base.py
new file mode 100644
index 0000000..8f20732
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/base.py
@@ -0,0 +1,622 @@
+"""Base class copy from sklearn.base"""
+# Authors: Gael Varoquaux <gael.varoquaux at normalesup.org>
+#          Romain Trachel <trachelr at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import inspect
+import warnings
+import six
+import numpy as np
+
+
+class BaseEstimator(object):
+    """Base class for all estimators in scikit-learn
+    Notes
+    -----
+    All estimators should specify all the parameters that can be set
+    at the class level in their ``__init__`` as explicit keyword
+    arguments (no ``*args`` or ``**kwargs``).
+    """
+
+    @classmethod
+    def _get_param_names(cls):
+        """Get parameter names for the estimator"""
+        # fetch the constructor or the original constructor before
+        # deprecation wrapping if any
+        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
+        if init is object.__init__:
+            # No explicit constructor to introspect
+            return []
+
+        # introspect the constructor arguments to find the model parameters
+        # to represent
+        args, varargs, kw, default = inspect.getargspec(init)
+        if varargs is not None:
+            raise RuntimeError("scikit-learn estimators should always "
+                               "specify their parameters in the signature"
+                               " of their __init__ (no varargs)."
+                               " %s doesn't follow this convention."
+                               % (cls, ))
+        # Remove 'self'
+        # XXX: This is going to fail if the init is a staticmethod, but
+        # who would do this?
+        args.pop(0)
+        args.sort()
+        return args
+
+    def get_params(self, deep=True):
+        """Get parameters for this estimator.
+
+        Parameters
+        ----------
+        deep : boolean, optional
+            If True, will return the parameters for this estimator and
+            contained subobjects that are estimators.
+
+        Returns
+        -------
+        params : mapping of string to any
+            Parameter names mapped to their values.
+        """
+        out = dict()
+        for key in self._get_param_names():
+            # We need deprecation warnings to always be on in order to
+            # catch deprecated param values.
+            # This is set in utils/__init__.py but it gets overwritten
+            # when running under python3 somehow.
+            warnings.simplefilter("always", DeprecationWarning)
+            try:
+                with warnings.catch_warnings(record=True) as w:
+                    value = getattr(self, key, None)
+                if len(w) and w[0].category == DeprecationWarning:
+                    # if the parameter is deprecated, don't show it
+                    continue
+            finally:
+                warnings.filters.pop(0)
+
+            # XXX: should we rather test if instance of estimator?
+            if deep and hasattr(value, 'get_params'):
+                deep_items = value.get_params().items()
+                out.update((key + '__' + k, val) for k, val in deep_items)
+            out[key] = value
+        return out
+
+    def set_params(self, **params):
+        """Set the parameters of this estimator.
+        The method works on simple estimators as well as on nested objects
+        (such as pipelines). The former have parameters of the form
+        ``<component>__<parameter>`` so that it's possible to update each
+        component of a nested object.
+        Returns
+        -------
+        self
+        """
+        if not params:
+            # Simple optimisation to gain speed (inspect is slow)
+            return self
+        valid_params = self.get_params(deep=True)
+        for key, value in six.iteritems(params):
+            split = key.split('__', 1)
+            if len(split) > 1:
+                # nested objects case
+                name, sub_name = split
+                if name not in valid_params:
+                    raise ValueError('Invalid parameter %s for estimator %s. '
+                                     'Check the list of available parameters '
+                                     'with `estimator.get_params().keys()`.' %
+                                     (name, self))
+                sub_object = valid_params[name]
+                sub_object.set_params(**{sub_name: value})
+            else:
+                # simple objects case
+                if key not in valid_params:
+                    raise ValueError('Invalid parameter %s for estimator %s. '
+                                     'Check the list of available parameters '
+                                     'with `estimator.get_params().keys()`.' %
+                                     (key, self.__class__.__name__))
+                setattr(self, key, value)
+        return self
+
+    def __repr__(self):
+        class_name = self.__class__.__name__
+        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
+                                               offset=len(class_name),),)
+
+
+###############################################################################
+def _pprint(params, offset=0, printer=repr):
+    """Pretty print the dictionary 'params'
+
+    Parameters
+    ----------
+    params : dict
+        The dictionary to pretty print.
+    offset : int
+        The offset in characters to add at the beginning of each line.
+    printer : callable
+        The function to convert entries to strings, typically
+        the builtin str or repr.
+
+    """
+    # Do a multi-line justified repr:
+    options = np.get_printoptions()
+    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
+    params_list = list()
+    this_line_length = offset
+    line_sep = ',\n' + (1 + offset // 2) * ' '
+    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
+        if type(v) is float:
+            # use str for representing floating point numbers
+            # this way we get consistent representation across
+            # architectures and versions.
+            this_repr = '%s=%s' % (k, str(v))
+        else:
+            # use repr of the rest
+            this_repr = '%s=%s' % (k, printer(v))
+        if len(this_repr) > 500:
+            this_repr = this_repr[:300] + '...' + this_repr[-100:]
+        if i > 0:
+            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
+                params_list.append(line_sep)
+                this_line_length = len(line_sep)
+            else:
+                params_list.append(', ')
+                this_line_length += 2
+        params_list.append(this_repr)
+        this_line_length += len(this_repr)
+
+    np.set_printoptions(**options)
+    lines = ''.join(params_list)
+    # Strip trailing space to avoid nightmare in doctests
+    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
+    return lines
+
+
+class LinearModel(BaseEstimator):
+    """
+    This object wraps a linear model from scikit-learn
+    and updates its attributes on each fit. The linear model coefficients
+    (filters) are used to extract discriminant neural sources from
+    the measured data. This class implements the computation of patterns,
+    which provide neurophysiologically interpretable information [1],
+    in the sense that significant nonzero weights are only observed at
+    channels whose activity is related to discriminant neural sources.
+
+    Parameters
+    ----------
+    model : object | None
+        A linear model from scikit-learn with a fit method
+        that updates a coef_ attribute.
+        If None, the model will be a LogisticRegression.
+
+    Attributes
+    ----------
+    filters_ : ndarray
+        If fit, the filters used to decompose the data, else None.
+    patterns_ : ndarray
+        If fit, the patterns used to restore M/EEG signals, else None.
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    See Also
+    --------
+    ICA
+    CSP
+    xDawn
+
+    References
+    ----------
+    [1] Haufe, S., Meinecke, F., Gorgen, K., Dahne, S., Haynes, J.-D.,
+    Blankertz, B., & Biessmann, F. (2014). On the interpretation of
+    weight vectors of linear models in multivariate neuroimaging.
+    NeuroImage, 87, 96-110.
+    """
+    def __init__(self, model=None):
+        if model is None:
+            from sklearn.linear_model import LogisticRegression
+            model = LogisticRegression()
+
+        self.model = model
+        self.patterns_ = None
+        self.filters_ = None
+
+    def fit(self, X, y):
+        """Estimate the coefficient of the linear model.
+        Save the coefficient in the attribute filters_ and
+        computes the attribute patterns_ using [1].
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data used to estimate the coefficients.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        self : instance of LinearModel
+            Returns the modified instance.
+
+        References
+        ----------
+        [1] Haufe et al. (2014). On the interpretation of weight vectors of
+        linear models in multivariate neuroimaging. NeuroImage, 87, 96-110.
+        """
+        # fit the Model
+        self.model.fit(X, y)
+        # computes the patterns
+        assert hasattr(self.model, 'coef_'), \
+            "model needs a coef_ attribute to compute the patterns"
+        self.patterns_ = np.dot(X.T, np.dot(X, self.model.coef_.T))
+        self.filters_ = self.model.coef_
+
+        return self
+
+    def transform(self, X, y=None):
+        """Transform the data using the linear model.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to transform.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        X_new : array
+            The data transformed by the underlying linear model.
+        """
+        return self.model.transform(X)
+
+    def fit_transform(self, X, y):
+        """fit the data and transform it using the linear model.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to transform.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        X_new : array
+            The data transformed by the fitted linear model.
+        """
+        return self.fit(X, y).transform(X)
+
+    def predict(self, X):
+        """Computes prediction of X.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data used to compute prediction.
+
+        Returns
+        -------
+        y_pred : array, shape (n_epochs,)
+            The predictions.
+        """
+        return self.model.predict(X)
+
+    def score(self, X, y):
+        """
+        Returns the score of the linear model computed
+        on the given test data.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The test data.
+        y : array, shape (n_epochs,)
+            The true class for each epoch.
+
+        Returns
+        -------
+        score : float
+            Score of the linear model
+
+        """
+        return self.model.score(X, y)
+
+    def plot_patterns(self, info, times=None, ch_type=None, layout=None,
+                      vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                      colorbar=True, scale=None, scale_time=1e3, unit='a.u.',
+                      res=64, size=1, cbar_fmt='%3.1f',
+                      name_format='%01d ms', proj=False, show=True,
+                      show_names=False, title=None, mask=None,
+                      mask_params=None, outlines='head', contours=6,
+                      image_interp='bilinear', average=None, head_pos=None):
+        """
+        Plot topographic patterns of the linear model.
+        The patterns explain how the measured data was generated
+        from the neural sources (a.k.a. the forward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit the linear model.
+            If not possible, consider using ``create_info``.
+        times : float | array of floats | None
+            The time point(s) to plot. If None, the number of ``axes``
+            determines the number of time points. If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "%01d ms".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+
+        if times is None:
+            tmin = 0
+        else:
+            tmin = times[0]
+
+        # create an evoked
+        patterns = EvokedArray(self.patterns_.reshape(info['nchan'], -1),
+                               info, tmin=tmin)
+        # then call plot_topomap
+        return patterns.plot_topomap(times=times, ch_type=ch_type,
+                                     layout=layout, vmin=vmin, vmax=vmax,
+                                     cmap=cmap, colorbar=colorbar, res=res,
+                                     cbar_fmt=cbar_fmt, sensors=sensors,
+                                     scale=scale, scale_time=scale_time,
+                                     time_format=name_format, size=size,
+                                     show_names=show_names, unit=unit,
+                                     mask_params=mask_params,
+                                     mask=mask, outlines=outlines,
+                                     contours=contours, title=title,
+                                     image_interp=image_interp, show=show,
+                                     head_pos=head_pos)
+
+    def plot_filters(self, info, times=None, ch_type=None, layout=None,
+                     vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                     colorbar=True, scale=None, scale_time=1e3, unit='a.u.',
+                     res=64, size=1, cbar_fmt='%3.1f',
+                     name_format='%01d ms', proj=False, show=True,
+                     show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear', average=None, head_pos=None):
+        """
+        Plot topographic filters of the linear model.
+        The filters are used to extract discriminant neural sources from
+        the measured data (a.k.a. the backward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit the linear model.
+            If not possible, consider using ``create_info``.
+        times : float | array of floats | None
+            The time point(s) to plot. If None, the number of ``axes``
+            determines the number of time points. If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "%01d ms".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+
+        if times is None:
+            tmin = 0
+        else:
+            tmin = times[0]
+
+        # create an evoked
+        filters = EvokedArray(self.filters_.T.reshape(info['nchan'], -1),
+                              info, tmin=tmin)
+        # then call plot_topomap
+        return filters.plot_topomap(times=times, ch_type=ch_type,
+                                    layout=layout, vmin=vmin, vmax=vmax,
+                                    cmap=cmap, colorbar=colorbar, res=res,
+                                    cbar_fmt=cbar_fmt, sensors=sensors,
+                                    scale=scale, scale_time=scale_time,
+                                    time_format=name_format, size=size,
+                                    show_names=show_names, unit=unit,
+                                    mask_params=mask_params,
+                                    mask=mask, outlines=outlines,
+                                    contours=contours, title=title,
+                                    image_interp=image_interp, show=show,
+                                    head_pos=head_pos)
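
A minimal sketch of ``LinearModel`` and the ``BaseEstimator`` parameter API,
assuming scikit-learn is installed; the data and shapes below are purely
illustrative:

    import numpy as np
    from mne.decoding import LinearModel

    rng = np.random.RandomState(0)
    X = rng.randn(40, 10)          # 40 epochs, 10 features (e.g. channels)
    y = rng.randint(0, 2, 40)      # two classes

    lm = LinearModel()             # wraps a LogisticRegression by default
    lm.fit(X, y)
    print(lm.filters_.shape)       # (1, 10): the model's coef_
    print(lm.patterns_.shape)      # (10, 1): np.dot(X.T, np.dot(X, coef_.T))

    # BaseEstimator provides sklearn-style parameter introspection;
    # nested parameters use the '<component>__<parameter>' form:
    print(sorted(lm.get_params().keys()))
    lm.set_params(model__C=0.1)    # forwarded to the wrapped model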
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/csp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/csp.py
new file mode 100644
index 0000000..0ff5eaa
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/csp.py
@@ -0,0 +1,467 @@
+# Authors: Romain Trachel <trachelr at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+import warnings
+
+import numpy as np
+from scipy import linalg
+
+from .mixin import TransformerMixin
+from ..cov import _regularized_covariance
+
+
+class CSP(TransformerMixin):
+    """M/EEG signal decomposition using the Common Spatial Patterns (CSP).
+
+    This object can be used as a supervised decomposition to estimate
+    spatial filters for feature extraction in a two-class decoding problem.
+    See [1].
+
+    Parameters
+    ----------
+    n_components : int (default 4)
+        The number of components to decompose M/EEG signals.
+        This number should be set by cross-validation.
+    reg : float | str | None (default None)
+        If not None, allow regularization for covariance estimation.
+        If float, shrinkage covariance is used (0 <= shrinkage <= 1).
+        If str, optimal shrinkage is computed using Ledoit-Wolf shrinkage
+        ('ledoit_wolf') or Oracle Approximating Shrinkage ('oas').
+    log : bool (default True)
+        If True, apply a log transform to standardize the features.
+        If False, features are just z-scored.
+
+    Attributes
+    ----------
+    filters_ : ndarray, shape (n_channels, n_channels)
+        If fit, the CSP components used to decompose the data, else None.
+    patterns_ : ndarray, shape (n_channels, n_channels)
+        If fit, the CSP patterns used to restore M/EEG signals, else None.
+    mean_ : ndarray, shape (n_channels,)
+        If fit, the mean squared power for each component.
+    std_ : ndarray, shape (n_channels,)
+        If fit, the standard deviation of the squared power per component.
+
+    References
+    ----------
+    [1] Zoltan J. Koles. The quantitative extraction and topographic mapping
+    of the abnormal components in the clinical EEG. Electroencephalography
+    and Clinical Neurophysiology, 79(6):440--447, December 1991.
+    """
+
+    def __init__(self, n_components=4, reg=None, log=True):
+        """Init of CSP."""
+        self.n_components = n_components
+        if reg == 'lws':
+            warnings.warn('`lws` has been deprecated for the `reg`'
+                          ' argument. It will be removed in 0.11.'
+                          ' Use `ledoit_wolf` instead.', DeprecationWarning)
+            reg = 'ledoit_wolf'
+        self.reg = reg
+        self.log = log
+        self.filters_ = None
+        self.patterns_ = None
+        self.mean_ = None
+        self.std_ = None
+
+    def fit(self, epochs_data, y):
+        """Estimate the CSP decomposition on epochs.
+
+        Parameters
+        ----------
+        epochs_data : ndarray, shape (n_epochs, n_channels, n_times)
+            The data to estimate the CSP on.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        self : instance of CSP
+            Returns the modified instance.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+        epochs_data = np.atleast_3d(epochs_data)
+        # check number of epochs
+        if epochs_data.shape[0] != len(y):
+            raise ValueError("n_epochs must be the same for epochs_data and y")
+        classes = np.unique(y)
+        if len(classes) != 2:
+            raise ValueError("More than two different classes in the data.")
+        # concatenate epochs
+        class_1 = np.transpose(epochs_data[y == classes[0]],
+                               [1, 0, 2]).reshape(epochs_data.shape[1], -1)
+        class_2 = np.transpose(epochs_data[y == classes[1]],
+                               [1, 0, 2]).reshape(epochs_data.shape[1], -1)
+
+        cov_1 = _regularized_covariance(class_1, reg=self.reg)
+        cov_2 = _regularized_covariance(class_2, reg=self.reg)
+
+        # then fit on covariance
+        self._fit(cov_1, cov_2)
+
+        pick_filters = self.filters_[:self.n_components]
+        X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
+
+        # compute features (mean band power)
+        X = (X ** 2).mean(axis=-1)
+
+        # To standardize features
+        self.mean_ = X.mean(axis=0)
+        self.std_ = X.std(axis=0)
+
+        return self
+
+    def _fit(self, cov_a, cov_b):
+        """Aux Function (modifies cov_a and cov_b in-place)."""
+        cov_a /= np.trace(cov_a)
+        cov_b /= np.trace(cov_b)
+        # compute the eigenvalues
+        lambda_, u = linalg.eigh(cov_a + cov_b)
+        # sort them
+        ind = np.argsort(lambda_)[::-1]
+        lambda2_ = lambda_[ind]
+
+        u = u[:, ind]
+        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
+
+        # Form the generalized eigenvalue problem
+        w_a = np.dot(np.dot(p, cov_a), p.T)
+        w_b = np.dot(np.dot(p, cov_b), p.T)
+        # and solve it
+        vals, vecs = linalg.eigh(w_a, w_b)
+        # sort vectors by discriminative power using the eigenvalues
+        ind = np.argsort(np.maximum(vals, 1. / vals))[::-1]
+        vecs = vecs[:, ind]
+        # and project
+        w = np.dot(vecs.T, p)
+
+        self.filters_ = w
+        self.patterns_ = linalg.pinv(w).T
+
+    def transform(self, epochs_data, y=None):
+        """Estimate epochs sources given the CSP filters.
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None
+            Not used.
+
+        Returns
+        -------
+        X : ndarray of shape (n_epochs, n_sources)
+            The CSP features averaged over time.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+        if self.filters_ is None:
+            raise RuntimeError('No filters available. Please first fit CSP '
+                               'decomposition.')
+
+        pick_filters = self.filters_[:self.n_components]
+        X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
+
+        # compute features (mean band power)
+        X = (X ** 2).mean(axis=-1)
+        if self.log:
+            X = np.log(X)
+        else:
+            X -= self.mean_
+            X /= self.std_
+        return X
+
+    def plot_patterns(self, info, components=None, ch_type=None, layout=None,
+                      vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                      colorbar=True, scale=None, scale_time=1, unit=None,
+                      res=64, size=1, cbar_fmt='%3.1f',
+                      name_format='CSP%01d', proj=False, show=True,
+                      show_names=False, title=None, mask=None,
+                      mask_params=None, outlines='head', contours=6,
+                      image_interp='bilinear', average=None, head_pos=None):
+        """Plot topographic patterns of CSP components.
+
+        The CSP patterns explain how the measured data was generated
+        from the neural sources (a.k.a. the forward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit CSP.
+            If not possible, consider using ``create_info``.
+        components : float | array of floats | None
+           The CSP patterns to plot. If None, all n_components are shown.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "CSP%01d"
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+        if components is None:
+            components = np.arange(self.n_components)
+
+        # set sampling frequency to have 1 component per time point
+        info = cp.deepcopy(info)
+        info['sfreq'] = 1.
+        # create an evoked
+        patterns = EvokedArray(self.patterns_.T, info, tmin=0)
+        # then call plot_topomap
+        return patterns.plot_topomap(times=components, ch_type=ch_type,
+                                     layout=layout, vmin=vmin, vmax=vmax,
+                                     cmap=cmap, colorbar=colorbar, res=res,
+                                     cbar_fmt=cbar_fmt, sensors=sensors,
+                                     scale=1, scale_time=1, unit='a.u.',
+                                     time_format=name_format, size=size,
+                                     show_names=show_names,
+                                     mask_params=mask_params,
+                                     mask=mask, outlines=outlines,
+                                     contours=contours,
+                                     image_interp=image_interp, show=show,
+                                     head_pos=head_pos)
+
+    def plot_filters(self, info, components=None, ch_type=None, layout=None,
+                     vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                     colorbar=True, scale=None, scale_time=1, unit=None,
+                     res=64, size=1, cbar_fmt='%3.1f',
+                     name_format='CSP%01d', proj=False, show=True,
+                     show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear', average=None, head_pos=None):
+        """Plot topographic filters of CSP components.
+
+        The CSP filters are used to extract discriminant neural sources from
+        the measured data (a.k.a. the backward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit CSP.
+            If not possible, consider using ``create_info``.
+        components : float | array of floats | None
+           The CSP filters to plot. If None, all n_components are shown.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "CSP%01d"
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+        if components is None:
+            components = np.arange(self.n_components)
+
+        # set sampling frequency to have 1 component per time point
+        info = cp.deepcopy(info)
+        info['sfreq'] = 1.
+        # create an evoked
+        filters = EvokedArray(self.filters_, info, tmin=0)
+        # then call plot_topomap
+        return filters.plot_topomap(times=components, ch_type=ch_type,
+                                    layout=layout, vmin=vmin, vmax=vmax,
+                                    cmap=cmap, colorbar=colorbar, res=res,
+                                    cbar_fmt=cbar_fmt, sensors=sensors,
+                                    scale=1, scale_time=1, unit='a.u.',
+                                    time_format=name_format, size=size,
+                                    show_names=show_names,
+                                    mask_params=mask_params,
+                                    mask=mask, outlines=outlines,
+                                    contours=contours,
+                                    image_interp=image_interp, show=show,
+                                    head_pos=head_pos)
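
A minimal sketch of fitting ``CSP`` on synthetic two-class epochs (the data
are illustrative; real use would pass ``epochs.get_data()``):

    import numpy as np
    from mne.decoding import CSP

    rng = np.random.RandomState(42)
    # 30 epochs, 8 channels, 100 time samples, two classes
    epochs_data = rng.randn(30, 8, 100)
    y = np.array([0, 1] * 15)
    epochs_data[y == 1, 0] *= 2.   # class 1 is stronger on channel 0

    csp = CSP(n_components=4, reg=None, log=True)
    X = csp.fit_transform(epochs_data, y)
    print(csp.filters_.shape)      # (8, 8): rows sorted by discriminability
    print(X.shape)                 # (30, 4): log band power per component

Internally, ``_fit`` whitens the composite covariance ``cov_a + cov_b`` and
solves a generalized eigenvalue problem on the whitened class covariances,
keeping first the eigenvectors whose eigenvalues are most asymmetric between
the two classes.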
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/ems.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/ems.py
new file mode 100644
index 0000000..d41cdbc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/ems.py
@@ -0,0 +1,117 @@
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..utils import logger, verbose
+from ..fixes import Counter
+from ..parallel import parallel_func
+from .. import pick_types, pick_info
+
+
+ at verbose
+def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
+    """Compute event-matched spatial filter on epochs
+
+    This version operates on the entire time course. No time window needs to
+    be specified. The result is a spatial filter at each time point and a
+    corresponding time course. Intuitively, the result gives the similarity
+    between the filter at each time point and the data vector (sensors) at
+    that time point.
+
+    References
+    ----------
+    [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
+        multi-sensor data to a single time course that reveals experimental
+        effects", BMC Neuroscience 2013, 14:122
+
+    Parameters
+    ----------
+    epochs : instance of mne.Epochs
+        The epochs.
+    conditions : list of str | None
+        If a list of strings, the strings must match keys in
+        epochs.event_id, and the number of conditions must be supported
+        by the objective function. If None, the keys in epochs.event_id
+        are used.
+    picks : array-like of int | None
+        Channels to be included. If None, only good data channels are used.
+        Defaults to None.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surrogate_trials : ndarray, shape (n_epochs, n_times)
+        The trial surrogates.
+    mean_spatial_filter : ndarray, shape (n_channels, n_times)
+        The set of spatial filters.
+    conditions : ndarray, shape (n_epochs,)
+        The conditions used. Values correspond to original event ids.
+    """
+    logger.info('...computing surrogate time series. This can take some time')
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True)
+
+    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
+        raise ValueError('This function requires the same number of epochs '
+                         'in each condition. Please consider using '
+                         '`epochs.equalize_event_counts`')
+
+    if conditions is None:
+        conditions = epochs.event_id.keys()
+        epochs = epochs.copy()
+    else:
+        epochs = epochs[conditions]
+
+    epochs.drop_bad_epochs()
+
+    if len(conditions) != 2:
+        raise ValueError('Currently this function expects exactly 2 '
+                         'conditions but you gave me %i' %
+                         len(conditions))
+
+    ev = epochs.events[:, 2]
+    # special care to avoid path-dependent mappings and orders
+    conditions = list(sorted(conditions))
+    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
+
+    info = pick_info(epochs.info, picks)
+    data = epochs.get_data()[:, picks]
+
+    # Scale (z-score) the data by channel type
+    for ch_type in ['mag', 'grad', 'eeg']:
+        if ch_type in epochs:
+            if ch_type == 'eeg':
+                this_picks = pick_types(info, meg=False, eeg=True)
+            else:
+                this_picks = pick_types(info, meg=ch_type, eeg=False)
+            data[:, this_picks] /= np.std(data[:, this_picks])
+
+    from sklearn.cross_validation import LeaveOneOut
+
+    parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
+    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
+                   for train, test in LeaveOneOut(len(data)))
+
+    surrogate_trials, spatial_filter = zip(*out)
+    surrogate_trials = np.array(surrogate_trials)
+    spatial_filter = np.mean(spatial_filter, axis=0)
+
+    return surrogate_trials, spatial_filter, epochs.events[:, 2]
+
+
+def _ems_diff(data0, data1):
+    """default diff objective function"""
+    return np.mean(data0, axis=0) - np.mean(data1, axis=0)
+
+
+def _run_ems(objective_function, data, cond_idx, train, test):
+    d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
+    d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
+    # compute surrogates
+    return np.sum(data[test[0]] * d, axis=0), d
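
A hedged usage sketch of ``compute_ems``, mirroring the test below; the file
paths are placeholders for a real raw recording and its events:

    from mne import io, Epochs, read_events
    from mne.decoding import compute_ems

    raw_fname = 'test_raw.fif'       # placeholder path
    event_fname = 'test-eve.fif'     # placeholder path
    raw = io.Raw(raw_fname, preload=True)
    events = read_events(event_fname)
    epochs = Epochs(raw, events, dict(aud_l=1, vis_l=3), -0.2, 0.5,
                    baseline=(None, 0), preload=True)
    # compute_ems requires equal trial counts in exactly two conditions
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    surrogates, filters, conditions = compute_ems(epochs)
    print(surrogates.shape)          # (n_epochs, n_times)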
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/mixin.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/mixin.py
new file mode 100644
index 0000000..2f16db8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/mixin.py
@@ -0,0 +1,30 @@
+class TransformerMixin(object):
+    """Mixin class for all transformers in scikit-learn"""
+
+    def fit_transform(self, X, y=None, **fit_params):
+        """Fit to data, then transform it
+
+        Fits transformer to X and y with optional parameters fit_params
+        and returns a transformed version of X.
+
+        Parameters
+        ----------
+        X : numpy array of shape [n_samples, n_features]
+            Training set.
+
+        y : numpy array of shape [n_samples]
+            Target values.
+
+        Returns
+        -------
+        X_new : numpy array of shape [n_samples, n_features_new]
+            Transformed array.
+        """
+        # non-optimized default implementation; override when a better
+        # method is possible for a given transformer
+        if y is None:
+            # fit method of arity 1 (unsupervised transformation)
+            return self.fit(X, **fit_params).transform(X)
+        else:
+            # fit method of arity 2 (supervised transformation)
+            return self.fit(X, y, **fit_params).transform(X)
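
A minimal sketch of how a custom transformer can reuse the mixin's default
``fit_transform``; the ``Demean`` class is purely illustrative:

    import numpy as np
    from mne.decoding.mixin import TransformerMixin

    class Demean(TransformerMixin):
        """Toy transformer: subtract the per-feature mean."""

        def fit(self, X, y=None):
            self.mean_ = X.mean(axis=0)
            return self

        def transform(self, X):
            return X - self.mean_

    X = np.arange(12.).reshape(4, 3)
    X_new = Demean().fit_transform(X)    # fit(X).transform(X) via the mixin
    print(X_new.mean(axis=0))            # -> [ 0.  0.  0.]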
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_csp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_csp.py
new file mode 100644
index 0000000..6478567
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_csp.py
@@ -0,0 +1,108 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Romain Trachel <trachelr at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+
+from mne import io, Epochs, read_events, pick_types
+from mne.decoding.csp import CSP
+from mne.utils import requires_sklearn, slow_test
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+# if stop is too small pca may fail in some cases, but we're okay on this file
+start, stop = 0, 8
+
+
+ at slow_test
+def test_csp():
+    """Test Common Spatial Patterns algorithm on epochs
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[2:9:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+    n_channels = epochs_data.shape[1]
+
+    n_components = 3
+    csp = CSP(n_components=n_components)
+
+    csp.fit(epochs_data, epochs.events[:, -1])
+
+    y = epochs.events[:, -1]
+    X = csp.fit_transform(epochs_data, y)
+    assert_true(csp.filters_.shape == (n_channels, n_channels))
+    assert_true(csp.patterns_.shape == (n_channels, n_channels))
+    assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
+                              X)
+
+    # test init exception
+    assert_raises(ValueError, csp.fit, epochs_data,
+                  np.zeros_like(epochs.events))
+    assert_raises(ValueError, csp.fit, epochs, y)
+    assert_raises(ValueError, csp.transform, epochs, y)
+
+    csp.n_components = n_components
+    sources = csp.transform(epochs_data)
+    assert_true(sources.shape[1] == n_components)
+
+    epochs.pick_types(meg='mag', copy=False)
+
+    # test plot patterns
+    components = np.arange(n_components)
+    csp.plot_patterns(epochs.info, components=components, res=12,
+                      show=False)
+
+    # test plot filters
+    csp.plot_filters(epochs.info, components=components, res=12,
+                     show=False)
+
+
+ at requires_sklearn
+def test_regularized_csp():
+    """Test Common Spatial Patterns algorithm using regularized covariance
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+    n_channels = epochs_data.shape[1]
+
+    n_components = 3
+    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
+    for reg in reg_cov:
+        csp = CSP(n_components=n_components, reg=reg)
+        csp.fit(epochs_data, epochs.events[:, -1])
+        y = epochs.events[:, -1]
+        X = csp.fit_transform(epochs_data, y)
+        assert_true(csp.filters_.shape == (n_channels, n_channels))
+        assert_true(csp.patterns_.shape == (n_channels, n_channels))
+        assert_array_almost_equal(csp.fit(epochs_data, y).
+                                  transform(epochs_data), X)
+
+        # test init exception
+        assert_raises(ValueError, csp.fit, epochs_data,
+                      np.zeros_like(epochs.events))
+        assert_raises(ValueError, csp.fit, epochs, y)
+        assert_raises(ValueError, csp.transform, epochs, y)
+
+        csp.n_components = n_components
+        sources = csp.transform(epochs_data)
+        assert_true(sources.shape[1] == n_components)
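For orientation, the two tests above pin down the CSP contract: fit on an
array of shape (n_epochs, n_channels, n_times) plus integer class labels,
transform to (n_epochs, n_components) log-variance features. A minimal sketch
on synthetic data (array sizes and the random seed are illustrative, not taken
from the patch):

    import numpy as np
    from mne.decoding import CSP

    rng = np.random.RandomState(0)
    X = rng.randn(40, 8, 50)       # (n_epochs, n_channels, n_times)
    y = np.repeat([1, 3], 20)      # two classes, as in the tests above

    csp = CSP(n_components=3)
    features = csp.fit_transform(X, y)  # log-variance per CSP component
    assert features.shape == (40, 3)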
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_ems.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_ems.py
new file mode 100644
index 0000000..e3abce6
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_ems.py
@@ -0,0 +1,56 @@
+# Author: Denis A. Engemann <d.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_equal, assert_raises
+
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import requires_sklearn
+from mne.decoding import compute_ems
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+curdir = op.join(op.dirname(__file__))
+
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+
+
+@requires_sklearn
+def test_ems():
+    """Test event-matched spatial filters"""
+    raw = io.Raw(raw_fname, preload=False)
+
+    # create unequal number of events
+    events = read_events(event_name)
+    events[-2, 2] = 3
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
+    epochs.equalize_event_counts(epochs.event_id, copy=False)
+
+    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
+    surrogates, filters, conditions = compute_ems(epochs)
+    assert_equal(list(set(conditions)), [1, 3])
+
+    events = read_events(event_name)
+    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
+    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs.equalize_event_counts(epochs.event_id, copy=False)
+
+    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
+
+    assert_raises(ValueError, compute_ems, epochs)
+    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
+    assert_equal(n_expected, len(surrogates))
+    assert_equal(n_expected, len(conditions))
+    assert_equal(list(set(conditions)), [2, 3])
+    raw.close()
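The assertions above double as the compute_ems contract: it wants exactly two
conditions and returns one spatially filtered time course per epoch. A minimal
sketch reusing the module-level paths defined at the top of this test file (a
hedged illustration, not part of the patch):

    from mne import io, Epochs, read_events, pick_types
    from mne.decoding import compute_ems

    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, exclude='bads')
    epochs = Epochs(raw, events, dict(aud_l=1, vis_l=3), -0.2, 0.5,
                    picks=picks, baseline=(None, 0), preload=True)
    epochs.equalize_event_counts(epochs.event_id, copy=False)
    surrogates, filters, conditions = compute_ems(epochs)
    # one surrogate time course per epoch; one spatial filter per time point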
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_time_gen.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_time_gen.py
new file mode 100644
index 0000000..4fe1b0c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_time_gen.py
@@ -0,0 +1,309 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Jean-Remi King <jeanremi.king at gmail.com>
+#
+# License: BSD (3-clause)
+import warnings
+import copy
+import os.path as op
+
+from nose.tools import assert_equal, assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import (requires_sklearn, requires_sklearn_0_15, slow_test,
+                       run_tests_if_main)
+from mne.decoding import GeneralizationAcrossTime, TimeDecoding
+
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+event_id_gen = dict(aud_l=2, vis_l=4)
+
+
+def make_epochs():
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[0:2]
+    decim = 30
+
+    # Test on time generalization within one condition
+    with warnings.catch_warnings(record=True):
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), preload=True, decim=decim)
+    return epochs
+
+
+@slow_test
+@requires_sklearn_0_15
+def test_generalization_across_time():
+    """Test time generalization decoding
+    """
+    from sklearn.svm import SVC
+    from sklearn.linear_model import RANSACRegressor, LinearRegression
+    from sklearn.preprocessing import LabelEncoder
+    from sklearn.metrics import mean_squared_error
+    from sklearn.cross_validation import LeaveOneLabelOut
+
+    epochs = make_epochs()
+
+    # Test default running
+    gat = GeneralizationAcrossTime(picks='foo')
+    assert_equal("<GAT | no fit, no prediction, no score>", "%s" % gat)
+    assert_raises(ValueError, gat.fit, epochs)
+    with warnings.catch_warnings(record=True):
+        # check classic fit + check manual picks
+        gat.picks = [0]
+        gat.fit(epochs)
+        # check optional y as array
+        gat.picks = None
+        gat.fit(epochs, y=epochs.events[:, 2])
+        # check optional y as list
+        gat.fit(epochs, y=epochs.events[:, 2].tolist())
+    assert_equal(len(gat.picks_), len(gat.ch_names), 1)
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), no "
+                 "prediction, no score>", '%s' % gat)
+    assert_equal(gat.ch_names, epochs.ch_names)
+    gat.predict(epochs)
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
+                 "predicted 14 epochs, no score>",
+                 "%s" % gat)
+    gat.score(epochs)
+    gat.score(epochs, y=epochs.events[:, 2])
+    gat.score(epochs, y=epochs.events[:, 2].tolist())
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
+                 "predicted 14 epochs,\n scored "
+                 "(accuracy_score)>", "%s" % gat)
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs, y=epochs.events[:, 2])
+
+    old_mode = gat.predict_mode
+    gat.predict_mode = 'super-foo-mode'
+    assert_raises(ValueError, gat.predict, epochs)
+    gat.predict_mode = old_mode
+
+    gat.score(epochs, y=epochs.events[:, 2])
+    assert_true("accuracy_score" in '%s' % gat.scorer_)
+    epochs2 = epochs.copy()
+
+    # check _DecodingTime class
+    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
+                 "0.047 (s), length: 0.047 (s), n_time_windows: 15>",
+                 "%s" % gat.train_times_)
+    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
+                 "0.047 (s), length: 0.047 (s), n_time_windows: 15 x 15>",
+                 "%s" % gat.test_times_)
+
+    # the y-check
+    gat.predict_mode = 'mean-prediction'
+    epochs2.events[:, 2] += 10
+    gat_ = copy.deepcopy(gat)
+    assert_raises(ValueError, gat_.score, epochs2)
+    gat.predict_mode = 'cross-validation'
+
+    # Test basics
+    # --- number of trials
+    assert_true(gat.y_train_.shape[0] ==
+                gat.y_true_.shape[0] ==
+                len(gat.y_pred_[0][0]) == 14)
+    # ---  number of folds
+    assert_true(np.shape(gat.estimators_)[1] == gat.cv)
+    # ---  length training size
+    assert_true(len(gat.train_times_['slices']) == 15 ==
+                np.shape(gat.estimators_)[0])
+    # ---  length testing sizes
+    assert_true(len(gat.test_times_['slices']) == 15 ==
+                np.shape(gat.scores_)[0])
+    assert_true(len(gat.test_times_['slices'][0]) == 15 ==
+                np.shape(gat.scores_)[1])
+
+    # Test longer time window
+    gat = GeneralizationAcrossTime(train_times={'length': .100})
+    with warnings.catch_warnings(record=True):
+        gat2 = gat.fit(epochs)
+    assert_true(gat is gat2)  # return self
+    assert_true(hasattr(gat2, 'cv_'))
+    assert_true(gat2.cv_ != gat.cv)
+    scores = gat.score(epochs)
+    assert_true(isinstance(scores, list))  # type check
+    assert_equal(len(scores[0]), len(scores))  # shape check
+
+    assert_equal(len(gat.test_times_['slices'][0][0]), 2)
+    # Decim training steps
+    gat = GeneralizationAcrossTime(train_times={'step': .100})
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    gat.score(epochs)
+    assert_true(len(gat.scores_) == len(gat.estimators_) == 8)  # training time
+    assert_equal(len(gat.scores_[0]), 15)  # testing time
+
+    # Test start stop training & test cv without n_fold params
+    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
+    gat = GeneralizationAcrossTime(cv=LeaveOneLabelOut(y_4classes),
+                                   train_times={'start': 0.090, 'stop': 0.250})
+    # predict without fit
+    assert_raises(RuntimeError, gat.predict, epochs)
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs, y=y_4classes)
+    gat.score(epochs)
+    assert_equal(len(gat.scores_), 4)
+    assert_equal(gat.train_times_['times'][0], epochs.times[6])
+    assert_equal(gat.train_times_['times'][-1], epochs.times[9])
+
+    # Test score without passing epochs & Test diagonal decoding
+    gat = GeneralizationAcrossTime(test_times='diagonal')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+    assert_raises(RuntimeError, gat.score)
+    gat.predict(epochs)
+    scores = gat.score()
+    assert_true(scores is gat.scores_)
+    assert_equal(np.shape(gat.scores_), (15, 1))
+    assert_array_equal([tim for ttime in gat.test_times_['times']
+                        for tim in ttime], gat.train_times_['times'])
+
+    # Test generalization across conditions
+    gat = GeneralizationAcrossTime(predict_mode='mean-prediction')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs[0:6])
+    gat.predict(epochs[7:])
+    gat.score(epochs[7:])
+
+    # Test training time parameters
+    gat_ = copy.deepcopy(gat)
+    # --- start stop outside time range
+    gat_.train_times = dict(start=-999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(start=999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+    # --- impossible slices
+    gat_.train_times = dict(step=.000001)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(length=.000001)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(length=999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+
+    # Test testing time parameters
+    # --- outside time range
+    gat.test_times = dict(start=-999.)
+    assert_raises(ValueError, gat.predict, epochs)
+    gat.test_times = dict(start=999.)
+    assert_raises(ValueError, gat.predict, epochs)
+    # --- impossible slices
+    gat.test_times = dict(step=.000001)
+    assert_raises(ValueError, gat.predict, epochs)
+    gat_ = copy.deepcopy(gat)
+    gat_.train_times_['length'] = .000001
+    gat_.test_times = dict(length=.000001)
+    assert_raises(ValueError, gat_.predict, epochs)
+    # --- test time region of interest
+    gat.test_times = dict(step=.150)
+    gat.predict(epochs)
+    assert_array_equal(np.shape(gat.y_pred_), (15, 5, 14, 1))
+    # --- silly value
+    gat.test_times = 'foo'
+    assert_raises(ValueError, gat.predict, epochs)
+    assert_raises(RuntimeError, gat.score)
+    # --- unmatched length between training and testing time
+    gat.test_times = dict(length=.150)
+    assert_raises(ValueError, gat.predict, epochs)
+
+    svc = SVC(C=1, kernel='linear', probability=True)
+    gat = GeneralizationAcrossTime(clf=svc, predict_mode='mean-prediction')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    # sklearn needs it: c.f.
+    # https://github.com/scikit-learn/scikit-learn/issues/2723
+    # and http://bit.ly/1u7t8UT
+    assert_raises(ValueError, gat.score, epochs2)
+    gat.score(epochs)
+    scores = sum(scores, [])  # flatten
+    assert_true(0.0 <= np.min(scores) <= 1.0)
+    assert_true(0.0 <= np.max(scores) <= 1.0)
+
+    # Test that gets error if train on one dataset, test on another, and don't
+    # specify appropriate cv:
+    gat = GeneralizationAcrossTime()
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    gat.predict(epochs)
+    assert_raises(ValueError, gat.predict, epochs[:10])
+
+    # Check that still works with classifier that output y_pred with
+    # shape = (n_trials, 1) instead of (n_trials,)
+    gat = GeneralizationAcrossTime(clf=RANSACRegressor(LinearRegression()),
+                                   cv=2)
+    epochs.crop(None, epochs.times[2])
+    gat.fit(epochs)
+    gat.predict(epochs)
+
+    # Test combinations of complex scenarios
+    # 2 or more distinct classes
+    n_classes = [2, 4]  # 4 tested
+    # nicely ordered labels or not
+    le = LabelEncoder()
+    y = le.fit_transform(epochs.events[:, 2])
+    y[len(y) // 2:] += 2
+    ys = (y, y + 1000)
+    # Univariate and multivariate prediction
+    svc = SVC(C=1, kernel='linear')
+
+    class SVC_proba(SVC):
+        def predict(self, x):
+            probas = super(SVC_proba, self).predict_proba(x)
+            return probas[:, 0]
+
+    svcp = SVC_proba(C=1, kernel='linear', probability=True)
+    clfs = [svc, svcp]
+    scorers = [None, mean_squared_error]
+    # Test all combinations
+    for clf, scorer in zip(clfs, scorers):
+        for y in ys:
+            for n_class in n_classes:
+                y_ = y % n_class
+                with warnings.catch_warnings(record=True):
+                    gat = GeneralizationAcrossTime(cv=2, clf=clf,
+                                                   scorer=scorer)
+                    gat.fit(epochs, y=y_)
+                    gat.score(epochs, y=y_)
+
+
+@requires_sklearn
+def test_decoding_time():
+    """Test TimeDecoding
+    """
+    epochs = make_epochs()
+    tg = TimeDecoding()
+    assert_equal("<TimeDecoding | no fit, no prediction, no score>", '%s' % tg)
+    assert_true(hasattr(tg, 'times'))
+    assert_true(not hasattr(tg, 'train_times'))
+    assert_true(not hasattr(tg, 'test_times'))
+    tg.fit(epochs)
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), no prediction, no score>", '%s' % tg)
+    assert_true(not hasattr(tg, 'train_times_'))
+    assert_true(not hasattr(tg, 'test_times_'))
+    assert_raises(RuntimeError, tg.score, epochs=None)
+    tg.predict(epochs)
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), predicted 14 epochs, no score>",
+                 '%s' % tg)
+    assert_array_equal(np.shape(tg.y_pred_), [15, 14, 1])
+    tg.score(epochs)
+    tg.score()
+    assert_array_equal(np.shape(tg.scores_), [15])
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), predicted 14 epochs,\n scored (accuracy_score)>",
+                 '%s' % tg)
+
+run_tests_if_main()
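Taken together, these tests sketch the intended workflow for both estimators.
In condensed form (``epochs`` stands for any preloaded two-condition
mne.Epochs, e.g. as built by make_epochs() above):

    from mne.decoding import GeneralizationAcrossTime, TimeDecoding

    gat = GeneralizationAcrossTime(cv=5)
    gat.fit(epochs)    # one classifier per training time window
    gat.score(epochs)  # predict and score every (train, test) time pair
    # gat.scores_ is (n_train_times, n_test_times); gat.plot() draws it

    td = TimeDecoding()  # the diagonal-only special case
    td.fit(epochs)
    td.score(epochs)     # td.scores_ holds one score per time window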
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_transformer.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_transformer.py
new file mode 100644
index 0000000..87b862c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/tests/test_transformer.py
@@ -0,0 +1,162 @@
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#         Romain Trachel <trachelr at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+import os.path as op
+import numpy as np
+
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_array_equal
+
+from mne import io, read_events, Epochs, pick_types
+from mne.decoding import Scaler, FilterEstimator
+from mne.decoding import PSDEstimator, EpochsVectorizer
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+start, stop = 0, 8
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+
+def test_scaler():
+    """Test methods of Scaler
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+    scaler = Scaler(epochs.info)
+    y = epochs.events[:, -1]
+
+    # suppress NumPy "invalid value in divide" warnings
+    with warnings.catch_warnings(record=True):
+        X = scaler.fit_transform(epochs_data, y)
+        assert_true(X.shape == epochs_data.shape)
+        X2 = scaler.fit(epochs_data, y).transform(epochs_data)
+
+    assert_array_equal(X2, X)
+
+    # Test inverse_transform
+    with warnings.catch_warnings(record=True):  # invalid value in mult
+        Xi = scaler.inverse_transform(X, y)
+    assert_array_equal(epochs_data, Xi)
+
+    # Test init exception
+    assert_raises(ValueError, scaler.fit, epochs, y)
+    assert_raises(ValueError, scaler.transform, epochs, y)
+
+
+def test_filterestimator():
+    """Test methods of FilterEstimator
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+
+    # Add tests for different combinations of l_freq and h_freq
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=40)
+    y = epochs.events[:, -1]
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        X = filt.fit_transform(epochs_data, y)
+        assert_true(X.shape == epochs_data.shape)
+        assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)
+
+    filt = FilterEstimator(epochs.info, l_freq=0, h_freq=40)
+    y = epochs.events[:, -1]
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        X = filt.fit_transform(epochs_data, y)
+
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
+    y = epochs.events[:, -1]
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        assert_raises(ValueError, filt.fit_transform, epochs_data, y)
+
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=None)
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        X = filt.fit_transform(epochs_data, y)
+
+    # Test init exception
+    assert_raises(ValueError, filt.fit, epochs, y)
+    assert_raises(ValueError, filt.transform, epochs, y)
+
+
+def test_psdestimator():
+    """Test methods of PSDEstimator
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+    psd = PSDEstimator(2 * np.pi, 0, np.inf)
+    y = epochs.events[:, -1]
+    X = psd.fit_transform(epochs_data, y)
+
+    assert_true(X.shape[0] == epochs_data.shape[0])
+    assert_array_equal(psd.fit(epochs_data, y).transform(epochs_data), X)
+
+    # Test init exception
+    assert_raises(ValueError, psd.fit, epochs, y)
+    assert_raises(ValueError, psd.transform, epochs, y)
+
+
+def test_epochs_vectorizer():
+    """Test methods of EpochsVectorizer
+    """
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    with warnings.catch_warnings(record=True):
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), preload=True)
+    epochs_data = epochs.get_data()
+    vector = EpochsVectorizer(epochs.info)
+    y = epochs.events[:, -1]
+    X = vector.fit_transform(epochs_data, y)
+
+    # Check data dimensions
+    assert_true(X.shape[0] == epochs_data.shape[0])
+    assert_true(X.shape[1] == epochs_data.shape[1] * epochs_data.shape[2])
+
+    assert_array_equal(vector.fit(epochs_data, y).transform(epochs_data), X)
+
+    # Check if data is preserved
+    n_times = epochs_data.shape[2]
+    assert_array_equal(epochs_data[0, 0, 0:n_times], X[0, 0:n_times])
+
+    # Check inverse transform
+    Xi = vector.inverse_transform(X, y)
+    assert_true(Xi.shape[0] == epochs_data.shape[0])
+    assert_true(Xi.shape[1] == epochs_data.shape[1])
+    assert_array_equal(epochs_data[0, 0, 0:n_times], Xi[0, 0, 0:n_times])
+
+    # check if inverse transform works with different number of epochs
+    Xi = vector.inverse_transform(epochs_data[0], y)
+    assert_true(Xi.shape[1] == epochs_data.shape[1])
+    assert_true(Xi.shape[2] == epochs_data.shape[2])
+
+    # Test init exception
+    assert_raises(ValueError, vector.fit, epochs, y)
+    assert_raises(ValueError, vector.transform, epochs, y)
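Because each transformer tested above follows the scikit-learn fit/transform
contract, they compose in a Pipeline. A minimal sketch (illustrative only;
``epochs`` is any preloaded mne.Epochs):

    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from mne.decoding import EpochsVectorizer, Scaler

    X = epochs.get_data()     # (n_epochs, n_channels, n_times)
    y = epochs.events[:, -1]
    clf = Pipeline([('scaler', Scaler(epochs.info)),            # channel scaling
                    ('vector', EpochsVectorizer(epochs.info)),  # 3D -> 2D
                    ('logit', LogisticRegression())])
    clf.fit(X, y)             # ready for cross_val_score and friends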
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/time_gen.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/time_gen.py
new file mode 100644
index 0000000..5431653
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/time_gen.py
@@ -0,0 +1,1287 @@
+# Authors: Jean-Remi King <jeanremi.king at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Clement Moutard <clement.moutard at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import copy
+
+from ..io.pick import pick_types
+from ..viz.decoding import plot_gat_matrix, plot_gat_times
+from ..parallel import parallel_func, check_n_jobs
+
+
+class _DecodingTime(dict):
+    """A dictionary to configure the training times that has the following keys:
+
+    'slices' : ndarray, shape (n_clfs,)
+        Array of time slices (in indices) used for each classifier.
+        If not given, computed from 'start', 'stop', 'length', 'step'.
+    'start' : float
+        Time at which to start decoding (in seconds).
+        Defaults to min(epochs.times).
+    'stop' : float
+        Maximal time at which to stop decoding (in seconds).
+        Defaults to max(times).
+    'step' : float
+        Duration separating the start of subsequent classifiers (in
+        seconds). Defaults to one time sample.
+    'length' : float
+        Duration of each classifier (in seconds). Defaults to one time sample.
+    If None, empty dict. """
+
+    def __repr__(self):
+        s = ""
+        if "start" in self:
+            s += "start: %0.3f (s)" % (self["start"])
+        if "stop" in self:
+            s += ", stop: %0.3f (s)" % (self["stop"])
+        if "step" in self:
+            s += ", step: %0.3f (s)" % (self["step"])
+        if "length" in self:
+            s += ", length: %0.3f (s)" % (self["length"])
+        if "slices" in self:
+            # identify depth: training times contain only n_time, but
+            # testing times can contain n_times or n_times * m_times
+            depth = [len(ii) for ii in self["slices"]]
+            if len(np.unique(depth)) == 1:  # if all slices have same depth
+                if depth[0] == 1:  # if depth is one
+                    s += ", n_time_windows: %s" % (len(depth))
+                else:
+                    s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
+            else:
+                s += (", n_time_windows: %s x [%s, %s]" %
+                      (len(depth),
+                       min([len(ii) for ii in depth]),
+                       max(([len(ii) for ii in depth]))))
+        return "<DecodingTime | %s>" % s
+
+
+class _GeneralizationAcrossTime(object):
+    """ see GeneralizationAcrossTime
+    """  # noqa
+    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
+                 test_times=None, predict_mode='cross-validation', scorer=None,
+                 n_jobs=1):
+
+        from sklearn.preprocessing import StandardScaler
+        from sklearn.linear_model import LogisticRegression
+        from sklearn.pipeline import Pipeline
+
+        # Store parameters in object
+        self.cv = cv
+        # Define training sliding window
+        self.train_times = (_DecodingTime() if train_times is None
+                            else _DecodingTime(train_times))
+        # Define testing sliding window. If None, will be set in predict()
+        if test_times is None:
+            self.test_times = _DecodingTime()
+        elif test_times == 'diagonal':
+            self.test_times = 'diagonal'
+        else:
+            self.test_times = _DecodingTime(test_times)
+
+        # Default classification pipeline
+        if clf is None:
+            scaler = StandardScaler()
+            estimator = LogisticRegression()
+            clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
+        self.clf = clf
+        self.predict_mode = predict_mode
+        self.scorer = scorer
+        self.picks = picks
+        self.n_jobs = n_jobs
+
+    def fit(self, epochs, y=None):
+        """ Train a classifier on each specified time slice.
+
+        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
+        ``y_train``, ``train_times_`` and ``estimators_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs.
+        y : list or ndarray of int, shape (n_samples,) or None, optional
+            To-be-fitted model values. If None, y = epochs.events[:, 2].
+
+        Returns
+        -------
+        self : GeneralizationAcrossTime
+            Returns fitted GeneralizationAcrossTime object.
+
+        Notes
+        -----
+        If X and y are not C-ordered and contiguous arrays of np.float64 and
+        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
+
+        If X is a dense array, then the other methods will not support sparse
+        matrices as input.
+        """
+        from sklearn.base import clone
+        from sklearn.cross_validation import check_cv, StratifiedKFold
+
+        # clean attributes
+        for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
+                    'estimators_', 'test_times_', 'y_pred_', 'y_true_',
+                    'scores_', 'scorer_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        n_jobs = self.n_jobs
+        # Extract data from MNE structure
+        X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
+        self.ch_names = [epochs.ch_names[p] for p in self.picks_]
+
+        cv = self.cv
+        if isinstance(cv, (int, np.int)):
+            cv = StratifiedKFold(y, cv)
+        cv = check_cv(cv, X, y, classifier=True)
+        self.cv_ = cv  # update CV
+
+        self.y_train_ = y
+
+        # Cross validation scheme
+        # XXX Cross validation should later be transformed into a make_cv, and
+        # defined in __init__
+        self.train_times_ = copy.deepcopy(self.train_times)
+        if 'slices' not in self.train_times_:
+            self.train_times_ = _sliding_window(epochs.times, self.train_times)
+
+        # Parallel across training time
+        # TODO: JRK: Chunking times points needs to be simplified
+        parallel, p_time_gen, n_jobs = parallel_func(_fit_slices, n_jobs)
+        n_chunks = min(len(self.train_times_['slices']), n_jobs)
+        splits = np.array_split(self.train_times_['slices'], n_chunks)
+
+        def f(x):
+            return np.unique(np.concatenate(x))
+
+        out = parallel(p_time_gen(clone(self.clf),
+                                  X[..., f(train_slices_chunk)],
+                                  y, train_slices_chunk, cv)
+                       for train_slices_chunk in splits)
+        # Unpack estimators into time slices X folds list of lists.
+        self.estimators_ = sum(out, list())
+        return self
+
+    def predict(self, epochs):
+        """ Test each classifier on each specified testing time slice.
+
+        .. note:: This function sets the ``y_pred_`` and ``test_times_``
+                  attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs. Can be similar to fitted epochs or not. See
+            predict_mode parameter.
+
+        Returns
+        -------
+        y_pred : list of lists of arrays of floats, shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
+            The single-trial predictions at each training time and each testing
+            time. Note that the number of testing times per training time need
+            not be regular; if it is,
+            ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
+        """  # noqa
+
+        # Check that at least one classifier has been trained
+        if not hasattr(self, 'estimators_'):
+            raise RuntimeError('Please fit models before trying to predict')
+
+        # clean attributes
+        for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        n_jobs = self.n_jobs
+
+        X, y, _ = _check_epochs_input(epochs, None, self.picks_)
+
+        # Define testing sliding window
+        if self.test_times == 'diagonal':
+            test_times = _DecodingTime()
+            test_times['slices'] = [[s] for s in self.train_times_['slices']]
+            test_times['times'] = [[s] for s in self.train_times_['times']]
+        elif isinstance(self.test_times, dict):
+            test_times = copy.deepcopy(self.test_times)
+        else:
+            raise ValueError('`test_times` must be a dict or "diagonal"')
+
+        if 'slices' not in test_times:
+            # Check that the testing windows have the same number of time
+            # samples as the training windows (otherwise the number of
+            # features would differ)
+            if 'length' not in test_times:
+                test_times['length'] = self.train_times_['length']
+            if test_times['length'] != self.train_times_['length']:
+                raise ValueError('`train_times` and `test_times` must have '
+                                 'identical `length` keys')
+            # Make a sliding window for each training time.
+            slices_list = list()
+            times_list = list()
+            for t in range(0, len(self.train_times_['slices'])):
+                test_times_ = _sliding_window(epochs.times, test_times)
+                times_list += [test_times_['times']]
+                slices_list += [test_times_['slices']]
+            test_times = test_times_
+            test_times['slices'] = slices_list
+            test_times['times'] = times_list
+
+        # Store all testing times parameters
+        self.test_times_ = test_times
+
+        # Prepare parallel predictions across time points
+        # FIXME Note that this means that TimeDecoding.predict isn't parallel
+        parallel, p_time_gen, n_jobs = parallel_func(_predict_slices, n_jobs)
+        n_test_slice = max([len(sl) for sl in self.train_times_['slices']])
+        # Loop across estimators (i.e. training times)
+        n_chunks = min(n_test_slice, n_jobs)
+        splits = [np.array_split(slices, n_chunks)
+                  for slices in self.test_times_['slices']]
+        splits = map(list, zip(*splits))
+
+        def chunk_X(X, slices):
+            """Smart chunking to avoid memory overload"""
+            # from object array to list
+            slices = [sl for sl in slices if len(sl)]
+            start = np.min(slices)
+            stop = np.max(slices) + 1
+            slices_ = np.array(slices) - start
+            X_ = X[:, :, start:stop]
+            return (X_, self.estimators_, self.cv_, slices_.tolist(),
+                    self.predict_mode)
+
+        y_pred = parallel(p_time_gen(*chunk_X(X, slices))
+                          for slices in splits)
+
+        # concatenate chunks across test time dimension. Don't use
+        # np.concatenate as this would need new memory allocations
+        self.y_pred_ = [[test for chunk in train for test in chunk]
+                        for train in map(list, zip(*y_pred))]
+        return self.y_pred_
+
+    def score(self, epochs=None, y=None):
+        """Score Epochs
+
+        Estimate scores across trials by comparing the prediction estimated for
+        each trial to its true value.
+
+        Calls ``predict()`` if it has not already been called.
+
+        Note. The function updates the ``scorer_``, ``scores_``, and
+        ``y_true_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs | None, optional
+            The epochs. Can be similar to fitted epochs or not.
+            If None, it needs to rely on the predictions ``y_pred_``
+            generated with ``predict()``.
+        y : list | ndarray, shape (n_epochs,) | None, optional
+            True values to be compared with the predictions ``y_pred_``
+            generated with ``predict()`` via ``scorer_``.
+            If None and ``predict_mode == 'cross-validation'``,
+            y = ``y_train_``.
+
+        Returns
+        -------
+        scores : list of lists of float
+            The scores estimated by ``scorer_`` at each training time and each
+            testing time (e.g. mean accuracy of ``predict(X)``). Note that the
+            number of testing times per training time need not be regular;
+            if it is, np.shape(scores) = (n_train_time, n_test_time).
+        """
+        from sklearn.metrics import accuracy_score
+
+        # Run predictions if not already done
+        if epochs is not None:
+            self.predict(epochs)
+        else:
+            if not hasattr(self, 'y_pred_'):
+                raise RuntimeError('Please predict() epochs first or pass '
+                                   'epochs to score()')
+
+        # clean gat.score() attributes
+        for att in ['scores_', 'scorer_', 'y_true_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        # Check scorer
+        # XXX Need API to identify proper scorer from the clf
+        self.scorer_ = accuracy_score if self.scorer is None else self.scorer
+
+        # If no regressor is passed, use default epochs events
+        if y is None:
+            if self.predict_mode == 'cross-validation':
+                y = self.y_train_
+            else:
+                if epochs is not None:
+                    y = epochs.events[:, 2]
+                else:
+                    raise RuntimeError('y is undefined because '
+                                       'predict_mode="mean-prediction" and '
+                                       'epochs are missing. You need to '
+                                       'explicitly specify y.')
+            if not np.all(np.unique(y) == np.unique(self.y_train_)):
+                raise ValueError('Classes (y) passed differ from classes used '
+                                 'for training. Please explicitly pass your y '
+                                 'for scoring.')
+        elif isinstance(y, list):
+            y = np.array(y)
+        self.y_true_ = y  # to be compared with y_pred for scoring
+
+        # Preprocessing for parallelization
+        n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
+        parallel, p_time_gen, n_jobs = parallel_func(_score_slices, n_jobs)
+        n_estimators = len(self.train_times_['slices'])
+        n_chunks = min(n_estimators, n_jobs)
+        splits = np.array_split(range(len(self.train_times_['slices'])),
+                                n_chunks)
+        scores = parallel(
+            p_time_gen(self.y_true_,
+                       [self.y_pred_[train] for train in split],
+                       self.scorer_)
+            for split in splits)
+
+        self.scores_ = [score for chunk in scores for score in chunk]
+        return self.scores_
+
+
+def _predict_slices(X, estimators, cv, slices, predict_mode):
+    """Aux function of GeneralizationAcrossTime that loops across chunks of
+    testing slices.
+    """
+    out = list()
+    for this_estimator, this_slice in zip(estimators, slices):
+        out.append(_predict_time_loop(X, this_estimator, cv, this_slice,
+                                      predict_mode))
+    return out
+
+
+def _predict_time_loop(X, estimators, cv, slices, predict_mode):
+    """Aux function of GeneralizationAcrossTime
+
+    Run the classifiers' prediction loop across time samples.
+
+    Parameters
+    ----------
+    X : ndarray, shape (n_epochs, n_features, n_times)
+        Data from which the predictions are generated.
+    estimators : array-like, shape (n_times, n_folds)
+        Array of scikit-learn classifiers fitted in cross-validation.
+    slices : list
+        List of slices selecting the time samples of X from which each
+        prediction is generated.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are achieved with regard to the cross-
+        validation procedure:
+            'cross-validation' : estimates a single prediction per sample based
+                on the unique independent classifier fitted in the cross-
+                validation.
+            'mean-prediction' : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+        Default: 'cross-validation'
+    """
+    n_epochs = len(X)
+    # Loop across testing slices
+    y_pred = [list() for _ in range(len(slices))]
+
+    # XXX EHN: This loop should be parallelized in a similar way to fit()
+    for t, indices in enumerate(slices):
+        # Flatten features in case of multiple time samples
+        Xtrain = X[:, :, indices].reshape(
+            n_epochs, np.prod(X[:, :, indices].shape[1:]))
+
+        # Single trial predictions
+        if predict_mode == 'cross-validation':
+            # If predict within cross validation, only predict with
+            # corresponding classifier, else predict with each fold's
+            # classifier and average prediction.
+
+            # Check that training cv and predicting cv match
+            if (len(estimators) != len(cv)) or (cv.n != Xtrain.shape[0]):
+                raise ValueError(
+                    'When `predict_mode = "cross-validation"`, the training '
+                    'and predicting cv schemes must be identical.')
+            for k, (train, test) in enumerate(cv):
+                # XXX I did not manage to initialize this array correctly, as
+                # its size depends on the type of predictor and the number of
+                # classes.
+                if k == 0:
+                    y_pred_ = _predict(Xtrain[test, :], estimators[k:k + 1])
+                    y_pred[t] = np.empty((n_epochs, y_pred_.shape[1]))
+                    y_pred[t][test, :] = y_pred_
+                y_pred[t][test, :] = _predict(Xtrain[test, :],
+                                              estimators[k:k + 1])
+        elif predict_mode == 'mean-prediction':
+            y_pred[t] = _predict(Xtrain, estimators)
+        else:
+            raise ValueError('`predict_mode` must be a str, "mean-prediction"'
+                             ' or "cross-validation"')
+    return y_pred
+
+
+def _score_slices(y_true, list_y_pred, scorer):
+    """Aux function of GeneralizationAcrossTime that loops across chunks of
+    testing slices.
+    """
+    scores_list = list()
+    for y_pred in list_y_pred:
+        scores = list()
+        for t, this_y_pred in enumerate(y_pred):
+            # Scores across trials
+            scores.append(scorer(y_true, np.array(this_y_pred)))
+        scores_list.append(scores)
+    return scores_list
+
+
+def _check_epochs_input(epochs, y, picks=None):
+    """Aux function of GeneralizationAcrossTime
+
+    Format MNE data into scikit-learn X and y
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    y : ndarray, shape (n_epochs,) | list, shape (n_epochs,) | None
+        To-be-fitted model. If None, y = epochs.events[:, 2].
+    picks : array-like of int | None
+        The channels indices to include. If None the data
+        channels in info, except bad channels, are used.
+
+    Returns
+    -------
+    X : ndarray, shape (n_epochs, n_selected_chans, n_times)
+        To-be-fitted data.
+    y : ndarray, shape (n_epochs,)
+        To-be-fitted model.
+    picks : array-like of int | None
+        The channels indices to include. If None the data
+        channels in info, except bad channels, are used.
+    """
+    if y is None:
+        y = epochs.events[:, 2]
+    elif isinstance(y, list):
+        y = np.array(y)
+
+    # Convert MNE data into trials x features x time matrix
+    X = epochs.get_data()
+
+    # Pick channels
+    if picks is None:  # just use good data channels
+        picks = pick_types(epochs.info, meg=True, eeg=True, seeg=True,
+                           eog=False, ecg=False, misc=False, stim=False,
+                           ref_meg=False, exclude='bads')
+    if isinstance(picks, (list, np.ndarray)):
+        picks = np.array(picks, dtype=np.int)
+    else:
+        raise ValueError('picks must be a list or a numpy.ndarray of int')
+    X = X[:, picks, :]
+
+    # Check data sets
+    assert X.shape[0] == y.shape[0]
+    return X, y, picks
+
+
+def _fit_slices(clf, x_chunk, y, slices, cv):
+    """Aux function of GeneralizationAcrossTime
+
+    Fit each classifier.
+
+    Parameters
+    ----------
+    clf : scikit-learn classifier
+        The classifier object.
+    x_chunk : ndarray, shape (n_epochs, n_features, n_times)
+        To-be-fitted data.
+    y : list | array, shape (n_epochs,)
+        To-be-fitted model.
+    slices : list | array, shape (n_training_slices,)
+        List of training slices, indicating the time samples (relative to X)
+        on which each classifier is fitted.
+    cv : scikit-learn cross-validation generator
+        A cross-validation generator to use.
+
+    Returns
+    -------
+    estimators : list of lists of estimators
+        List of fitted scikit-learn classifiers corresponding to each training
+        slice.
+    """
+    from sklearn.base import clone
+    # Initialize
+    n_epochs = len(x_chunk)
+    estimators = list()
+    # Identify the time samples of x_chunk corresponding to X
+    values = np.unique(np.concatenate(slices))
+    indices = range(len(values))
+    # Loop across time slices
+    for t_slice in slices:
+        # Translate absolute time samples into time sample relative to x_chunk
+        for ii in indices:
+            t_slice[t_slice == values[ii]] = indices[ii]
+        # Select slice
+        X = x_chunk[..., t_slice]
+        # Reshape data matrix to flatten features in case of multiple time
+        # samples.
+        X = X.reshape(n_epochs, np.prod(X.shape[1:]))
+        # Loop across folds
+        estimators_ = list()
+        for fold, (train, test) in enumerate(cv):
+            # Fit classifier
+            clf_ = clone(clf)
+            clf_.fit(X[train, :], y[train])
+            estimators_.append(clf_)
+        # Store classifier
+        estimators.append(estimators_)
+    return estimators
+
+
+def _sliding_window(times, window_params):
+    """Aux function of GeneralizationAcrossTime
+
+    Define the slices on which to train each classifier.
+
+    Parameters
+    ----------
+    times : ndarray, shape (n_times,)
+        Array of times from MNE epochs.
+    window_params : dict
+        Dictionary with keys among ('start', 'stop', 'step', 'length').
+        Either train or test times. See GAT documentation.
+
+    Returns
+    -------
+    window_params : dict
+        The window parameters with 'slices' filled in (one list of time-sample
+        indices per classifier) and the matching 'times' in seconds.
+    """
+
+    window_params = _DecodingTime(window_params)
+
+    # Time step between samples, in seconds (i.e. the sampling period)
+    freq = (times[-1] - times[0]) / len(times)
+
+    # Default values
+    if ('slices' in window_params and
+            all(k in window_params for k in
+                ('start', 'stop', 'step', 'length'))):
+        time_pick = window_params['slices']
+    else:
+        if 'start' not in window_params:
+            window_params['start'] = times[0]
+        if 'stop' not in window_params:
+            window_params['stop'] = times[-1]
+        if 'step' not in window_params:
+            window_params['step'] = freq
+        if 'length' not in window_params:
+            window_params['length'] = freq
+
+        if (window_params['start'] < times[0] or
+                window_params['start'] > times[-1]):
+            raise ValueError(
+                '`start` (%.2f s) outside time range [%.2f, %.2f].' % (
+                    window_params['start'], times[0], times[-1]))
+        if (window_params['stop'] < times[0] or
+                window_params['stop'] > times[-1]):
+            raise ValueError(
+                '`stop` (%.2f s) outside time range [%.2f, %.2f].' % (
+                    window_params['stop'], times[0], times[-1]))
+        if window_params['step'] < freq:
+            raise ValueError('`step` must be >= 1 / sampling_frequency')
+        if window_params['length'] < freq:
+            raise ValueError('`length` must be >= 1 / sampling_frequency')
+        if window_params['length'] > np.ptp(times):
+            raise ValueError('`length` must be <= time range')
+
+        # Convert seconds to index
+
+        def find_time_idx(t):  # find closest time point
+            return np.argmin(np.abs(np.asarray(times) - t))
+
+        start = find_time_idx(window_params['start'])
+        stop = find_time_idx(window_params['stop'])
+        step = int(round(window_params['step'] / freq))
+        length = int(round(window_params['length'] / freq))
+
+        # For each training slice, give time samples to be included
+        time_pick = [range(start, start + length)]
+        while (time_pick[-1][0] + step) <= (stop - length + 1):
+            start = time_pick[-1][0] + step
+            time_pick.append(range(start, start + length))
+        window_params['slices'] = time_pick
+
+    # Keep the time (in seconds) of the last sample of each training window
+    t_inds_ = [t[-1] for t in window_params['slices']]
+    window_params['times'] = times[t_inds_]
+
+    return window_params
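+
+# A worked sketch of the arithmetic above (hypothetical numbers, not from
+# this patch): with samples every 0.01 s spanning 0-0.5 s,
+# window_params=dict(start=0., stop=0.5, step=0.1, length=0.1) maps to
+# step = length = 10 samples, so the loop yields five training windows
+# starting at samples 0, 10, 20, 30 and 40.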
+
+
+def _predict(X, estimators):
+    """Aux function of GeneralizationAcrossTime
+
+    Run the prediction with each classifier. If multiple classifiers are
+    passed, average the predictions across classifiers to yield a single
+    prediction per trial.
+
+    Parameters
+    ----------
+    X : ndarray, shape (n_epochs, n_features, n_times)
+        To-be-predicted data.
+    estimators : ndarray, shape (n_folds,) | shape (1,)
+        Array of scikit-learn classifiers used to predict the data.
+
+    Returns
+    -------
+    y_pred : ndarray, shape (n_epochs, m_prediction_dimensions)
+        Classifier's prediction for each trial.
+    """
+    from scipy import stats
+    from sklearn.base import is_classifier
+    # Initialize results:
+    n_epochs = X.shape[0]
+    n_clf = len(estimators)
+
+    # Compute prediction for each sub-estimator (i.e. per fold)
+    # if independent, estimators = all folds
+    for fold, clf in enumerate(estimators):
+        _y_pred = clf.predict(X)
+        # See inconsistency in dimensionality: scikit-learn/scikit-learn#5058
+        if _y_pred.ndim == 1:
+            _y_pred = _y_pred[:, None]
+        # initialize predict_results array
+        if fold == 0:
+            predict_size = _y_pred.shape[1]
+            y_pred = np.ones((n_epochs, predict_size, n_clf))
+        y_pred[:, :, fold] = _y_pred
+
+    # Collapse y_pred across folds if necessary (i.e. if independent)
+    if fold > 0:
+        # XXX need API to identify how multiple predictions can be combined?
+        if is_classifier(clf):
+            y_pred, _ = stats.mode(y_pred, axis=2)
+        else:
+            y_pred = np.mean(y_pred, axis=2)
+
+    # Format shape
+    y_pred = y_pred.reshape((n_epochs, predict_size))
+    return y_pred
+
+
+class GeneralizationAcrossTime(_GeneralizationAcrossTime):
+    """Generalize across time and conditions
+
+    Creates an estimator object used to 1) fit a series of classifiers on
+    multidimensional time-resolved data, and 2) test the ability of each
+    classifier to generalize across other time samples.
+
+    Parameters
+    ----------
+    picks : array-like of int | None
+        The channels indices to include. If None the data
+        channels in info, except bad channels, are used.
+    cv : int | object
+        If an integer is passed, it is the number of folds.
+        Specific cross-validation objects can be passed, see
+        scikit-learn.cross_validation module for the list of possible objects.
+        Defaults to 5.
+    clf : object | None
+        An estimator compliant with the scikit-learn API (fit & predict).
+        If None the classifier will be a standard pipeline including
+        StandardScaler and LogisticRegression with default parameters.
+    train_times : dict | None
+        A dictionary to configure the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``start`` : float
+                Time at which to start decoding (in seconds).
+                Defaults to min(epochs.times).
+            ``stop`` : float
+                Maximal time at which to stop decoding (in seconds).
+                Defaults to max(times).
+            ``step`` : float
+                Duration separating the start of subsequent classifiers (in
+                seconds). Defaults to one time sample.
+            ``length`` : float
+                Duration of each classifier (in seconds).
+                Defaults to one time sample.
+
+        If None, empty dict.
+    test_times : 'diagonal' | dict | None, optional
+        Configures the testing times.
+        If set to 'diagonal', predictions are made at the time at which
+        each classifier is trained.
+        If set to None, predictions are made at all time points.
+        If set to a dict, the dict should contain ``slices`` or be
+        constructed in a similar way to train_times::
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+
+        If None, empty dict.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are achieved with regard to the cross-
+        validation procedure:
+
+            ``cross-validation`` : estimates a single prediction per sample
+                based on the unique independent classifier fitted in the
+                cross-validation.
+            ``mean-prediction`` : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+
+        Default: 'cross-validation'
+    scorer : object | None
+        scikit-learn Scorer instance. If None, set to accuracy_score.
+    n_jobs : int
+        Number of jobs to run in parallel. Defaults to 1.
+
+    Attributes
+    ----------
+    picks_ : array-like of int | None
+        The channels indices to include.
+    ch_names : list, array-like, shape (n_channels,)
+        Names of the channels used for training.
+    y_train_ : list | ndarray, shape (n_samples,)
+        The categories used for training.
+    train_times_ : dict
+        A dictionary that configures the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``times`` : ndarray, shape (n_clfs,)
+                The training times (in seconds).
+
+    test_times_ : dict
+        A dictionary that configures the testing times for each training time:
+
+            ``slices`` : ndarray, shape (n_clfs, n_testing_times)
+                Array of time slices (in indices) used for each classifier.
+            ``times`` : ndarray, shape (n_clfs, n_testing_times)
+                The testing times (in seconds) for each training time.
+
+    cv_ : CrossValidation object
+        The actual CrossValidation input depending on y.
+    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
+        The estimators for each time point and each fold.
+    y_pred_ : list of lists of arrays of floats, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
+        The single-trial predictions estimated by self.predict() at each
+        training time and each testing time. Note that the number of testing
+        times per training time need not be regular; if it is,
+        ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
+    y_true_ : list | ndarray, shape (n_samples,)
+        The categories used for scoring ``y_pred_``.
+    scorer_ : object
+        scikit-learn Scorer instance.
+    scores_ : list of lists of float
+        The scores estimated by ``self.scorer_`` at each training time and each
+        testing time (e.g. mean accuracy of self.predict(X)). Note that the
+        number of testing times per training time need not be regular;
+        if it is, ``np.shape(scores) = (n_train_time, n_test_time)``.
+
+    See Also
+    --------
+    TimeDecoding
+
+    Notes
+    -----
+    The function implements the method used in:
+
+        Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
+        and Stanislas Dehaene, "Two distinct dynamic modes subtend the
+        detection of unexpected sounds", PLoS ONE, 2014
+        DOI: 10.1371/journal.pone.0085791
+
+    .. versionadded:: 0.9.0
+    """  # noqa
+    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
+                 test_times=None, predict_mode='cross-validation', scorer=None,
+                 n_jobs=1):
+        super(GeneralizationAcrossTime, self).__init__(
+            picks=picks, cv=cv, clf=clf, train_times=train_times,
+            test_times=test_times, predict_mode=predict_mode, scorer=scorer,
+            n_jobs=n_jobs)
+
+    def __repr__(self):
+        s = ''
+        if hasattr(self, "estimators_"):
+            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
+                self.train_times_['start'], self.train_times_['stop'])
+        else:
+            s += 'no fit'
+        if hasattr(self, 'y_pred_'):
+            s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
+        else:
+            s += ", no prediction"
+        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
+            s += ',\n '
+        else:
+            s += ', '
+        if hasattr(self, 'scores_'):
+            s += "scored"
+            if callable(self.scorer_):
+                s += " (%s)" % (self.scorer_.__name__)
+        else:
+            s += "no score"
+
+        return "<GAT | %s>" % s
+
+    def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
+             cmap='RdBu_r', show=True, colorbar=True,
+             xlabel=True, ylabel=True):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot the score of each classifier at each tested time window.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        vmin : float | None
+            Min color value for scores. If None, sets to min(``gat.scores_``).
+        vmax : float | None
+            Max color value for scores. If None, sets to max(``gat.scores_``).
+        tlim : ndarray, (train_min, test_max) | None
+            The time limits used for plotting.
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        cmap : str | cmap object
+            The color map to be used. Defaults to ``'RdBu_r'``.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        colorbar : bool
+            If True, the colorbar of the figure is displayed. Defaults to True.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
+                               tlim=tlim, ax=ax, cmap=cmap, show=show,
+                               colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)
+
+    def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
+                      ymax=None, ax=None, show=True, color=None,
+                      xlabel=True, ylabel=True, legend=True, chance=True,
+                      label='Classif. score'):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot each classifier score trained and tested at identical time
+        windows.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float | None, optional
+            Min score value. If None, sets to min(scores).
+        ymax : float | None, optional
+            Max score value. If None, sets to max(scores).
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str
+            Score line color.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        return plot_gat_times(self, train_time='diagonal', title=title,
+                              xmin=xmin, xmax=xmax,
+                              ymin=ymin, ymax=ymax, ax=ax, show=show,
+                              color=color, xlabel=xlabel, ylabel=ylabel,
+                              legend=legend, chance=chance, label=label)
+
+    def plot_times(self, train_time, title=None, xmin=None, xmax=None,
+                   ymin=None, ymax=None, ax=None, show=True, color=None,
+                   xlabel=True, ylabel=True, legend=True, chance=True,
+                   label='Classif. score'):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot the scores of the classifier trained at specific training time(s).
+
+        Parameters
+        ----------
+        train_time : float | list or array of float
+            Plots scores of the classifier trained at train_time.
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float | None, optional
+            Min score value. If None, sets to min(scores).
+        ymax : float | None, optional
+            Max score value. If None, sets to max(scores).
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str or list of str
+            Score line color(s).
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        if (not isinstance(train_time, float) and
+            not (isinstance(train_time, (list, np.ndarray)) and
+                 np.all([isinstance(time, float) for time in train_time]))):
+            raise ValueError('train_time must be float | list or array of '
+                             'floats. Got %s.' % type(train_time))
+
+        return plot_gat_times(self, train_time=train_time, title=title,
+                              xmin=xmin, xmax=xmax,
+                              ymin=ymin, ymax=ymax, ax=ax, show=show,
+                              color=color, xlabel=xlabel, ylabel=ylabel,
+                              legend=legend, chance=chance, label=label)
+
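+# Editor's sketch (not part of the original module): typical use of
+# GeneralizationAcrossTime, assuming `epochs` is an mne.Epochs instance
+# whose events encode two classes:
+#
+#     gat = GeneralizationAcrossTime(cv=5, predict_mode='cross-validation')
+#     gat.fit(epochs)                 # one classifier per training time
+#     gat.score(epochs)               # fills gat.scores_ (train x test)
+#     gat.plot(title='Generalization across time')
+#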
+
+class TimeDecoding(_GeneralizationAcrossTime):
+    """Train and test a series of classifiers at each time point to obtain a
+    score across time.
+
+    Parameters
+    ----------
+    picks : array-like of int | None
+        The channel indices to include. If None, the data
+        channels in info, except bad channels, are used.
+    cv : int | object
+        If an integer is passed, it is the number of folds.
+        Specific cross-validation objects can be passed, see
+        scikit-learn.cross_validation module for the list of possible objects.
+        Defaults to 5.
+    clf : object | None
+        An estimator compliant with the scikit-learn API (fit & predict).
+        If None, the classifier will be a standard pipeline including
+        StandardScaler and a Logistic Regression with default parameters.
+    times : dict | None
+        A dictionary to configure the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``start`` : float
+                Time at which to start decoding (in seconds). By default,
+                min(epochs.times).
+            ``stop`` : float
+                Maximal time at which to stop decoding (in seconds). By
+                default, max(times).
+            ``step`` : float
+                Duration separating the start of subsequent classifiers (in
+                seconds). By default, equals one time sample.
+            ``length`` : float
+                Duration of each classifier (in seconds). By default, equals
+                one time sample.
+
+        If None, an empty dict is used.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are computed with regard to the
+        cross-validation procedure:
+
+            ``cross-validation`` : estimates a single prediction per sample
+                based on the unique independent classifier fitted in the
+                cross-validation.
+            ``mean-prediction`` : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+
+        Default: 'cross-validation'
+    scorer : object | None
+        scikit-learn Scorer instance. If None, set to accuracy_score.
+    n_jobs : int
+        Number of jobs to run in parallel. Defaults to 1.
+
+    Attributes
+    ----------
+    picks_ : array-like of int | None
+        The channel indices to include.
+    ch_names : list, array-like, shape (n_channels,)
+        Names of the channels used for training.
+    y_train_ : ndarray, shape (n_samples,)
+        The categories used for training.
+    times_ : dict
+        A dictionary that configures the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``times`` : ndarray, shape (n_clfs,)
+                The training times (in seconds).
+
+    cv_ : CrossValidation object
+        The cross-validation object actually used, which depends on the
+        type of y.
+    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
+        The estimators for each time point and each fold.
+    y_pred_ : ndarray, shape (n_times, n_epochs, n_prediction_dims)
+        The single-trial predictions at each time sample.
+    y_true_ : list | ndarray, shape (n_samples,)
+        The categories used for scoring ``y_pred_``.
+    scorer_ : object
+        scikit-learn Scorer instance.
+    scores_ : list of float, shape (n_times,)
+        The scores (e.g. mean accuracy of self.predict(X) with respect to y).
+
+    See Also
+    --------
+    GeneralizationAcrossTime
+
+    Notes
+    -----
+    This class is equivalent to the diagonal of GeneralizationAcrossTime().
+
+    .. versionadded:: 0.10
+    """
+
+    def __init__(self, picks=None, cv=5, clf=None, times=None,
+                 predict_mode='cross-validation', scorer=None, n_jobs=1):
+        super(TimeDecoding, self).__init__(picks=picks, cv=cv, clf=clf,
+                                           train_times=times,
+                                           test_times='diagonal',
+                                           predict_mode=predict_mode,
+                                           scorer=scorer, n_jobs=n_jobs)
+        self._clean_times()
+
+    def __repr__(self):
+        s = ''
+        if hasattr(self, "estimators_"):
+            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
+                self.times_['start'], self.times_['stop'])
+        else:
+            s += 'no fit'
+        if hasattr(self, 'y_pred_'):
+            s += (", predicted %d epochs" % len(self.y_pred_[0]))
+        else:
+            s += ", no prediction"
+        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
+            s += ',\n '
+        else:
+            s += ', '
+        if hasattr(self, 'scores_'):
+            s += "scored"
+            if callable(self.scorer_):
+                s += " (%s)" % (self.scorer_.__name__)
+        else:
+            s += "no score"
+
+        return "<TimeDecoding | %s>" % s
+
+    def fit(self, epochs, y=None):
+        """ Train a classifier on each specified time slice.
+
+        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
+        ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs.
+        y : list or ndarray of int, shape (n_samples,) or None, optional
+            The target values. If None, y = epochs.events[:, 2].
+
+        Returns
+        -------
+        self : TimeDecoding
+            Returns fitted TimeDecoding object.
+
+        Notes
+        -----
+        If X and y are not C-ordered and contiguous arrays of np.float64 and
+        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
+
+        If X is a dense array, then the other methods will not support sparse
+        matrices as input.
+        """
+        self._prep_times()
+        super(TimeDecoding, self).fit(epochs, y=y)
+        self._clean_times()
+        return self
+
+    def predict(self, epochs):
+        """ Test each classifier on each specified testing time slice.
+
+        .. note:: This function sets the ``y_pred_`` and ``test_times_``
+                  attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs. Can be similar to fitted epochs or not. See
+            predict_mode parameter.
+
+        Returns
+        -------
+        y_pred : list of lists of arrays of floats, shape (n_times, n_epochs, n_prediction_dims)
+            The single-trial predictions at each time sample.
+        """  # noqa
+        self._prep_times()
+        super(TimeDecoding, self).predict(epochs)
+        self._clean_times()
+        return self.y_pred_
+
+    def score(self, epochs=None, y=None):
+        """Score Epochs
+
+        Estimate scores across trials by comparing the prediction estimated for
+        each trial to its true value.
+
+        Calls ``predict()`` if it has not already been called.
+
+        Note. The function updates the ``scorer_``, ``scores_``, and
+        ``y_true_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs | None, optional
+            The epochs. Can be similar to fitted epochs or not.
+            If None, the predictions ``y_pred_`` generated with
+            ``predict()`` are used.
+        y : list | ndarray, shape (n_epochs,) | None, optional
+            True values to be compared with the predictions ``y_pred_``
+            generated with ``predict()`` via ``scorer_``.
+            If None and ``predict_mode`` == 'cross-validation', y = ``y_train_``.
+
+        Returns
+        -------
+        scores : list of float, shape (n_times,)
+            The scores estimated by ``scorer_`` at each time sample (e.g. mean
+            accuracy of ``predict(X)``).
+        """
+        if epochs is not None:
+            self.predict(epochs)
+        else:
+            if not hasattr(self, 'y_pred_'):
+                raise RuntimeError('Please predict() epochs first or pass '
+                                   'epochs to score()')
+        self._prep_times()
+        super(TimeDecoding, self).score(epochs=None, y=y)
+        self._clean_times()
+        return self.scores_
+
+    def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,
+             ax=None, show=True, color=None, xlabel=True, ylabel=True,
+             legend=True, chance=True, label='Classif. score'):
+        """Plotting function
+
+        Predict each classifier. If multiple classifiers are passed, average
+        prediction across all classifiers to result in a single prediction per
+        classifier.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float | None, optional
+            Min score value.
+        ymax : float | None, optional
+            Max score value.
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str
+            Score line color. Defaults to 'steelblue'.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        # XXX JRK: need cleanup in viz
+        self._prep_times()
+        fig = plot_gat_times(self, train_time='diagonal', title=title,
+                             xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,
+                             show=show, color=color, xlabel=xlabel,
+                             ylabel=ylabel, legend=legend, chance=chance,
+                             label=label)
+        self._clean_times()
+        return fig
+
+    def _prep_times(self):
+        """Auxiliary function to allow compability with GAT"""
+        self.test_times = 'diagonal'
+        if hasattr(self, 'times'):
+            self.train_times = self.times
+        if hasattr(self, 'times_'):
+            self.train_times_ = self.times_
+            self.test_times_ = _DecodingTime()
+            self.test_times_['slices'] = [[slic] for slic in
+                                          self.train_times_['slices']]
+            self.test_times_['times'] = [[tim] for tim in
+                                         self.train_times_['times']]
+        if hasattr(self, 'scores_'):
+            self.scores_ = [[score] for score in self.scores_]
+        if hasattr(self, 'y_pred_'):
+            self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]
+
+    def _clean_times(self):
+        """Auxiliary function to allow compability with GAT"""
+        if hasattr(self, 'train_times'):
+            self.times = self.train_times
+        if hasattr(self, 'train_times_'):
+            self.times_ = self.train_times_
+        for attr in ['test_times', 'train_times',
+                     'test_times_', 'train_times_']:
+            if hasattr(self, attr):
+                delattr(self, attr)
+        if hasattr(self, 'y_pred_'):
+            self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]
+        if hasattr(self, 'scores_'):
+            self.scores_ = [score[0] for score in self.scores_]
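+
+# Editor's sketch (not part of the original module): TimeDecoding is the
+# diagonal-only counterpart of GeneralizationAcrossTime, so a typical run,
+# assuming `epochs` is an mne.Epochs instance, is:
+#
+#     td = TimeDecoding(cv=5, predict_mode='cross-validation')
+#     td.fit(epochs)        # one classifier per time sample
+#     td.score(epochs)      # td.scores_ has length n_times
+#     td.plot(title='Decoding over time')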
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/transformer.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/transformer.py
new file mode 100644
index 0000000..27950cd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/decoding/transformer.py
@@ -0,0 +1,536 @@
+# Authors: Mainak Jas <mainak at neuro.hut.fi>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Romain Trachel <trachelr at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .mixin import TransformerMixin
+
+from .. import pick_types
+from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
+                      band_stop_filter)
+from ..time_frequency import multitaper_psd
+from ..externals import six
+from ..utils import _check_type_picks, deprecated
+
+
+class Scaler(TransformerMixin):
+    """Standardizes data across channels
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement info
+    with_mean : boolean, True by default
+        If True, center the data before scaling.
+    with_std : boolean, True by default
+        If True, scale the data to unit variance (or equivalently,
+        unit standard deviation).
+
+    Attributes
+    ----------
+    info : instance of Info
+        The measurement info
+    ch_mean_ : dict
+        The mean value for each channel type
+    std_ : dict
+        The standard deviation for each channel type
+     """
+    def __init__(self, info, with_mean=True, with_std=True):
+        self.info = info
+        self.with_mean = with_mean
+        self.with_std = with_std
+        self.ch_mean_ = dict()  # TODO rename attribute
+        self.std_ = dict()  # TODO rename attribute
+
+    def fit(self, epochs_data, y):
+        """Standardizes data across channels
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data from which to compute the channel statistics.
+        y : array, shape (n_epochs,)
+            The label for each epoch.
+
+        Returns
+        -------
+        self : instance of Scaler
+            Returns the modified instance.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        X = np.atleast_3d(epochs_data)
+
+        picks_list = dict()
+        picks_list['mag'] = pick_types(self.info, meg='mag', ref_meg=False,
+                                       exclude='bads')
+        picks_list['grad'] = pick_types(self.info, meg='grad', ref_meg=False,
+                                        exclude='bads')
+        picks_list['eeg'] = pick_types(self.info, eeg=True, ref_meg=False,
+                                       exclude='bads')
+
+        self.picks_list_ = picks_list
+
+        for key, this_pick in picks_list.items():
+            if self.with_mean:
+                ch_mean = X[:, this_pick, :].mean(axis=1)[:, None, :]
+                self.ch_mean_[key] = ch_mean  # TODO rename attribute
+            if self.with_std:
+                ch_std = X[:, this_pick, :].std(axis=1)[:, None, :]
+                self.std_[key] = ch_std  # TODO rename attribute
+
+        return self
+
+    def transform(self, epochs_data, y=None):
+        """Standardizes data across channels
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        X : array, shape (n_epochs, n_channels, n_times)
+            The standardized data.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        X = np.atleast_3d(epochs_data)
+
+        for key, this_pick in six.iteritems(self.picks_list_):
+            if self.with_mean:
+                X[:, this_pick, :] -= self.ch_mean_[key]
+            if self.with_std:
+                X[:, this_pick, :] /= self.std_[key]
+
+        return X
+
+    def inverse_transform(self, epochs_data, y=None):
+        """ Inverse standardization of data across channels
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        X : array, shape (n_epochs, n_channels, n_times)
+            The data restored to its original scale.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        X = np.atleast_3d(epochs_data)
+
+        for key, this_pick in six.iteritems(self.picks_list_):
+            if self.with_mean:
+                X[:, this_pick, :] += self.ch_mean_[key]
+            if self.with_std:
+                X[:, this_pick, :] *= self.std_[key]
+
+        return X
+
+
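+# Editor's sketch (not part of the original module): per channel type,
+# Scaler implements X -> (X - mean) / std with statistics taken across the
+# channels of that type. A plain-NumPy equivalent for one hypothetical
+# channel group `pick`:
+#
+#     X = np.random.randn(10, 5, 20)        # (n_epochs, n_channels, n_times)
+#     pick = [0, 1, 2]                      # hypothetical channel indices
+#     mean = X[:, pick, :].mean(axis=1)[:, None, :]
+#     std = X[:, pick, :].std(axis=1)[:, None, :]
+#     X[:, pick, :] = (X[:, pick, :] - mean) / std
+#
+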
+class EpochsVectorizer(TransformerMixin):
+    """EpochsVectorizer transforms epoch data to fit into a scikit-learn pipeline.
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement info.
+
+    Attributes
+    ----------
+    n_channels : int
+        The number of channels.
+    n_times : int
+        The number of time points.
+
+    """
+    def __init__(self, info=None):
+        self.info = info
+        self.n_channels = None
+        self.n_times = None
+
+    def fit(self, epochs_data, y):
+        """For each epoch, concatenate data from different channels into a single
+        feature vector.
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data to concatenate channels.
+        y : array, shape (n_epochs,)
+            The label for each epoch.
+
+        Returns
+        -------
+        self : instance of EpochsVectorizer
+            Returns the instance itself.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        return self
+
+    def transform(self, epochs_data, y=None):
+        """For each epoch, concatenate data from different channels into a single
+        feature vector.
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        X : array, shape (n_epochs, n_channels * n_times)
+            The data concatenated over channels.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        epochs_data = np.atleast_3d(epochs_data)
+
+        n_epochs, n_channels, n_times = epochs_data.shape
+        X = epochs_data.reshape(n_epochs, n_channels * n_times)
+        # save attributes for inverse_transform
+        self.n_epochs = n_epochs
+        self.n_channels = n_channels
+        self.n_times = n_times
+
+        return X
+
+    def inverse_transform(self, X, y=None):
+        """For each epoch, reshape a feature vector into the original data shape
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_channels * n_times)
+            The feature vector concatenated over channels.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The original data
+        """
+        if not isinstance(X, np.ndarray):
+            raise ValueError("X should be of type ndarray (got %s)."
+                             % type(X))
+
+        return X.reshape(-1, self.n_channels, self.n_times)
+
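+# Editor's sketch (not part of the original module): transform() is a plain
+# reshape and inverse_transform() undoes it, e.g.:
+#
+#     vec = EpochsVectorizer()
+#     data = np.zeros((4, 3, 10))           # (n_epochs, n_channels, n_times)
+#     X = vec.transform(data)               # shape (4, 30)
+#     back = vec.inverse_transform(X)       # shape (4, 3, 10) again
+#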
+
+@deprecated("Class 'ConcatenateChannels' has been renamed to "
+            "'EpochsVectorizer' and will be removed in release 0.11.")
+class ConcatenateChannels(EpochsVectorizer):
+    pass
+
+
+class PSDEstimator(TransformerMixin):
+    """Compute power spectrum density (PSD) using a multi-taper method
+
+    Parameters
+    ----------
+    sfreq : float
+        The sampling frequency.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    bandwidth : float
+        The bandwidth of the multi taper windowing function in Hz.
+    adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD
+        (slow, use n_jobs >> 1 to speed up computation).
+    low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth.
+    n_jobs : int
+        Number of parallel jobs to use (only used if adaptive=True).
+    normalization : str
+        Either "full" or "length" (default). If "full", the PSD will
+        be normalized by the sampling rate as well as the length of
+        the signal (as in nitime).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
+                 adaptive=False, low_bias=True, n_jobs=1,
+                 normalization='length', verbose=None):
+        self.sfreq = sfreq
+        self.fmin = fmin
+        self.fmax = fmax
+        self.bandwidth = bandwidth
+        self.adaptive = adaptive
+        self.low_bias = low_bias
+        self.n_jobs = n_jobs
+        self.verbose = verbose
+        self.normalization = normalization
+
+    def fit(self, epochs_data, y):
+        """Compute power spectrum density (PSD) using a multi-taper method
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : array, shape (n_epochs,)
+            The label for each epoch
+
+        Returns
+        -------
+        self : instance of PSDEstimator
+            Returns the instance itself.
+
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        return self
+
+    def transform(self, epochs_data, y=None):
+        """Compute power spectrum density (PSD) using a multi-taper method
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        psd : array, shape (n_epochs, n_channels, n_freqs)
+            The computed PSD.
+        """
+
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        epochs_data = np.atleast_3d(epochs_data)
+
+        n_epochs, n_channels, n_times = epochs_data.shape
+        X = epochs_data.reshape(n_epochs * n_channels, n_times)
+
+        psd, _ = multitaper_psd(x=X, sfreq=self.sfreq, fmin=self.fmin,
+                                fmax=self.fmax, bandwidth=self.bandwidth,
+                                adaptive=self.adaptive, low_bias=self.low_bias,
+                                n_jobs=self.n_jobs,
+                                normalization=self.normalization,
+                                verbose=self.verbose)
+
+        _, n_freqs = psd.shape
+        psd = psd.reshape(n_epochs, n_channels, n_freqs)
+
+        return psd
+
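+# Editor's sketch (not part of the original module): PSDEstimator keeps no
+# state from fit(); transform() flattens the epochs to
+# (n_epochs * n_channels, n_times), runs multitaper_psd, and reshapes back.
+# For a hypothetical `epochs_data` array sampled at 250 Hz:
+#
+#     psd_est = PSDEstimator(sfreq=250., fmin=1., fmax=40.)
+#     psd = psd_est.transform(epochs_data)  # (n_epochs, n_channels, n_freqs)
+#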
+
+class FilterEstimator(TransformerMixin):
+    """Estimator to filter RtEpochs
+
+    Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
+    filter to the channels selected by "picks".
+
+    l_freq and h_freq are the frequencies below which and above which,
+    respectively, to filter out of the data. Thus the uses are:
+
+        - l_freq < h_freq: band-pass filter
+        - l_freq > h_freq: band-stop filter
+        - l_freq is not None, h_freq is None: low-pass filter
+        - l_freq is None, h_freq is not None: high-pass filter
+
+    If n_jobs > 1, more memory is required as "len(picks) * n_times"
+    additional time points need to be temporarily stored in memory.
+
+    Parameters
+    ----------
+    info : instance of Info
+        Measurement info.
+    l_freq : float | None
+        Low cut-off frequency in Hz. If None the data are only low-passed.
+    h_freq : float | None
+        High cut-off frequency in Hz. If None the data are only
+        high-passed.
+    picks : array-like of int | None
+        Indices of channels to filter. If None only the data (MEG/EEG)
+        channels will be filtered.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    l_trans_bandwidth : float
+        Width of the transition band at the low cut-off frequency in Hz.
+    h_trans_bandwidth : float
+        Width of the transition band at the high cut-off frequency in Hz.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to self.verbose.
+    """
+    def __init__(self, info, l_freq, h_freq, picks=None, filter_length='10s',
+                 l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
+                 method='fft', iir_params=None, verbose=None):
+        self.info = info
+        self.l_freq = l_freq
+        self.h_freq = h_freq
+        self.picks = _check_type_picks(picks)
+        self.filter_length = filter_length
+        self.l_trans_bandwidth = l_trans_bandwidth
+        self.h_trans_bandwidth = h_trans_bandwidth
+        self.n_jobs = n_jobs
+        self.method = method
+        self.iir_params = iir_params
+        self.verbose = verbose  # documented parameter; store it on the instance
+
+    def fit(self, epochs_data, y):
+        """Filters data
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : array, shape (n_epochs,)
+            The label for each epoch.
+
+        Returns
+        -------
+        self : instance of FilterEstimator
+            Returns the modified instance
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        if self.picks is None:
+            self.picks = pick_types(self.info, meg=True, eeg=True,
+                                    ref_meg=False, exclude=[])
+
+        if self.l_freq == 0:
+            self.l_freq = None
+        if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
+            self.h_freq = None
+        if self.l_freq is not None and not isinstance(self.l_freq, float):
+            self.l_freq = float(self.l_freq)
+        if self.h_freq is not None and not isinstance(self.h_freq, float):
+            self.h_freq = float(self.h_freq)
+
+        if self.info['lowpass'] is None or (self.h_freq is not None and
+                                            (self.l_freq is None or
+                                             self.l_freq < self.h_freq) and
+                                            self.h_freq <
+                                            self.info['lowpass']):
+            self.info['lowpass'] = self.h_freq
+
+        if self.info['highpass'] is None or (self.l_freq is not None and
+                                             (self.h_freq is None or
+                                              self.l_freq < self.h_freq) and
+                                             self.l_freq >
+                                             self.info['highpass']):
+            self.info['highpass'] = self.l_freq
+
+        return self
+
+    def transform(self, epochs_data, y=None):
+        """Filters data
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
+
+        Returns
+        -------
+        X : array, shape (n_epochs, n_channels, n_times)
+            The data after filtering
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        epochs_data = np.atleast_3d(epochs_data)
+
+        if self.l_freq is None and self.h_freq is not None:
+            epochs_data = \
+                low_pass_filter(epochs_data, self.info['sfreq'], self.h_freq,
+                                filter_length=self.filter_length,
+                                trans_bandwidth=self.l_trans_bandwidth,
+                                method=self.method, iir_params=self.iir_params,
+                                picks=self.picks, n_jobs=self.n_jobs,
+                                copy=False, verbose=False)
+
+        if self.l_freq is not None and self.h_freq is None:
+            epochs_data = \
+                high_pass_filter(epochs_data, self.info['sfreq'], self.l_freq,
+                                 filter_length=self.filter_length,
+                                 trans_bandwidth=self.h_trans_bandwidth,
+                                 method=self.method,
+                                 iir_params=self.iir_params,
+                                 picks=self.picks, n_jobs=self.n_jobs,
+                                 copy=False, verbose=False)
+
+        if self.l_freq is not None and self.h_freq is not None:
+            if self.l_freq < self.h_freq:
+                epochs_data = \
+                    band_pass_filter(epochs_data, self.info['sfreq'],
+                                     self.l_freq, self.h_freq,
+                                     filter_length=self.filter_length,
+                                     l_trans_bandwidth=self.l_trans_bandwidth,
+                                     h_trans_bandwidth=self.h_trans_bandwidth,
+                                     method=self.method,
+                                     iir_params=self.iir_params,
+                                     picks=self.picks, n_jobs=self.n_jobs,
+                                     copy=False, verbose=False)
+            else:
+                epochs_data = \
+                    band_stop_filter(epochs_data, self.info['sfreq'],
+                                     self.h_freq, self.l_freq,
+                                     filter_length=self.filter_length,
+                                     l_trans_bandwidth=self.h_trans_bandwidth,
+                                     h_trans_bandwidth=self.l_trans_bandwidth,
+                                     method=self.method,
+                                     iir_params=self.iir_params,
+                                     picks=self.picks, n_jobs=self.n_jobs,
+                                     copy=False, verbose=False)
+        return epochs_data
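+
+# Editor's sketch (not part of the original module): with l_freq < h_freq the
+# estimator dispatches to band_pass_filter, so a hypothetical 8-12 Hz
+# band-pass over the data channels described by `info` would be:
+#
+#     filt = FilterEstimator(info, l_freq=8., h_freq=12.)
+#     filt.fit(epochs_data, y)                # resolves picks and cut-offs
+#     filtered = filt.transform(epochs_data)  # same shape, filtered in place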
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/defaults.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/defaults.py
new file mode 100644
index 0000000..6a58b47
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/defaults.py
@@ -0,0 +1,54 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+
+DEFAULTS = dict(
+    color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
+               emg='k', ref_meg='steelblue', misc='k', stim='k',
+               resp='k', chpi='k', exci='k', ias='k', syst='k',
+               seeg='k'),
+    config_opts=dict(),
+    units=dict(eeg='uV', grad='fT/cm', mag='fT', eog='uV', misc='AU',
+               seeg='uV'),
+    scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6,
+                  misc=1.0, seeg=1e4),
+    scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
+                           eog=150e-6, ecg=5e-4, emg=1e-3,
+                           ref_meg=1e-12, misc=1e-3,
+                           stim=1, resp=1, chpi=1e-4, exci=1,
+                           ias=1, syst=1, seeg=1e-5),
+    scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5),
+    ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
+              eeg=(-200., 200.), misc=(-5., 5.),
+              seeg=(-200., 200.)),
+    titles=dict(eeg='EEG', grad='Gradiometers',
+                mag='Magnetometers', misc='misc', seeg='sEEG'),
+    mask_params=dict(marker='o',
+                     markerfacecolor='w',
+                     markeredgecolor='k',
+                     linewidth=0,
+                     markeredgewidth=1,
+                     markersize=4),
+)
+
+
+def _handle_default(k, v=None):
+    """Helper to avoid dicts as default keyword arguments
+
+    Use this function instead to resolve default dict values. Example usage::
+
+        scalings = _handle_default('scalings', scalings)
+
+    """
+    this_mapping = deepcopy(DEFAULTS[k])
+    if v is not None:
+        if isinstance(v, dict):
+            this_mapping.update(v)
+        else:
+            for key in this_mapping.keys():
+                this_mapping[key] = v
+    return this_mapping
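+
+# Editor's sketch (not part of the original module): _handle_default merges a
+# user dict into a copy of the defaults, or broadcasts a scalar to every key:
+#
+#     _handle_default('scalings', dict(eeg=1e5))  # eeg overridden, rest kept
+#     _handle_default('scalings', 2.0)            # every entry set to 2.0
+#     _handle_default('scalings')                 # plain copy of the defaults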
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/dipole.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/dipole.py
new file mode 100644
index 0000000..64a313f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/dipole.py
@@ -0,0 +1,720 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+from scipy import linalg
+from copy import deepcopy
+import re
+
+from .cov import read_cov, _get_whitener_data
+from .io.pick import pick_types, channel_type
+from .io.proj import make_projector, _has_eeg_average_ref_proj
+from .bem import _fit_sphere
+from .transforms import (_print_coord_trans, _coord_frame_name,
+                         apply_trans, invert_transform, Transform)
+
+from .forward._make_forward import (_get_mri_head_t, _setup_bem,
+                                    _prep_meg_channels, _prep_eeg_channels)
+from .forward._compute_forward import (_compute_forwards_meeg,
+                                       _prep_field_computation)
+
+from .externals.six import string_types
+from .surface import (transform_surface_to, _normalize_vectors,
+                      _get_ico_surface, _compute_nearest)
+from .bem import _bem_find_surface, _bem_explain_surface
+from .source_space import (_make_volume_source_space, SourceSpaces,
+                           _points_outside_surface)
+from .parallel import parallel_func
+from .fixes import partial
+from .utils import logger, verbose, _time_mask
+
+
+class Dipole(object):
+    """Dipole class
+
+    Used to store positions, orientations, amplitudes, times, goodness of fit
+    of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
+    or certain inverse solvers.
+
+    Parameters
+    ----------
+    times : array, shape (n_dipoles,)
+        The time instants at which each dipole was fitted (sec).
+    pos : array, shape (n_dipoles, 3)
+        The dipoles positions (m).
+    amplitude : array, shape (n_dipoles,)
+        The amplitude of the dipoles (nAm).
+    ori : array, shape (n_dipoles, 3)
+        The dipole orientations (normalized to unit length).
+    gof : array, shape (n_dipoles,)
+        The goodness of fit.
+    name : str | None
+        Name of the dipole.
+    """
+    def __init__(self, times, pos, amplitude, ori, gof, name=None):
+        self.times = times
+        self.pos = pos
+        self.amplitude = amplitude
+        self.ori = ori
+        self.gof = gof
+        self.name = name
+
+    def __repr__(self):
+        s = "n_times : %s" % len(self.times)
+        s += ", tmin : %s" % np.min(self.times)
+        s += ", tmax : %s" % np.max(self.times)
+        return "<Dipole  |  %s>" % s
+
+    def save(self, fname):
+        """Save dipole in a .dip file
+
+        Parameters
+        ----------
+        fname : str
+            The name of the .dip file.
+        """
+        fmt = "  %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
+        with open(fname, 'wb') as fid:
+            fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
+            fid.write('#   begin     end   X (mm)   Y (mm)   Z (mm)'
+                      '   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)    g/%\n'
+                      .encode('utf-8'))
+            t = self.times[:, np.newaxis] * 1000.
+            gof = self.gof[:, np.newaxis]
+            amp = 1e9 * self.amplitude[:, np.newaxis]
+            out = np.concatenate((t, t, self.pos / 1e-3, amp,
+                                  self.ori * amp, gof), axis=-1)
+            np.savetxt(fid, out, fmt=fmt)
+            if self.name is not None:
+                fid.write(('## Name "%s dipoles" Style "Dipoles"'
+                           % self.name).encode('utf-8'))
+
+    def crop(self, tmin=None, tmax=None):
+        """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        """
+        mask = _time_mask(self.times, tmin, tmax)
+        for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
+            setattr(self, attr, getattr(self, attr)[mask])
+
+    def copy(self):
+        """Copy the Dipoles object
+
+        Returns
+        -------
+        dip : instance of Dipole
+            The copied dipole instance.
+        """
+        return deepcopy(self)
+
+    @verbose
+    def plot_locations(self, trans, subject, subjects_dir=None,
+                       bgcolor=(1, 1, 1), opacity=0.3,
+                       brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
+                       fig_name=None, fig_size=(600, 600), mode='cone',
+                       scale_factor=0.1e-1, colors=None, verbose=None):
+        """Plot dipole locations as arrows
+
+        Parameters
+        ----------
+        trans : dict
+            The MRI-to-head transformation.
+        subject : str
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT.
+        subjects_dir : None | str
+            The path to the FreeSurfer subjects reconstructions.
+            It corresponds to the FreeSurfer environment variable
+            SUBJECTS_DIR. The default is None.
+        bgcolor : tuple of length 3
+            Background color in 3D.
+        opacity : float in [0, 1]
+            Opacity of brain mesh.
+        brain_color : tuple of length 3
+            Brain color.
+        mesh_color : tuple of length 3
+            Mesh color.
+        fig_name : str | None
+            Mayavi figure name.
+        fig_size : tuple of length 2
+            Mayavi figure size.
+        mode : str
+            Should be ``'cone'`` or ``'sphere'`` to specify how the
+            dipoles should be shown.
+        scale_factor : float
+            The scaling applied to amplitudes for the plot.
+        colors : list of colors | None
+            Color to plot with each dipole. If None, default colors are used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : instance of mlab.Figure
+            The mayavi figure.
+        """
+        from .viz import plot_dipole_locations
+        dipoles = []
+        for t in self.times:
+            dipoles.append(self.copy())
+            dipoles[-1].crop(t, t)
+        return plot_dipole_locations(
+            dipoles, trans, subject, subjects_dir, bgcolor, opacity,
+            brain_color, mesh_color, fig_name, fig_size, mode, scale_factor,
+            colors)
+
+    def plot_amplitudes(self, color='k', show=True):
+        """Plot the dipole amplitudes as a function of time
+
+        Parameters
+        ----------
+        color : matplotlib color
+            Color to use for the trace.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure object containing the plot.
+        """
+        from .viz import plot_dipole_amplitudes
+        return plot_dipole_amplitudes([self], [color], show)
+
+    def __getitem__(self, idx_slice):
+        """Handle indexing"""
+        if isinstance(idx_slice, int):  # make sure attributes stay 2d
+            idx_slice = [idx_slice]
+
+        selected_times = self.times[idx_slice].copy()
+        selected_pos = self.pos[idx_slice, :].copy()
+        selected_amplitude = self.amplitude[idx_slice].copy()
+        selected_ori = self.ori[idx_slice, :].copy()
+        selected_gof = self.gof[idx_slice].copy()
+        selected_name = self.name
+
+        new_dipole = Dipole(selected_times, selected_pos,
+                            selected_amplitude, selected_ori,
+                            selected_gof, selected_name)
+        return new_dipole
+
+    def __len__(self):
+        """Handle len function"""
+        return self.pos.shape[0]
+
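+# Editor's sketch (not part of the original module): a Dipole can be built
+# directly from synthetic arrays, then indexed or cropped:
+#
+#     times = np.array([0.1, 0.2])                 # seconds
+#     pos = np.zeros((2, 3))                       # meters
+#     ori = np.tile([0., 0., 1.], (2, 1))          # unit orientations
+#     amp = np.array([10e-9, 20e-9])
+#     gof = np.array([80., 90.])
+#     dip = Dipole(times, pos, amp, ori, gof, name='demo')
+#     dip[0], len(dip)                             # indexing keeps arrays 2d
+#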
+
+# #############################################################################
+# IO
+
+@verbose
+def read_dipole(fname, verbose=None):
+    """Read .dip file from Neuromag/xfit or MNE
+
+    Parameters
+    ----------
+    fname : str
+        The name of the .dip file.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    dipole : instance of Dipole
+        The dipole.
+    """
+    try:
+        data = np.loadtxt(fname, comments='%')
+    except ValueError:  # np.loadtxt raises ValueError on a parse failure
+        data = np.loadtxt(fname, comments='#')  # handle 2 types of comments...
+    name = None
+    with open(fname, 'r') as fid:
+        for line in fid.readlines():
+            if line.startswith('##') or line.startswith('%%'):
+                m = re.search('Name "(.*) dipoles"', line)
+                if m:
+                    name = m.group(1)
+                    break
+    if data.ndim == 1:
+        data = data[None, :]
+    logger.info("%d dipole(s) found" % len(data))
+    times = data[:, 0] / 1000.
+    pos = 1e-3 * data[:, 2:5]  # put data in meters
+    amplitude = data[:, 5]
+    norm = amplitude.copy()
+    amplitude /= 1e9
+    norm[norm == 0] = 1
+    ori = data[:, 6:9] / norm[:, np.newaxis]
+    gof = data[:, 9]
+    return Dipole(times, pos, amplitude, ori, gof, name)
+
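+# Editor's sketch (not part of the original module): Dipole.save() and
+# read_dipole() round-trip through the textual .dip format; for a
+# hypothetical Dipole instance `dip`:
+#
+#     dip.save('demo.dip')
+#     dip2 = read_dipole('demo.dip')
+#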
+
+# #############################################################################
+# Fitting
+
+def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
+    """Compute the forward solution and do other nice stuff"""
+    B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
+    B = np.concatenate(B, axis=1)
+    B_orig = B.copy()
+
+    # Apply projection and whiten (cov has projections already)
+    B = np.dot(B, whitener.T)
+
+    # column normalization doesn't affect our fitting, so skip for now
+    # S = np.sum(B * B, axis=1)  # across channels
+    # scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
+    #                                        axis=1)), 3)
+    # B *= scales[:, np.newaxis]
+    scales = np.ones(3)
+    return B, B_orig, scales
+
+
+def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
+    """Make a guess space inside a sphere or BEM surface"""
+    if isinstance(surf_or_rad, dict):
+        surf = surf_or_rad
+        logger.info('Guess surface (%s) is in %s coordinates'
+                    % (_bem_explain_surface(surf['id']),
+                       _coord_frame_name(surf['coord_frame'])))
+    else:
+        radius = surf_or_rad[0]
+        logger.info('Making a spherical guess space with radius %7.1f mm...'
+                    % (1000 * radius))
+        surf = _get_ico_surface(3)
+        _normalize_vectors(surf['rr'])
+        surf['rr'] *= radius
+        surf['rr'] += r0
+    logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
+    src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
+                                    do_neighbors=False, n_jobs=n_jobs)
+    # simplify the result to make things easier later
+    src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
+               nuse=src['nuse'], coord_frame=src['coord_frame'],
+               vertno=np.arange(src['nuse']))
+    return SourceSpaces([src])
+
+
+def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
+    """Calculate the residual sum of squares"""
+    if fwd_svd is None:
+        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
+        uu, sing, vv = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
+    else:
+        uu, sing, vv = fwd_svd
+    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
+    # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
+    return 1. - gof
+
+
+def _dipole_gof(uu, sing, vv, B, B2):
+    """Calculate the goodness of fit from the forward SVD"""
+    ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
+    one = np.dot(vv[:ncomp], B)
+    Bm2 = np.sum(one * one)
+    gof = Bm2 / B2
+    return gof, one
+
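+# Editor's note (not part of the original module): the goodness of fit above
+# is the fraction of whitened field energy explained by the leading ncomp
+# right-singular vectors of the forward matrix; in plain NumPy, for a
+# whitened measurement vector B:
+#
+#     uu, sing, vv = np.linalg.svd(fwd, full_matrices=False)
+#     one = np.dot(vv[:ncomp], B)
+#     gof = np.sum(one * one) / np.dot(B, B)
+#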
+
+def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd):
+    """Fit the dipole moment once the location is known"""
+    fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
+                                             rd[np.newaxis, :])
+    uu, sing, vv = linalg.svd(fwd, full_matrices=False)
+    gof, one = _dipole_gof(uu, sing, vv, B, B2)
+    ncomp = len(one)
+    # Counteract the effect of column normalization
+    Q = scales[0] * np.sum(uu.T[:ncomp] * (one / sing[:ncomp])[:, np.newaxis],
+                           axis=0)
+    # apply the projector to both elements
+    B_residual = np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig),
+                                                  proj_op.T)
+    return Q, gof, B_residual
+
+
+def _fit_dipoles(min_dist_to_inner_skull, data, times, guess_rrs,
+                 guess_fwd_svd, fwd_data, whitener, proj_op, n_jobs):
+    """Fit a single dipole to the given whitened, projected data"""
+    from scipy.optimize import fmin_cobyla
+    parallel, p_fun, _ = parallel_func(_fit_dipole, n_jobs)
+    # parallel over time points
+    res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
+                         guess_fwd_svd, fwd_data, whitener, proj_op,
+                         fmin_cobyla)
+                   for B, t in zip(data.T, times))
+    pos = np.array([r[0] for r in res])
+    amp = np.array([r[1] for r in res])
+    ori = np.array([r[2] for r in res])
+    gof = np.array([r[3] for r in res]) * 100  # convert to percentage
+    residual = np.array([r[4] for r in res]).T
+
+    return pos, amp, ori, gof, residual
+
+
+'''Simplex code in case we ever want/need it for testing
+
+def _make_tetra_simplex():
+    """Make the initial tetrahedron"""
+    #
+    # For this definition of a regular tetrahedron, see
+    #
+    # http://mathworld.wolfram.com/Tetrahedron.html
+    #
+    x = np.sqrt(3.0) / 3.0
+    r = np.sqrt(6.0) / 12.0
+    R = 3 * r
+    d = x / 2.0
+    simplex = 1e-2 * np.array([[x, 0.0, -r],
+                               [-d, 0.5, -r],
+                               [-d, -0.5, -r],
+                               [0., 0., R]])
+    return simplex
+
+
+def try_(p, y, psum, ndim, fun, ihi, neval, fac):
+    """Helper to try a value"""
+    ptry = np.empty(ndim)
+    fac1 = (1.0 - fac) / ndim
+    fac2 = fac1 - fac
+    ptry = psum * fac1 - p[ihi] * fac2
+    ytry = fun(ptry)
+    neval += 1
+    if ytry < y[ihi]:
+        y[ihi] = ytry
+        psum[:] += ptry - p[ihi]
+        p[ihi] = ptry
+    return ytry, neval
+
+
+def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
+    """Minimization with the simplex algorithm
+
+    Modified from Numerical recipes"""
+    y = np.array([fun(s) for s in p])
+    ndim = p.shape[1]
+    assert p.shape[0] == ndim + 1
+    mpts = ndim + 1
+    neval = 0
+    psum = p.sum(axis=0)
+
+    loop = 1
+    while(True):
+        ilo = 1
+        if y[1] > y[2]:
+            ihi = 1
+            inhi = 2
+        else:
+            ihi = 2
+            inhi = 1
+        for i in range(mpts):
+            if y[i] < y[ilo]:
+                ilo = i
+            if y[i] > y[ihi]:
+                inhi = ihi
+                ihi = i
+            elif y[i] > y[inhi]:
+                if i != ihi:
+                    inhi = i
+
+        rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
+        if rtol < ftol:
+            break
+        if neval >= max_eval:
+            raise RuntimeError('Maximum number of evaluations exceeded.')
+        if stol > 0:  # Has the simplex collapsed?
+            dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
+            if loop > 5 and dsum < stol:
+                break
+
+        ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
+        if ytry <= y[ilo]:
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
+        elif ytry >= y[inhi]:
+            ysave = y[ihi]
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
+            if ytry >= ysave:
+                for i in range(mpts):
+                    if i != ilo:
+                        psum[:] = 0.5 * (p[i] + p[ilo])
+                        p[i] = psum
+                        y[i] = fun(psum)
+                neval += ndim
+                psum = p.sum(axis=0)
+        loop += 1
+'''
+
+
+def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
+                guess_fwd_svd, fwd_data, whitener, proj_op,
+                fmin_cobyla):
+    """Fit a single bit of data"""
+    B = np.dot(whitener, B_orig)
+
+    # make constraint function to keep the solver within the inner skull
+    if isinstance(fwd_data['inner_skull'], dict):  # bem
+        surf = fwd_data['inner_skull']
+
+        def constraint(rd):
+
+            dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
+                                    return_dists=True)[1][0]
+
+            if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
+                dist *= -1.
+
+            # Once we know the dipole is inside the inner skull, check
+            # that its distance to the inner skull is at least
+            # min_dist_to_inner_skull. This is enforced by adding a
+            # constraint proportional to its distance.
+            dist -= min_dist_to_inner_skull
+            return dist
+
+    else:  # sphere
+        surf = None
+        R, r0 = fwd_data['inner_skull']
+        R_adj = R - min_dist_to_inner_skull
+
+        def constraint(rd):
+            return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
+
+    # Find a good starting point (find_best_guess in C)
+    B2 = np.dot(B, B)
+    if B2 == 0:
+        logger.warning('Zero field found for time %s' % t)
+        # return five values (pos, amp, ori, gof, residual) to match the
+        # unpacking done in _fit_dipoles; the residual is the projected data
+        return np.zeros(3), 0, np.zeros(3), 0, np.dot(proj_op, B_orig)
+
+    idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
+                     for fi, fwd_svd in enumerate(guess_fwd_svd)])
+    x0 = guess_rrs[idx]
+    fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
+
+    # Tested minimizers:
+    #    Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
+    # Several were similar, but COBYLA won for having a handy constraint
+    # function we can use to ensure we stay inside the inner skull /
+    # smallest sphere
+    rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
+                           rhobeg=5e-2, rhoend=5e-5, disp=False)
+
+    # simplex = _make_tetra_simplex() + x0
+    # _simplex_minimize(simplex, 1e-4, 2e-4, fun)
+    # rd_final = simplex[0]
+
+    # Compute the dipole moment at the final point
+    Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
+                              rd_final)
+    amp = np.sqrt(np.dot(Q, Q))
+    norm = 1. if amp == 0. else amp
+    ori = Q / norm
+
+    msg = '---- Fitted : %7.1f ms' % (1000. * t)
+    if surf is not None:
+        dist_to_inner_skull = _compute_nearest(surf['rr'],
+                                               rd_final[np.newaxis, :],
+                                               return_dists=True)[1][0]
+        msg += (", distance to inner skull : %2.4f mm"
+                % (dist_to_inner_skull * 1000.))
+
+    logger.info(msg)
+    return rd_final, amp, ori, gof, residual
+
+
+@verbose
+def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
+               verbose=None):
+    """Fit a dipole
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The dataset to fit.
+    cov : str | instance of Covariance
+        The noise covariance.
+    bem : str | dict
+        The BEM filename (str) or a loaded sphere model (dict).
+    trans : str | None
+        The head<->MRI transform filename. Must be provided unless BEM
+        is a sphere model.
+    min_dist : float
+        Minimum distance (in millimeters) from the dipole to the inner skull.
+        Must be positive. Note that because this is a constraint passed to
+        a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
+        fits could be 4.9 mm from the inner skull.
+    n_jobs : int
+        Number of jobs to run in parallel (used in field computation
+        and fitting).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    dip : instance of Dipole
+        The dipole fits.
+    residual : ndarray, shape (n_meeg_channels, n_times)
+        The good M-EEG data channels with the fitted dipolar activity
+        removed.
+
+    See Also
+    --------
+    mne.beamformer.rap_music
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
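+
+    Examples
+    --------
+    A minimal sketch, assuming ``mne`` is imported; the file names are
+    hypothetical placeholders for an evoked data set, a noise covariance,
+    a BEM solution, and a head<->MRI transform:
+
+    >>> evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # doctest: +SKIP
+    >>> dip, residual = fit_dipole(evoked, 'sample-cov.fif',
+    ...                            'sample-bem-sol.fif',
+    ...                            trans='sample-trans.fif')  # doctest: +SKIP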
+    """
+    # This could eventually be adapted to work with other inputs; for now
+    # an Evoked instance provides the required data, info, times, and comment.
+
+    evoked = evoked.copy()
+
+    # Determine if a list of projectors has an average EEG ref
+    if "eeg" in evoked and not _has_eeg_average_ref_proj(evoked.info['projs']):
+        raise ValueError('EEG average reference is mandatory for dipole '
+                         'fitting.')
+
+    if min_dist < 0:
+        raise ValueError('min_dist should be positive. Got %s' % min_dist)
+
+    data = evoked.data
+    info = evoked.info
+    times = evoked.times.copy()
+    comment = evoked.comment
+
+    # Convert the min_dist to meters
+    min_dist_to_inner_skull = min_dist / 1000.
+    del min_dist
+
+    # Figure out our inputs
+    neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
+    if isinstance(bem, string_types):
+        logger.info('BEM              : %s' % bem)
+    if trans is not None:
+        logger.info('MRI transform    : %s' % trans)
+        mri_head_t, trans = _get_mri_head_t(trans)
+    else:
+        mri_head_t = Transform('head', 'mri', np.eye(4))
+    bem = _setup_bem(bem, bem, neeg, mri_head_t)
+    if not bem['is_sphere']:
+        if trans is None:
+            raise ValueError('trans must not be None unless the BEM is a '
+                             'sphere model')
+        # Find the best-fitting sphere
+        inner_skull = _bem_find_surface(bem, 'inner_skull')
+        inner_skull = inner_skull.copy()
+        R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
+        r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
+        logger.info('Grid origin      : '
+                    '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
+                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
+    else:
+        r0 = bem['r0']
+        logger.info('Sphere model     : origin at (% 7.2f % 7.2f % 7.2f) mm'
+                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2]))
+        if 'layers' in bem:
+            R = bem['layers'][0]['rad']
+        else:
+            R = np.inf
+        inner_skull = [R, r0]
+    r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
+                         r0[np.newaxis, :])[0]
+
+    # Eventually these could be parameters, but they are just used for
+    # the initial grid anyway
+    guess_grid = 0.02  # MNE-C uses 0.01, but this is faster w/similar perf
+    guess_mindist = max(0.005, min_dist_to_inner_skull)
+    guess_exclude = 0.02
+    accurate = False  # can be made an option later (shouldn't make big diff)
+
+    logger.info('Guess grid       : %6.1f mm' % (1000 * guess_grid,))
+    if guess_mindist > 0.0:
+        logger.info('Guess mindist    : %6.1f mm' % (1000 * guess_mindist,))
+    if guess_exclude > 0:
+        logger.info('Guess exclude    : %6.1f mm' % (1000 * guess_exclude,))
+    logger.info('Using %s MEG coil definitions.'
+                % ("accurate" if accurate else "standard"))
+    if isinstance(cov, string_types):
+        logger.info('Noise covariance : %s' % (cov,))
+        cov = read_cov(cov, verbose=False)
+    logger.info('')
+
+    _print_coord_trans(mri_head_t)
+    _print_coord_trans(info['dev_head_t'])
+    logger.info('%d bad channels total' % len(info['bads']))
+
+    # Forward model setup (setup_forward_model from setup.c)
+    ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
+
+    megcoils, compcoils, megnames, meg_info = [], [], [], None
+    eegels, eegnames = [], []
+    if 'grad' in ch_types or 'mag' in ch_types:
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, exclude='bads',
+                               accurate=accurate, verbose=verbose)
+    if 'eeg' in ch_types:
+        eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
+                                              verbose=verbose)
+
+    # Ensure that MEG and/or EEG channels are present
+    if len(megcoils + eegels) == 0:
+        raise RuntimeError('No MEG or EEG channels found.')
+
+    # Whitener for the data
+    logger.info('Decomposing the sensor noise covariance matrix...')
+    picks = pick_types(info, meg=True, eeg=True)
+
+    # In case we want to more closely match MNE-C for debugging:
+    # from .io.pick import pick_info
+    # from .cov import prepare_noise_cov
+    # info_nb = pick_info(info, picks)
+    # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
+    # nzero = (cov['eig'] > 0)
+    # n_chan = len(info_nb['ch_names'])
+    # whitener = np.zeros((n_chan, n_chan), dtype=np.float)
+    # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
+    # whitener = np.dot(whitener, cov['eigvec'])
+
+    whitener = _get_whitener_data(info, cov, picks, verbose=False)
+
+    # Proceed to computing the fits (make_guess_data)
+    logger.info('\n---- Computing the forward solution for the guesses...')
+    guess_src = _make_guesses(inner_skull, r0_mri,
+                              guess_grid, guess_exclude, guess_mindist,
+                              n_jobs=n_jobs)[0]
+    if isinstance(inner_skull, dict):
+        transform_surface_to(inner_skull, 'head', mri_head_t)
+    transform_surface_to(guess_src, 'head', mri_head_t)
+
+    # C code computes guesses using a sphere model for speed, don't bother here
+    logger.info('Go through all guess source locations...')
+    fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
+                    ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
+                    inner_skull=inner_skull)
+    _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
+                            verbose=False)
+    guess_fwd = _dipole_forwards(fwd_data, whitener, guess_src['rr'],
+                                 n_jobs=n_jobs)[0]
+    # decompose ahead of time
+    guess_fwd_svd = [linalg.svd(fwd, overwrite_a=True, full_matrices=False)
+                     for fwd in np.array_split(guess_fwd,
+                                               len(guess_src['rr']))]
+    del guess_fwd  # destroyed
+    logger.info('[done %d sources]' % guess_src['nuse'])
+
+    # Do actual fits
+    data = data[picks]
+    ch_names = [info['ch_names'][p] for p in picks]
+    proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
+    out = _fit_dipoles(min_dist_to_inner_skull, data, times, guess_src['rr'],
+                       guess_fwd_svd, fwd_data,
+                       whitener, proj_op, n_jobs)
+    dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
+    residual = out[4]
+
+    logger.info('%d dipoles fitted' % len(dipoles.times))
+    return dipoles, residual
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/epochs.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/epochs.py
new file mode 100644
index 0000000..305fabf
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/epochs.py
@@ -0,0 +1,2602 @@
+"""Tools for working with epoched data"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import warnings
+import json
+import inspect
+import os.path as op
+from distutils.version import LooseVersion
+
+import numpy as np
+import scipy
+
+from .io.write import (start_file, start_block, end_file, end_block,
+                       write_int, write_float_matrix, write_float,
+                       write_id, write_string, _get_split_size)
+from .io.meas_info import read_meas_info, write_meas_info, _merge_info
+from .io.open import fiff_open, _get_next_fname
+from .io.tree import dir_tree_find
+from .io.tag import read_tag, read_tag_info
+from .io.constants import FIFF
+from .io.pick import (pick_types, channel_indices_by_type, channel_type,
+                      pick_channels, pick_info)
+from .io.proj import setup_proj, ProjMixin, _proj_equal
+from .io.base import _BaseRaw, ToDataFrameMixin
+from .evoked import EvokedArray, _aspect_rev
+from .baseline import rescale
+from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                SetChannelsMixin, InterpolationMixin)
+from .filter import resample, detrend, FilterMixin
+from .event import _read_events_fif
+from .fixes import in1d
+from .viz import (plot_epochs, _drop_log_stats,
+                  plot_epochs_psd, plot_epochs_psd_topomap)
+from .utils import (check_fname, logger, verbose, _check_type_picks,
+                    _time_mask, check_random_state, object_hash)
+from .externals.six import iteritems, string_types
+from .externals.six.moves import zip
+
+
+def _save_split(epochs, fname, part_idx, n_parts):
+    """Split epochs"""
+
+    # insert index in filename
+    path, base = op.split(fname)
+    idx = base.find('.')
+    if part_idx > 0:
+        fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
+                                            base[idx + 1:]))
+
+    next_fname = None
+    if part_idx < n_parts - 1:
+        next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
+                                                 base[idx + 1:]))
+        next_idx = part_idx + 1
+
+    fid = start_file(fname)
+
+    info = epochs.info
+    meas_id = info['meas_id']
+
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+
+    # Write measurement info
+    write_meas_info(fid, info)
+
+    # One or more evoked data sets
+    start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    start_block(fid, FIFF.FIFFB_EPOCHS)
+
+    # write events out after getting data to ensure bad events are dropped
+    data = epochs.get_data()
+    start_block(fid, FIFF.FIFFB_MNE_EVENTS)
+    write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
+    mapping_ = ';'.join([k + ':' + str(v) for k, v in
+                         epochs.event_id.items()])
+    write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
+    end_block(fid, FIFF.FIFFB_MNE_EVENTS)
+
+    # First and last sample
+    first = int(epochs.times[0] * info['sfreq'])
+    last = first + len(epochs.times) - 1
+    write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
+    write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
+
+    # save baseline
+    if epochs.baseline is not None:
+        bmin, bmax = epochs.baseline
+        bmin = epochs.times[0] if bmin is None else bmin
+        bmax = epochs.times[-1] if bmax is None else bmax
+        write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
+        write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
+
+    # The epoch data itself
+    decal = np.empty(info['nchan'])
+    for k in range(info['nchan']):
+        decal[k] = 1.0 / (info['chs'][k]['cal'] *
+                          info['chs'][k].get('scale', 1.0))
+
+    data *= decal[np.newaxis, :, np.newaxis]
+
+    write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
+
+    # undo modifications to data
+    data /= decal[np.newaxis, :, np.newaxis]
+
+    write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
+                 json.dumps(epochs.drop_log))
+
+    write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
+              epochs.selection)
+
+    # And now write the next file info in case epochs are split on disk
+    if next_fname is not None and n_parts > 1:
+        start_block(fid, FIFF.FIFFB_REF)
+        write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
+        write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
+        if meas_id is not None:
+            write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+        write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
+        end_block(fid, FIFF.FIFFB_REF)
+
+    end_block(fid, FIFF.FIFFB_EPOCHS)
+    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
+
+
+class _BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+                  SetChannelsMixin, InterpolationMixin, FilterMixin,
+                  ToDataFrameMixin):
+    """Abstract base class for Epochs-type classes
+
+    This class provides basic functionality and should never be instantiated
+    directly. See Epochs below for an explanation of the parameters.
+    """
+    def __init__(self, info, data, events, event_id, tmin, tmax,
+                 baseline=(None, 0), raw=None,
+                 picks=None, name='Unknown', reject=None, flat=None,
+                 decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
+                 add_eeg_ref=True, proj=True, on_missing='error',
+                 preload_at_end=False, selection=None, drop_log=None,
+                 verbose=None):
+
+        self.verbose = verbose
+        self.name = name
+
+        if on_missing not in ['error', 'warning', 'ignore']:
+            raise ValueError('on_missing must be one of: error, '
+                             'warning, ignore. Got: %s' % on_missing)
+
+        # check out event_id dict
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+        elif isinstance(event_id, dict):
+            if not all(isinstance(v, int) for v in event_id.values()):
+                raise ValueError('Event IDs must be of type integer')
+            if not all(isinstance(k, string_types) for k in event_id):
+                raise ValueError('Event names must be of type str')
+        elif isinstance(event_id, list):
+            if not all(isinstance(v, int) for v in event_id):
+                raise ValueError('Event IDs must be of type integer')
+            event_id = dict(zip((str(i) for i in event_id), event_id))
+        elif isinstance(event_id, int):
+            event_id = {str(event_id): event_id}
+        else:
+            raise ValueError('event_id must be dict or int.')
+        self.event_id = event_id
+        del event_id
+
+        if events is not None:  # RtEpochs can have events=None
+
+            if events.dtype.kind not in ['i', 'u']:
+                raise ValueError('events must be an array of type int')
+            if events.ndim != 2 or events.shape[1] != 3:
+                raise ValueError('events must be 2D with 3 columns')
+
+            for key, val in self.event_id.items():
+                if val not in events[:, 2]:
+                    msg = ('No matching events found for %s '
+                           '(event id %i)' % (key, val))
+                    if on_missing == 'error':
+                        raise ValueError(msg)
+                    elif on_missing == 'warning':
+                        logger.warning(msg)
+                        warnings.warn(msg)
+                    else:  # on_missing == 'ignore':
+                        pass
+
+            values = list(self.event_id.values())
+            selected = in1d(events[:, 2], values)
+            if selection is None:
+                self.selection = np.where(selected)[0]
+            else:
+                self.selection = selection
+            if drop_log is None:
+                self.drop_log = [list() if k in self.selection else ['IGNORED']
+                                 for k in range(len(events))]
+            else:
+                self.drop_log = drop_log
+            events = events[selected]
+            n_events = len(events)
+            if n_events > 1:
+                if np.diff(events.astype(np.int64)[:, 0]).min() <= 0:
+                    warnings.warn('The events passed to the Epochs '
+                                  'constructor are not chronologically '
+                                  'ordered.', RuntimeWarning)
+
+            if n_events > 0:
+                logger.info('%d matching events found' % n_events)
+            else:
+                raise ValueError('No desired events found.')
+            self.events = events
+            del events
+        else:
+            self.drop_log = list()
+            self.selection = np.array([], int)
+            # do not set self.events here, let subclass do it
+
+        # check reject_tmin and reject_tmax
+        if (reject_tmin is not None) and (reject_tmin < tmin):
+            raise ValueError("reject_tmin needs to be None or >= tmin")
+        if (reject_tmax is not None) and (reject_tmax > tmax):
+            raise ValueError("reject_tmax needs to be None or <= tmax")
+        if (reject_tmin is not None) and (reject_tmax is not None):
+            if reject_tmin >= reject_tmax:
+                raise ValueError('reject_tmin needs to be < reject_tmax')
+        if detrend not in [None, 0, 1]:
+            raise ValueError('detrend must be None, 0, or 1')
+
+        # check that baseline is in available data
+        if baseline is not None:
+            baseline_tmin, baseline_tmax = baseline
+            tstep = 1. / info['sfreq']
+            if baseline_tmin is not None:
+                if baseline_tmin < tmin - tstep:
+                    err = ("Baseline interval (tmin = %s) is outside of epoch "
+                           "data (tmin = %s)" % (baseline_tmin, tmin))
+                    raise ValueError(err)
+            if baseline_tmax is not None:
+                if baseline_tmax > tmax + tstep:
+                    err = ("Baseline interval (tmax = %s) is outside of epoch "
+                           "data (tmax = %s)" % (baseline_tmax, tmax))
+                    raise ValueError(err)
+        if tmin > tmax:
+            raise ValueError('tmin has to be less than or equal to tmax')
+
+        self.tmin = tmin
+        self.tmax = tmax
+        self.baseline = baseline
+        self.reject_tmin = reject_tmin
+        self.reject_tmax = reject_tmax
+        self.detrend = detrend
+        self._raw = raw
+        self.info = info
+        del info
+
+        if picks is None:
+            picks = list(range(len(self.info['ch_names'])))
+        else:
+            self.info = pick_info(self.info, picks)
+        self.picks = _check_type_picks(picks)
+        if len(picks) == 0:
+            raise ValueError("Picks cannot be empty.")
+
+        if data is None:
+            self.preload = False
+            self._data = None
+        else:
+            assert decim == 1
+            if data.ndim != 3 or data.shape[2] != \
+                    round((tmax - tmin) * self.info['sfreq']) + 1:
+                raise RuntimeError('bad data shape')
+            self.preload = True
+            self._data = data
+        self._offset = None
+
+        # Handle times
+        sfreq = float(self.info['sfreq'])
+        start_idx = int(round(self.tmin * sfreq))
+        self._raw_times = np.arange(start_idx,
+                                    int(round(self.tmax * sfreq)) + 1) / sfreq
+        self._decim = 1
+        # this method sets the self.times property
+        self.decimate(decim)
+
+        # setup epoch rejection
+        self.reject = None
+        self.flat = None
+        self._reject_setup(reject, flat)
+
+        # do the rest
+        valid_proj = [True, 'delayed', False]
+        if proj not in valid_proj:
+            raise ValueError('"proj" must be one of %s, not %s'
+                             % (valid_proj, proj))
+        if proj == 'delayed':
+            self._do_delayed_proj = True
+            logger.info('Entering delayed SSP mode.')
+        else:
+            self._do_delayed_proj = False
+
+        activate = False if self._do_delayed_proj else proj
+        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
+                                                activate=activate)
+
+        if preload_at_end:
+            assert self._data is None
+            assert self.preload is False
+            self.load_data()
+
+    def load_data(self):
+        """Load the data if not already preloaded
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The epochs object.
+
+        Notes
+        -----
+        This function operates in-place.
+
+        .. versionadded:: 0.10.0
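+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance created
+        with ``preload=False``:
+
+        >>> epochs.load_data()  # doctest: +SKIP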
+        """
+        if self.preload:
+            return
+        self._data = self._get_data()
+        self.preload = True
+        self._decim_slice = slice(None, None, None)
+        self._decim = 1
+        self._raw_times = self.times
+        assert self._data.shape[-1] == len(self.times)
+        return self
+
+    def decimate(self, decim, copy=False):
+        """Decimate the epochs
+
+        Parameters
+        ----------
+        decim : int
+            The amount to decimate data.
+        copy : bool
+            If True, operate on and return a copy of the Epochs object.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The decimated Epochs object.
+
+        Notes
+        -----
+        Decimation can be done multiple times. For example,
+        ``epochs.decimate(2).decimate(2)`` will be the same as
+        ``epochs.decimate(4)``.
+
+        .. versionadded:: 0.10.0
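+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance; with a
+        1000 Hz sampling rate this yields a 250 Hz rate:
+
+        >>> epochs.decimate(4)  # doctest: +SKIP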
+        """
+        if decim < 1 or decim != int(decim):
+            raise ValueError('decim must be an integer > 0')
+        decim = int(decim)
+        epochs = self.copy() if copy else self
+        del self
+
+        new_sfreq = epochs.info['sfreq'] / float(decim)
+        lowpass = epochs.info['lowpass']
+        if decim > 1 and lowpass is None:
+            warnings.warn('The measurement information indicates data is not '
+                          'low-pass filtered. The decim=%i parameter will '
+                          'result in a sampling frequency of %g Hz, which can '
+                          'cause aliasing artifacts.'
+                          % (decim, new_sfreq))
+        elif decim > 1 and new_sfreq < 2.5 * lowpass:
+            warnings.warn('The measurement information indicates a low-pass '
+                          'frequency of %g Hz. The decim=%i parameter will '
+                          'result in a sampling frequency of %g Hz, which can '
+                          'cause aliasing artifacts.'
+                          % (lowpass, decim, new_sfreq))  # > 50% nyquist limit
+
+        epochs._decim *= decim
+        start_idx = int(round(epochs._raw_times[0] * (epochs.info['sfreq'] *
+                                                      epochs._decim)))
+        i_start = start_idx % epochs._decim
+        decim_slice = slice(i_start, len(epochs._raw_times), epochs._decim)
+        epochs.info['sfreq'] = new_sfreq
+        if epochs.preload:
+            epochs._data = epochs._data[:, :, decim_slice].copy()
+            epochs._raw_times = epochs._raw_times[decim_slice].copy()
+            epochs._decim_slice = slice(None, None, None)
+            epochs._decim = 1
+            epochs.times = epochs._raw_times
+        else:
+            epochs._decim_slice = decim_slice
+            epochs.times = epochs._raw_times[epochs._decim_slice]
+        return epochs
+
+    @verbose
+    def apply_baseline(self, baseline, verbose=None):
+        """Baseline correct epochs
+
+        Parameters
+        ----------
+        baseline : tuple of length 2
+            The time interval to apply baseline correction. If (a, b), the
+            interval is between a (in seconds) and b (in seconds). If a is
+            None, the beginning of the data is used; if b is None, b is set
+            to the end of the interval. If baseline is (None, None), the
+            entire time interval is used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The baseline-corrected Epochs object.
+
+        Notes
+        -----
+        Baseline correction can be done multiple times.
+
+        .. versionadded:: 0.10.0
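+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing, preloaded Epochs
+        instance; correct using the interval from the start of the data
+        up to time zero:
+
+        >>> epochs.apply_baseline((None, 0))  # doctest: +SKIP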
+        """
+        if not isinstance(baseline, tuple) or len(baseline) != 2:
+            raise ValueError('`baseline=%s` is an invalid argument.'
+                             % str(baseline))
+
+        if not self.preload:
+            raise RuntimeError('Data must be preloaded to apply a baseline; '
+                               'use preload=True or epochs.load_data() first')
+        data = self._data
+        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                           ref_meg=True, eog=True, ecg=True,
+                           emg=True, exclude=[])
+        data[:, picks, :] = rescale(data[:, picks, :], self.times, baseline,
+                                    'mean', copy=False)
+        self.baseline = baseline
+        return self
+
+    def _reject_setup(self, reject, flat):
+        """Sets self._reject_time and self._channel_type_idx"""
+        idx = channel_indices_by_type(self.info)
+        for rej, kind in zip((reject, flat), ('reject', 'flat')):
+            if not isinstance(rej, (type(None), dict)):
+                raise TypeError('reject and flat must be dict or None, not %s'
+                                % type(rej))
+            if isinstance(rej, dict):
+                bads = set(rej.keys()) - set(idx.keys())
+                if len(bads) > 0:
+                    raise KeyError('Unknown channel types found in %s: %s'
+                                   % (kind, bads))
+
+        for key in idx.keys():
+            if (reject is not None and key in reject) \
+                    or (flat is not None and key in flat):
+                if len(idx[key]) == 0:
+                    raise ValueError("No %s channel found. Cannot reject based"
+                                     " on %s." % (key.upper(), key.upper()))
+            # now check to see if our rejection and flat are getting more
+            # restrictive
+            old_reject = self.reject if self.reject is not None else dict()
+            new_reject = reject if reject is not None else dict()
+            old_flat = self.flat if self.flat is not None else dict()
+            new_flat = flat if flat is not None else dict()
+            bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
+                       '{kind} values must be at least as stringent as '
+                       'previous ones')
+            for key in set(new_reject.keys()).union(old_reject.keys()):
+                old = old_reject.get(key, np.inf)
+                new = new_reject.get(key, np.inf)
+                if new > old:
+                    raise ValueError(bad_msg.format(kind='reject', key=key,
+                                                    new=new, old=old, op='>'))
+            for key in set(new_flat.keys()).union(old_flat.keys()):
+                old = old_flat.get(key, -np.inf)
+                new = new_flat.get(key, -np.inf)
+                if new < old:
+                    raise ValueError(bad_msg.format(kind='flat', key=key,
+                                                    new=new, old=old, op='<'))
+
+        # after validation, set parameters
+        self._bad_dropped = False
+        self._channel_type_idx = idx
+        self.reject = reject
+        self.flat = flat
+
+        if (self.reject_tmin is None) and (self.reject_tmax is None):
+            self._reject_time = None
+        else:
+            if self.reject_tmin is None:
+                reject_imin = None
+            else:
+                idxs = np.nonzero(self.times >= self.reject_tmin)[0]
+                reject_imin = idxs[0]
+            if self.reject_tmax is None:
+                reject_imax = None
+            else:
+                idxs = np.nonzero(self.times <= self.reject_tmax)[0]
+                reject_imax = idxs[-1]
+
+            self._reject_time = slice(reject_imin, reject_imax)
+
+    @verbose
+    def _is_good_epoch(self, data, verbose=None):
+        """Determine if epoch is good"""
+        if data is None:
+            return False, ['NO_DATA']
+        n_times = len(self.times)
+        if data.shape[1] < n_times:
+            # epoch is too short, i.e., at the end of the data
+            return False, ['TOO_SHORT']
+        if self.reject is None and self.flat is None:
+            return True, None
+        else:
+            if self._reject_time is not None:
+                data = data[:, self._reject_time]
+
+            return _is_good(data, self.ch_names, self._channel_type_idx,
+                            self.reject, self.flat, full_report=True,
+                            ignore_chs=self.info['bads'])
+
+    @verbose
+    def _detrend_offset_decim(self, epoch, verbose=None):
+        """Aux Function: detrend, baseline correct, offset, decim
+
+        Note: operates inplace
+        """
+        if epoch is None:
+            return None
+
+        # Detrend
+        if self.detrend is not None:
+            picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                               ref_meg=False, eog=False, ecg=False,
+                               emg=False, exclude=[])
+            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
+
+        # Baseline correct
+        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                           ref_meg=True, eog=True, ecg=True,
+                           emg=True, exclude=[])
+        epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
+                               'mean', copy=False, verbose=verbose)
+
+        # handle offset
+        if self._offset is not None:
+            epoch += self._offset
+
+        # Decimate if necessary (i.e., epoch not preloaded)
+        epoch = epoch[:, self._decim_slice]
+        return epoch
+
+    def iter_evoked(self):
+        """Iterate over Evoked objects with nave=1
+        """
+        self._current = 0
+
+        while True:
+            data, event_id = self.next(True)
+            tmin = self.times[0]
+            info = deepcopy(self.info)
+
+            yield EvokedArray(data, info, tmin, comment=str(event_id))
+
+    def subtract_evoked(self, evoked=None):
+        """Subtract an evoked response from each epoch
+
+        Can be used to exclude the evoked response when analyzing induced
+        activity, see e.g. [1].
+
+        References
+        ----------
+        [1] David et al. "Mechanisms of evoked and induced responses in
+        MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
+
+        Parameters
+        ----------
+        evoked : instance of Evoked | None
+            The evoked response to subtract. If None, the evoked response
+            is computed from Epochs itself.
+
+        Returns
+        -------
+        self : instance of Epochs
+            The modified instance (instance is also modified inplace).
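+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance;
+        subtract the average of the epochs themselves:
+
+        >>> epochs.subtract_evoked()  # doctest: +SKIP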
+        """
+        logger.info('Subtracting Evoked from Epochs')
+        if evoked is None:
+            picks = pick_types(self.info, meg=True, eeg=True,
+                               stim=False, eog=False, ecg=False,
+                               emg=False, exclude=[])
+            evoked = self.average(picks)
+
+        # find the indices of the channels to use
+        picks = pick_channels(evoked.ch_names, include=self.ch_names)
+
+        # make sure the omitted channels are not data channels
+        if len(picks) < len(self.ch_names):
+            sel_ch = [evoked.ch_names[ii] for ii in picks]
+            diff_ch = list(set(self.ch_names).difference(sel_ch))
+            diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
+            diff_types = [channel_type(self.info, idx) for idx in diff_idx]
+            bad_idx = [diff_types.index(t) for t in diff_types if t in
+                       ['grad', 'mag', 'eeg']]
+            if len(bad_idx) > 0:
+                bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
+                raise ValueError('The following data channels are missing '
+                                 'in the evoked response: %s' % bad_str)
+            logger.info('    The following channels are not included in the '
+                        'subtraction: %s' % ', '.join(diff_ch))
+
+        # make sure the times match
+        if (len(self.times) != len(evoked.times) or
+                np.max(np.abs(self.times - evoked.times)) >= 1e-7):
+            raise ValueError('Epochs and Evoked object do not contain '
+                             'the same time points.')
+
+        # handle SSPs
+        if not self.proj and evoked.proj:
+            warnings.warn('Evoked has SSP applied while Epochs has not.')
+        if self.proj and not evoked.proj:
+            evoked = evoked.copy().apply_proj()
+
+        # find the indices of the channels to use in Epochs
+        ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
+
+        # do the subtraction
+        if self.preload:
+            self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
+        else:
+            if self._offset is None:
+                self._offset = np.zeros((len(self.ch_names), len(self.times)),
+                                        dtype=np.float)
+            self._offset[ep_picks] -= evoked.data[picks]
+        logger.info('[done]')
+
+        return self
+
+    def __next__(self, *args, **kwargs):
+        """Wrapper for Py3k"""
+        return self.next(*args, **kwargs)
+
+    def __hash__(self):
+        if not self.preload:
+            raise RuntimeError('Cannot hash epochs unless preloaded')
+        return object_hash(dict(info=self.info, data=self._data))
+
+    def average(self, picks=None):
+        """Compute average of epochs
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            If None, only MEG and EEG channels are kept;
+            otherwise the channel indices in picks are kept.
+
+        Returns
+        -------
+        evoked : instance of Evoked
+            The averaged epochs.
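+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance:
+
+        >>> evoked = epochs.average()  # doctest: +SKIP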
+        """
+
+        return self._compute_mean_or_stderr(picks, 'ave')
+
+    def standard_error(self, picks=None):
+        """Compute standard error over epochs
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            If None, only MEG and EEG channels are kept;
+            otherwise the channel indices in picks are kept.
+
+        Returns
+        -------
+        evoked : instance of Evoked
+            The standard error over epochs.
+        """
+        return self._compute_mean_or_stderr(picks, 'stderr')
+
+    def _compute_mean_or_stderr(self, picks, mode='ave'):
+        """Compute the mean or std over epochs and return Evoked"""
+
+        _do_std = True if mode == 'stderr' else False
+
+        n_channels = len(self.ch_names)
+        n_times = len(self.times)
+
+        if self.preload:
+            n_events = len(self.events)
+            fun = np.std if _do_std else np.mean
+            data = fun(self._data, axis=0)
+            assert len(self.events) == len(self._data)
+        else:
+            data = np.zeros((n_channels, n_times))
+            n_events = 0
+            for e in self:
+                data += e
+                n_events += 1
+
+            if n_events > 0:
+                data /= n_events
+            else:
+                data.fill(np.nan)
+
+            # convert to stderr if requested, could do in one pass but do in
+            # two (slower) in case there are large numbers
+            if _do_std:
+                data_mean = data.copy()
+                data.fill(0.)
+                for e in self:
+                    data += (e - data_mean) ** 2
+                data = np.sqrt(data / n_events)
+
+        if not _do_std:
+            _aspect_kind = FIFF.FIFFV_ASPECT_AVERAGE
+        else:
+            _aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
+            data /= np.sqrt(n_events)
+        kind = _aspect_rev.get(str(_aspect_kind), 'Unknown')
+
+        info = deepcopy(self.info)
+        evoked = EvokedArray(data, info, tmin=self.times[0],
+                             comment=self.name, nave=n_events, kind=kind,
+                             verbose=self.verbose)
+        # XXX: above constructor doesn't recreate the times object precisely
+        evoked.times = self.times.copy()
+        evoked._aspect_kind = _aspect_kind
+
+        # pick channels
+        if picks is None:
+            picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=True,
+                               stim=False, eog=False, ecg=False,
+                               emg=False, exclude=[])
+
+        ch_names = [evoked.ch_names[p] for p in picks]
+        evoked.pick_channels(ch_names)
+
+        if len(evoked.info['ch_names']) == 0:
+            raise ValueError('No data channel found when averaging.')
+
+        if evoked.nave < 1:
+            warnings.warn('evoked object is empty (based on less '
+                          'than 1 epoch)', RuntimeWarning)
+
+        return evoked
+
+    @property
+    def ch_names(self):
+        """Channel names"""
+        return self.info['ch_names']
+
+    def plot(self, picks=None, scalings=None, show=True,
+             block=False, n_epochs=20,
+             n_channels=20, title=None):
+        """Visualize epochs.
+
+        Bad epochs can be marked with a left click on top of the epoch. Bad
+        channels can be selected by clicking the channel name on the left side
+        of the main axes. Calling this function drops all the selected bad
+        epochs as well as bad epochs marked beforehand with rejection
+        parameters.
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            Channels to be included. If None only good data channels are used.
+            Defaults to None
+        scalings : dict | None
+            Scale factors for the traces. If None, defaults to
+            ``dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+            emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)``.
+        show : bool
+            Whether to show the figure or not.
+        block : bool
+            Whether to halt program execution until the figure is closed.
+            Useful for rejecting bad trials on the fly by clicking on a
+            sub plot.
+        n_epochs : int
+            The number of epochs per view.
+        n_channels : int
+            The number of channels per view. Defaults to 20.
+        title : str | None
+            The title of the window. If None, the epochs name will be
+            displayed. Defaults to None.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            The figure.
+
+        Notes
+        -----
+        The arrow keys (up/down/left/right) can be used to navigate between
+        channels and epochs, and the scaling can be adjusted with the - and +
+        (or =) keys, but this depends on the backend matplotlib is configured
+        to use (e.g., mpl.use(``TkAgg``) should work). Full screen mode can
+        be toggled with the f11 key. The number of epochs and channels per
+        view can be adjusted with the home/end and page down/page up keys.
+        The butterfly plot can be toggled with the ``b`` key. A right mouse
+        click adds a vertical line to the plot.
+
+        .. versionadded:: 0.10.0
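+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance; block
+        execution so epochs marked bad in the browser are dropped when the
+        window is closed:
+
+        >>> epochs.plot(n_epochs=10, block=True)  # doctest: +SKIP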
+        """
+        return plot_epochs(self, picks=picks, scalings=scalings,
+                           n_epochs=n_epochs, n_channels=n_channels,
+                           title=title, show=show, block=block)
+
+    def plot_psd(self, fmin=0, fmax=np.inf, proj=False, n_fft=256,
+                 picks=None, ax=None, color='black', area_mode='std',
+                 area_alpha=0.33, n_overlap=0, dB=True,
+                 n_jobs=1, verbose=None, show=True):
+        """Plot the power spectral density across epochs
+
+        Parameters
+        ----------
+        fmin : float
+            Start frequency to consider.
+        fmax : float
+            End frequency to consider.
+        proj : bool
+            Apply projection.
+        n_fft : int
+            Number of points to use in Welch FFT calculations.
+        picks : array-like of int | None
+            List of channels to use.
+        ax : instance of matplotlib Axes | None
+            Axes to plot into. If None, axes will be created.
+        color : str | tuple
+            A matplotlib-compatible color to use.
+        area_mode : str | None
+            Mode for plotting area. If 'std', the mean +/- 1 STD (across
+            channels) will be plotted. If 'range', the min and max (across
+            channels) will be plotted. Bad channels will be excluded from
+            these calculations. If None, no area will be plotted.
+        area_alpha : float
+            Alpha for the area.
+        n_overlap : int
+            The number of points of overlap between blocks.
+        dB : bool
+            If True, transform data to decibels.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure distributing one image per channel across sensor topography.
+        """
+        return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
+                               n_fft=n_fft, picks=picks, ax=ax,
+                               color=color, area_mode=area_mode,
+                               area_alpha=area_alpha,
+                               n_overlap=n_overlap, dB=dB, n_jobs=n_jobs,
+                               verbose=None, show=show)
+
+    def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
+                         n_fft=256, ch_type=None,
+                         n_overlap=0, layout=None, cmap='RdBu_r',
+                         agg_fun=None, dB=True, n_jobs=1, normalize=False,
+                         cbar_fmt='%0.3f', outlines='head', show=True,
+                         verbose=None):
+        """Plot the topomap of the power spectral density across epochs
+
+        Parameters
+        ----------
+        bands : list of tuple | None
+            The lower and upper frequency and the name for that band. If None
+            (default), it expands to::
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+        vmin : float | callable | None
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable | None
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If callable, the
+            output equals vmax(data). Defaults to None.
+        proj : bool
+            Apply projection.
+        n_fft : int
+            Number of points to use in Welch FFT calculations.
+        ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in
+            pairs and the RMS for each pair is plotted. If None, defaults to
+            'mag' if MEG data are present and to 'eeg' if only EEG data are
+            present.
+        n_overlap : int
+            The number of points of overlap between blocks.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout
+            file is inferred from the data; if no appropriate layout file was
+            found, the layout is automatically generated from the sensor
+            locations.
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        agg_fun : callable
+            The function used to aggregate over frequencies.
+            Defaults to np.sum if normalize is True, else np.mean.
+        dB : bool
+            If True, transform data to decibels (with ``10 * np.log10(data)``)
+            following the application of `agg_fun`. Only valid if normalize
+            is False.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        normalize : bool
+            If True, each band will be divided by the total power. Defaults to
+            False.
+        cbar_fmt : str
+            The colorbar format. Defaults to '%0.3f'.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        show : bool
+            Show figure if True.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure distributing one image per channel across sensor topography.
+        """
+        return plot_epochs_psd_topomap(
+            self, bands=bands, vmin=vmin, vmax=vmax, proj=proj, n_fft=n_fft,
+            ch_type=ch_type, n_overlap=n_overlap, layout=layout, cmap=cmap,
+            agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
+            cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=None)
+
+    def drop_bad_epochs(self, reject='existing', flat='existing'):
+        """Drop bad epochs without retaining the epochs data.
+
+        Should be used before slicing operations.
+
+        .. Warning:: Operation is slow since all epochs have to be read from
+            disk. To avoid reading epochs from disk multiple times, initialize
+            Epochs object with preload=True.
+
+        Parameters
+        ----------
+        reject : dict | str | None
+            Rejection parameters based on peak-to-peak amplitude.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+            If reject is None then no rejection is done. If 'existing',
+            then the rejection parameters set at instantiation are used.
+        flat : dict | str | None
+            Rejection parameters based on flatness of signal.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+            are floats that set the minimum acceptable peak-to-peak amplitude.
+            If flat is None then no rejection is done. If 'existing',
+            then the flat parameters set at instantiation are used.
+
+        Notes
+        -----
+        Dropping bad epochs can be done multiple times with different
+        ``reject`` and ``flat`` parameters. However, once an epoch is
+        dropped, it is dropped forever, so if you may want to apply more
+        lenient thresholds later, operate on a copy made with
+        ``epochs.copy()``.
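+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance; the
+        gradiometer threshold is an arbitrary example value:
+
+        >>> epochs.drop_bad_epochs(reject=dict(grad=4000e-13))  # doctest: +SKIP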
+        """
+        if reject == 'existing':
+            if flat == 'existing' and self._bad_dropped:
+                return
+            reject = self.reject
+        if flat == 'existing':
+            flat = self.flat
+        if any(isinstance(rej, string_types) and rej != 'existing' for
+               rej in (reject, flat)):
+            raise ValueError('reject and flat, if strings, must be "existing"')
+        self._reject_setup(reject, flat)
+        self._get_data(out=False)
+
+    def drop_log_stats(self, ignore=['IGNORED']):
+        """Compute the channel stats based on a drop_log from Epochs.
+
+        Parameters
+        ----------
+        ignore : list
+            The drop reasons to ignore.
+
+        Returns
+        -------
+        perc : float
+            Total percentage of epochs dropped.
+
+        See Also
+        --------
+        plot_drop_log
+        """
+        return _drop_log_stats(self.drop_log, ignore)
+
+    def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
+                      color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
+                      show=True):
+        """Show the channel stats based on a drop_log from Epochs
+
+        Parameters
+        ----------
+        threshold : float
+            The percentage threshold to use to decide whether or not to
+            plot. Default is zero (always plot).
+        n_max_plot : int
+            Maximum number of channels to show stats for.
+        subject : str
+            The subject name to use in the title of the plot.
+        color : tuple | str
+            Color to use for the bars.
+        width : float
+            Width of the bars.
+        ignore : list
+            The drop reasons to ignore.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        perc : float
+            Total percentage of epochs dropped.
+        fig : Instance of matplotlib.figure.Figure
+            The figure.
+        """
+        if not self._bad_dropped:
+            raise ValueError("You cannot use plot_drop_log since bad "
+                             "epochs have not yet been dropped. "
+                             "Use epochs.drop_bad_epochs().")
+
+        from .viz import plot_drop_log
+        return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
+                             color=color, width=width, ignore=ignore,
+                             show=show)
+
+    @verbose
+    def drop_epochs(self, indices, reason='USER', verbose=None):
+        """Drop epochs based on indices or boolean mask
+
+        Note that the indices refer to the current set of undropped epochs
+        rather than the complete set of dropped and undropped epochs.
+        They are therefore not necessarily consistent with any external indices
+        (e.g., behavioral logs). To drop epochs based on external criteria,
+        do not use the preload=True flag when constructing an Epochs object,
+        and call this method before calling the drop_bad_epochs method.
+
+        Parameters
+        ----------
+        indices : array of ints or bools
+            Set epochs to remove by specifying indices to remove or a boolean
+            mask to apply (where True values get removed). Events are
+            correspondingly modified.
+        reason : str
+            Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
+            Default: 'USER'.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
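+
+        Examples
+        --------
+        A sketch, assuming ``epochs`` is an existing Epochs instance:
+
+        >>> epochs.drop_epochs([2, 4], reason='blink')  # doctest: +SKIP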
+        """
+        indices = np.atleast_1d(indices)
+
+        if indices.ndim > 1:
+            raise ValueError("indices must be a scalar or a 1-d array")
+
+        if indices.dtype == bool:
+            indices = np.where(indices)[0]
+
+        out_of_bounds = (indices < 0) | (indices >= len(self.events))
+        if out_of_bounds.any():
+            first = indices[out_of_bounds][0]
+            raise IndexError("Epoch index %d is out of bounds" % first)
+
+        for ii in indices:
+            self.drop_log[self.selection[ii]].append(reason)
+
+        self.selection = np.delete(self.selection, indices)
+        self.events = np.delete(self.events, indices, axis=0)
+        if self.preload:
+            self._data = np.delete(self._data, indices, axis=0)
+
+        count = len(indices)
+        logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
+
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Method to get a given epoch from disk"""
+        raise NotImplementedError
+
+    def _project_epoch(self, epoch):
+        """Helper to process a raw epoch based on the delayed param"""
+        # apply the projector if projection was requested, directly or delayed
+        if epoch is None:  # can happen if t < 0
+            return None
+        proj = self._do_delayed_proj or self.proj
+        if self._projector is not None and proj is True:
+            epoch = np.dot(self._projector, epoch)
+        return epoch
+
+    @verbose
+    def _get_data(self, out=True, verbose=None):
+        """Load all data, dropping bad epochs along the way
+
+        Parameters
+        ----------
+        out : bool
+            Return the data. Setting this to False is used to reject bad
+            epochs without caching all the data, which saves memory.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        """
+        n_events = len(self.events)
+        # in case there are no good events
+        if self.preload:
+            # we will store our result in our existing array
+            data = self._data
+        else:
+            # we start out with an empty array, allocate only if necessary
+            data = np.empty((0, len(self.info['ch_names']), len(self.times)))
+        if self._bad_dropped:
+            if not out:
+                return
+            if self.preload:
+                return data
+
+            # we need to load from disk, drop, and return data
+            for idx in range(n_events):
+                # faster to pre-allocate memory here
+                epoch_noproj = self._get_epoch_from_raw(idx)
+                epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                if self._do_delayed_proj:
+                    epoch_out = epoch_noproj
+                else:
+                    epoch_out = self._project_epoch(epoch_noproj)
+                if idx == 0:
+                    data = np.empty((n_events, len(self.ch_names),
+                                     len(self.times)), dtype=epoch_out.dtype)
+                data[idx] = epoch_out
+        else:
+            # bads need to be dropped, this might occur after a preload
+            # e.g., when calling drop_bad_epochs w/new params
+            good_idx = []
+            n_out = 0
+            assert n_events == len(self.selection)
+            for idx, sel in enumerate(self.selection):
+                if self.preload:  # from memory
+                    if self._do_delayed_proj:
+                        epoch_noproj = self._data[idx]
+                        epoch = self._project_epoch(epoch_noproj)
+                    else:
+                        epoch_noproj = None
+                        epoch = self._data[idx]
+                else:  # from disk
+                    epoch_noproj = self._get_epoch_from_raw(idx)
+                    epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                    epoch = self._project_epoch(epoch_noproj)
+                epoch_out = epoch_noproj if self._do_delayed_proj else epoch
+                is_good, offenders = self._is_good_epoch(epoch)
+                if not is_good:
+                    self.drop_log[sel] += offenders
+                    continue
+                good_idx.append(idx)
+
+                # store the epoch if there is a reason to (output or update)
+                if out or self.preload:
+                    # faster to pre-allocate, then trim as necessary
+                    if n_out == 0 and not self.preload:
+                        data = np.empty((n_events, epoch_out.shape[0],
+                                         epoch_out.shape[1]),
+                                        dtype=epoch_out.dtype, order='C')
+                    data[n_out] = epoch_out
+                    n_out += 1
+
+            self._bad_dropped = True
+            logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
+
+            # Now update our properties
+            if len(good_idx) == 0:  # silly fix for old numpy index error
+                self.selection = np.array([], int)
+                self.events = np.empty((0, 3))
+            else:
+                self.selection = self.selection[good_idx]
+                self.events = np.atleast_2d(self.events[good_idx])
+
+            # adjust the data size if there is a reason to (output or update)
+            if out or self.preload:
+                data.resize((n_out,) + data.shape[1:], refcheck=False)
+
+        return data if out else None
+
+    def get_data(self):
+        """Get all epochs as a 3D array
+
+        Returns
+        -------
+        data : array of shape (n_epochs, n_channels, n_times)
+            A copy of the epochs data.
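+
+        Example (a sketch)::
+
+            data = epochs.get_data()
+            n_epochs, n_channels, n_times = data.shape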
+        """
+        return self._get_data()
+
+    def __len__(self):
+        """Number of epochs.
+        """
+        if not self._bad_dropped:
+            raise RuntimeError('Since bad epochs have not been dropped, the '
+                               'length of the Epochs is not known. Load the '
+                               'Epochs with preload=True, or call '
+                               'Epochs.drop_bad_epochs(). To find the number '
+                               'of events in the Epochs, use '
+                               'len(Epochs.events).')
+        return len(self.events)
+
+    def __iter__(self):
+        """To make iteration over epochs easy.
+        """
+        self._current = 0
+        return self
+
+    def next(self, return_event_id=False):
+        """To make iteration over epochs easy.
+
+        Parameters
+        ----------
+        return_event_id : bool
+            If True, return both an epoch and an event_id.
+
+        Returns
+        -------
+        epoch : array of shape (n_channels, n_times)
+            The epoch data.
+        event_id : int
+            The event id. Only returned if ``return_event_id`` is ``True``.
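+
+        Example (a sketch; iteration over the instance calls this method)::
+
+            for epoch in epochs:
+                print(epoch.shape)  # (n_channels, n_times)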
+        """
+        if self.preload:
+            if self._current >= len(self._data):
+                raise StopIteration
+            epoch = self._data[self._current]
+            self._current += 1
+        else:
+            is_good = False
+            while not is_good:
+                if self._current >= len(self.events):
+                    raise StopIteration
+                epoch_noproj = self._get_epoch_from_raw(self._current)
+                epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                epoch = self._project_epoch(epoch_noproj)
+                self._current += 1
+                is_good, _ = self._is_good_epoch(epoch)
+            # in delayed-SSP mode, return the unprojected ('virgin') data
+            # after the rejection decision
+            if self._do_delayed_proj:
+                epoch = epoch_noproj
+
+        if not return_event_id:
+            return epoch
+        else:
+            return epoch, self.events[self._current - 1][-1]
+
+    def __repr__(self):
+        """ Build string representation
+        """
+        s = 'n_events : %s ' % len(self.events)
+        s += '(all good)' if self._bad_dropped else '(good & bad)'
+        s += ', tmin : %s (s)' % self.tmin
+        s += ', tmax : %s (s)' % self.tmax
+        s += ', baseline : %s' % str(self.baseline)
+        if len(self.event_id) > 1:
+            counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
+                      for k, v in sorted(self.event_id.items())]
+            s += ',\n %s' % ', '.join(counts)
+        class_name = self.__class__.__name__
+        if class_name == '_BaseEpochs':
+            class_name = 'Epochs'
+        return '<%s  |  %s>' % (class_name, s)
+
+    def _key_match(self, key):
+        """Helper function for event dict use"""
+        if key not in self.event_id:
+            raise KeyError('Event "%s" is not in Epochs.' % key)
+        return self.events[:, 2] == self.event_id[key]
+
+    def __getitem__(self, key):
+        """Return an Epochs object with a subset of epochs
+        """
+        data = self._data
+        del self._data
+        epochs = self.copy()
+        self._data, epochs._data = data, data
+        del self
+
+        if isinstance(key, string_types):
+            key = [key]
+
+        if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
+            if any('/' in k_i for k_i in epochs.event_id.keys()):
+                if any(k_e not in epochs.event_id for k_e in key):
+                    # Select a given key if the requested set of
+                    # '/'-separated types are a subset of the types in that key
+                    key = [k for k in epochs.event_id.keys()
+                           if all(set(k_i.split('/')).issubset(k.split('/'))
+                                  for k_i in key)]
+                    if len(key) == 0:
+                        raise KeyError('Attempting selection of events via '
+                                       'multiple/partial matching, but no '
+                                       'event matches all criteria.')
+            select = np.any(np.atleast_2d([epochs._key_match(k)
+                                           for k in key]), axis=0)
+            epochs.name = '+'.join(key)
+        else:
+            select = key if isinstance(key, slice) else np.atleast_1d(key)
+
+        key_selection = epochs.selection[select]
+        for k in np.setdiff1d(epochs.selection, key_selection):
+            epochs.drop_log[k] = ['IGNORED']
+        epochs.selection = key_selection
+        epochs.events = np.atleast_2d(epochs.events[select])
+        if epochs.preload:
+            # ensure that each Epochs instance owns its own data so we can
+            # resize later if necessary
+            epochs._data = np.require(epochs._data[select], requirements=['O'])
+        # update event id to reflect new content of epochs
+        epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
+                               if v in epochs.events[:, 2])
+        return epochs
+
+    def crop(self, tmin=None, tmax=None, copy=False):
+        """Crops a time interval from epochs object.
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        copy : bool
+            If False, epochs is cropped in place.
+
+        Returns
+        -------
+        epochs : Epochs instance
+            The cropped epochs.
+
+        Notes
+        -----
+        Unlike Python slices, MNE time intervals include both their end points;
+        crop(tmin, tmax) returns the interval tmin <= t <= tmax.
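+
+        Example (a sketch; times are illustrative)::
+
+            epochs.crop(tmin=0., tmax=0.5)  # keep 0 <= t <= 0.5 s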
+        """
+        # XXX this could be made to work on non-preloaded data...
+        if not self.preload:
+            raise RuntimeError('Modifying data of epochs is only supported '
+                               'when preloading is used. Use preload=True '
+                               'in the constructor.')
+
+        if tmin is None:
+            tmin = self.tmin
+        elif tmin < self.tmin:
+            warnings.warn("tmin is not in epochs' time interval."
+                          "tmin is set to epochs.tmin")
+            tmin = self.tmin
+
+        if tmax is None:
+            tmax = self.tmax
+        elif tmax > self.tmax:
+            warnings.warn("tmax is not in epochs' time interval."
+                          "tmax is set to epochs.tmax")
+            tmax = self.tmax
+
+        tmask = _time_mask(self.times, tmin, tmax)
+        tidx = np.where(tmask)[0]
+
+        this_epochs = self if not copy else self.copy()
+        this_epochs.tmin = this_epochs.times[tidx[0]]
+        this_epochs.tmax = this_epochs.times[tidx[-1]]
+        this_epochs.times = this_epochs.times[tmask]
+        this_epochs._raw_times = this_epochs._raw_times[tmask]
+        this_epochs._data = this_epochs._data[:, :, tmask]
+        return this_epochs
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
+                 copy=False, verbose=None):
+        """Resample preloaded data
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use.
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        copy : bool
+            Whether to operate on a copy of the data (True) or modify data
+            in-place (False). Defaults to False.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The resampled epochs object.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
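+
+        Example (a sketch; the target rate is illustrative)::
+
+            epochs.resample(100., npad=0)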
+        """
+        # XXX this could operate on non-preloaded data, too
+        if not self.preload:
+            raise RuntimeError('Can only resample preloaded data')
+
+        inst = self.copy() if copy else self
+
+        o_sfreq = inst.info['sfreq']
+        inst._data = resample(inst._data, sfreq, o_sfreq, npad,
+                              n_jobs=n_jobs)
+        # adjust indirectly affected variables
+        inst.info['sfreq'] = sfreq
+        inst.times = (np.arange(inst._data.shape[2], dtype=np.float) /
+                      sfreq + inst.times[0])
+
+        return inst
+
+    def copy(self):
+        """Return copy of Epochs instance"""
+        raw = self._raw
+        del self._raw
+        new = deepcopy(self)
+        self._raw = raw
+        new._raw = raw
+        return new
+
+    def save(self, fname, split_size='2GB'):
+        """Save epochs in a fif file
+
+        Parameters
+        ----------
+        fname : str
+            The name of the file, which should end with -epo.fif or
+            -epo.fif.gz.
+        split_size : string | int
+            Large raw files are automatically split into multiple pieces. This
+            parameter specifies the maximum size of each piece. If the
+            parameter is an integer, it specifies the size in bytes. It is
+            also possible to pass a human-readable string, e.g., 100MB.
+            Note: Due to FIFF file limitations, the maximum split size is 2GB.
+
+            .. versionadded:: 0.10.0
+
+        Notes
+        -----
+        Bad epochs will be dropped before saving the epochs to disk.
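+
+        Example (a sketch; the file name is illustrative)::
+
+            epochs.save('sample-epo.fif')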
+        """
+        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+        split_size = _get_split_size(split_size)
+
+        # to know the length accurately. The get_data() call would drop
+        # bad epochs anyway
+        self.drop_bad_epochs()
+        total_size = self[0].get_data().nbytes * len(self)
+        n_parts = int(np.ceil(total_size / float(split_size)))
+        epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
+
+        for part_idx, epoch_idx in enumerate(epoch_idxs):
+            this_epochs = self[epoch_idx] if n_parts > 1 else self
+            # avoid missing event_ids in splits
+            this_epochs.event_id = self.event_id
+            _save_split(this_epochs, fname, part_idx, n_parts)
+
+    def equalize_event_counts(self, event_ids, method='mintime', copy=True):
+        """Equalize the number of trials in each condition
+
+        It tries to make the remaining epochs occur as closely as possible in
+        time. This method is based on the idea that if there happened to be
+        some time-varying (like on the scale of minutes) noise characteristics
+        during a recording, they could be compensated for (to some extent) in
+        the equalization process. This method thus seeks to reduce any of
+        those effects by minimizing the differences in the times of the events
+        in the two sets of epochs. For example, if one had event times
+        [1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
+        it would remove events at times [1, 2] in the first epochs and not
+        [120, 121].
+
+        Parameters
+        ----------
+        event_ids : list
+            The event types to equalize. Each entry in the list can either be
+            a str (single event) or a list of str. In the case where one of
+            the entries is a list of str, event_ids in that list will be
+            grouped together before equalizing trial counts across conditions.
+            In the case where partial matching is used (using '/' in
+            `event_ids`), `event_ids` will be matched according to the
+            provided tags, that is, processing works as if the event_ids
+            matched by the provided tags had been supplied instead.
+            The event_ids must identify nonoverlapping subsets of the epochs.
+        method : str
+            If 'truncate', events will be truncated from the end of each event
+            list. If 'mintime', timing differences between each event list
+            will be minimized.
+        copy : bool
+            If True, a copy of epochs will be returned. Otherwise, the
+            function will operate in-place.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The modified Epochs instance.
+        indices : array of int
+            Indices from the original events list that were dropped.
+
+        Notes
+        -----
+        For example (if epochs.event_id was {'Left': 1, 'Right': 2,
+        'Nonspatial': 3})::
+
+            epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
+
+        would equalize the number of trials in the 'Nonspatial' condition with
+        the total number of trials in the 'Left' and 'Right' conditions.
+
+        If multiple indices are provided (e.g. 'Left' and 'Right' in the
+        example above), it is not guaranteed that after equalization, the
+        conditions will contribute evenly. E.g., it is possible to end up
+        with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
+        """
+        if copy is True:
+            epochs = self.copy()
+        else:
+            epochs = self
+        if len(event_ids) == 0:
+            raise ValueError('event_ids must have at least one element')
+        if not epochs._bad_dropped:
+            epochs.drop_bad_epochs()
+        # figure out how to equalize
+        eq_inds = list()
+
+        # deal with hierarchical tags
+        ids = epochs.event_id
+        tagging = False
+        if "/" in "".join(ids):
+            # make string inputs a list of length 1
+            event_ids = [[x] if isinstance(x, string_types) else x
+                         for x in event_ids]
+            for ids_ in event_ids:  # check if tagging is attempted
+                if any([id_ not in ids for id_ in ids_]):
+                    tagging = True
+            # 1. treat everything that's not in event_id as a tag
+            # 2a. for tags, find all the event_ids matched by the tags
+            # 2b. for non-tag ids, just pass them directly
+            # 3. do this for every input
+            event_ids = [[k for k in ids if all((tag in k.split("/")
+                         for tag in id_))]  # find ids matching all tags
+                         if all(id__ not in ids for id__ in id_)
+                         else id_  # straight pass for non-tag inputs
+                         for id_ in event_ids]
+            for id_ in event_ids:
+                if len(set([sub_id in ids for sub_id in id_])) != 1:
+                    err = ("Don't mix hierarchical and regular event_ids"
+                           " like in \'%s\'." % ", ".join(id_))
+                    raise ValueError(err)
+
+            # raise for non-orthogonal tags
+            if tagging is True:
+                events_ = [set(epochs[x].events[:, 0]) for x in event_ids]
+                doubles = events_[0].intersection(events_[1])
+                if len(doubles):
+                    raise ValueError("The two sets of epochs are "
+                                     "overlapping. Provide an "
+                                     "orthogonal selection.")
+
+        for eq in event_ids:
+            eq = np.atleast_1d(eq)
+            # eq is now a list of types
+            key_match = np.zeros(epochs.events.shape[0])
+            for key in eq:
+                key_match = np.logical_or(key_match, epochs._key_match(key))
+            eq_inds.append(np.where(key_match)[0])
+
+        event_times = [epochs.events[e, 0] for e in eq_inds]
+        indices = _get_drop_indices(event_times, method)
+        # need to re-index indices
+        indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
+        epochs.drop_epochs(indices, reason='EQUALIZED_COUNT')
+        # actually remove the indices
+        return epochs, indices
+
+
+class Epochs(_BaseEpochs):
+    """Epochs extracted from a Raw instance
+
+    Parameters
+    ----------
+    raw : Raw object
+        An instance of Raw.
+    events : array of int, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a dict is
+        created with string integer names corresponding to the event id
+        integers.
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels are used).
+    name : string
+        Comment that describes the Epochs data created.
+    preload : boolean
+        Load all epochs from disk when creating the object
+        or wait before accessing each epoch (more memory
+        efficient but can be slower).
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # V (EEG channels)
+                          eog=250e-6 # V (EOG channels)
+                          )
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied which is the
+        recommended value if SSPs are not used for cleaning the data.
+    decim : int
+        Factor by which to downsample the data from the raw file upon import.
+        Warning: This simply selects every nth sample; the data are not
+        filtered here. If the data are not properly filtered, aliasing
+        artifacts may occur.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    detrend : int | None
+        If 0 or 1, the data channels (MEG and EEG) will be detrended when
+        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
+        is no detrending. Note that detrending is performed before baseline
+        correction. If no DC offset is preferred (zeroth order detrending),
+        either turn off baseline correction, as this may introduce a DC
+        shift, or set baseline correction to use the entire time interval
+        (will yield equivalent results but be slower).
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    on_missing : str
+        What to do if one or several event ids are not found in the recording.
+        Valid keys are 'error' | 'warning' | 'ignore'
+        Default is 'error'. If on_missing is 'warning' it will proceed but
+        warn, if 'ignore' it will proceed silently. Note that if none of the
+        event ids are found in the data, an error will be generated
+        automatically irrespective of this parameter.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Attributes
+    ----------
+    info: dict
+        Measurement info.
+    event_id : dict
+        Names of conditions corresponding to event_ids.
+    ch_names : list of string
+        List of channel names.
+    selection : array
+        List of indices of selected events (not dropped or ignored etc.). For
+        example, if the original event array had 4 events and the second event
+        has been dropped, this attribute would be np.array([0, 2, 3]).
+    preload : bool
+        Indicates whether epochs are in memory.
+    drop_log : list of lists
+        A list of the same length as the event array used to initialize the
+        Epochs object. If the i-th original event is still part of the
+        selection, drop_log[i] will be an empty list; otherwise it will be
+        a list of the reasons the event is no longer in the selection, e.g.:
+
+        'IGNORED' if it isn't part of the current subset defined by the user;
+        'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
+        names of channels that exceeded the amplitude threshold;
+        'EQUALIZED_COUNT' (see equalize_event_counts);
+        or 'USER' for user-defined reasons (see drop_epochs).
+    verbose : bool, str, int, or None
+        See above.
+
+    Notes
+    -----
+    When accessing data, Epochs are detrended, baseline-corrected, and
+    decimated, then projectors are (optionally) applied.
+
+    For indexing and slicing:
+
+    epochs[idx] : Epochs
+        Return Epochs object with a subset of epochs (supports single
+        index and python-style slicing)
+
+    For subset selection using categorical labels:
+
+    epochs['name'] : Epochs
+        Return Epochs object with a subset of epochs corresponding to an
+        experimental condition as specified by 'name'.
+
+        If conditions are tagged by names separated by '/' (e.g. 'audio/left',
+        'audio/right'), and 'name' is not in itself an event key, this selects
+        every event whose condition contains the 'name' tag (e.g., 'left'
+        matches 'audio/left' and 'visual/left'; but not 'audio_left'). Note
+        that tags like 'auditory/left' and 'left/auditory' will be treated the
+        same way when accessed using tags.
+
+    epochs[['name_1', 'name_2', ... ]] : Epochs
+        Return Epochs object with a subset of epochs corresponding to multiple
+        experimental conditions as specified by 'name_1', 'name_2', ... .
+
+        If conditions are separated by '/', selects every item containing every
+        list tag (e.g. ['audio', 'left'] selects 'audio/left' and
+        'audio/center/left', but not 'audio/right').
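+
+    Example (a minimal sketch; assumes ``raw`` and ``events`` are already
+    defined, and the event codes are illustrative)::
+
+        epochs = Epochs(raw, events, event_id=dict(auditory=1, visual=3),
+                        tmin=-0.2, tmax=0.5, baseline=(None, 0), preload=True)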
+
+    See Also
+    --------
+    mne.epochs.combine_event_ids
+    mne.Epochs.equalize_event_counts
+    """
+    @verbose
+    def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                 picks=None, name='Unknown', preload=False, reject=None,
+                 flat=None, proj=True, decim=1, reject_tmin=None,
+                 reject_tmax=None, detrend=None, add_eeg_ref=True,
+                 on_missing='error', verbose=None):
+        if not isinstance(raw, _BaseRaw):
+            raise ValueError('The first argument to `Epochs` must be an '
+                             'instance of `mne.io.Raw`')
+        info = deepcopy(raw.info)
+
+        # proj is on when applied in Raw
+        proj = proj or raw.proj
+
+        # call _BaseEpochs constructor
+        super(Epochs, self).__init__(info, None, events, event_id, tmin, tmax,
+                                     baseline=baseline, raw=raw, picks=picks,
+                                     name=name, reject=reject, flat=flat,
+                                     decim=decim, reject_tmin=reject_tmin,
+                                     reject_tmax=reject_tmax, detrend=detrend,
+                                     add_eeg_ref=add_eeg_ref, proj=proj,
+                                     on_missing=on_missing,
+                                     preload_at_end=preload, verbose=verbose)
+
+    @verbose
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Load one epoch from disk"""
+        if self._raw is None:
+            # This should never happen, as raw=None only if preload=True
+            raise ValueError('An error has occurred, no valid raw file found.'
+                             ' Please report this to the mne-python '
+                             'developers.')
+        sfreq = self._raw.info['sfreq']
+        event_samp = self.events[idx, 0]
+        # Read a data segment
+        first_samp = self._raw.first_samp
+        start = int(round(event_samp + self.tmin * sfreq)) - first_samp
+        stop = start + len(self._raw_times)
+        return None if start < 0 else self._raw[self.picks, start:stop][0]
+
+
+class EpochsArray(_BaseEpochs):
+    """Epochs object from numpy array
+
+    Parameters
+    ----------
+    data : array, shape (n_epochs, n_channels, n_times)
+        The channels' time series for each epoch.
+    info : instance of Info
+        Info dictionary. Consider using ``create_info`` to populate
+        this structure.
+    events : array of int, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+    tmin : float
+        Start time before event.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a dict is
+        created with string integer names corresponding to the event id
+        integers.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # V (EEG channels)
+                          eog=250e-6 # V (EOG channels)
+                          )
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    baseline : None or tuple of length 2 (default: None)
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    See Also
+    --------
+    io.RawArray, EvokedArray, create_info
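+
+    Example (a minimal sketch; channel names, sizes, and event codes are
+    illustrative)::
+
+        import numpy as np
+        import mne
+        sfreq = 100.
+        info = mne.create_info(['EEG 001', 'EEG 002'], sfreq, 'eeg')
+        data = np.random.RandomState(0).randn(10, 2, int(sfreq))
+        events = np.c_[np.arange(10) * int(sfreq),
+                       np.zeros(10, int), np.ones(10, int)]
+        epochs = mne.EpochsArray(data, info, events, tmin=0.)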
+    """
+
+    @verbose
+    def __init__(self, data, info, events, tmin=0, event_id=None,
+                 reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, baseline=None, verbose=None):
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+        if data.ndim != 3:
+            raise ValueError('Data must be a 3D array of shape (n_epochs, '
+                             'n_channels, n_samples)')
+
+        if len(info['ch_names']) != data.shape[1]:
+            raise ValueError('Info and data must have same number of '
+                             'channels.')
+        if data.shape[0] != len(events):
+            raise ValueError('The number of epochs and the number of events '
+                             'must match')
+        tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+        super(EpochsArray, self).__init__(info, data, events, event_id, tmin,
+                                          tmax, baseline, reject=reject,
+                                          flat=flat, reject_tmin=reject_tmin,
+                                          reject_tmax=reject_tmax, decim=1)
+        if len(events) != in1d(self.events[:, 2],
+                               list(self.event_id.values())).sum():
+            raise ValueError('The events must only contain event numbers from '
+                             'event_id')
+        for ii, e in enumerate(self._data):
+            # This is safe without assignment b/c there is no decim
+            self._detrend_offset_decim(e)
+        self.drop_bad_epochs()
+
+
+def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
+    """Collapse event_ids from an epochs instance into a new event_id
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs to operate on.
+    old_event_ids : str, or list
+        Conditions to collapse together.
+    new_event_id : dict, or int
+        A one-element dict (or a single integer) for the new
+        condition. Note that for safety, this cannot be any
+        existing id (in epochs.event_id.values()).
+    copy : bool
+        If True, a copy of epochs will be returned. Otherwise, the
+        function will operate in-place.
+
+    Notes
+    -----
+    For example (if epochs.event_id was {'Left': 1, 'Right': 2})::
+
+        combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
+
+    would create a 'Directional' entry in epochs.event_id replacing
+    'Left' and 'Right' (combining their trials).
+    """
+    if copy:
+        epochs = epochs.copy()
+    old_event_ids = np.asanyarray(old_event_ids)
+    if isinstance(new_event_id, int):
+        new_event_id = {str(new_event_id): new_event_id}
+    else:
+        if not isinstance(new_event_id, dict):
+            raise ValueError('new_event_id must be a dict or int')
+        if not len(list(new_event_id.keys())) == 1:
+            raise ValueError('new_event_id dict must have one entry')
+    new_event_num = list(new_event_id.values())[0]
+    if not isinstance(new_event_num, int):
+        raise ValueError('new_event_id value must be an integer')
+    if new_event_num in epochs.event_id.values():
+        raise ValueError('new_event_id value must not already exist')
+    # could use .pop() here, but if a later one doesn't exist, we're
+    # in trouble, so run them all here and pop() later
+    old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
+    # find the ones to replace
+    inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
+                  old_event_nums[np.newaxis, :], axis=1)
+    # replace the event numbers in the events list
+    epochs.events[inds, 2] = new_event_num
+    # delete old entries
+    for key in old_event_ids:
+        epochs.event_id.pop(key)
+    # add the new entry
+    epochs.event_id.update(new_event_id)
+    return epochs
+
+
+def equalize_epoch_counts(epochs_list, method='mintime'):
+    """Equalize the number of trials in multiple Epoch instances
+
+    It tries to make the remaining epochs occur as closely as possible in
+    time. This method is based on the idea that if there happened to be some
+    time-varying (like on the scale of minutes) noise characteristics during
+    a recording, they could be compensated for (to some extent) in the
+    equalization process. This method thus seeks to reduce any of those effects
+    by minimizing the differences in the times of the events in the two sets of
+    epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
+    other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
+    [1, 2] in the first epochs and not [120, 121].
+
+    Note that this operates on the Epochs instances in-place.
+
+    Example::
+
+        equalize_epoch_counts([epochs1, epochs2])
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs instances
+        The Epochs instances to equalize trial counts for.
+    method : str
+        If 'truncate', events will be truncated from the end of each event
+        list. If 'mintime', timing differences between each event list will be
+        minimized.
+    """
+    if not all(isinstance(e, Epochs) for e in epochs_list):
+        raise ValueError('All inputs must be Epochs instances')
+
+    # make sure bad epochs are dropped
+    for e in epochs_list:
+        if not e._bad_dropped:
+            e.drop_bad_epochs()
+    event_times = [e.events[:, 0] for e in epochs_list]
+    indices = _get_drop_indices(event_times, method)
+    for e, inds in zip(epochs_list, indices):
+        e.drop_epochs(inds, reason='EQUALIZED_COUNT')
+
+
+def _get_drop_indices(event_times, method):
+    """Helper to get indices to drop from multiple event timing lists"""
+    small_idx = np.argmin([e.shape[0] for e in event_times])
+    small_e_times = event_times[small_idx]
+    if method not in ['mintime', 'truncate']:
+        raise ValueError('method must be either mintime or truncate, not '
+                         '%s' % method)
+    indices = list()
+    for e in event_times:
+        if method == 'mintime':
+            mask = _minimize_time_diff(small_e_times, e)
+        else:
+            mask = np.ones(e.shape[0], dtype=bool)
+            mask[small_e_times.shape[0]:] = False
+        indices.append(np.where(np.logical_not(mask))[0])
+
+    return indices
+
+
+def _fix_fill(fill):
+    """Helper to fix bug on old scipy"""
+    if LooseVersion(scipy.__version__) < LooseVersion('0.12'):
+        fill = fill[:, np.newaxis]
+    return fill
+
+
+def _minimize_time_diff(t_shorter, t_longer):
+    """Find a boolean mask to minimize timing differences"""
+    from scipy.interpolate import interp1d
+    keep = np.ones((len(t_longer)), dtype=bool)
+    scores = np.ones((len(t_longer)))
+    x1 = np.arange(len(t_shorter))
+    # The first set of keep masks to test
+    kwargs = dict(copy=False, bounds_error=False)
+    # this is a speed tweak, only exists for certain versions of scipy
+    if 'assume_sorted' in inspect.getargspec(interp1d.__init__).args:
+        kwargs['assume_sorted'] = True
+    shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
+                              **kwargs)
+    for ii in range(len(t_longer) - len(t_shorter)):
+        scores.fill(np.inf)
+        # set up the keep masks to test, eliminating any rows that are already
+        # gone
+        keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
+        keep_mask[:, ~keep] = False
+        # Check every possible removal to see if it minimizes
+        x2 = np.arange(len(t_longer) - ii - 1)
+        t_keeps = np.array([t_longer[km] for km in keep_mask])
+        longer_interp = interp1d(x2, t_keeps, axis=1,
+                                 fill_value=_fix_fill(t_keeps[:, -1]),
+                                 **kwargs)
+        d1 = longer_interp(x1) - t_shorter
+        d2 = shorter_interp(x2) - t_keeps
+        # np.abs(d, d) computes the absolute value in-place (the second
+        # argument is the output buffer), avoiding temporary arrays
+        scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
+        keep[np.argmin(scores)] = False
+    return keep
+
+
+@verbose
+def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
+             ignore_chs=[], verbose=None):
+    """Test if data segment e is good according to the criteria
+    defined in reject and flat. If full_report=True, it will give
+    True/False as well as a list of all offending channels.
+    """
+    bad_list = list()
+    has_printed = False
+    checkable = np.ones(len(ch_names), dtype=bool)
+    checkable[np.array([c in ignore_chs
+                        for c in ch_names], dtype=bool)] = False
+    for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
+        if refl is not None:
+            for key, thresh in iteritems(refl):
+                idx = channel_type_idx[key]
+                name = key.upper()
+                if len(idx) > 0:
+                    e_idx = e[idx]
+                    deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
+                    checkable_idx = checkable[idx]
+                    idx_deltas = np.where(np.logical_and(f(deltas, thresh),
+                                                         checkable_idx))[0]
+
+                    if len(idx_deltas) > 0:
+                        ch_name = [ch_names[idx[i]] for i in idx_deltas]
+                        if (not has_printed):
+                            logger.info('    Rejecting %s epoch based on %s : '
+                                        '%s' % (t, name, ch_name))
+                            has_printed = True
+                        if not full_report:
+                            return False
+                        else:
+                            bad_list.extend(ch_name)
+
+    if not full_report:
+        return True
+    else:
+        if bad_list == []:
+            return True, None
+        else:
+            return False, bad_list
+
+
+@verbose
+def _read_one_epoch_file(f, tree, fname, preload):
+    """Helper to read a single FIF file"""
+
+    with f as fid:
+        #   Read the measurement info
+        info, meas = read_meas_info(fid, tree)
+        info['filename'] = fname
+
+        events, mappings = _read_events_fif(fid, tree)
+
+        #   Locate the data of interest
+        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+        if len(processed) == 0:
+            raise ValueError('Could not find processed data')
+
+        epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
+        if len(epochs_node) == 0:
+            raise ValueError('Could not find epochs data')
+
+        my_epochs = epochs_node[0]
+
+        # Now find the data in the block
+        name = None
+        data = None
+        data_tag = None
+        bmin, bmax = None, None
+        baseline = None
+        selection = None
+        drop_log = None
+        for k in range(my_epochs['nent']):
+            kind = my_epochs['directory'][k].kind
+            pos = my_epochs['directory'][k].pos
+            if kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, pos)
+                first = int(tag.data)
+            elif kind == FIFF.FIFF_LAST_SAMPLE:
+                tag = read_tag(fid, pos)
+                last = int(tag.data)
+            elif kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                name = tag.data
+            elif kind == FIFF.FIFF_EPOCH:
+                # delay reading until later
+                fid.seek(pos, 0)
+                data_tag = read_tag_info(fid)
+                data_tag.pos = pos
+            elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
+                tag = read_tag(fid, pos)
+                bmin = float(tag.data)
+            elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
+                tag = read_tag(fid, pos)
+                bmax = float(tag.data)
+            elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
+                tag = read_tag(fid, pos)
+                selection = np.array(tag.data)
+            elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
+                tag = read_tag(fid, pos)
+                drop_log = json.loads(tag.data)
+
+        if bmin is not None or bmax is not None:
+            baseline = (bmin, bmax)
+
+        n_samp = last - first + 1
+        logger.info('    Found the data of interest:')
+        logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                    % (1000 * first / info['sfreq'],
+                       1000 * last / info['sfreq'], name))
+        if info['comps'] is not None:
+            logger.info('        %d CTF compensation matrices available'
+                        % len(info['comps']))
+
+        # Inspect the data
+        if data_tag is None:
+            raise ValueError('Epochs data not found')
+        epoch_shape = (len(info['ch_names']), n_samp)
+        expected = len(events) * np.prod(epoch_shape)
+        if data_tag.size // 4 - 4 != expected:  # 32-bit floats stored
+            raise ValueError('Incorrect number of samples (%d instead of %d)'
+                             % (data_tag.size // 4 - 4, expected))
+
+        # Calibration factors
+        cals = np.array([[info['chs'][k]['cal'] *
+                          info['chs'][k].get('scale', 1.0)]
+                         for k in range(info['nchan'])], np.float64)
+
+        # Read the data
+        if preload:
+            data = read_tag(fid, data_tag.pos).data.astype(np.float64)
+            data *= cals[np.newaxis, :, :]
+
+        # Put it all together
+        tmin = first / info['sfreq']
+        tmax = last / info['sfreq']
+        event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
+                    if mappings is None else mappings)
+        # In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
+        # (version < 0.8):
+        if selection is None:
+            selection = np.arange(len(events))
+        if drop_log is None:
+            drop_log = [[] for _ in range(len(events))]
+
+    return (info, data, data_tag, events, event_id, tmin, tmax, baseline, name,
+            selection, drop_log, epoch_shape, cals)
+
+
+@verbose
+def read_epochs(fname, proj=True, add_eeg_ref=True, preload=True,
+                verbose=None):
+    """Read epochs from a fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end with -epo.fif or -epo.fif.gz.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied which is the
+        recommended value if SSPs are not used for cleaning the data.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    preload : bool
+        If True, read all epochs from disk immediately. If False, epochs will
+        be read on demand.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The epochs.
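+
+    Example (a sketch; the file name is illustrative)::
+
+        epochs = read_epochs('sample-epo.fif', preload=False)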
+    """
+    return EpochsFIF(fname, proj, add_eeg_ref, preload, verbose)
+
+
+class _RawContainer(object):
+    def __init__(self, fid, data_tag, event_samps, epoch_shape, cals):
+        self.fid = fid
+        self.data_tag = data_tag
+        self.event_samps = event_samps
+        self.epoch_shape = epoch_shape
+        self.cals = cals
+        self.proj = False
+
+    def __del__(self):
+        self.fid.close()
+
+
+class EpochsFIF(_BaseEpochs):
+    """Epochs read from disk
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end with -epo.fif or -epo.fif.gz.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied which is the
+        recommended value if SSPs are not used for cleaning the data.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    preload : bool
+        If True, read all epochs from disk immediately. If False, epochs will
+        be read on demand.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    See Also
+    --------
+    mne.Epochs
+    mne.epochs.combine_event_ids
+    mne.Epochs.equalize_event_counts
+    """
+    @verbose
+    def __init__(self, fname, proj=True, add_eeg_ref=True, preload=True,
+                 verbose=None):
+        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+
+        fnames = [fname]
+        ep_list = list()
+        raw = list()
+        for fname in fnames:
+            logger.info('Reading %s ...' % fname)
+            fid, tree, _ = fiff_open(fname)
+            next_fname = _get_next_fname(fid, fname, tree)
+            (info, data, data_tag, events, event_id, tmin, tmax, baseline,
+             name, selection, drop_log, epoch_shape, cals) = \
+                _read_one_epoch_file(fid, tree, fname, preload)
+            # here we ignore missing events, since users should already be
+            # aware of missing events if they have saved data that way
+            epoch = _BaseEpochs(
+                info, data, events, event_id, tmin, tmax, baseline,
+                on_missing='ignore', selection=selection, drop_log=drop_log,
+                add_eeg_ref=False, proj=False, verbose=False)
+            ep_list.append(epoch)
+            if not preload:
+                # store everything we need to index back to the original data
+                raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
+                                         events[:, 0].copy(), epoch_shape,
+                                         cals))
+
+            if next_fname is not None:
+                fnames.append(next_fname)
+
+        (info, data, events, event_id, tmin, tmax, baseline, selection,
+         drop_log, _) = _concatenate_epochs(ep_list, with_data=preload)
+        # we need this uniqueness for non-preloaded data to work properly
+        if len(np.unique(events[:, 0])) != len(events):
+            raise RuntimeError('Event time samples were not unique')
+
+        # correct the drop log
+        assert len(drop_log) % len(fnames) == 0
+        step = len(drop_log) // len(fnames)
+        offsets = np.arange(step, len(drop_log) + 1, step)
+        for i1, i2 in zip(offsets[:-1], offsets[1:]):
+            other_log = drop_log[i1:i2]
+            for k, (a, b) in enumerate(zip(drop_log, other_log)):
+                if a == ['IGNORED'] and b != ['IGNORED']:
+                    drop_log[k] = b
+        drop_log = drop_log[:step]
+
+        # call _BaseEpochs constructor
+        super(EpochsFIF, self).__init__(
+            info, data, events, event_id, tmin, tmax, baseline, raw=raw,
+            name=name, proj=proj, add_eeg_ref=add_eeg_ref,
+            preload_at_end=False, on_missing='ignore', selection=selection,
+            drop_log=drop_log, verbose=verbose)
+        # use the private property instead of drop_bad_epochs so that epochs
+        # are not all read from disk for preload=False
+        self._bad_dropped = True
+
+    @verbose
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Load one epoch from disk"""
+        # Find the right file and offset to use
+        event_samp = self.events[idx, 0]
+        for raw in self._raw:
+            idx = np.where(raw.event_samps == event_samp)[0]
+            if len(idx) == 1:
+                idx = idx[0]
+                size = np.prod(raw.epoch_shape) * 4
+                offset = idx * size
+                break
+        else:
+            # the loop completed without a break: no file holds this epoch
+            raise RuntimeError('Correct epoch could not be found, please '
+                               'contact mne-python developers')
+        # the following is equivalent to this, but faster:
+        #
+        # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
+        # >>> data *= raw.cals[np.newaxis, :, :]
+        # >>> data = data[idx]
+        #
+        # Eventually this could be refactored in io/tag.py if other functions
+        # could make use of it
+
+        raw.fid.seek(raw.data_tag.pos + offset + 16, 0)  # 16 = Tag header
+        data = np.fromstring(raw.fid.read(size), '>f4').astype(np.float64)
+        data.shape = raw.epoch_shape
+        data *= raw.cals
+        return data
+
+
+def bootstrap(epochs, random_state=None):
+    """Compute epochs selected by bootstrapping
+
+    Parameters
+    ----------
+    epochs : Epochs instance
+        The epochs data to be bootstrapped.
+    random_state : None | int | np.random.RandomState
+        Seed or state used to initialize the random number generator.
+
+    Returns
+    -------
+    epochs : Epochs instance
+        The bootstrap samples.
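+
+    Example (a sketch; assumes ``epochs`` is preloaded, seed is illustrative)::
+
+        epochs_bs = bootstrap(epochs, random_state=42)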
+    """
+    if not epochs.preload:
+        raise RuntimeError('Modifying data of epochs is only supported '
+                           'when preloading is used. Use preload=True '
+                           'in the constructor.')
+
+    rng = check_random_state(random_state)
+    epochs_bootstrap = epochs.copy()
+    n_events = len(epochs_bootstrap.events)
+    idx = rng.randint(0, n_events, n_events)
+    epochs_bootstrap = epochs_bootstrap[idx]
+    return epochs_bootstrap
+
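+# A minimal usage sketch (illustrative comment, not part of the module):
+# resample a preloaded Epochs instance ``epochs`` with a fixed seed. The
+# bootstrap draws len(epochs) trials with replacement, so the result has
+# the same length as the input.
+#
+# >>> epochs_boot = bootstrap(epochs, random_state=42)  # doctest: +SKIP
+# >>> len(epochs_boot) == len(epochs)  # doctest: +SKIP
+# True
+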
+
+def _check_merge_epochs(epochs_list):
+    """Aux function"""
+    event_ids = set(tuple(epochs.event_id.items()) for epochs in epochs_list)
+    if len(event_ids) == 1:
+        event_id = dict(event_ids.pop())
+    else:
+        raise NotImplementedError("Epochs with unequal values for event_id")
+
+    tmins = set(epochs.tmin for epochs in epochs_list)
+    if len(tmins) == 1:
+        tmin = tmins.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for tmin")
+
+    tmaxs = set(epochs.tmax for epochs in epochs_list)
+    if len(tmaxs) == 1:
+        tmax = tmaxs.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for tmax")
+
+    baselines = set(epochs.baseline for epochs in epochs_list)
+    if len(baselines) == 1:
+        baseline = baselines.pop()
+    else:
+        raise NotImplementedError("Epochs with unequal values for baseline")
+
+    return event_id, tmin, tmax, baseline
+
+
+@verbose
+def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
+                        verbose=None):
+    """Concatenate channels, info and data from two Epochs objects
+
+    Parameters
+    ----------
+    epochs_list : list of Epochs
+        Epochs objects to concatenate.
+    name : str
+        Comment that describes the Epochs data created.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless there is no
+        EEG in the data).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to True if any of the input epochs have verbose=True.
+
+    Returns
+    -------
+    epochs : Epochs
+        Concatenated epochs.
+    """
+    if not all(e.preload for e in epochs_list):
+        raise ValueError('All epochs must be preloaded.')
+
+    info = _merge_info([epochs.info for epochs in epochs_list])
+    data = [epochs.get_data() for epochs in epochs_list]
+    event_id, tmin, tmax, baseline = _check_merge_epochs(epochs_list)
+
+    for d in data:
+        if len(d) != len(data[0]):
+            raise ValueError('all epochs must be of the same length')
+
+    data = np.concatenate(data, axis=1)
+
+    if len(info['chs']) != data.shape[1]:
+        err = "Data shape does not match channel number in measurement info"
+        raise RuntimeError(err)
+
+    events = epochs_list[0].events.copy()
+    all_same = all(np.array_equal(events, epochs.events)
+                   for epochs in epochs_list[1:])
+    if not all_same:
+        raise ValueError('Events must be the same.')
+
+    proj = any(e.proj for e in epochs_list) or add_eeg_ref
+
+    if verbose is None:
+        verbose = any(e.verbose for e in epochs_list)
+
+    epochs = epochs_list[0].copy()
+    epochs.info = info
+    epochs.event_id = event_id
+    epochs.tmin = tmin
+    epochs.tmax = tmax
+    epochs.baseline = baseline
+    epochs.picks = None
+    epochs.name = name
+    epochs.verbose = verbose
+    epochs.events = events
+    epochs.preload = True
+    epochs._bad_dropped = True
+    epochs._data = data
+    epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
+                                                activate=proj)
+    return epochs
+
+
+def _compare_epochs_infos(info1, info2, ind):
+    """Compare infos"""
+    info1._check_consistency()
+    info2._check_consistency()
+    if info1['nchan'] != info2['nchan']:
+        raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
+    if info1['bads'] != info2['bads']:
+        raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
+    if info1['sfreq'] != info2['sfreq']:
+        raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
+    if set(info1['ch_names']) != set(info2['ch_names']):
+        raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
+    if len(info2['projs']) != len(info1['projs']):
+        raise ValueError('SSP projectors in epochs files must be the same')
+    if any(not _proj_equal(p1, p2) for p1, p2 in
+           zip(info2['projs'], info1['projs'])):
+        raise ValueError('SSP projectors in epochs files must be the same')
+
+
+def _concatenate_epochs(epochs_list, with_data=True):
+    """Auxiliary function for concatenating epochs."""
+    out = epochs_list[0]
+    data = [out.get_data()] if with_data else None
+    events = [out.events]
+    baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
+    info = deepcopy(out.info)
+    verbose = out.verbose
+    drop_log = deepcopy(out.drop_log)
+    event_id = deepcopy(out.event_id)
+    selection = out.selection
+    for ii, epochs in enumerate(epochs_list[1:]):
+        _compare_epochs_infos(epochs.info, info, ii)
+        if not np.array_equal(epochs.times, epochs_list[0].times):
+            raise ValueError('Epochs must have same times')
+
+        if epochs.baseline != baseline:
+            raise ValueError('Baseline must be same for all epochs')
+
+        if with_data:
+            data.append(epochs.get_data())
+        events.append(epochs.events)
+        selection = np.concatenate((selection, epochs.selection))
+        drop_log.extend(epochs.drop_log)
+        event_id.update(epochs.event_id)
+    events = np.concatenate(events, axis=0)
+    if with_data:
+        data = np.concatenate(data, axis=0)
+    return (info, data, events, event_id, tmin, tmax, baseline, selection,
+            drop_log, verbose)
+
+
+def _finish_concat(info, data, events, event_id, tmin, tmax, baseline,
+                   selection, drop_log, verbose):
+    """Helper to finish concatenation for epochs not read from disk"""
+    events[:, 0] = np.arange(len(events))  # arbitrary after concat
+    selection = np.where([len(d) == 0 for d in drop_log])[0]
+    out = _BaseEpochs(info, data, events, event_id, tmin, tmax,
+                      baseline=baseline, add_eeg_ref=False,
+                      selection=selection, drop_log=drop_log,
+                      proj=False, on_missing='ignore', verbose=verbose)
+    out.drop_bad_epochs()
+    return out
+
+
+def concatenate_epochs(epochs_list):
+    """Concatenate a list of epochs into one epochs object
+
+    Parameters
+    ----------
+    epochs_list : list
+        list of Epochs instances to concatenate (in order).
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The result of the concatenation.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    return _finish_concat(*_concatenate_epochs(epochs_list))
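+
+
+# A minimal usage sketch (illustrative comment, not part of the module):
+# concatenate two Epochs instances recorded with the same channels, times
+# and baseline; ``epochs1`` and ``epochs2`` are assumed to exist.
+#
+# >>> epochs_all = concatenate_epochs([epochs1, epochs2])  # doctest: +SKIP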
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/event.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/event.py
new file mode 100644
index 0000000..fd15e63
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/event.py
@@ -0,0 +1,775 @@
+"""IO with fif files containing events
+"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Clement Moutard <clement.moutard at polytechnique.org>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from os.path import splitext
+
+from .utils import check_fname, logger, verbose, _get_stim_channel
+from .io.constants import FIFF
+from .io.tree import dir_tree_find
+from .io.tag import read_tag
+from .io.open import fiff_open
+from .io.write import write_int, start_block, start_file, end_block, end_file
+from .io.pick import pick_channels
+
+
+def pick_events(events, include=None, exclude=None, step=False):
+    """Select some events
+
+    Parameters
+    ----------
+    events : ndarray
+        Array as returned by mne.find_events.
+    include : int | list | None
+        An event id to include or a list of them.
+        If None, all events are included.
+    exclude : int | list | None
+        An event id to exclude or a list of them.
+        If None, no event is excluded. If include is not None,
+        the exclude parameter is ignored.
+    step : bool
+        If True (default is False), events have a step format according
+        to the argument output='step' in the function find_events().
+        In this case, the last two columns are considered in inclusion/
+        exclusion criteria.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        The list of events
+    """
+    if include is not None:
+        if not isinstance(include, list):
+            include = [include]
+        mask = np.zeros(len(events), dtype=np.bool)
+        for e in include:
+            mask = np.logical_or(mask, events[:, 2] == e)
+            if step:
+                mask = np.logical_or(mask, events[:, 1] == e)
+        events = events[mask]
+    elif exclude is not None:
+        if not isinstance(exclude, list):
+            exclude = [exclude]
+        mask = np.ones(len(events), dtype=np.bool)
+        for e in exclude:
+            mask = np.logical_and(mask, events[:, 2] != e)
+            if step:
+                mask = np.logical_and(mask, events[:, 1] != e)
+        events = events[mask]
+    else:
+        events = np.copy(events)
+
+    if len(events) == 0:
+        raise RuntimeError("No events found")
+
+    return events
+
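+# A minimal worked example (illustrative comment, not part of the module):
+# keep only the events whose id matches ``include``.
+#
+# >>> events = np.array([[100, 0, 1], [200, 0, 2], [300, 0, 1]])
+# >>> pick_events(events, include=1)  # doctest: +SKIP
+# array([[100,   0,   1],
+#        [300,   0,   1]])
+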
+
+def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
+                         new_id=None, fill_na=None):
+    """Define new events by co-occurrence of existing events
+
+    This function can be used to evaluate events depending on the
+    temporal lag to another event. For example, this can be used to
+    analyze evoked responses which were followed by a button press within
+    a defined time window.
+
+    Parameters
+    ----------
+    events : ndarray
+        Array as returned by mne.find_events.
+    reference_id : int
+        The reference event. The event defining the epoch of interest.
+    target_id : int
+        The target event. The event co-occurring within a certain time
+        window around the reference event.
+    sfreq : float
+        The sampling frequency of the data.
+    tmin : float
+        The lower limit in seconds from the target event.
+    tmax : float
+        The upper limit in seconds from the target event.
+    new_id : int
+        The id to assign to the new events. If None, reference_id is used.
+    fill_na : int | None
+        Fill event to be inserted if target is not available within the time
+        window specified. If None, the 'null' events will be dropped.
+
+    Returns
+    -------
+    new_events : ndarray
+        The new defined events
+    lag : ndarray
+        time lag between reference and target in milliseconds.
+    """
+
+    if new_id is None:
+        new_id = reference_id
+
+    tsample = 1e3 / sfreq
+    imin = int(tmin * sfreq)
+    imax = int(tmax * sfreq)
+
+    new_events = []
+    lag = []
+    for event in events.copy().astype(int):
+        if event[2] == reference_id:
+            lower = event[0] + imin
+            upper = event[0] + imax
+            res = events[(events[:, 0] > lower) &
+                         (events[:, 0] < upper) & (events[:, 2] == target_id)]
+            if res.any():
+                lag += [event[0] - res[0][0]]
+                event[2] = new_id
+                new_events += [event]
+            elif fill_na is not None:
+                event[2] = fill_na
+                new_events += [event]
+                lag.append(np.nan)
+
+    new_events = np.array(new_events)
+
+    with np.errstate(invalid='ignore'):  # casting nans
+        lag = np.abs(lag, dtype='f8')
+    if lag.any():
+        lag *= tsample
+    else:
+        lag = np.array([])
+
+    return new_events if new_events.any() else np.array([]), lag
+
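+# A minimal usage sketch (illustrative comment, not part of the module):
+# keep reference events (id 1) only when a target event (id 2) follows
+# within 0-500 ms, relabelling them 42; assumes a 1000 Hz recording and
+# an existing ``events`` array.
+#
+# >>> new_events, lag = define_target_events(  # doctest: +SKIP
+# ...     events, reference_id=1, target_id=2, sfreq=1000.,
+# ...     tmin=0., tmax=0.5, new_id=42)
+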
+
+def _read_events_fif(fid, tree):
+    """Aux function"""
+    #   Find the desired block
+    events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
+
+    if len(events) == 0:
+        fid.close()
+        raise ValueError('Could not find event data')
+
+    events = events[0]
+
+    for d in events['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_MNE_EVENT_LIST:
+            tag = read_tag(fid, pos)
+            event_list = tag.data
+            break
+    else:
+        raise ValueError('Could not find any events')
+
+    mappings = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS)
+    mappings = mappings[0]
+
+    for d in mappings['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_DESCRIPTION:
+            tag = read_tag(fid, pos)
+            mappings = tag.data
+            break
+    else:
+        mappings = None
+
+    if mappings is not None:  # deal with ':' in keys
+        m_ = [[s[::-1] for s in m[::-1].split(':', 1)]
+              for m in mappings.split(';')]
+        mappings = dict((k, int(v)) for v, k in m_)
+    event_list = event_list.reshape(len(event_list) // 3, 3)
+    return event_list, mappings
+
+
+def read_events(filename, include=None, exclude=None, mask=0):
+    """Reads events from fif or text file
+
+    Parameters
+    ----------
+    filename : string
+        Name of the input file.
+        If the extension is .fif, events are read assuming
+        the file is in FIF format, otherwise (e.g., .eve,
+        .lst, .txt) events are read as coming from text.
+        Note that new format event files do not contain
+        the "time" column (used to be the second column).
+    include : int | list | None
+        An event id to include or a list of them.
+        If None, all events are included.
+    exclude : int | list | None
+        An event id to exclude or a list of them.
+        If None, no event is excluded. If include is not None,
+        the exclude parameter is ignored.
+    mask : int
+        The value of the digital mask to apply to the stim channel values.
+        The default value is 0.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        The list of events
+
+    See Also
+    --------
+    find_events, write_events
+
+    Notes
+    -----
+    This function will discard the offset line (i.e., first line with zero
+    event number) if it is present in a text file.
+
+    Working with downsampled data: Events that were computed before the data
+    was decimated are no longer valid. Please recompute your events after
+    decimation.
+    """
+    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
+                                     '-eve.lst', '-eve.txt'))
+
+    ext = splitext(filename)[1].lower()
+    if ext == '.fif' or ext == '.gz':
+        fid, tree, _ = fiff_open(filename)
+        try:
+            event_list, _ = _read_events_fif(fid, tree)
+        finally:
+            fid.close()
+    else:
+        #  Have to read this in as float64 then convert because old style
+        #  eve/lst files had a second float column that will raise errors
+        lines = np.loadtxt(filename, dtype=np.float64).astype(np.uint32)
+        if len(lines) == 0:
+            raise ValueError('No text lines found')
+
+        if lines.ndim == 1:  # Special case for only one event
+            lines = lines[np.newaxis, :]
+
+        if len(lines[0]) == 4:  # Old format eve/lst
+            goods = [0, 2, 3]  # Omit "time" variable
+        elif len(lines[0]) == 3:
+            goods = [0, 1, 2]
+        else:
+            raise ValueError('Unknown number of columns in event text file')
+
+        event_list = lines[:, goods]
+        if event_list.shape[0] > 0 and event_list[0, 2] == 0:
+            event_list = event_list[1:]
+
+    event_list = pick_events(event_list, include, exclude)
+    event_list = _mask_trigs(event_list, mask)
+
+    return event_list
+
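+# A minimal usage sketch (illustrative comment, not part of the module):
+# round-trip events through FIF and text formats. The file names are
+# hypothetical but follow the suffix conventions enforced by check_fname.
+#
+# >>> events = read_events('sample-eve.fif')  # doctest: +SKIP
+# >>> write_events('sample-eve.txt', events)  # doctest: +SKIP
+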
+
+def write_events(filename, event_list):
+    """Write events to file
+
+    Parameters
+    ----------
+    filename : string
+        Name of the output file.
+        If the extension is .fif, events are written in
+        binary FIF format, otherwise (e.g., .eve, .lst,
+        .txt) events are written as plain text.
+        Note that new format event files do not contain
+        the "time" column (used to be the second column).
+
+    event_list : array, shape (n_events, 3)
+        The list of events
+
+    See Also
+    --------
+    read_events
+    """
+    check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
+                                     '-eve.lst', '-eve.txt'))
+
+    ext = splitext(filename)[1].lower()
+    if ext == '.fif' or ext == '.gz':
+        #   Start writing...
+        fid = start_file(filename)
+
+        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
+        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, event_list.T)
+        end_block(fid, FIFF.FIFFB_MNE_EVENTS)
+
+        end_file(fid)
+    else:
+        f = open(filename, 'w')
+        for e in event_list:
+            f.write('%6d %6d %3d\n' % tuple(e))
+        f.close()
+
+
+def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0):
+    changed = np.diff(data, axis=1) != 0
+    idx = np.where(np.all(changed, axis=0))[0]
+    if len(idx) == 0:
+        return np.empty((0, 3), dtype='int32')
+
+    pre_step = data[0, idx]
+    idx += 1
+    post_step = data[0, idx]
+    idx += first_samp
+    steps = np.c_[idx, pre_step, post_step]
+
+    if pad_start is not None:
+        v = steps[0, 1]
+        if v != pad_start:
+            steps = np.insert(steps, 0, [0, pad_start, v], axis=0)
+
+    if pad_stop is not None:
+        v = steps[-1, 2]
+        if v != pad_stop:
+            last_idx = len(data[0]) + first_samp
+            steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0)
+
+    if merge != 0:
+        diff = np.diff(steps[:, 0])
+        idx = (diff <= abs(merge))
+        if np.any(idx):
+            where = np.where(idx)[0]
+            keep = np.logical_not(idx)
+            if merge > 0:
+                # drop the earlier event
+                steps[where + 1, 1] = steps[where, 1]
+                keep = np.append(keep, True)
+            else:
+                # drop the later event
+                steps[where, 2] = steps[where + 1, 2]
+                keep = np.insert(keep, 0, True)
+
+            is_step = (steps[:, 1] != steps[:, 2])
+            keep = np.logical_and(keep, is_step)
+            steps = steps[keep]
+
+    return steps
+
+
+def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
+                    stim_channel=None):
+    """Find all steps in data from a stim channel
+
+    Parameters
+    ----------
+    raw : Raw object
+        The raw data.
+    pad_start : None | int
+        Values to assume outside of the stim channel (e.g., if pad_start=0 and
+        the stim channel starts with value 5, an event of [0, 0, 5] will be
+        inserted at the beginning). With None, no steps will be inserted.
+    pad_stop : None | int
+        Values to assume outside of the stim channel, see ``pad_start``.
+    merge : int
+        Merge steps occurring in neighboring samples. The integer value
+        indicates over how many samples events should be merged, and the sign
+        indicates in which direction they should be merged (negative means
+        towards the earlier event, positive towards the later event).
+    stim_channel : None | string | list of string
+        Name of the stim channel or all the stim channels
+        affected by the trigger. If None, the config variables
+        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
+        etc. are read. If these are not found, it will default to
+        'STI 014'.
+
+    Returns
+    -------
+    steps : array, shape = (n_samples, 3)
+        For each step in the stim channel the values [sample, v_from, v_to].
+        The first column contains the event time in samples (the first sample
+        with the new value). The second column contains the stim channel value
+        before the step, and the third column contains the value after the
+        step.
+
+    See Also
+    --------
+    find_events : More sophisticated options for finding events in a Raw file.
+    """
+
+    # pull stim channel from config if necessary
+    stim_channel = _get_stim_channel(stim_channel, raw.info)
+
+    picks = pick_channels(raw.info['ch_names'], include=stim_channel)
+    if len(picks) == 0:
+        raise ValueError('No stim channel found to extract event triggers.')
+    data, _ = raw[picks, :]
+    if np.any(data < 0):
+        logger.warning('Trigger channel contains negative values. '
+                       'Taking absolute value.')
+        data = np.abs(data)  # make sure trig channel is positive
+    data = data.astype(np.int)
+
+    return _find_stim_steps(data, raw.first_samp, pad_start=pad_start,
+                            pad_stop=pad_stop, merge=merge)
+
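+# A minimal usage sketch (illustrative comment, not part of the module):
+# list every transition on the default stim channel of an existing Raw
+# instance ``raw``, merging steps no more than two samples apart into the
+# earlier event (negative merge).
+#
+# >>> steps = find_stim_steps(raw, merge=-2)  # doctest: +SKIP
+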
+
+@verbose
+def _find_events(data, first_samp, verbose=None, output='onset',
+                 consecutive='increasing', min_samples=0, mask=0):
+    """Helper function for find events"""
+    if min_samples > 0:
+        merge = int(min_samples // 1)
+        if merge == min_samples:
+            merge -= 1
+    else:
+        merge = 0
+
+    if np.any(data < 0):
+        logger.warning('Trigger channel contains negative values. '
+                       'Taking absolute value.')
+        data = np.abs(data)  # make sure trig channel is positive
+    data = data.astype(np.int)
+
+    events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge)
+    events = _mask_trigs(events, mask)
+
+    # Determine event onsets and offsets
+    if consecutive == 'increasing':
+        onsets = (events[:, 2] > events[:, 1])
+        offsets = np.logical_and(np.logical_or(onsets, (events[:, 2] == 0)),
+                                 (events[:, 1] > 0))
+    elif consecutive:
+        onsets = (events[:, 2] > 0)
+        offsets = (events[:, 1] > 0)
+    else:
+        onsets = (events[:, 1] == 0)
+        offsets = (events[:, 2] == 0)
+
+    onset_idx = np.where(onsets)[0]
+    offset_idx = np.where(offsets)[0]
+
+    if len(onset_idx) == 0 or len(offset_idx) == 0:
+        return np.empty((0, 3), dtype='int32')
+
+    # delete orphaned onsets/offsets
+    if onset_idx[0] > offset_idx[0]:
+        logger.info("Removing orphaned offset at the beginning of the file.")
+        offset_idx = np.delete(offset_idx, 0)
+
+    if onset_idx[-1] > offset_idx[-1]:
+        logger.info("Removing orphaned onset at the end of the file.")
+        onset_idx = np.delete(onset_idx, -1)
+
+    if output == 'onset':
+        events = events[onset_idx]
+    elif output == 'step':
+        idx = np.union1d(onset_idx, offset_idx)
+        events = events[idx]
+    elif output == 'offset':
+        event_id = events[onset_idx, 2]
+        events = events[offset_idx]
+        events[:, 1] = events[:, 2]
+        events[:, 2] = event_id
+        events[:, 0] -= 1
+    else:
+        raise Exception("Invalid output parameter %r" % output)
+
+    logger.info("%s events found" % len(events))
+    logger.info("Events id: %s" % np.unique(events[:, 2]))
+
+    return events
+
+
+@verbose
+def find_events(raw, stim_channel=None, verbose=None, output='onset',
+                consecutive='increasing', min_duration=0,
+                shortest_event=2, mask=0):
+    """Find events from raw file
+
+    Parameters
+    ----------
+    raw : Raw object
+        The raw data.
+    stim_channel : None | string | list of string
+        Name of the stim channel or all the stim channels
+        affected by the trigger. If None, the config variables
+        'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
+        etc. are read. If these are not found, it will fall back to
+        'STI 014' if present, then fall back to the first channel of type
+        'stim', if present.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    output : 'onset' | 'offset' | 'step'
+        Whether to report when events start, when events end, or both.
+    consecutive : bool | 'increasing'
+        If True, consider instances where the value of the events
+        channel changes without first returning to zero as multiple
+        events. If False, report only instances where the value of the
+        events channel changes from/to zero. If 'increasing', report
+        adjacent events only when the second event code is greater than
+        the first.
+    min_duration : float
+        The minimum duration of a change in the events channel required
+        to consider it as an event (in seconds).
+    shortest_event : int
+        Minimum number of samples an event must last (default is 2). If the
+        duration is less than this an exception will be raised.
+    mask : int
+        The value of the digital mask to apply to the stim channel values.
+        The default value is 0.
+
+    Returns
+    -------
+    events : array, shape = (n_events, 3)
+        All events that were found. The first column contains the event time
+        in samples and the third column contains the event id. For output =
+        'onset' or 'step', the second column contains the value of the stim
+        channel immediately before the event/step. For output = 'offset',
+        the second column contains the value of the stim channel after the
+        event offset.
+
+    Examples
+    --------
+    Consider data with a stim channel that looks like: [0, 32, 32, 33, 32, 0]
+
+    By default, find_events returns all samples at which the value of the
+    stim channel increases::
+
+        >>> print(find_events(raw)) # doctest: +SKIP
+        [[ 1  0 32]
+         [ 3 32 33]]
+
+    If consecutive is False, find_events only returns the samples at which
+    the stim channel changes from zero to a non-zero value::
+
+        >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP
+        [[ 1  0 32]]
+
+    If consecutive is True, find_events returns samples at which the
+    event changes, regardless of whether it first returns to zero::
+
+        >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP
+        [[ 1  0 32]
+         [ 3 32 33]
+         [ 4 33 32]]
+
+    If output is 'offset', find_events returns the last sample of each event
+    instead of the first one::
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   output='offset'))
+        [[ 2 33 32]
+         [ 3 32 33]
+         [ 4  0 32]]
+
+    If output is 'step', find_events returns the samples at which an event
+    starts or ends::
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   output='step'))
+        [[ 1  0 32]
+         [ 3 32 33]
+         [ 4 33 32]
+         [ 5 32  0]]
+
+    To ignore spurious events, it is also possible to specify a minimum
+    event duration. Assuming our events channel has a sample rate of
+    1000 Hz::
+
+        >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
+        ...                   min_duration=0.002))
+        [[ 1  0 32]]
+
+    For the digital mask, the binary representation of the mask is taken,
+    e.g. 5 -> '00000101', and trigger bits at positions where the mask is
+    one are zeroed out, e.g.::
+
+              7 '0000111' <- trigger value
+             37 '0100101' <- mask
+         ----------------
+              2 '0000010'
+
+    See Also
+    --------
+    find_stim_steps : Find all the steps in the stim channel.
+    """
+    min_samples = min_duration * raw.info['sfreq']
+
+    # pull stim channel from config if necessary
+    stim_channel = _get_stim_channel(stim_channel, raw.info)
+
+    pick = pick_channels(raw.info['ch_names'], include=stim_channel)
+    if len(pick) == 0:
+        raise ValueError('No stim channel found to extract event triggers.')
+    data, _ = raw[pick, :]
+
+    events = _find_events(data, raw.first_samp, verbose=verbose, output=output,
+                          consecutive=consecutive, min_samples=min_samples,
+                          mask=mask)
+
+    # add safety check for spurious events (for ex. from neuromag syst.) by
+    # checking the number of low sample events
+    n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event)
+    if n_short_events > 0:
+        raise ValueError("You have %i events shorter than the "
+                         "shortest_event. These are very unusual and you "
+                         "may want to set min_duration to a larger value e.g."
+                         " x / raw.info['sfreq']. Where x = 1 sample shorter "
+                         "than the shortest event length." % (n_short_events))
+
+    return events
+
+
+def _mask_trigs(events, mask):
+    """Helper function for masking digital trigger values"""
+    if not isinstance(mask, int):
+        raise TypeError('You provided a(n) %s. Mask must be an int.'
+                        % type(mask))
+    n_events = len(events)
+    if n_events == 0:
+        return events.copy()
+
+    mask = np.bitwise_not(mask)
+    events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
+    events = events[events[:, 1] != events[:, 2]]
+
+    return events
+
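+# A minimal worked example (illustrative comment, not part of the module),
+# mirroring the mask table in the find_events docstring: bits that are set
+# in ``mask`` are cleared from the trigger values, so 7 & ~37 == 2.
+#
+# >>> events = np.array([[10, 0, 7]])
+# >>> _mask_trigs(events, 37)  # doctest: +SKIP
+# array([[10,  0,  2]])
+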
+
+def merge_events(events, ids, new_id, replace_events=True):
+    """Merge a set of events
+
+    Parameters
+    ----------
+    events : array
+        Events.
+    ids : array of int
+        The ids of events to merge.
+    new_id : int
+        The new id.
+    replace_events : bool
+        If True (default), old event ids are replaced. Otherwise,
+        new events will be added to the old event list.
+
+    Returns
+    -------
+    new_events : array
+        The new events.
+    """
+    events_out = events.copy()
+    for col in [1, 2]:
+        for i in ids:
+            where = (events[:, col] == i)
+            events_out[where, col] = new_id
+    if not replace_events:
+        events_out = np.concatenate((events_out, events), axis=0)
+        events_out = events_out[np.argsort(events_out[:, 0])]
+    return events_out
+
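+# A minimal worked example (illustrative comment, not part of the module):
+# collapse event ids 1 and 2 into a single id 12, leaving id 3 untouched.
+#
+# >>> events = np.array([[100, 0, 1], [200, 0, 2], [300, 0, 3]])
+# >>> merge_events(events, [1, 2], 12)  # doctest: +SKIP
+# array([[100,   0,  12],
+#        [200,   0,  12],
+#        [300,   0,   3]])
+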
+
+def shift_time_events(events, ids, tshift, sfreq):
+    """Shift an event
+
+    Parameters
+    ----------
+    events : array, shape=(n_events, 3)
+        The events.
+    ids : array of int
+        The ids of events to shift.
+    tshift : float
+        Time shift to apply, in seconds. Use a positive value to shift
+        events forward and a negative value to shift them backward.
+    sfreq : float
+        The sampling frequency of the data.
+
+    Returns
+    -------
+    new_events : array
+        The new events.
+    """
+    events = events.copy()
+    for ii in ids:
+        events[events[:, 2] == ii, 0] += int(tshift * sfreq)
+    return events
+
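+# A minimal worked example (illustrative comment, not part of the module):
+# shift events with id 5 forward by 10 ms at a 1000 Hz sampling rate,
+# i.e. by int(0.01 * 1000) = 10 samples.
+#
+# >>> events = np.array([[1000, 0, 5]])
+# >>> shift_time_events(events, [5], tshift=0.01, sfreq=1000.)  # doctest: +SKIP
+# array([[1010,    0,    5]])
+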
+
+def make_fixed_length_events(raw, id, start=0, stop=None, duration=1.):
+    """Make a set of events separated by a fixed duration
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object to use the data from.
+    id : int
+        The id to use.
+    start : float
+        Time of first event.
+    stop : float | None
+        Maximum time of last event. If None, events extend to the end
+        of the recording.
+    duration : float
+        The duration to separate events by.
+
+    Returns
+    -------
+    new_events : array
+        The new events.
+    """
+    start = raw.time_as_index(start)
+    start = start[0] + raw.first_samp
+    if stop is not None:
+        stop = raw.time_as_index(stop)
+        stop = min([stop[0] + raw.first_samp, raw.last_samp + 1])
+    else:
+        stop = raw.last_samp + 1
+    if not isinstance(id, int):
+        raise ValueError('id must be an integer')
+    # Make sure we don't go out the end of the file:
+    stop -= int(np.ceil(raw.info['sfreq'] * duration))
+    ts = np.arange(start, stop, raw.info['sfreq'] * duration).astype(int)
+    n_events = len(ts)
+    events = np.c_[ts, np.zeros(n_events, dtype=int),
+                   id * np.ones(n_events, dtype=int)]
+    return events
+
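+# A minimal usage sketch (illustrative comment, not part of the module):
+# mark one event with id 1 every 2 seconds over the whole recording of an
+# existing Raw instance ``raw``.
+#
+# >>> events = make_fixed_length_events(raw, id=1, duration=2.)  # doctest: +SKIP
+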
+
+def concatenate_events(events, first_samps, last_samps):
+    """Concatenate event lists in a manner compatible with
+    concatenate_raws
+
+    This is useful, for example, if you processed and/or changed
+    events in raw files separately before combining them using
+    concatenate_raws.
+
+    Parameters
+    ----------
+    events : list of arrays
+        List of event arrays, typically each extracted from a
+        corresponding raw file that is being concatenated.
+
+    first_samps : list or array of int
+        First sample numbers of the raw files concatenated.
+
+    last_samps : list or array of int
+        Last sample numbers of the raw files concatenated.
+
+    Returns
+    -------
+    events : array
+        The concatenated events.
+    """
+    if not isinstance(events, list):
+        raise ValueError('events must be a list of arrays')
+    if not (len(events) == len(last_samps) and
+            len(events) == len(first_samps)):
+        raise ValueError('events, first_samps, and last_samps must all have '
+                         'the same lengths')
+    first_samps = np.array(first_samps)
+    last_samps = np.array(last_samps)
+    n_samps = np.cumsum(last_samps - first_samps + 1)
+    events_out = events[0]
+    for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]):
+        # remove any skip since it doesn't exist in concatenated files
+        e2 = e.copy()
+        e2[:, 0] -= f
+        # add offset due to previous files, plus original file offset
+        e2[:, 0] += n + first_samps[0]
+        events_out = np.concatenate((events_out, e2), axis=0)
+
+    return events_out
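+
+
+# A minimal usage sketch (illustrative comment, not part of the module):
+# combine per-file event lists after concatenate_raws; ``events1``/``events2``
+# and ``raw1``/``raw2`` are assumed to be the per-file events and raw files.
+#
+# >>> events = concatenate_events([events1, events2],  # doctest: +SKIP
+# ...                             [raw1.first_samp, raw2.first_samp],
+# ...                             [raw1.last_samp, raw2.last_samp])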
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/evoked.py
new file mode 100644
index 0000000..fdd9c60
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/evoked.py
@@ -0,0 +1,1284 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#          Mads Jensen <mje.mads at gmail.com>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import numpy as np
+import warnings
+
+from .baseline import rescale
+from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                SetChannelsMixin, InterpolationMixin,
+                                equalize_channels)
+from .filter import resample, detrend, FilterMixin
+from .fixes import in1d
+from .utils import check_fname, logger, verbose, object_hash, _time_mask
+from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
+                  plot_evoked_image, plot_evoked_topo)
+from .viz.evoked import _plot_evoked_white
+from .externals.six import string_types
+
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tag import read_tag
+from .io.tree import dir_tree_find
+from .io.pick import channel_type, pick_types
+from .io.meas_info import read_meas_info, write_meas_info
+from .io.proj import ProjMixin
+from .io.write import (start_file, start_block, end_file, end_block,
+                       write_int, write_string, write_float_matrix,
+                       write_id)
+from .io.base import ToDataFrameMixin
+
+_aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
+                'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
+_aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
+               str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
+
+
+class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+             SetChannelsMixin, InterpolationMixin, FilterMixin,
+             ToDataFrameMixin):
+    """Evoked data
+
+    Parameters
+    ----------
+    fname : string
+        Name of evoked/average FIF file to load.
+        If None no data is loaded.
+    condition : int, or str
+        Dataset ID number (int) or comment/name (str). Optional if there is
+        only one data set in file.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) the whole time
+        interval is used. If None, no correction is applied.
+    proj : bool, optional
+        Apply SSP projection vectors
+    kind : str
+        Either 'average' or 'standard_error'. The type of data to read.
+        Only used if 'condition' is a str.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    ch_names : list of string
+        List of channels' names.
+    nave : int
+        Number of averaged epochs.
+    kind : str
+        Type of data, either average or standard_error.
+    first : int
+        First time sample.
+    last : int
+        Last time sample.
+    comment : string
+        Comment on dataset. Can be the condition.
+    times : array
+        Array of time instants in seconds.
+    data : array of shape (n_channels, n_times)
+        Evoked response.
+    verbose : bool, str, int, or None.
+        See above.
+    """
+    @verbose
+    def __init__(self, fname, condition=None, baseline=None, proj=True,
+                 kind='average', verbose=None):
+
+        if fname is None:
+            raise ValueError('No evoked filename specified')
+
+        self.verbose = verbose
+        logger.info('Reading %s ...' % fname)
+        f, tree, _ = fiff_open(fname)
+        with f as fid:
+            if not isinstance(proj, bool):
+                raise ValueError(r"'proj' must be 'True' or 'False'")
+
+            #   Read the measurement info
+            info, meas = read_meas_info(fid, tree)
+            info['filename'] = fname
+
+            #   Locate the data of interest
+            processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+            if len(processed) == 0:
+                raise ValueError('Could not find processed data')
+
+            evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
+            if len(evoked_node) == 0:
+                raise ValueError('Could not find evoked data')
+
+            # find string-based entry
+            if isinstance(condition, string_types):
+                if kind not in _aspect_dict.keys():
+                    raise ValueError('kind must be "average" or '
+                                     '"standard_error"')
+
+                comments, aspect_kinds, t = _get_entries(fid, evoked_node)
+                goods = np.logical_and(in1d(comments, [condition]),
+                                       in1d(aspect_kinds,
+                                            [_aspect_dict[kind]]))
+                found_cond = np.where(goods)[0]
+                if len(found_cond) != 1:
+                    raise ValueError('condition "%s" (%s) not found, out of '
+                                     'found datasets:\n  %s'
+                                     % (condition, kind, t))
+                condition = found_cond[0]
+
+            if condition >= len(evoked_node) or condition < 0:
+                fid.close()
+                raise ValueError('Data set selector out of range')
+
+            my_evoked = evoked_node[condition]
+
+            # Identify the aspects
+            aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
+            if len(aspects) > 1:
+                logger.info('Multiple aspects found. Taking first one.')
+            my_aspect = aspects[0]
+
+            # Now find the data in the evoked block
+            nchan = 0
+            sfreq = -1
+            chs = []
+            comment = None
+            for k in range(my_evoked['nent']):
+                my_kind = my_evoked['directory'][k].kind
+                pos = my_evoked['directory'][k].pos
+                if my_kind == FIFF.FIFF_COMMENT:
+                    tag = read_tag(fid, pos)
+                    comment = tag.data
+                elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
+                    tag = read_tag(fid, pos)
+                    first = int(tag.data)
+                elif my_kind == FIFF.FIFF_LAST_SAMPLE:
+                    tag = read_tag(fid, pos)
+                    last = int(tag.data)
+                elif my_kind == FIFF.FIFF_NCHAN:
+                    tag = read_tag(fid, pos)
+                    nchan = int(tag.data)
+                elif my_kind == FIFF.FIFF_SFREQ:
+                    tag = read_tag(fid, pos)
+                    sfreq = float(tag.data)
+                elif my_kind == FIFF.FIFF_CH_INFO:
+                    tag = read_tag(fid, pos)
+                    chs.append(tag.data)
+
+            if comment is None:
+                comment = 'No comment'
+
+            #   Local channel information?
+            if nchan > 0:
+                if len(chs) == 0:
+                    raise ValueError('Local channel information was not found '
+                                     'when it was expected.')
+
+                if len(chs) != nchan:
+                    raise ValueError('Number of channels and number of '
+                                     'channel definitions are different')
+
+                info['chs'] = chs
+                info['nchan'] = nchan
+                logger.info('    Found channel information in evoked data. '
+                            'nchan = %d' % nchan)
+                if sfreq > 0:
+                    info['sfreq'] = sfreq
+
+            nsamp = last - first + 1
+            logger.info('    Found the data of interest:')
+            logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                        % (1000 * first / info['sfreq'],
+                           1000 * last / info['sfreq'], comment))
+            if info['comps'] is not None:
+                logger.info('        %d CTF compensation matrices available'
+                            % len(info['comps']))
+
+            # Read the data in the aspect block
+            nave = 1
+            epoch = []
+            for k in range(my_aspect['nent']):
+                kind = my_aspect['directory'][k].kind
+                pos = my_aspect['directory'][k].pos
+                if kind == FIFF.FIFF_COMMENT:
+                    tag = read_tag(fid, pos)
+                    comment = tag.data
+                elif kind == FIFF.FIFF_ASPECT_KIND:
+                    tag = read_tag(fid, pos)
+                    aspect_kind = int(tag.data)
+                elif kind == FIFF.FIFF_NAVE:
+                    tag = read_tag(fid, pos)
+                    nave = int(tag.data)
+                elif kind == FIFF.FIFF_EPOCH:
+                    tag = read_tag(fid, pos)
+                    epoch.append(tag)
+
+            logger.info('        nave = %d - aspect type = %d'
+                        % (nave, aspect_kind))
+
+            nepoch = len(epoch)
+            if nepoch != 1 and nepoch != info['nchan']:
+                raise ValueError('Number of epoch tags is unreasonable '
+                                 '(nepoch = %d nchan = %d)'
+                                 % (nepoch, info['nchan']))
+
+            if nepoch == 1:
+                # Only one epoch
+                all_data = epoch[0].data.astype(np.float)
+                # May need a transpose if the number of channels is one
+                if all_data.shape[1] == 1 and info['nchan'] == 1:
+                    all_data = all_data.T.astype(np.float)
+            else:
+                # Put the old style epochs together
+                all_data = np.concatenate([e.data[None, :] for e in epoch],
+                                          axis=0).astype(np.float)
+
+            if all_data.shape[1] != nsamp:
+                raise ValueError('Incorrect number of samples (%d instead of '
+                                 ' %d)' % (all_data.shape[1], nsamp))
+
+        # Calibrate
+        cals = np.array([info['chs'][k]['cal'] *
+                         info['chs'][k].get('scale', 1.0)
+                         for k in range(info['nchan'])])
+        all_data *= cals[:, np.newaxis]
+
+        times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
+        self.info = info
+
+        # Put the rest together
+        self.nave = nave
+        self._aspect_kind = aspect_kind
+        self.kind = _aspect_rev.get(str(self._aspect_kind), 'Unknown')
+        self.first = first
+        self.last = last
+        self.comment = comment
+        self.times = times
+
+        # bind info, proj, data to self so apply_proj can be used
+        self.data = all_data
+        if proj:
+            self.apply_proj()
+        # Run baseline correction
+        self.data = rescale(self.data, times, baseline, 'mean', copy=False)
+
+    def save(self, fname):
+        """Save dataset to file.
+
+        Parameters
+        ----------
+        fname : string
+            Name of the file where to save the data.
+        """
+        write_evokeds(fname, self)
+
+    def __repr__(self):
+        s = "comment : '%s'" % self.comment
+        s += ", time : [%f, %f]" % (self.times[0], self.times[-1])
+        s += ", n_epochs : %d" % self.nave
+        s += ", n_channels x n_times : %s x %s" % self.data.shape
+        return "<Evoked  |  %s>" % s
+
+    @property
+    def ch_names(self):
+        """Channel names"""
+        return self.info['ch_names']
+
+    def crop(self, tmin=None, tmax=None, copy=False):
+        """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        copy : bool
+            If False, the Evoked instance is cropped in place.
+        """
+        inst = self if not copy else self.copy()
+        mask = _time_mask(inst.times, tmin, tmax)
+        inst.times = inst.times[mask]
+        inst.first = int(inst.times[0] * inst.info['sfreq'])
+        inst.last = len(inst.times) + inst.first - 1
+        inst.data = inst.data[:, mask]
+        return inst
+
+    def shift_time(self, tshift, relative=True):
+        """Shift time scale in evoked data
+
+        Parameters
+        ----------
+        tshift : float
+            The amount of time shift to be applied if relative is True,
+            else the first time point. When relative is True, a positive
+            value of tshift moves the data forward, while a negative value
+            moves it backward.
+        relative : bool
+            If True, move the times backward or forward by the specified
+            amount. Else, set the first time point to the value of tshift.
+
+        Notes
+        -----
+        Maximum accuracy of time shift is 1 / evoked.info['sfreq']
+        """
+        times = self.times
+        sfreq = self.info['sfreq']
+
+        offset = self.first if relative else 0
+
+        self.first = int(tshift * sfreq) + offset
+        self.last = self.first + len(times) - 1
+        self.times = np.arange(self.first, self.last + 1,
+                               dtype=np.float) / sfreq
+
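+    # A minimal usage sketch (illustrative comment, not part of the class):
+    # shift an existing ``evoked`` response 50 ms forward; resolution is one
+    # sample, i.e. 1 / evoked.info['sfreq'] seconds.
+    #
+    # >>> evoked.shift_time(0.05, relative=True)  # doctest: +SKIP
+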
+    def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
+             xlim='tight', proj=False, hline=None, units=None, scalings=None,
+             titles=None, axes=None, gfp=False):
+        """Plot evoked data as butterfly plots
+
+        Left click to a line shows the channel name. Selecting an area by
+        clicking and holding left mouse button plots a topographic map of the
+        painted area.
+
+        Note: If bad channels are not excluded they are shown in red.
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of channels to plot. If None show all.
+        exclude : list of str | 'bads'
+            Channels names to exclude from being shown. If 'bads', the
+            bad channels are excluded.
+        unit : bool
+            Scale plot with channel (SI) unit.
+        show : bool
+            Call pyplot.show() at the end or not.
+        ylim : dict
+            ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
+            Valid keys are eeg, mag, grad
+        xlim : 'tight' | tuple | None
+            xlim for plots.
+        proj : bool | 'interactive'
+            If true SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        hline : list of floats | None
+            The values at which to show a horizontal line.
+        units : dict | None
+            The units of the channel types used for axis labels. If None,
+            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting.
+            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        titles : dict | None
+            The titles associated with the channels. If None, defaults to
+            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channel types. If instance of
+            Axes, there must be only one channel type plotted.
+        gfp : bool | 'only'
+            Plot GFP in green if True or "only". If "only", then the individual
+            channel traces will not be shown.
+        """
+        return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
+                           show=show, ylim=ylim, proj=proj, xlim=xlim,
+                           hline=hline, units=units, scalings=scalings,
+                           titles=titles, axes=axes, gfp=gfp)
+
+    def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
+                   clim=None, xlim='tight', proj=False, units=None,
+                   scalings=None, titles=None, axes=None, cmap='RdBu_r'):
+        """Plot evoked data as images
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of channels to plot. If None show all.
+        exclude : list of str | 'bads'
+            Channels names to exclude from being shown. If 'bads', the
+            bad channels are excluded.
+        unit : bool
+            Scale plot with channel (SI) unit.
+        show : bool
+            Call pyplot.show() at the end or not.
+        clim : dict
+            clim for images, e.g. clim = dict(eeg=[-200e-6, 200e-6]).
+            Valid keys are eeg, mag, grad.
+        xlim : 'tight' | tuple | None
+            xlim for plots.
+        proj : bool | 'interactive'
+            If true SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        units : dict | None
+            The units of the channel types used for axis labels. If None,
+            defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting.
+            If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        titles : dict | None
+            The titles associated with the channels. If None, defaults to
+            `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channel types. If instance of
+            Axes, there must be only one channel type plotted.
+        cmap : matplotlib colormap
+            Colormap.
+        """
+        return plot_evoked_image(self, picks=picks, exclude=exclude, unit=unit,
+                                 show=show, clim=clim, proj=proj, xlim=xlim,
+                                 units=units, scalings=scalings,
+                                 titles=titles, axes=axes, cmap=cmap)
+
+    def plot_topo(self, layout=None, layout_scale=0.945, color=None,
+                  border='none', ylim=None, scalings=None, title=None,
+                  proj=False, vline=[0.0], fig_facecolor='k',
+                  fig_background=None, axis_facecolor='k', font_color='w',
+                  show=True):
+        """Plot 2D topography of evoked responses.
+
+        Clicking on the plot of an individual sensor opens a new figure showing
+        the evoked response for the selected sensor.
+
+        Parameters
+        ----------
+        layout : instance of Layout | None
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout is
+            inferred from the data.
+        layout_scale : float
+            Scaling factor for adjusting the relative size of the layout
+            on the canvas.
+        color : list of color objects | color object | None
+            Everything matplotlib accepts to specify colors. If not list-like,
+            the color specified will be repeated. If None, colors are
+            automatically drawn.
+        border : str
+            matplotlib borders style to be used for each sensor plot.
+        ylim : dict | None
+            ylim for plots. The value determines the upper and lower subplot
+            limits, e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are
+            mag, grad, misc. If None, the ylim parameter for each channel is
+            determined by the maximum absolute peak.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting. If
+            None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        title : str
+            Title of the figure.
+        proj : bool | 'interactive'
+            If true SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        vline : list of floats | None
+            The values at which to show a vertical line.
+        fig_facecolor : str | obj
+            The figure face color. Defaults to black.
+        fig_background : None | numpy ndarray
+            A background image for the figure. This must work with a call to
+            plt.imshow. Defaults to None.
+        axis_facecolor : str | obj
+            The face color to be used for each sensor plot. Defaults to black.
+        font_color : str | obj
+            The color of text in the colorbar and title. Defaults to white.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            Images of evoked responses at sensor locations
+
+        .. versionadded:: 0.10.0
+        """
+        return plot_evoked_topo(self, layout=layout, layout_scale=layout_scale,
+                                color=color, border=border, ylim=ylim,
+                                scalings=scalings, title=title, proj=proj,
+                                vline=vline, fig_facecolor=fig_facecolor,
+                                fig_background=fig_background,
+                                axis_facecolor=axis_facecolor,
+                                font_color=font_color, show=show)
+
+    def plot_topomap(self, times="auto", ch_type=None, layout=None, vmin=None,
+                     vmax=None, cmap='RdBu_r', sensors=True, colorbar=True,
+                     scale=None, scale_time=1e3, unit=None, res=64, size=1,
+                     cbar_fmt="%3.1f", time_format='%01d ms', proj=False,
+                     show=True, show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear', average=None, head_pos=None,
+                     axes=None):
+        """Plot topographic maps of specific time points
+
+        Parameters
+        ----------
+        times : float | array of floats | "auto" | "peaks"
+            The time point(s) to plot. If "auto", the number of ``axes``
+            determines the amount of time point(s). If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant. If "peaks", finds time points
+            automatically by checking for local maxima in Global Field Power.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct
+            layout file is inferred from the data; if no appropriate layout
+            file was found, the layout is automatically generated from the
+            sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.max(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. Defaults to 'RdBu_r'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3 (ms).
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : scalar
+            Side length of the topomaps in inches (only applies when plotting
+            multiple topomaps at a time).
+        cbar_fmt : str
+            String format for colorbar values.
+        time_format : str
+            String format for topomap values. Defaults to ``"%01d ms"``.
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        show : bool
+            Call pyplot.show() at the end.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function
+            lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+            significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals:
+            ``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+            linewidth=0, markersize=4)``.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw. If 0, no contours will be
+            drawn.
+        image_interp : str
+            The image interpolation to be used. All matplotlib options are
+            accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as ``times`` (unless ``times`` is None). If
+            instance of Axes, ``times`` must be a float or a list of one float.
+            Defaults to None.
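+
+        Examples
+        --------
+        A minimal, illustrative sketch (assumes ``evoked`` contains
+        magnetometer data; the time points are arbitrary):
+
+        >>> evoked.plot_topomap(times=[0.1, 0.2],
+        ...                     ch_type='mag')  # doctest: +SKIP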
+        """
+        return plot_evoked_topomap(self, times=times, ch_type=ch_type,
+                                   layout=layout, vmin=vmin,
+                                   vmax=vmax, cmap=cmap, sensors=sensors,
+                                   colorbar=colorbar, scale=scale,
+                                   scale_time=scale_time,
+                                   unit=unit, res=res, proj=proj, size=size,
+                                   cbar_fmt=cbar_fmt, time_format=time_format,
+                                   show=show, show_names=show_names,
+                                   title=title, mask=mask,
+                                   mask_params=mask_params,
+                                   outlines=outlines, contours=contours,
+                                   image_interp=image_interp,
+                                   average=average, head_pos=head_pos,
+                                   axes=axes)
+
+    def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
+                   n_jobs=1):
+        """Plot MEG/EEG fields on head surface and helmet in 3D
+
+        Parameters
+        ----------
+        surf_maps : list
+            The surface mapping information obtained with make_field_map.
+        time : float | None
+            The time point at which the field map shall be displayed. If None,
+            the average peak latency (across sensor types) is used.
+        time_label : str
+            How to print info about the time instant visualized.
+        n_jobs : int
+            Number of jobs to run in parallel.
+
+        Returns
+        -------
+        fig : instance of mlab.Figure
+            The mayavi figure.
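+
+        Examples
+        --------
+        A minimal, illustrative sketch (the trans file and subject name
+        are placeholders):
+
+        >>> from mne import make_field_map
+        >>> maps = make_field_map(evoked, trans='sample-trans.fif',
+        ...                       subject='sample')  # doctest: +SKIP
+        >>> evoked.plot_field(maps, time=0.1)  # doctest: +SKIP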
+        """
+        return plot_evoked_field(self, surf_maps, time=time,
+                                 time_label=time_label, n_jobs=n_jobs)
+
+    def plot_white(self, noise_cov, show=True):
+        """Plot whitened evoked response
+
+        Plots the whitened evoked response and the whitened GFP as described
+        in [1]_. If a single covariance object is passed, the GFP panel
+        (bottom) will depict different sensor types. If multiple covariance
+        objects are passed as a list, the left column will display the
+        whitened evoked responses for each channel based on the whitener from
+        the noise covariance that has the highest log-likelihood. The right
+        column will depict the whitened GFPs based on each estimator
+        separately for each sensor type. Instead of the number of channels,
+        the GFP display shows the estimated rank. The rank estimation will be
+        printed by the logger for each noise covariance estimator that is
+        passed.
+
+        Parameters
+        ----------
+        noise_cov : list | instance of Covariance | str
+            The noise covariance as computed by ``mne.cov.compute_covariance``.
+        show : bool
+            Whether to show the figure or not. Defaults to True.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure object containing the plot.
+
+        References
+        ----------
+        .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
+               covariance estimation and spatial whitening of MEG and EEG
+               signals, vol. 108, 328-342, NeuroImage.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
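+
+        Examples
+        --------
+        A minimal, illustrative sketch (assumes ``epochs`` is the Epochs
+        instance the evoked response was averaged from):
+
+        >>> from mne import compute_covariance
+        >>> noise_covs = compute_covariance(
+        ...     epochs, tmax=0., method=['shrunk', 'empirical'],
+        ...     return_estimators=True)  # doctest: +SKIP
+        >>> evoked.plot_white(noise_covs)  # doctest: +SKIP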
+        """
+        return _plot_evoked_white(self, noise_cov=noise_cov, scalings=None,
+                                  rank=None, show=show)
+
+    def as_type(self, ch_type='grad', mode='fast'):
+        """Compute virtual evoked using interpolated fields in mag/grad channels.
+
+        .. warning:: Using virtual evoked data to compute an inverse can yield
+            unexpected results. The virtual channels have `'_virtual'` appended
+            at the end of the names to emphasize that the data contained in
+            them are interpolated.
+
+        Parameters
+        ----------
+        ch_type : str
+            The destination channel type. It can be 'mag' or 'grad'.
+        mode : str
+            Either `'accurate'` or `'fast'`, determines the quality of the
+            Legendre polynomial expansion used. `'fast'` should be sufficient
+            for most applications.
+
+        Returns
+        -------
+        evoked : instance of mne.Evoked
+            The transformed evoked object containing only virtual channels.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
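+
+        Examples
+        --------
+        A minimal, illustrative sketch (assumes ``evoked`` contains
+        gradiometer channels to interpolate from):
+
+        >>> evoked_mag = evoked.as_type('mag')  # doctest: +SKIP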
+        """
+        from .forward import _as_meg_type_evoked
+        return _as_meg_type_evoked(self, ch_type=ch_type, mode=mode)
+
+    def resample(self, sfreq, npad=100, window='boxcar'):
+        """Resample data
+
+        This function operates in-place.
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
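+
+        Examples
+        --------
+        Downsample to 100 Hz, keeping the original intact (illustrative):
+
+        >>> evoked_ds = evoked.copy()  # doctest: +SKIP
+        >>> evoked_ds.resample(100.)  # doctest: +SKIP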
+        """
+        o_sfreq = self.info['sfreq']
+        self.data = resample(self.data, sfreq, o_sfreq, npad, -1, window)
+        # adjust indirectly affected variables
+        self.info['sfreq'] = sfreq
+        self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq +
+                      self.times[0])
+        self.first = int(self.times[0] * self.info['sfreq'])
+        self.last = len(self.times) + self.first - 1
+
+    def detrend(self, order=1, picks=None):
+        """Detrend data
+
+        This function operates in-place.
+
+        Parameters
+        ----------
+        order : int
+            Either 0 or 1, the order of the detrending. 0 is a constant
+            (DC) detrend, 1 is a linear detrend.
+        picks : array-like of int | None
+            If None only MEG and EEG channels are detrended.
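+
+        Examples
+        --------
+        Remove a linear trend from all MEG/EEG channels in-place
+        (illustrative):
+
+        >>> evoked.detrend(order=1)  # doctest: +SKIP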
+        """
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
+                               stim=False, eog=False, ecg=False, emg=False,
+                               exclude='bads')
+        self.data[picks] = detrend(self.data[picks], order, axis=-1)
+
+    def copy(self):
+        """Copy the instance of evoked
+
+        Returns
+        -------
+        evoked : instance of Evoked
+        """
+        evoked = deepcopy(self)
+        return evoked
+
+    def __add__(self, evoked):
+        """Add evoked taking into account number of epochs"""
+        out = combine_evoked([self, evoked])
+        out.comment = self.comment + " + " + evoked.comment
+        return out
+
+    def __sub__(self, evoked):
+        """Add evoked taking into account number of epochs"""
+        this_evoked = deepcopy(evoked)
+        this_evoked.data *= -1.
+        out = combine_evoked([self, this_evoked])
+        if self.comment is None or this_evoked.comment is None:
+            warnings.warn('evoked.comment expects a string but is None')
+            out.comment = 'unknown'
+        else:
+            out.comment = self.comment + " - " + this_evoked.comment
+        return out
+
+    def __hash__(self):
+        return object_hash(dict(info=self.info, data=self.data))
+
+    def get_peak(self, ch_type=None, tmin=None, tmax=None, mode='abs',
+                 time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        ch_type : {'mag', 'grad', 'eeg', 'misc', None}
+            The channel type to use. Defaults to None. If more than one sensor
+            type is present in the data, the channel type has to be explicitly
+            set.
+        tmin : float | None
+            The minimum point in time to be considered for peak getting.
+        tmax : float | None
+            The maximum point in time to be considered for peak getting.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        time_as_index : bool
+            Whether to return the time index instead of the latency in seconds.
+
+        Returns
+        -------
+        ch_name : str
+            The channel exhibiting the maximum response.
+        latency : float | int
+            The time point of the maximum response, either latency in seconds
+            or index.
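+
+        Examples
+        --------
+        A minimal, illustrative sketch (channel type and window are
+        arbitrary):
+
+        >>> ch_name, latency = evoked.get_peak(ch_type='mag', tmin=0.05,
+        ...                                    tmax=0.15)  # doctest: +SKIP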
+        """
+        supported = ('mag', 'grad', 'eeg', 'misc', 'None')
+
+        data_picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False)
+        types_used = set([channel_type(self.info, idx) for idx in data_picks])
+
+        if str(ch_type) not in supported:
+            raise ValueError('Channel type must be `{supported}`. You gave me '
+                             '`{ch_type}` instead.'
+                             .format(ch_type=ch_type,
+                                     supported='` or `'.join(supported)))
+
+        elif ch_type is not None and ch_type not in types_used:
+            raise ValueError('Channel type `{ch_type}` not found in this '
+                             'evoked object.'.format(ch_type=ch_type))
+
+        elif len(types_used) > 1 and ch_type is None:
+            raise RuntimeError('More than one sensor type found. `ch_type` '
+                               'must not be `None`, pass a sensor type '
+                               'value instead')
+
+        meg, eeg, misc, picks = False, False, False, None
+
+        if ch_type == 'mag':
+            meg = ch_type
+        elif ch_type == 'grad':
+            meg = ch_type
+        elif ch_type == 'eeg':
+            eeg = True
+        elif ch_type == 'misc':
+            misc = True
+
+        if ch_type is not None:
+            picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc,
+                               ref_meg=False)
+
+        data = self.data if picks is None else self.data[picks]
+        ch_idx, time_idx = _get_peak(data, self.times, tmin,
+                                     tmax, mode)
+
+        # map the index back through picks so the right channel is named
+        if picks is not None:
+            ch_idx = picks[ch_idx]
+        return (self.ch_names[ch_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
+
+class EvokedArray(Evoked):
+    """Evoked object from numpy array
+
+    Parameters
+    ----------
+    data : array of shape (n_channels, n_times)
+        The channels' evoked response.
+    info : instance of Info
+        Info dictionary. Consider using ``create_info`` to populate
+        this structure.
+    tmin : float
+        Start time before event.
+    comment : string
+        Comment on dataset. Can be the condition. Defaults to ''.
+    nave : int
+        Number of averaged epochs. Defaults to 1.
+    kind : str
+        Type of data, either average or standard_error. Defaults to 'average'.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    EpochsArray, io.RawArray, create_info
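+
+    Examples
+    --------
+    A minimal sketch with random data (channel names, sampling rate, and
+    shapes are illustrative):
+
+    >>> import numpy as np
+    >>> from mne import create_info
+    >>> info = create_info(['EEG 001', 'EEG 002'], sfreq=1000.,
+    ...                    ch_types='eeg')  # doctest: +SKIP
+    >>> data = np.random.randn(2, 500)
+    >>> evoked = EvokedArray(data, info, tmin=-0.1)  # doctest: +SKIP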
+    """
+
+    @verbose
+    def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
+                 verbose=None):
+
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+
+        if data.ndim != 2:
+            raise ValueError('Data must be a 2D array of shape (n_channels, '
+                             'n_samples)')
+
+        if len(info['ch_names']) != np.shape(data)[0]:
+            raise ValueError('Info (%s) and data (%s) must have same number '
+                             'of channels.' % (len(info['ch_names']),
+                                               np.shape(data)[0]))
+
+        self.data = data
+
+        # XXX: this should use round and be tested
+        self.first = int(tmin * info['sfreq'])
+        self.last = self.first + np.shape(data)[-1] - 1
+        self.times = np.arange(self.first, self.last + 1,
+                               dtype=np.float) / info['sfreq']
+        self.info = info
+        self.nave = nave
+        self.kind = kind
+        self.comment = comment
+        self.picks = None
+        self.verbose = verbose
+        self._projector = None
+        if self.kind == 'average':
+            self._aspect_kind = _aspect_dict['average']
+        else:
+            self._aspect_kind = _aspect_dict['standard_error']
+
+
+def _get_entries(fid, evoked_node):
+    """Helper to get all evoked entries"""
+    comments = list()
+    aspect_kinds = list()
+    for ev in evoked_node:
+        for k in range(ev['nent']):
+            my_kind = ev['directory'][k].kind
+            pos = ev['directory'][k].pos
+            if my_kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                comments.append(tag.data)
+        my_aspect = dir_tree_find(ev, FIFF.FIFFB_ASPECT)[0]
+        for k in range(my_aspect['nent']):
+            my_kind = my_aspect['directory'][k].kind
+            pos = my_aspect['directory'][k].pos
+            if my_kind == FIFF.FIFF_ASPECT_KIND:
+                tag = read_tag(fid, pos)
+                aspect_kinds.append(int(tag.data))
+    comments = np.atleast_1d(comments)
+    aspect_kinds = np.atleast_1d(aspect_kinds)
+    if len(comments) != len(aspect_kinds) or len(comments) == 0:
+        fid.close()
+        raise ValueError('Dataset names in FIF file '
+                         'could not be found.')
+    t = [_aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
+    t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)]
+    t = '  ' + '\n  '.join(t)
+    return comments, aspect_kinds, t
+
+
+def _get_evoked_node(fname):
+    """Helper to get info in evoked file"""
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        _, meas = read_meas_info(fid, tree)
+        evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
+    return evoked_node
+
+
+def grand_average(all_evoked, interpolate_bads=True):
+    """Make grand average of a list evoked data
+
+    The function interpolates bad channels based on the `interpolate_bads`
+    parameter. If `interpolate_bads` is True, the grand average
+    will contain the good channels and the bad channels interpolated
+    from the good MEG/EEG channels.
+
+    The grand_average.nave attribute will be equal to the number
+    of evoked datasets used to calculate the grand average.
+
+    Note: A grand average of evoked data should not be used for source
+    localization.
+
+    Parameters
+    ----------
+    all_evoked : list of Evoked data
+        The evoked datasets.
+    interpolate_bads : bool
+        If True, bad MEG and EEG channels are interpolated.
+
+    Returns
+    -------
+    grand_average : Evoked
+        The grand average data.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
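+
+    Examples
+    --------
+    A minimal, illustrative sketch (assumes one Evoked instance per
+    subject):
+
+    >>> ga = grand_average([evoked_subj1, evoked_subj2])  # doctest: +SKIP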
+    """
+    # check if all elements in the given list are evoked data
+    if not all(isinstance(e, Evoked) for e in all_evoked):
+        raise ValueError("Not all the elements in list are evoked data")
+
+    # Copy channels to leave the original evoked datasets intact.
+    all_evoked = [e.copy() for e in all_evoked]
+
+    # Interpolates if necessary
+    if interpolate_bads:
+        all_evoked = [e.interpolate_bads() if len(e.info['bads']) > 0
+                      else e for e in all_evoked]
+
+    equalize_channels(all_evoked)  # apply equalize_channels
+    # make grand_average object using combine_evoked
+    grand_average = combine_evoked(all_evoked, weights='equal')
+    # change the grand_average.nave to the number of Evokeds
+    grand_average.nave = len(all_evoked)
+    # change comment field
+    grand_average.comment = "Grand average (n = %d)" % grand_average.nave
+    return grand_average
+
+
+def combine_evoked(all_evoked, weights='nave'):
+    """Merge evoked data by weighted addition
+
+    Data should have the same channels and the same time instants.
+    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
+
+    Parameters
+    ----------
+    all_evoked : list of Evoked
+        The evoked datasets.
+    weights : list of float | str
+        The weights to apply to the data of each evoked instance.
+        Can also be ``'nave'`` to weight according to evoked.nave,
+        or ``"equal"`` to use equal weighting (each weighted as ``1/N``).
+
+    Returns
+    -------
+    evoked : Evoked
+        The new evoked data.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
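+
+    Examples
+    --------
+    Contrast two conditions by weighted subtraction (illustrative):
+
+    >>> contrast = combine_evoked([evoked_a, evoked_b],
+    ...                           weights=[1, -1])  # doctest: +SKIP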
+    """
+    evoked = all_evoked[0].copy()
+    if isinstance(weights, string_types):
+        if weights not in ('nave', 'equal'):
+            raise ValueError('weights must be a list of float, or "nave" or '
+                             '"equal"')
+        if weights == 'nave':
+            weights = np.array([e.nave for e in all_evoked], float)
+            weights /= weights.sum()
+        else:  # == 'equal'
+            weights = [1. / len(all_evoked)] * len(all_evoked)
+    weights = np.array(weights, float)
+    if weights.ndim != 1 or weights.size != len(all_evoked):
+        raise ValueError('weights must be the same size as all_evoked')
+
+    ch_names = evoked.ch_names
+    for e in all_evoked[1:]:
+        # raise (not assert) so the checks are not stripped under python -O
+        if e.ch_names != ch_names:
+            raise ValueError("%s and %s do not contain the same channels"
+                             % (evoked, e))
+        if np.max(np.abs(e.times - evoked.times)) >= 1e-7:
+            raise ValueError("%s and %s do not contain the same time instants"
+                             % (evoked, e))
+
+    # use union of bad channels
+    bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
+                                                 for ev in all_evoked[1:])))
+    evoked.info['bads'] = bads
+
+    evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
+    evoked.nave = max(int(1. / sum(w ** 2 / e.nave
+                                   for w, e in zip(weights, all_evoked))), 1)
+    return evoked
+
+
+ at verbose
+def read_evokeds(fname, condition=None, baseline=None, kind='average',
+                 proj=True, verbose=None):
+    """Read evoked dataset(s)
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -ave.fif or -ave.fif.gz.
+    condition : int or str | list of int or str | None
+        The index or list of indices of the evoked dataset to read. FIF files
+        can contain multiple datasets. If None, all datasets are returned as a
+        list.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction. If None do not apply
+        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used and if b is None then
+        b is set to the end of the interval. If baseline is equal to
+        (None, None) all the time interval is used.
+    kind : str
+        Either 'average' or 'standard_error', the type of data to read.
+    proj : bool
+        If False, available projectors won't be applied to the data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked (if condition is int or str) or list of Evoked (if
+        condition is None or list)
+        The evoked dataset(s).
+
+    See Also
+    --------
+    write_evokeds
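+
+    Examples
+    --------
+    A minimal, illustrative sketch (the file name is a placeholder):
+
+    >>> evokeds = read_evokeds('sample-ave.fif')  # doctest: +SKIP
+    >>> evoked = read_evokeds('sample-ave.fif', condition=0,
+    ...                       baseline=(None, 0))  # doctest: +SKIP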
+    """
+    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
+
+    return_list = True
+    if condition is None:
+        evoked_node = _get_evoked_node(fname)
+        condition = range(len(evoked_node))
+    elif not isinstance(condition, list):
+        condition = [condition]
+        return_list = False
+
+    out = [Evoked(fname, c, baseline=baseline, kind=kind, proj=proj,
+           verbose=verbose) for c in condition]
+
+    return out if return_list else out[0]
+
+
+def write_evokeds(fname, evoked):
+    """Write an evoked dataset to a file
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -ave.fif or -ave.fif.gz.
+    evoked : Evoked instance, or list of Evoked instances
+        The evoked dataset, or list of evoked datasets, to save in one file.
+        Note that the measurement info from the first evoked instance is used,
+        so be sure that information matches.
+
+    See Also
+    --------
+    read_evokeds
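+
+    Examples
+    --------
+    A minimal, illustrative sketch (the file name is a placeholder):
+
+    >>> write_evokeds('sample-ave.fif', [evoked1, evoked2])  # doctest: +SKIP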
+    """
+    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
+
+    if not isinstance(evoked, list):
+        evoked = [evoked]
+
+    # Create the file and save the essentials
+    with start_file(fname) as fid:
+
+        start_block(fid, FIFF.FIFFB_MEAS)
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+        if evoked[0].info['meas_id'] is not None:
+            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
+
+        # Write measurement info
+        write_meas_info(fid, evoked[0].info)
+
+        # One or more evoked data sets
+        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        for e in evoked:
+            start_block(fid, FIFF.FIFFB_EVOKED)
+
+            # Comment is optional
+            if e.comment is not None and len(e.comment) > 0:
+                write_string(fid, FIFF.FIFF_COMMENT, e.comment)
+
+            # First and last sample
+            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
+            write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
+
+            # The epoch itself
+            start_block(fid, FIFF.FIFFB_ASPECT)
+
+            write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
+            write_int(fid, FIFF.FIFF_NAVE, e.nave)
+
+            decal = np.zeros((e.info['nchan'], 1))
+            for k in range(e.info['nchan']):
+                decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
+                                  e.info['chs'][k].get('scale', 1.0))
+
+            write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
+            end_block(fid, FIFF.FIFFB_ASPECT)
+            end_block(fid, FIFF.FIFFB_EVOKED)
+
+        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+        end_block(fid, FIFF.FIFFB_MEAS)
+        end_file(fid)
+
+
+def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
+    """Get feature-index and time of maximum signal from 2D array
+
+    Note. This is a 'getter', not a 'finder'. For non-evoked type
+    data and continuous signals, please use proper peak detection algorithms.
+
+    Parameters
+    ----------
+    data : instance of numpy.ndarray (n_locations, n_times)
+        The data, either evoked in sensor or source space.
+    times : instance of numpy.ndarray (n_times)
+        The times in seconds.
+    tmin : float | None
+        The minimum point in time to be considered for peak getting.
+    tmax : float | None
+        The maximum point in time to be considered for peak getting.
+    mode : {'pos', 'neg', 'abs'}
+        How to deal with the sign of the data. If 'pos' only positive
+        values will be considered. If 'neg' only negative values will
+        be considered. If 'abs' absolute values will be considered.
+        Defaults to 'abs'.
+
+    Returns
+    -------
+    max_loc : int
+        The index of the feature with the maximum value.
+    max_time : int
+        The time point of the maximum response, index.
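+
+    Examples
+    --------
+    A small synthetic example (values chosen so the peak is unambiguous):
+
+    >>> import numpy as np
+    >>> data = np.array([[0., 1., 2.], [3., 4., 0.]])
+    >>> times = np.array([0., 0.1, 0.2])
+    >>> loc, time_idx = _get_peak(data, times)
+    >>> int(loc), int(time_idx)
+    (1, 1)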
+    """
+    modes = ('abs', 'neg', 'pos')
+    if mode not in modes:
+        raise ValueError('The `mode` parameter must be `{modes}`. You gave '
+                         'me `{mode}`'.format(modes='` or `'.join(modes),
+                                              mode=mode))
+
+    if tmin is None:
+        tmin = times[0]
+    if tmax is None:
+        tmax = times[-1]
+
+    if tmin < times.min():
+        raise ValueError('The tmin value is out of bounds. It must be '
+                         'within {0} and {1}'.format(times.min(), times.max()))
+    if tmax > times.max():
+        raise ValueError('The tmax value is out of bounds. It must be '
+                         'within {0} and {1}'.format(times.min(), times.max()))
+    if tmin >= tmax:
+        raise ValueError('The tmin must be smaller than tmax')
+
+    time_win = (times >= tmin) & (times <= tmax)
+    mask = np.ones_like(data).astype(np.bool)
+    mask[:, time_win] = False
+
+    maxfun = np.argmax
+    if mode == 'pos':
+        if not np.any(data > 0):
+            raise ValueError('No positive values encountered. Cannot '
+                             'operate in pos mode.')
+    elif mode == 'neg':
+        if not np.any(data < 0):
+            raise ValueError('No negative values encountered. Cannot '
+                             'operate in neg mode.')
+        maxfun = np.argmin
+
+    masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
+                               mask=mask)
+
+    max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
+
+    return max_loc, max_time
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/FieldTrip.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/FieldTrip.py
new file mode 100644
index 0000000..66bb1d3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/FieldTrip.py
@@ -0,0 +1,508 @@
+"""
+FieldTrip buffer (V1) client in pure Python
+
+(C) 2010 S. Klanke
+"""
+
+# We need socket, struct, and numpy
+import socket
+import struct
+import numpy
+
+VERSION = 1
+PUT_HDR = 0x101
+PUT_DAT = 0x102
+PUT_EVT = 0x103
+PUT_OK = 0x104
+PUT_ERR = 0x105
+GET_HDR = 0x201
+GET_DAT = 0x202
+GET_EVT = 0x203
+GET_OK = 0x204
+GET_ERR = 0x205
+FLUSH_HDR = 0x301
+FLUSH_DAT = 0x302
+FLUSH_EVT = 0x303
+FLUSH_OK = 0x304
+FLUSH_ERR = 0x305
+WAIT_DAT = 0x402
+WAIT_OK = 0x404
+WAIT_ERR = 0x405
+
+DATATYPE_CHAR = 0
+DATATYPE_UINT8 = 1
+DATATYPE_UINT16 = 2
+DATATYPE_UINT32 = 3
+DATATYPE_UINT64 = 4
+DATATYPE_INT8 = 5
+DATATYPE_INT16 = 6
+DATATYPE_INT32 = 7
+DATATYPE_INT64 = 8
+DATATYPE_FLOAT32 = 9
+DATATYPE_FLOAT64 = 10
+DATATYPE_UNKNOWN = 0xFFFFFFFF
+
+CHUNK_UNSPECIFIED = 0
+CHUNK_CHANNEL_NAMES = 1
+CHUNK_CHANNEL_FLAGS = 2
+CHUNK_RESOLUTIONS = 3
+CHUNK_ASCII_KEYVAL = 4
+CHUNK_NIFTI1 = 5
+CHUNK_SIEMENS_AP = 6
+CHUNK_CTF_RES4 = 7
+CHUNK_NEUROMAG_FIF = 8
+
+# List for converting FieldTrip datatypes to Numpy datatypes
+numpyType = ['int8', 'uint8', 'uint16', 'uint32', 'uint64',
+             'int8', 'int16', 'int32', 'int64', 'float32', 'float64']
+# Corresponding word sizes
+wordSize = [1, 1, 2, 4, 8, 1, 2, 4, 8, 4, 8]
+# FieldTrip data type as indexed by numpy dtype.num
+# this goes  0 => nothing, 1..4 => int8, uint8, int16, uint16, 7..10 =>
+# int32, uint32, int64, uint64  11..12 => float32, float64
+dataType = [-1, 5, 1, 6, 2, -1, -1, 7, 3, 8, 4, 9, 10]
+
+
+def serialize(A):
+    """
+    Returns Fieldtrip data type and string representation of the given
+    object, if possible.
+    """
+    if isinstance(A, str):
+        return (0, A)
+
+    if isinstance(A, numpy.ndarray):
+        dt = A.dtype
+        if not(dt.isnative) or dt.num < 1 or dt.num >= len(dataType):
+            return (DATATYPE_UNKNOWN, None)
+
+        ft = dataType[dt.num]
+        if ft == -1:
+            return (DATATYPE_UNKNOWN, None)
+
+        if A.flags['C_CONTIGUOUS']:
+            # great, just use the array's buffer interface
+            return (ft, str(A.data))
+
+        # otherwise, we need a copy to C order
+        AC = A.copy('C')
+        return (ft, str(AC.data))
+
+    if isinstance(A, int):
+        return (DATATYPE_INT32, struct.pack('i', A))
+
+    if isinstance(A, float):
+        return (DATATYPE_FLOAT64, struct.pack('d', A))
+
+    return (DATATYPE_UNKNOWN, None)
+
+
+class Chunk:
+
+    def __init__(self):
+        self.type = 0
+        self.size = 0
+        self.buf = ''
+
+
+class Header:
+
+    """Class for storing header information in the FieldTrip buffer format"""
+
+    def __init__(self):
+        self.nChannels = 0
+        self.nSamples = 0
+        self.nEvents = 0
+        self.fSample = 0.0
+        self.dataType = 0
+        self.chunks = {}
+        self.labels = []
+
+    def __str__(self):
+        return ('Channels.: %i\nSamples..: %i\nEvents...: %i\nSampFreq.: '
+                '%f\nDataType.: %s\n'
+                % (self.nChannels, self.nSamples, self.nEvents,
+                   self.fSample, numpyType[self.dataType]))
+
+
+class Event:
+    """Class for storing events in the FieldTrip buffer format"""
+
+    def __init__(self, S=None):
+        if S is None:
+            self.type = ''
+            self.value = ''
+            self.sample = 0
+            self.offset = 0
+            self.duration = 0
+        else:
+            self.deserialize(S)
+
+    def __str__(self):
+        return ('Type.....: %s\nValue....: %s\nSample...: %i\nOffset...: '
+                '%i\nDuration.: %i\n' % (str(self.type), str(self.value),
+                                         self.sample, self.offset,
+                                         self.duration))
+
+    def deserialize(self, buf):
+        bufsize = len(buf)
+        if bufsize < 32:
+            return 0
+
+        (type_type, type_numel, value_type, value_numel, sample,
+         offset, duration, bsiz) = struct.unpack('IIIIIiiI', buf[0:32])
+
+        self.sample = sample
+        self.offset = offset
+        self.duration = duration
+
+        st = type_numel * wordSize[type_type]
+        sv = value_numel * wordSize[value_type]
+
+        if bsiz + 32 > bufsize or st + sv > bsiz:
+            raise IOError(
+                'Invalid event definition -- does not fit in given buffer')
+
+        raw_type = buf[32:32 + st]
+        raw_value = buf[32 + st:32 + st + sv]
+
+        if type_type == 0:
+            self.type = raw_type
+        else:
+            self.type = numpy.ndarray(
+                (type_numel), dtype=numpyType[type_type], buffer=raw_type)
+
+        if value_type == 0:
+            self.value = raw_value
+        else:
+            self.value = numpy.ndarray(
+                (value_numel), dtype=numpyType[value_type], buffer=raw_value)
+
+        return bsiz + 32
+
+    def serialize(self):
+        """
+        Returns the contents of this event as a string, ready to
+        send over the network, or None in case of conversion problems.
+        """
+        type_type, type_buf = serialize(self.type)
+        if type_type == DATATYPE_UNKNOWN:
+            return None
+        type_size = len(type_buf)
+        type_numel = type_size / wordSize[type_type]
+
+        value_type, value_buf = serialize(self.value)
+        if value_type == DATATYPE_UNKNOWN:
+            return None
+        value_size = len(value_buf)
+        value_numel = value_size / wordSize[value_type]
+
+        bufsize = type_size + value_size
+
+        S = struct.pack('IIIIIiiI', type_type, type_numel, value_type,
+                        value_numel, int(self.sample), int(self.offset),
+                        int(self.duration), bufsize)
+        return S + type_buf + value_buf
+
+
+class Client:
+
+    """Class for managing a client connection to a FieldTrip buffer."""
+
+    def __init__(self):
+        self.isConnected = False
+        self.sock = []
+
+    def connect(self, hostname, port=1972):
+        """
+        connect(hostname [, port]) -- make a connection, default port is
+        1972.
+        """
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((hostname, port))
+        self.sock.setblocking(True)
+        self.isConnected = True
+
+    def disconnect(self):
+        """disconnect() -- close a connection."""
+        if self.isConnected:
+            self.sock.close()
+            self.sock = []
+            self.isConnected = False
+
+    def sendRaw(self, request):
+        """Send all bytes of the string 'request' out to socket."""
+        if not(self.isConnected):
+            raise IOError('Not connected to FieldTrip buffer')
+
+        N = len(request)
+        nw = self.sock.send(request)
+        while nw < N:
+            nw += self.sock.send(request[nw:])
+
+    def sendRequest(self, command, payload=None):
+        if payload is None:
+            request = struct.pack('HHI', VERSION, command, 0)
+        else:
+            request = struct.pack(
+                'HHI', VERSION, command, len(payload)) + payload
+        self.sendRaw(request)
+
+    def receiveResponse(self, minBytes=0):
+        """
+        Receive a response from the server and return it as
+        (status, bufsize, payload).
+        """
+
+        resp_hdr = self.sock.recv(8)
+        while len(resp_hdr) < 8:
+            resp_hdr += self.sock.recv(8 - len(resp_hdr))
+
+        (version, command, bufsize) = struct.unpack('HHI', resp_hdr)
+
+        if version != VERSION:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize > 0:
+            payload = self.sock.recv(bufsize)
+            while len(payload) < bufsize:
+                payload += self.sock.recv(bufsize - len(payload))
+        else:
+            payload = None
+        return (command, bufsize, payload)
+
+    def getHeader(self):
+        """
+        getHeader() -- grabs header information from the buffer and returns
+        it as a Header object.
+        """
+
+        self.sendRequest(GET_HDR)
+        (status, bufsize, payload) = self.receiveResponse()
+
+        if status == GET_ERR:
+            return None
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize < 24:
+            self.disconnect()
+            raise IOError('Invalid HEADER packet received (too few bytes) - '
+                          'disconnecting')
+
+        (nchans, nsamp, nevt, fsamp, dtype,
+         bfsiz) = struct.unpack('IIIfII', payload[0:24])
+
+        H = Header()
+        H.nChannels = nchans
+        H.nSamples = nsamp
+        H.nEvents = nevt
+        H.fSample = fsamp
+        H.dataType = dtype
+
+        if bfsiz > 0:
+            offset = 24
+            while offset + 8 < bufsize:
+                (chunk_type, chunk_len) = struct.unpack(
+                    'II', payload[offset:offset + 8])
+                offset += 8
+                # stop if the chunk would run past the end of the payload
+                if offset + chunk_len > bufsize:
+                    break
+                H.chunks[chunk_type] = payload[offset:offset + chunk_len]
+                offset += chunk_len
+
+            if CHUNK_CHANNEL_NAMES in H.chunks:
+                L = H.chunks[CHUNK_CHANNEL_NAMES].split(b'\0')
+                numLab = len(L)
+                if numLab >= H.nChannels:
+                    H.labels = [x.decode('utf-8') for x in L[0:H.nChannels]]
+
+        return H
+
+    def putHeader(self, nChannels, fSample, dataType, labels=None,
+                  chunks=None):
+        haveLabels = False
+        extras = ''
+        if not(labels is None):
+            serLabels = ''
+            try:
+                for n in range(0, nChannels):
+                    serLabels += labels[n] + '\0'
+            except:
+                raise ValueError('Channel names (labels), if given,'
+                                 ' must be a list of N=numChannels strings')
+
+            extras = struct.pack('II', CHUNK_CHANNEL_NAMES,
+                                 len(serLabels)) + serLabels
+            haveLabels = True
+
+        if not(chunks is None):
+            for chunk_type, chunk_data in chunks:
+                if haveLabels and chunk_type == CHUNK_CHANNEL_NAMES:
+                    # ignore channel names chunk in case we got labels
+                    continue
+                extras += struct.pack('II', chunk_type,
+                                      len(chunk_data)) + chunk_data
+
+        sizeChunks = len(extras)
+
+        hdef = struct.pack('IIIfII', nChannels, 0, 0,
+                           fSample, dataType, sizeChunks)
+        request = struct.pack('HHI', VERSION, PUT_HDR,
+                              sizeChunks + len(hdef)) + hdef + extras
+        self.sendRaw(request)
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status != PUT_OK:
+            raise IOError('Header could not be written')
+
+    def getData(self, index=None):
+        """
+        getData([indices]) -- retrieve data samples and return them as a
+        Numpy array, samples in rows(!). The 'indices' argument is optional,
+        and if given, must be a tuple or list with inclusive, zero-based
+        start/end indices.
+        """
+
+        if index is None:
+            request = struct.pack('HHI', VERSION, GET_DAT, 0)
+        else:
+            indS = int(index[0])
+            indE = int(index[1])
+            request = struct.pack('HHIII', VERSION, GET_DAT, 8, indS, indE)
+        self.sendRaw(request)
+
+        (status, bufsize, payload) = self.receiveResponse()
+        if status == GET_ERR:
+            return None
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        if bufsize < 16:
+            self.disconnect()
+            raise IOError('Invalid DATA packet received (too few bytes)')
+
+        (nchans, nsamp, datype, bfsiz) = struct.unpack('IIII', payload[0:16])
+
+        if bfsiz < bufsize - 16 or datype >= len(numpyType):
+            raise IOError('Invalid DATA packet received')
+
+        raw = payload[16:bfsiz + 16]
+        D = numpy.ndarray((nsamp, nchans), dtype=numpyType[datype], buffer=raw)
+
+        return D
+
+    def getEvents(self, index=None):
+        """
+        getEvents([indices]) -- retrieve events and return them as a list
+        of Event objects. The 'indices' argument is optional, and if given,
+        must be a tuple or list with inclusive, zero-based start/end indices.
+        The 'type' and 'value' fields of the event will be converted to strings
+        or Numpy arrays.
+        """
+
+        if index is None:
+            request = struct.pack('HHI', VERSION, GET_EVT, 0)
+        else:
+            indS = int(index[0])
+            indE = int(index[1])
+            request = struct.pack('HHIII', VERSION, GET_EVT, 8, indS, indE)
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status == GET_ERR:
+            return []
+
+        if status != GET_OK:
+            self.disconnect()
+            raise IOError('Bad response from buffer server - disconnecting')
+
+        offset = 0
+        E = []
+        while 1:
+            e = Event()
+            nextOffset = e.deserialize(resp_buf[offset:])
+            if nextOffset == 0:
+                break
+            E.append(e)
+            offset = offset + nextOffset
+
+        return E
+
+    def putEvents(self, E):
+        """
+        putEvents(E) -- writes a single or multiple events, depending on
+        whether an 'Event' object, or a list of 'Event' objects is
+        given as an argument.
+        """
+        if isinstance(E, Event):
+            buf = E.serialize()
+        else:
+            buf = ''
+            num = 0
+            for e in E:
+                if not(isinstance(e, Event)):
+                    # raising a string is a TypeError; raise a real exception
+                    raise TypeError('Element %i in given list is not an '
+                                    'Event' % num)
+                buf = buf + e.serialize()
+                num = num + 1
+
+        self.sendRequest(PUT_EVT, buf)
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != PUT_OK:
+            raise IOError('Events could not be written.')
+
+    def putData(self, D):
+        """
+        putData(D) -- writes samples that must be given as a NUMPY array,
+        samples x channels. The type of the samples (D) and the number of
+        channels must match the corresponding quantities in the FieldTrip
+        buffer.
+        """
+
+        if not(isinstance(D, numpy.ndarray)) or len(D.shape) != 2:
+            raise ValueError(
+                'Data must be given as a NUMPY array (samples x channels)')
+
+        nSamp = D.shape[0]
+        nChan = D.shape[1]
+
+        (dataType, dataBuf) = serialize(D)
+
+        dataBufSize = len(dataBuf)
+
+        request = struct.pack('HHI', VERSION, PUT_DAT, 16 + dataBufSize)
+        dataDef = struct.pack('IIII', nChan, nSamp, dataType, dataBufSize)
+        self.sendRaw(request + dataDef + dataBuf)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+        if status != PUT_OK:
+            raise IOError('Samples could not be written.')
+
+    def poll(self):
+
+        request = struct.pack('HHIIII', VERSION, WAIT_DAT, 12, 0, 0, 0)
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != WAIT_OK or bufsize < 8:
+            raise IOError('Polling failed.')
+
+        return struct.unpack('II', resp_buf[0:8])
+
+    def wait(self, nsamples, nevents, timeout):
+        request = struct.pack('HHIIII', VERSION, WAIT_DAT,
+                              12, int(nsamples), int(nevents), int(timeout))
+        self.sendRaw(request)
+
+        (status, bufsize, resp_buf) = self.receiveResponse()
+
+        if status != WAIT_OK or bufsize < 8:
+            raise IOError('Wait request failed.')
+
+        return struct.unpack('II', resp_buf[0:8])
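+
+
+# A minimal usage sketch (illustrative; assumes a FieldTrip buffer server is
+# listening on localhost:1972):
+#
+#     client = Client()
+#     client.connect('localhost', port=1972)
+#     header = client.getHeader()
+#     if header is not None and header.nSamples > 0:
+#         data = client.getData((0, header.nSamples - 1))
+#     client.disconnect()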
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/__init__.py
new file mode 100644
index 0000000..6f70ab7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/__init__.py
@@ -0,0 +1,5 @@
+from . import six
+from . import jdcal
+from . import decorator
+from . import tempita
+from . import h5io
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/decorator.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/decorator.py
new file mode 100644
index 0000000..fa79521
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/decorator.py
@@ -0,0 +1,253 @@
+##########################     LICENCE     ###############################
+
+# Copyright (c) 2005-2012, Michele Simionato
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+#   Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#   Redistributions in bytecode form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see http://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+from __future__ import print_function
+
+__version__ = '3.4.0'
+
+__all__ = ["decorator", "FunctionMaker", "contextmanager"]
+
+
+import sys, re, inspect
+if sys.version >= '3':
+    from inspect import getfullargspec
+    def get_init(cls):
+        return cls.__init__
+else:
+    class getfullargspec(object):
+        "A quick and dirty replacement for getfullargspec for Python 2.X"
+        def __init__(self, f):
+            self.args, self.varargs, self.varkw, self.defaults = \
+                inspect.getargspec(f)
+            self.kwonlyargs = []
+            self.kwonlydefaults = None
+        def __iter__(self):
+            yield self.args
+            yield self.varargs
+            yield self.varkw
+            yield self.defaults
+    def get_init(cls):
+        return cls.__init__.__func__
+
+DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+# basic functionality
+class FunctionMaker(object):
+    """
+    An object with the ability to create functions with a given signature.
+    It has attributes name, doc, module, signature, defaults, dict and
+    methods update and make.
+    """
+    def __init__(self, func=None, name=None, signature=None,
+                 defaults=None, doc=None, module=None, funcdict=None):
+        self.shortsignature = signature
+        if func:
+            # func can be a class or a callable, but not an instance method
+            self.name = func.__name__
+            if self.name == '<lambda>': # small hack for lambda functions
+                self.name = '_lambda_'
+            self.doc = func.__doc__
+            self.module = func.__module__
+            if inspect.isfunction(func):
+                argspec = getfullargspec(func)
+                self.annotations = getattr(func, '__annotations__', {})
+                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+                          'kwonlydefaults'):
+                    setattr(self, a, getattr(argspec, a))
+                for i, arg in enumerate(self.args):
+                    setattr(self, 'arg%d' % i, arg)
+                if sys.version < '3': # easy way
+                    self.shortsignature = self.signature = \
+                        inspect.formatargspec(
+                        formatvalue=lambda val: "", *argspec)[1:-1]
+                else: # Python 3 way
+                    allargs = list(self.args)
+                    allshortargs = list(self.args)
+                    if self.varargs:
+                        allargs.append('*' + self.varargs)
+                        allshortargs.append('*' + self.varargs)
+                    elif self.kwonlyargs:
+                        allargs.append('*') # single star syntax
+                    for a in self.kwonlyargs:
+                        allargs.append('%s=None' % a)
+                        allshortargs.append('%s=%s' % (a, a))
+                    if self.varkw:
+                        allargs.append('**' + self.varkw)
+                        allshortargs.append('**' + self.varkw)
+                    self.signature = ', '.join(allargs)
+                    self.shortsignature = ', '.join(allshortargs)
+                self.dict = func.__dict__.copy()
+        # func=None happens when decorating a caller
+        if name:
+            self.name = name
+        if signature is not None:
+            self.signature = signature
+        if defaults:
+            self.defaults = defaults
+        if doc:
+            self.doc = doc
+        if module:
+            self.module = module
+        if funcdict:
+            self.dict = funcdict
+        # check existence of required attributes
+        assert hasattr(self, 'name')
+        if not hasattr(self, 'signature'):
+            raise TypeError('You are decorating a non function: %s' % func)
+
+    def update(self, func, **kw):
+        "Update the signature of func with the data in self"
+        func.__name__ = self.name
+        func.__doc__ = getattr(self, 'doc', None)
+        func.__dict__ = getattr(self, 'dict', {})
+        func.__defaults__ = getattr(self, 'defaults', ())
+        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+        func.__annotations__ = getattr(self, 'annotations', None)
+        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
+        func.__module__ = getattr(self, 'module', callermodule)
+        func.__dict__.update(kw)
+
+    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+        "Make a new function from a given template and update the signature"
+        src = src_templ % vars(self) # expand name and signature
+        evaldict = evaldict or {}
+        mo = DEF.match(src)
+        if mo is None:
+            raise SyntaxError('not a valid function template\n%s' % src)
+        name = mo.group(1) # extract the function name
+        names = set([name] + [arg.strip(' *') for arg in
+                             self.shortsignature.split(',')])
+        for n in names:
+            if n in ('_func_', '_call_'):
+                raise NameError('%s is overridden in\n%s' % (n, src))
+        if not src.endswith('\n'): # add a newline just for safety
+            src += '\n' # this is needed in old versions of Python
+        try:
+            code = compile(src, '<string>', 'single')
+            # print >> sys.stderr, 'Compiling %s' % src
+            exec(code, evaldict)
+        except:
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature and body.
+        evaldict is the evaluation dictionary. If addsource is true an attribute
+        __source__ is added to the result. The attributes attrs are added,
+        if any.
+        """
+        if isinstance(obj, str): # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip the right parenthesis
+            func = None
+        else: # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                        evaldict, addsource, **attrs)
+
+def decorator(caller, func=None):
+    """
+    decorator(caller) converts a caller function into a decorator;
+    decorator(caller, func) decorates a function using a caller.
+    """
+    if func is not None: # returns a decorated function
+        evaldict = func.__globals__.copy()
+        evaldict['_call_'] = caller
+        evaldict['_func_'] = func
+        return FunctionMaker.create(
+            func, "return _call_(_func_, %(shortsignature)s)",
+            evaldict, undecorated=func, __wrapped__=func)
+    else: # returns a decorator
+        if inspect.isclass(caller):
+            name = caller.__name__.lower()
+            callerfunc = get_init(caller)
+            doc = 'decorator(%s) converts functions/generators into ' \
+                'factories of %s objects' % (caller.__name__, caller.__name__)
+            fun = getfullargspec(callerfunc).args[1] # second arg
+        elif inspect.isfunction(caller):
+            name = '_lambda_' if caller.__name__ == '<lambda>' \
+                else caller.__name__
+            callerfunc = caller
+            doc = caller.__doc__
+            fun = getfullargspec(callerfunc).args[0] # first arg
+        else: # assume caller is an object with a __call__ method
+            name = caller.__class__.__name__.lower()
+            callerfunc = caller.__call__.__func__
+            doc = caller.__call__.__doc__
+            fun = getfullargspec(callerfunc).args[1] # second arg
+        evaldict = callerfunc.__globals__.copy()
+        evaldict['_call_'] = caller
+        evaldict['decorator'] = decorator
+        return FunctionMaker.create(
+            '%s(%s)' % (name, fun),
+            'return decorator(_call_, %s)' % fun,
+            evaldict, undecorated=caller, __wrapped__=caller,
+            doc=doc, module=caller.__module__)
+
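+# Usage sketch (hypothetical names, not part of this module): a caller
+# function receives the decorated function and its arguments, so
+#
+#     @decorator
+#     def trace(f, *args, **kw):
+#         print('calling %s with args %s, %s' % (f.__name__, args, kw))
+#         return f(*args, **kw)
+#
+# turns ``trace`` into a signature-preserving decorator for any function.
+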
+######################### contextmanager ########################
+
+def __call__(self, func):
+    'Context manager decorator'
+    return FunctionMaker.create(
+        func, "with _self_: return _func_(%(shortsignature)s)",
+        dict(_self_=self, _func_=func), __wrapped__=func)
+
+try: # Python >= 3.2
+
+    from contextlib import _GeneratorContextManager
+    ContextManager = type(
+        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
+
+except ImportError: # Python >= 2.5 and < 3.2
+
+    from contextlib import GeneratorContextManager
+    def __init__(self, f, *a, **k):
+        return GeneratorContextManager.__init__(self, f(*a, **k))
+    ContextManager = type(
+        'ContextManager', (GeneratorContextManager,),
+        dict(__call__=__call__, __init__=__init__))
+
+contextmanager = decorator(ContextManager)
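+
+# Usage sketch (hypothetical names): unlike contextlib.contextmanager, the
+# resulting managers can also be applied as decorators via __call__ above:
+#
+#     @contextmanager
+#     def logged():
+#         print('enter')
+#         yield
+#         print('exit')
+#
+#     with logged():
+#         pass
+#
+#     @logged()
+#     def task():
+#         pass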
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/__init__.py
new file mode 100644
index 0000000..ea54792
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/__init__.py
@@ -0,0 +1,6 @@
+"""Python Objects Onto HDF5
+"""
+
+__version__ = '0.1.dev0'
+
+from ._h5io import read_hdf5, write_hdf5, _TempDir, object_diff  # noqa, analysis:ignore
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/_h5io.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/_h5io.py
new file mode 100644
index 0000000..36dd9f7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/h5io/_h5io.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import sys
+import tempfile
+from shutil import rmtree
+from os import path as op
+
+import numpy as np
+try:
+    from scipy import sparse
+except ImportError:
+    sparse = None
+
+# Adapted from six
+PY3 = sys.version_info[0] == 3
+text_type = str if PY3 else unicode  # noqa
+string_types = str if PY3 else basestring  # noqa
+
+
+##############################################################################
+# WRITING
+
+def _check_h5py():
+    """Helper to check if h5py is installed"""
+    try:
+        import h5py
+    except ImportError:
+        raise ImportError('the h5py module is required to use HDF5 I/O')
+    return h5py
+
+
+def _create_titled_group(root, key, title):
+    """Helper to create a titled group in h5py"""
+    out = root.create_group(key)
+    out.attrs['TITLE'] = title
+    return out
+
+
+def _create_titled_dataset(root, key, title, data, comp_kw=None):
+    """Helper to create a titled dataset in h5py"""
+    comp_kw = {} if comp_kw is None else comp_kw
+    out = root.create_dataset(key, data=data, **comp_kw)
+    out.attrs['TITLE'] = title
+    return out
+
+
+def write_hdf5(fname, data, overwrite=False, compression=4,
+               title='h5io'):
+    """Write python object to HDF5 format using h5py
+
+    Parameters
+    ----------
+    fname : str
+        Filename to use.
+    data : object
+        Object to write. Can be of any of these types:
+            {ndarray, dict, list, tuple, int, float, str}
+        Note that dict objects must only have ``str`` keys.
+    overwrite : bool
+        If True, overwrite file (if it exists).
+    compression : int
+        Compression level to use (0-9) to compress data using gzip.
+    title : str
+        The top-level directory name to use. Typically it is useful to make
+        this your package name, e.g. ``'mnepython'``.
+    """
+    h5py = _check_h5py()
+    if op.isfile(fname) and not overwrite:
+        raise IOError('file "%s" exists, use overwrite=True to overwrite'
+                      % fname)
+    if not isinstance(title, string_types):
+        raise ValueError('title must be a string')
+    comp_kw = dict()
+    if compression > 0:
+        comp_kw = dict(compression='gzip', compression_opts=compression)
+    with h5py.File(fname, mode='w') as fid:
+        _triage_write(title, data, fid, comp_kw, str(type(data)))
+
+
+def _triage_write(key, value, root, comp_kw, where):
+    if isinstance(value, dict):
+        sub_root = _create_titled_group(root, key, 'dict')
+        for key, sub_value in value.items():
+            if not isinstance(key, string_types):
+                raise TypeError('All dict keys must be strings')
+            _triage_write('key_{0}'.format(key), sub_value, sub_root, comp_kw,
+                          where + '["%s"]' % key)
+    elif isinstance(value, (list, tuple)):
+        title = 'list' if isinstance(value, list) else 'tuple'
+        sub_root = _create_titled_group(root, key, title)
+        for vi, sub_value in enumerate(value):
+            _triage_write('idx_{0}'.format(vi), sub_value, sub_root, comp_kw,
+                          where + '[%s]' % vi)
+    elif isinstance(value, type(None)):
+        _create_titled_dataset(root, key, 'None', [False])
+    elif isinstance(value, (int, float)):
+        if isinstance(value, int):
+            title = 'int'
+        else:  # isinstance(value, float):
+            title = 'float'
+        _create_titled_dataset(root, key, title, np.atleast_1d(value))
+    elif isinstance(value, string_types):
+        if isinstance(value, text_type):  # unicode
+            value = np.fromstring(value.encode('utf-8'), np.uint8)
+            title = 'unicode'
+        else:
+            value = np.fromstring(value.encode('ASCII'), np.uint8)
+            title = 'ascii'
+        _create_titled_dataset(root, key, title, value, comp_kw)
+    elif isinstance(value, np.ndarray):
+        _create_titled_dataset(root, key, 'ndarray', value)
+    elif sparse is not None and isinstance(value, sparse.csc_matrix):
+        sub_root = _create_titled_group(root, key, 'csc_matrix')
+        _triage_write('data', value.data, sub_root, comp_kw,
+                      where + '.csc_matrix_data')
+        _triage_write('indices', value.indices, sub_root, comp_kw,
+                      where + '.csc_matrix_indices')
+        _triage_write('indptr', value.indptr, sub_root, comp_kw,
+                      where + '.csc_matrix_indptr')
+    else:
+        raise TypeError('unsupported type %s (in %s)' % (type(value), where))
+
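+# On-disk layout sketch: ``write_hdf5('f.h5', dict(a=1, b=[2.]))`` would
+# produce roughly
+#
+#     /h5io                TITLE='dict'
+#     /h5io/key_a          TITLE='int'    data=[1]
+#     /h5io/key_b          TITLE='list'
+#     /h5io/key_b/idx_0    TITLE='float'  data=[2.0]
+#
+# i.e. dict keys get a ``key_`` prefix, sequence items an ``idx_`` prefix,
+# and the Python type is recorded in the ``TITLE`` attribute.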
+
+##############################################################################
+# READING
+
+def read_hdf5(fname, title='h5io'):
+    """Read python object from HDF5 format using h5py
+
+    Parameters
+    ----------
+    fname : str
+        File to load.
+    title : str
+        The top-level directory name to use. Typically it is useful to make
+        this your package name, e.g. ``'mnepython'``.
+
+    Returns
+    -------
+    data : object
+        The loaded data. Can be of any type supported by ``write_hdf5``.
+    """
+    h5py = _check_h5py()
+    if not op.isfile(fname):
+        raise IOError('file "%s" not found' % fname)
+    if not isinstance(title, string_types):
+        raise ValueError('title must be a string')
+    with h5py.File(fname, mode='r') as fid:
+        if title not in fid.keys():
+            raise ValueError('no "%s" data found' % title)
+        data = _triage_read(fid[title])
+    return data
+
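+# Round-trip sketch (assumes h5py is available; the file name is
+# illustrative):
+#
+#     write_hdf5('example.h5', dict(a=1, b=u'hello'), overwrite=True)
+#     assert read_hdf5('example.h5') == dict(a=1, b=u'hello')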
+
+def _triage_read(node):
+    h5py = _check_h5py()
+    type_str = node.attrs['TITLE']
+    if isinstance(type_str, bytes):
+        type_str = type_str.decode()
+    if isinstance(node, h5py.Group):
+        if type_str == 'dict':
+            data = dict()
+            for key, subnode in node.items():
+                data[key[4:]] = _triage_read(subnode)
+        elif type_str in ['list', 'tuple']:
+            data = list()
+            ii = 0
+            while True:
+                subnode = node.get('idx_{0}'.format(ii), None)
+                if subnode is None:
+                    break
+                data.append(_triage_read(subnode))
+                ii += 1
+            assert len(data) == ii
+            data = tuple(data) if type_str == 'tuple' else data
+            return data
+        elif type_str == 'csc_matrix':
+            if sparse is None:
+                raise RuntimeError('scipy must be installed to read this data')
+            data = sparse.csc_matrix((_triage_read(node['data']),
+                                      _triage_read(node['indices']),
+                                      _triage_read(node['indptr'])))
+        else:
+            raise NotImplementedError('Unknown group type: {0}'
+                                      ''.format(type_str))
+    elif type_str == 'ndarray':
+        data = np.array(node)
+    elif type_str in ('int', 'float'):
+        cast = int if type_str == 'int' else float
+        data = cast(np.array(node)[0])
+    elif type_str in ('unicode', 'ascii', 'str'):  # 'str' for backward compat
+        decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
+        cast = text_type if type_str == 'unicode' else str
+        data = cast(np.array(node).tostring().decode(decoder))
+    elif type_str == 'None':
+        data = None
+    else:
+        raise TypeError('Unknown node type: {0}'.format(type_str))
+    return data
+
+
+# ############################################################################
+# UTILITIES
+
+def _sort_keys(x):
+    """Sort and return keys of dict"""
+    keys = list(x.keys())  # note: not thread-safe
+    idx = np.argsort([str(k) for k in keys])
+    keys = [keys[ii] for ii in idx]
+    return keys
+
+
+def object_diff(a, b, pre=''):
+    """Compute all differences between two python variables
+
+    Parameters
+    ----------
+    a : object
+        Currently supported: dict, list, tuple, ndarray, int, str, bytes,
+        float.
+    b : object
+        Must be the same type as ``a``.
+    pre : str
+        String to prepend to each line.
+
+    Returns
+    -------
+    diffs : str
+        A string representation of the differences.
+    """
+    out = ''
+    if type(a) != type(b):
+        out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
+    elif isinstance(a, dict):
+        k1s = _sort_keys(a)
+        k2s = _sort_keys(b)
+        m1 = set(k2s) - set(k1s)
+        if len(m1):
+            out += pre + ' x1 missing keys %s\n' % (m1)
+        for key in k1s:
+            if key not in k2s:
+                out += pre + ' x2 missing key %s\n' % key
+            else:
+                out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
+    elif isinstance(a, (list, tuple)):
+        if len(a) != len(b):
+            out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
+        else:
+            for xx1, xx2 in zip(a, b):
+                out += object_diff(xx1, xx2, pre='')
+    elif isinstance(a, (string_types, int, float, bytes)):
+        if a != b:
+            out += pre + ' value mismatch (%s, %s)\n' % (a, b)
+    elif a is None:
+        pass  # b must be None due to our type checking
+    elif isinstance(a, np.ndarray):
+        if not np.array_equal(a, b):
+            out += pre + ' array mismatch\n'
+    elif sparse is not None and sparse.isspmatrix(a):
+        # sparsity and sparse type of b vs a already checked above by type()
+        if b.shape != a.shape:
+            out += pre + (' sparse matrix a and b shape mismatch '
+                          '(%s vs %s)' % (a.shape, b.shape))
+        else:
+            c = a - b
+            c.eliminate_zeros()
+            if c.nnz > 0:
+                out += pre + (' sparse matrix a and b differ on %s '
+                              'elements' % c.nnz)
+    else:
+        raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
+    return out
+
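+# Example sketch: differing dict values are reported with their key path:
+#
+#     >>> object_diff(dict(a=1), dict(a=2))
+#     "d1['a'] value mismatch (1, 2)\n"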
+
+class _TempDir(str):
+    """Class for creating and auto-destroying temp dir
+
+    This is designed to be used with testing modules. Instances should be
+    defined inside test functions. Instances defined at module level cannot
+    guarantee proper destruction of the temporary directory.
+
+    When used at module level, cleanup via the __del__() method can fail
+    because the rmtree function may itself be torn down before this object
+    is collected (using the atexit module would be a more robust alternative).
+    """
+    def __new__(cls):
+        new = str.__new__(cls, tempfile.mkdtemp())
+        return new
+
+    def __init__(self):
+        self._path = self.__str__()
+
+    def __del__(self):
+        rmtree(self._path, ignore_errors=True)
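+
+# Usage sketch (inside a test function): the instance is the path itself,
+# and the directory is removed once the instance is garbage-collected:
+#
+#     tempdir = _TempDir()
+#     fname = op.join(tempdir, 'temp.h5')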
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/jdcal.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/jdcal.py
new file mode 100644
index 0000000..1b6105c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/jdcal.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""Functions for converting between Julian dates and calendar dates.
+
+A function for converting Gregorian calendar dates to Julian dates, and
+another function for converting Julian calendar dates to Julian dates
+are defined. Two functions for the reverse calculations are also
+defined.
+
+Different regions of the world switched from the Julian calendar to the
+Gregorian calendar on different dates. Having separate functions for
+Julian and Gregorian calendars allows maximum flexibility in choosing
+the relevant calendar.
+
+All the above functions are "proleptic". This means that they work for
+dates on which the concerned calendar is not valid. For example, the
+Gregorian calendar was not in use prior to around October 1582.
+
+Julian dates are stored in two floating point numbers (double).  Julian
+dates, and Modified Julian dates, are large numbers. If only one number
+is used, then the precision of the time stored is limited. Using two
+numbers, time can be split in a manner that will allow maximum
+precision. For example, the first number could be the Julian date for
+the beginning of a day and the second number could be the fractional
+day. Calculations that need the latter part can now work with maximum
+precision.
+
+A function to test if a given Gregorian calendar year is a leap year is
+defined.
+
+Zero point of Modified Julian Date (MJD) and the MJD of 2000/1/1
+12:00:00 are also given.
+
+This module is based on the TPM C library, by Jeffery W. Percival. The
+idea for splitting Julian date into two floating point numbers was
+inspired by the IAU SOFA C library.
+
+:author: Prasanth Nair
+:contact: prasanthhn at gmail.com
+:license: BSD (http://www.opensource.org/licenses/bsd-license.php)
+
+NB: Code has been heavily adapted for streamlined use by mne-python devs
+"""
+
+
+import numpy as np
+
+MJD_0 = 2400000
+
+
+def ipart(x):
+    """Return integer part of given number."""
+    return np.modf(x)[1]
+
+
+def jcal2jd(year, month, day):
+    """Julian calendar date to Julian date.
+
+    The input and output are for the proleptic Julian calendar,
+    i.e., no consideration of historical usage of the calendar is
+    made.
+
+    Parameters
+    ----------
+    year : int
+        Year as an integer.
+    month : int
+        Month as an integer.
+    day : int
+        Day as an integer.
+
+    Returns
+    -------
+    jd : float
+        Julian date.
+    """
+    year = int(year)
+    month = int(month)
+    day = int(day)
+
+    jd = 367 * year
+    x = ipart((month - 9) / 7.0)
+    jd -= ipart((7 * (year + 5001 + x)) / 4.0)
+    jd += ipart((275 * month) / 9.0)
+    jd += day
+    jd += 1729777
+    return jd
+
+
+def jd2jcal(jd):
+    """Julian calendar date for the given Julian date.
+
+    The input and output are for the proleptic Julian calendar,
+    i.e., no consideration of historical usage of the calendar is
+    made.
+
+    Parameters
+    ----------
+    jd : int
+        The Julian date.
+
+    Returns
+    -------
+    y, m, d : int, int, int
+        Three element tuple containing year, month, day.
+    """
+    j = jd + 1402
+    k = ipart((j - 1) / 1461.0)
+    l = j - (1461.0 * k)
+    n = ipart((l - 1) / 365.0) - ipart(l / 1461.0)
+    i = l - (365.0 * n) + 30.0
+    j = ipart((80.0 * i) / 2447.0)
+    day = i - ipart((2447.0 * j) / 80.0)
+    i = ipart(j / 11.0)
+    month = j + 2 - (12.0 * i)
+    year = (4 * k) + n + i - 4716.0
+    return int(year), int(month), int(day)
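+
+# Round-trip sketch: the proleptic Julian calendar date 2000-01-01 falls 13
+# days behind the Gregorian 2000-01-01 (JD 2451545):
+#
+#     >>> int(jcal2jd(2000, 1, 1))
+#     2451558
+#     >>> jd2jcal(2451558)
+#     (2000, 1, 1)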
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/six.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/six.py
new file mode 100644
index 0000000..b3595a4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/six.py
@@ -0,0 +1,577 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin at python.org>"
+__version__ = "1.4.1"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)
+        # This is a bit ugly, but it avoids running this again.
+        delattr(tp, self.name)
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+    """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+
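+# Usage sketch: the lazy descriptors above resolve renamed stdlib modules on
+# first access, so a single import works on both Pythons, e.g.
+#
+#     from mne.externals.six.moves import configparser  # ConfigParser on Py2
+#     from mne.externals.six.moves import queue         # Queue on Py2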
+
+
+class Module_six_moves_urllib_parse(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
+sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
+sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
+sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
+sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    parse = sys.modules[__name__ + ".moves.urllib_parse"]
+    error = sys.modules[__name__ + ".moves.urllib_error"]
+    request = sys.modules[__name__ + ".moves.urllib_request"]
+    response = sys.modules[__name__ + ".moves.urllib_response"]
+    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
+
+
+sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+
+    _iterkeys = "keys"
+    _itervalues = "values"
+    _iteritems = "items"
+    _iterlists = "lists"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+    _iterkeys = "iterkeys"
+    _itervalues = "itervalues"
+    _iteritems = "iteritems"
+    _iterlists = "iterlists"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+    """Return an iterator over the keys of a dictionary."""
+    return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+    """Return an iterator over the values of a dictionary."""
+    return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+    """Return an iterator over the (key, value) pairs of a dictionary."""
+    return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+    """Return an iterator over the (key, [values]) pairs of a dictionary."""
+    return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+    def u(s):
+        return s
+    unichr = chr
+    if sys.version_info[1] <= 1:
+        def int2byte(i):
+            return bytes((i,))
+    else:
+        # This is about 2x faster than the implementation above on 3.2+
+        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+else:
+    def b(s):
+        return s
+    def u(s):
+        return unicode(s, "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+    def byte2int(bs):
+        return ord(bs[0])
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    def iterbytes(buf):
+        return (ord(byte) for byte in buf)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
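+# Example sketch: ``b('ab')`` yields the byte string ``b'ab'`` on Python 3
+# and ``'ab'`` on Python 2, while ``u('ab')`` yields text on either version.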
+
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+    print_ = getattr(builtins, "print")
+    del builtins
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+    def print_(*args, **kwargs):
+        """The new-style print function."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", bases, {})
+
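+# Usage sketch (``Meta`` and ``Base`` are hypothetical names):
+#
+#     class MyClass(with_metaclass(Meta, Base)):
+#         pass
+#
+# declares a metaclass in a form valid under both Python 2 and 3.
+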
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        for slots_var in orig_vars.get('__slots__', ()):
+            orig_vars.pop(slots_var)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/__init__.py
new file mode 100644
index 0000000..5bcbd84
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/__init__.py
@@ -0,0 +1,1303 @@
+"""
+A small templating language
+
+This implements a small templating language.  It supports
+if/elif/else, for/continue/break, expressions, and blocks of Python
+code.  The syntax is::
+
+  {{any expression (function calls etc)}}
+  {{any expression | filter}}
+  {{for x in y}}...{{endfor}}
+  {{if x}}x{{elif y}}y{{else}}z{{endif}}
+  {{py:x=1}}
+  {{py:
+  def foo(bar):
+      return 'baz'
+  }}
+  {{default var = default_value}}
+  {{# comment}}
+
+You use this with the ``Template`` class or the ``sub`` shortcut.
+The ``Template`` class takes the template string and the name of
+the template (for errors) and a default namespace.  Then (like
+``string.Template``) you can call the ``tmpl.substitute(**kw)``
+method to make a substitution (or ``tmpl.substitute(a_dict)``).
+
+``sub(content, **kw)`` substitutes the template immediately.  You
+can use ``__name='tmpl.html'`` to set the name of the template.
+
+If there are syntax errors ``TemplateError`` will be raised.
+"""
+
+import warnings
+import re
+import sys
+import cgi
+from ..six.moves.urllib.parse import quote as url_quote
+import os
+import tokenize
+from ..six.moves import cStringIO as StringIO
+from ._looper import looper
+from .compat3 import PY3, bytes, basestring_, next, is_unicode, coerce_text
+
+__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
+           'sub_html', 'html', 'bunch']
+
+in_re = re.compile(r'\s+in\s+')
+var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+
+
+class TemplateError(Exception):
+    """Exception raised while parsing a template
+    """
+
+    def __init__(self, message, position, name=None):
+        Exception.__init__(self, message)
+        self.position = position
+        self.name = name
+
+    def __str__(self):
+        msg = ' '.join(self.args)
+        if self.position:
+            msg = '%s at line %s column %s' % (
+                msg, self.position[0], self.position[1])
+        if self.name:
+            msg += ' in %s' % self.name
+        return msg
+
+
+class _TemplateContinue(Exception):
+    pass
+
+
+class _TemplateBreak(Exception):
+    pass
+
+
+def get_file_template(name, from_template):
+    path = os.path.join(os.path.dirname(from_template.name), name)
+    return from_template.__class__.from_filename(
+        path, namespace=from_template.namespace,
+        get_template=from_template.get_template)
+
+
+class Template(object):
+
+    default_namespace = {
+        'start_braces': '{{',
+        'end_braces': '}}',
+        'looper': looper,
+    }
+
+    default_encoding = 'utf8'
+    default_inherit = None
+
+    def __init__(self, content, name=None, namespace=None, stacklevel=None,
+                 get_template=None, default_inherit=None, line_offset=0,
+                 delimeters=None):
+        self.content = content
+
+        # set delimeters
+        if delimeters is None:
+            delimeters = (self.default_namespace['start_braces'],
+                          self.default_namespace['end_braces'])
+        else:
+            assert len(delimeters) == 2 and all(
+                [isinstance(delimeter, basestring_)
+                    for delimeter in delimeters])
+            self.default_namespace = self.__class__.default_namespace.copy()
+            self.default_namespace['start_braces'] = delimeters[0]
+            self.default_namespace['end_braces'] = delimeters[1]
+        self.delimeters = delimeters
+
+        self._unicode = is_unicode(content)
+        if name is None and stacklevel is not None:
+            try:
+                caller = sys._getframe(stacklevel)
+            except ValueError:
+                pass
+            else:
+                globals = caller.f_globals
+                lineno = caller.f_lineno
+                if '__file__' in globals:
+                    name = globals['__file__']
+                    if name.endswith('.pyc') or name.endswith('.pyo'):
+                        name = name[:-1]
+                elif '__name__' in globals:
+                    name = globals['__name__']
+                else:
+                    name = '<string>'
+                if lineno:
+                    name += ':%s' % lineno
+        self.name = name
+        self._parsed = parse(
+            content, name=name, line_offset=line_offset,
+            delimeters=self.delimeters)
+        if namespace is None:
+            namespace = {}
+        self.namespace = namespace
+        self.get_template = get_template
+        if default_inherit is not None:
+            self.default_inherit = default_inherit
+
+    def from_filename(cls, filename, namespace=None, encoding=None,
+                      default_inherit=None, get_template=get_file_template):
+        f = open(filename, 'rb')
+        c = f.read()
+        f.close()
+        if encoding:
+            c = c.decode(encoding)
+        return cls(content=c, name=filename, namespace=namespace,
+                   default_inherit=default_inherit, get_template=get_template)
+
+    from_filename = classmethod(from_filename)
+
+    def __repr__(self):
+        return '<%s %s name=%r>' % (
+            self.__class__.__name__,
+            hex(id(self))[2:], self.name)
+
+    def substitute(self, *args, **kw):
+        if args:
+            if kw:
+                raise TypeError(
+                    "You can only give positional *or* keyword arguments")
+            if len(args) > 1:
+                raise TypeError(
+                    "You can only give one positional argument")
+            if not hasattr(args[0], 'items'):
+                raise TypeError(
+                    ("If you pass in a single argument, you must pass in a ",
+                     "dict-like object (with a .items() method); you gave %r")
+                    % (args[0],))
+            kw = args[0]
+        ns = kw
+        ns['__template_name__'] = self.name
+        if self.namespace:
+            ns.update(self.namespace)
+        result, defs, inherit = self._interpret(ns)
+        if not inherit:
+            inherit = self.default_inherit
+        if inherit:
+            result = self._interpret_inherit(result, defs, inherit, ns)
+        return result
+
+    def _interpret(self, ns):
+        # __traceback_hide__ = True
+        parts = []
+        defs = {}
+        self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
+        if '__inherit__' in defs:
+            inherit = defs.pop('__inherit__')
+        else:
+            inherit = None
+        return ''.join(parts), defs, inherit
+
+    def _interpret_inherit(self, body, defs, inherit_template, ns):
+        # __traceback_hide__ = True
+        if not self.get_template:
+            raise TemplateError(
+                'You cannot use inheritance without passing in get_template',
+                position=None, name=self.name)
+        templ = self.get_template(inherit_template, self)
+        self_ = TemplateObject(self.name)
+        for name, value in defs.iteritems():
+            setattr(self_, name, value)
+        self_.body = body
+        ns = ns.copy()
+        ns['self'] = self_
+        return templ.substitute(ns)
+
+    def _interpret_codes(self, codes, ns, out, defs):
+        # __traceback_hide__ = True
+        for item in codes:
+            if isinstance(item, basestring_):
+                out.append(item)
+            else:
+                self._interpret_code(item, ns, out, defs)
+
+    def _interpret_code(self, code, ns, out, defs):
+        # __traceback_hide__ = True
+        name, pos = code[0], code[1]
+        if name == 'py':
+            self._exec(code[2], ns, pos)
+        elif name == 'continue':
+            raise _TemplateContinue()
+        elif name == 'break':
+            raise _TemplateBreak()
+        elif name == 'for':
+            vars, expr, content = code[2], code[3], code[4]
+            expr = self._eval(expr, ns, pos)
+            self._interpret_for(vars, expr, content, ns, out, defs)
+        elif name == 'cond':
+            parts = code[2:]
+            self._interpret_if(parts, ns, out, defs)
+        elif name == 'expr':
+            parts = code[2].split('|')
+            base = self._eval(parts[0], ns, pos)
+            for part in parts[1:]:
+                func = self._eval(part, ns, pos)
+                base = func(base)
+            out.append(self._repr(base, pos))
+        elif name == 'default':
+            var, expr = code[2], code[3]
+            if var not in ns:
+                result = self._eval(expr, ns, pos)
+                ns[var] = result
+        elif name == 'inherit':
+            expr = code[2]
+            value = self._eval(expr, ns, pos)
+            defs['__inherit__'] = value
+        elif name == 'def':
+            name = code[2]
+            signature = code[3]
+            parts = code[4]
+            ns[name] = defs[name] = TemplateDef(
+                self, name, signature, body=parts, ns=ns, pos=pos)
+        elif name == 'comment':
+            return
+        else:
+            assert 0, "Unknown code: %r" % name
+
+    def _interpret_for(self, vars, expr, content, ns, out, defs):
+        # __traceback_hide__ = True
+        for item in expr:
+            if len(vars) == 1:
+                ns[vars[0]] = item
+            else:
+                if len(vars) != len(item):
+                    raise ValueError(
+                        'Need %i items to unpack (got %i items)'
+                        % (len(vars), len(item)))
+                for name, value in zip(vars, item):
+                    ns[name] = value
+            try:
+                self._interpret_codes(content, ns, out, defs)
+            except _TemplateContinue:
+                continue
+            except _TemplateBreak:
+                break
+
+    def _interpret_if(self, parts, ns, out, defs):
+        # __traceback_hide__ = True
+        # @@: if/else/else gets through
+        for part in parts:
+            assert not isinstance(part, basestring_)
+            name, pos = part[0], part[1]
+            if name == 'else':
+                result = True
+            else:
+                result = self._eval(part[2], ns, pos)
+            if result:
+                self._interpret_codes(part[3], ns, out, defs)
+                break
+
+    def _eval(self, code, ns, pos):
+        # __traceback_hide__ = True
+        try:
+            try:
+                value = eval(code, self.default_namespace, ns)
+            except SyntaxError as e:
+                raise SyntaxError(
+                    'invalid syntax in expression: %s' % code)
+            return value
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            if getattr(e, 'args', None):
+                arg0 = e.args[0]
+            else:
+                arg0 = coerce_text(e)
+            e.args = (self._add_line_info(arg0, pos),)
+            raise  # re-raise the modified exception with its original traceback
+
+    def _exec(self, code, ns, pos):
+        # __traceback_hide__ = True
+        try:
+            exec(code, self.default_namespace, ns)
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            if e.args:
+                e.args = (self._add_line_info(e.args[0], pos),)
+            else:
+                e.args = (self._add_line_info(None, pos),)
+            raise  # re-raise the modified exception with its original traceback
+
+    def _repr(self, value, pos):
+        # __traceback_hide__ = True
+        try:
+            if value is None:
+                return ''
+            if self._unicode:
+                try:
+                    if not is_unicode(value):
+                        value = str(value)
+                        value = value.decode('utf-8')
+                except UnicodeDecodeError:
+                    value = bytes(value)
+            else:
+                if not isinstance(value, basestring_):
+                    value = coerce_text(value)
+                if (is_unicode(value) and self.default_encoding):
+                    value = value.encode(self.default_encoding)
+        except:
+            exc_info = sys.exc_info()
+            e = exc_info[1]
+            e.args = (self._add_line_info(e.args[0], pos),)
+            raise  # re-raise the modified exception with its original traceback
+        else:
+            if self._unicode and isinstance(value, bytes):
+                if not self.default_encoding:
+                    raise UnicodeDecodeError(
+                        'Cannot decode bytes value %r into unicode '
+                        '(no default_encoding provided)' % value)
+                try:
+                    value = value.decode(self.default_encoding)
+                except UnicodeDecodeError as e:
+                    raise UnicodeDecodeError(
+                        e.encoding,
+                        e.object,
+                        e.start,
+                        e.end,
+                        e.reason + ' in string %r' % value)
+            elif not self._unicode and is_unicode(value):
+                if not self.default_encoding:
+                    raise UnicodeEncodeError(
+                        'Cannot encode unicode value %r into bytes '
+                        '(no default_encoding provided)' % value)
+                value = value.encode(self.default_encoding)
+            return value
+
+    def _add_line_info(self, msg, pos):
+        msg = "%s at line %s column %s" % (
+            msg, pos[0], pos[1])
+        if self.name:
+            msg += " in file %s" % self.name
+        return msg
+
+
+def sub(content, delimeters=None, **kw):
+    name = kw.get('__name')
+    tmpl = Template(content, name=name, delimeters=delimeters)
+    return tmpl.substitute(kw)
+
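+# Example sketch:
+#
+#     >>> sub('Hello {{name}}!', name='world')
+#     'Hello world!'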
+
+def paste_script_template_renderer(content, vars, filename=None):
+    tmpl = Template(content, name=filename)
+    return tmpl.substitute(vars)
+
+
+class bunch(dict):
+
+    def __init__(self, **kw):
+        for name, value in kw.iteritems():
+            setattr(self, name, value)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __getitem__(self, key):
+        if 'default' in self:
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return dict.__getitem__(self, 'default')
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        items = [
+            (k, v) for k, v in self.iteritems()]
+        items.sort()
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            ' '.join(['%s=%r' % (k, v) for k, v in items]))
+
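+# Example sketch: attribute-style access with an optional fallback key:
+#
+#     >>> d = bunch(default=0, a=1)
+#     >>> d.a, d['a'], d['missing']
+#     (1, 1, 0)
+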
+############################################################
+## HTML Templating
+############################################################
+
+
+class html(object):
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return self.value
+
+    def __html__(self):
+        return self.value
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__, self.value)
+
+
+def html_quote(value, force=True):
+    if not force and hasattr(value, '__html__'):
+        return value.__html__()
+    if value is None:
+        return ''
+    if not isinstance(value, basestring_):
+        value = coerce_text(value)
+    if sys.version >= "3" and isinstance(value, bytes):
+        value = cgi.escape(value.decode('latin1'), 1)
+        value = value.encode('latin1')
+    else:
+        with warnings.catch_warnings(record=True):  # annoying
+            value = cgi.escape(value, 1)
+    if sys.version < "3":
+        if is_unicode(value):
+            value = value.encode('ascii', 'xmlcharrefreplace')
+    return value
+
+
+def url(v):
+    v = coerce_text(v)
+    if is_unicode(v):
+        v = v.encode('utf8')
+    return url_quote(v)
+
+
+def attr(**kw):
+    kw = list(kw.iteritems())
+    kw.sort()
+    parts = []
+    for name, value in kw:
+        if value is None:
+            continue
+        if name.endswith('_'):
+            name = name[:-1]
+        parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
+    return html(' '.join(parts))
+
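+# Example sketch: a trailing underscore lets reserved words be used as
+# attribute names; pairs are emitted sorted and HTML-quoted:
+#
+#     >>> str(attr(class_='big', href='/x'))
+#     'class="big" href="/x"'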
+
+class HTMLTemplate(Template):
+
+    default_namespace = Template.default_namespace.copy()
+    default_namespace.update(dict(
+        html=html,
+        attr=attr,
+        url=url,
+        html_quote=html_quote))
+
+    def _repr(self, value, pos):
+        if hasattr(value, '__html__'):
+            value = value.__html__()
+            quote = False
+        else:
+            quote = True
+        plain = Template._repr(self, value, pos)
+        if quote:
+            return html_quote(plain)
+        else:
+            return plain
+
+
+def sub_html(content, **kw):
+    name = kw.get('__name')
+    tmpl = HTMLTemplate(content, name=name)
+    return tmpl.substitute(kw)
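+
+# Illustrative sketch: HTMLTemplate escapes substituted values
+# automatically, while wrapping a value in ``html(...)`` marks it as
+# already-safe markup:
+#
+#     >>> sub_html('<p>{{body}}</p>', body='<x>')
+#     '<p>&lt;x&gt;</p>'
+#     >>> sub_html('<p>{{body}}</p>', body=html('<em>hi</em>'))
+#     '<p><em>hi</em></p>'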
+
+
+class TemplateDef(object):
+    def __init__(self, template, func_name, func_signature,
+                 body, ns, pos, bound_self=None):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return '<mne.externals.tempita function %s(%s) at %s:%s>' % (
+            self._func_name, self._func_signature,
+            self._template.name, self._pos)
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns['self'] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return ''.join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template, self._func_name, self._func_signature,
+            self._body, self._ns, self._pos, bound_self=obj)
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError(
+                    'Unexpected argument %s' % name)
+            if name in sig_args:
+                values[name] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while sig_args and sig_args[0] in values:
+                sig_args.pop(0)
+            if sig_args:
+                name = sig_args.pop(0)
+                values[name] = args.pop(0)
+            elif var_args:
+                values[var_args] = tuple(args)
+                break
+            else:
+                raise TypeError(
+                    'Extra positional arguments: %s'
+                    % ', '.join(repr(v) for v in args))
+        for name, value_expr in defaults.items():
+            if name not in values:
+                values[name] = self._template._eval(
+                    value_expr, self._ns, self._pos)
+        for name in sig_args:
+            if name not in values:
+                raise TypeError(
+                    'Missing argument: %s' % name)
+        if var_kw:
+            values[var_kw] = extra_kw
+        return values
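+
+# Illustrative sketch (assuming the {{def}} syntax handled by parse_def
+# below): functions defined inside a template become TemplateDef instances
+# callable from template expressions:
+#
+#     >>> t = Template('{{def hi(name)}}Hello {{name}}{{enddef}}'
+#     ...              '{{hi("Ann")}}')
+#     >>> t.substitute()
+#     'Hello Ann'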
+
+
+class TemplateObject(object):
+
+    def __init__(self, name):
+        self.__name = name
+        self.get = TemplateObjectGetter(self)
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.__name)
+
+
+class TemplateObjectGetter(object):
+
+    def __init__(self, template_obj):
+        self.__template_obj = template_obj
+
+    def __getattr__(self, attr):
+        return getattr(self.__template_obj, attr, Empty)
+
+    def __repr__(self):
+        return '<%s around %r>' % (
+            self.__class__.__name__, self.__template_obj)
+
+
+class _Empty(object):
+    def __call__(self, *args, **kw):
+        return self
+
+    def __str__(self):
+        return ''
+
+    def __repr__(self):
+        return 'Empty'
+
+    def __unicode__(self):
+        if PY3:
+            return str('')
+        else:
+            return unicode('')
+
+    def __iter__(self):
+        return iter(())
+
+    def __bool__(self):
+        return False
+
+    if sys.version < "3":
+        __nonzero__ = __bool__
+
+Empty = _Empty()
+del _Empty
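+
+# Illustrative sketch: ``Empty`` is what TemplateObjectGetter returns for
+# missing attributes; it renders as nothing, is falsy, callable and
+# iterable, so templates degrade gracefully:
+#
+#     >>> str(Empty), bool(Empty), list(Empty)
+#     ('', False, [])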
+
+############################################################
+## Lexing and Parsing
+############################################################
+
+
+def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
+    if delimeters is None:
+        delimeters = (Template.default_namespace['start_braces'],
+                      Template.default_namespace['end_braces'])
+    in_expr = False
+    chunks = []
+    last = 0
+    last_pos = (line_offset + 1, 1)
+    token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
+                                      re.escape(delimeters[1])))
+    for match in token_re.finditer(s):
+        expr = match.group(0)
+        pos = find_position(s, match.end(), last, last_pos)
+        if expr == delimeters[0] and in_expr:
+            raise TemplateError('%s inside expression' % delimeters[0],
+                                position=pos,
+                                name=name)
+        elif expr == delimeters[1] and not in_expr:
+            raise TemplateError('%s outside expression' % delimeters[1],
+                                position=pos,
+                                name=name)
+        if expr == delimeters[0]:
+            part = s[last:match.start()]
+            if part:
+                chunks.append(part)
+            in_expr = True
+        else:
+            chunks.append((s[last:match.start()], last_pos))
+            in_expr = False
+        last = match.end()
+        last_pos = pos
+    if in_expr:
+        raise TemplateError('No %s to finish last expression' % delimeters[1],
+                            name=name, position=last_pos)
+    part = s[last:]
+    if part:
+        chunks.append(part)
+    if trim_whitespace:
+        chunks = trim_lex(chunks)
+    return chunks
+
+lex.__doc__ = """
+Lex a string into chunks:
+
+    >>> lex('hey')
+    ['hey']
+    >>> lex('hey {{you}}')
+    ['hey ', ('you', (1, 7))]
+    >>> lex('hey {{')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: No }} to finish last expression at line 1 column 7
+    >>> lex('hey }}')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: }} outside expression at line 1 column 7
+    >>> lex('hey {{ {{')
+    Traceback (most recent call last):
+        ...
+    mne.externals.tempita.TemplateError: {{ inside expression at line 1 column 10
+
+""" if PY3 else """
+Lex a string into chunks:
+
+    >>> lex('hey')
+    ['hey']
+    >>> lex('hey {{you}}')
+    ['hey ', ('you', (1, 7))]
+    >>> lex('hey {{')
+    Traceback (most recent call last):
+        ...
+    TemplateError: No }} to finish last expression at line 1 column 7
+    >>> lex('hey }}')
+    Traceback (most recent call last):
+        ...
+    TemplateError: }} outside expression at line 1 column 7
+    >>> lex('hey {{ {{')
+    Traceback (most recent call last):
+        ...
+    TemplateError: {{ inside expression at line 1 column 10
+
+"""
+
+statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
+single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
+trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
+lead_whitespace_re = re.compile(r'^[\t ]*\n')
+
+
+def trim_lex(tokens):
+    last_trim = None
+    for i in range(len(tokens)):
+        current = tokens[i]
+        if isinstance(tokens[i], basestring_):
+            # we don't trim this
+            continue
+        item = current[0]
+        if not statement_re.search(item) and item not in single_statements:
+            continue
+        if not i:
+            prev = ''
+        else:
+            prev = tokens[i - 1]
+        if i + 1 >= len(tokens):
+            next_chunk = ''
+        else:
+            next_chunk = tokens[i + 1]
+        if (not
+                isinstance(next_chunk, basestring_)
+                or not isinstance(prev, basestring_)):
+            continue
+        prev_ok = not prev or trail_whitespace_re.search(prev)
+        if i == 1 and not prev.strip():
+            prev_ok = True
+        if last_trim is not None and last_trim + 2 == i and not prev.strip():
+            prev_ok = 'last'
+        if (prev_ok
+            and (not next_chunk or lead_whitespace_re.search(next_chunk)
+                 or (i == len(tokens) - 2 and not next_chunk.strip()))):
+            if prev:
+                if ((i == 1 and not prev.strip()) or prev_ok == 'last'):
+                    tokens[i - 1] = ''
+                else:
+                    m = trail_whitespace_re.search(prev)
+                    # +1 to leave the leading \n on:
+                    prev = prev[:m.start() + 1]
+                    tokens[i - 1] = prev
+            if next_chunk:
+                last_trim = i
+                if i == len(tokens) - 2 and not next_chunk.strip():
+                    tokens[i + 1] = ''
+                else:
+                    m = lead_whitespace_re.search(next_chunk)
+                    next_chunk = next_chunk[m.end():]
+                    tokens[i + 1] = next_chunk
+    return tokens
+
+trim_lex.__doc__ = r"""
+    Takes a lexed set of tokens, and removes whitespace when there is
+    a directive on a line by itself:
+
+       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
+       >>> tokens
+       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
+       >>> trim_lex(tokens)
+       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
+    """ if PY3 else r"""
+    Takes a lexed set of tokens, and removes whitespace when there is
+    a directive on a line by itself:
+
+       >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
+       >>> tokens
+       [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
+       >>> trim_lex(tokens)
+       [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
+    """
+
+
+def find_position(string, index, last_index, last_pos):
+    """
+    Given a string and index, return (line, column)
+    """
+    lines = string.count('\n', last_index, index)
+    if lines > 0:
+        column = index - string.rfind('\n', last_index, index)
+    else:
+        column = last_pos[1] + (index - last_index)
+    return (last_pos[0] + lines, column)
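+
+# For example (an illustrative sketch): in 'ab\ncd', index 4 points at 'd',
+# which sits on line 2, column 2:
+#
+#     >>> find_position('ab\ncd', 4, 0, (1, 1))
+#     (2, 2)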
+
+
+def parse(s, name=None, line_offset=0, delimeters=None):
+
+    if delimeters is None:
+        delimeters = (Template.default_namespace['start_braces'],
+                      Template.default_namespace['end_braces'])
+    tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
+    result = []
+    while tokens:
+        next_chunk, tokens = parse_expr(tokens, name)
+        result.append(next_chunk)
+    return result
+
+parse.__doc__ = r"""
+    Parses a string into a kind of AST
+
+        >>> parse('{{x}}')
+        [('expr', (1, 3), 'x')]
+        >>> parse('foo')
+        ['foo']
+        >>> parse('{{if x}}test{{endif}}')
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
+        >>> parse(
+        ...    'series->{{for x in y}}x={{x}}{{endfor}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        ['series->',
+            ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
+        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
+        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
+        >>> parse('{{py:x=1}}')
+        [('py', (1, 3), 'x=1')]
+        >>> parse(
+        ...    '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
+            ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
+
+    Some exceptions::
+
+        >>> parse('{{continue}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: continue outside of for loop at line 1 column 3
+        >>> parse('{{if x}}foo')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: No {{endif}} at line 1 column 3
+        >>> parse('{{else}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: else outside of an if block at line 1 column 3
+        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Unexpected endif at line 1 column 25
+        >>> parse('{{if}}{{endif}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: if with no expression at line 1 column 3
+        >>> parse('{{for x y}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
+        >>> parse('{{py:x=1\ny=2}}')  #doctest: +NORMALIZE_WHITESPACE
+        Traceback (most recent call last):
+            ...
+        mne.externals.tempita.TemplateError: Multi-line py blocks must start
+            with a newline at line 1 column 3
+    """ if PY3 else r"""
+    Parses a string into a kind of AST
+
+        >>> parse('{{x}}')
+        [('expr', (1, 3), 'x')]
+        >>> parse('foo')
+        ['foo']
+        >>> parse('{{if x}}test{{endif}}')
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
+        >>> parse(
+        ...    'series->{{for x in y}}x={{x}}{{endfor}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        ['series->',
+            ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
+        >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
+        [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
+        >>> parse('{{py:x=1}}')
+        [('py', (1, 3), 'x=1')]
+        >>> parse(
+        ...    '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
+        ... )  #doctest: +NORMALIZE_WHITESPACE
+        [('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
+            ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
+
+    Some exceptions::
+
+        >>> parse('{{continue}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: continue outside of for loop at line 1 column 3
+        >>> parse('{{if x}}foo')
+        Traceback (most recent call last):
+            ...
+        TemplateError: No {{endif}} at line 1 column 3
+        >>> parse('{{else}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: else outside of an if block at line 1 column 3
+        >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: Unexpected endif at line 1 column 25
+        >>> parse('{{if}}{{endif}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: if with no expression at line 1 column 3
+        >>> parse('{{for x y}}{{endfor}}')
+        Traceback (most recent call last):
+            ...
+        TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
+        >>> parse('{{py:x=1\ny=2}}')  #doctest: +NORMALIZE_WHITESPACE
+        Traceback (most recent call last):
+            ...
+        TemplateError: Multi-line py blocks must start
+            with a newline at line 1 column 3
+    """
+
+
+def parse_expr(tokens, name, context=()):
+    if isinstance(tokens[0], basestring_):
+        return tokens[0], tokens[1:]
+    expr, pos = tokens[0]
+    expr = expr.strip()
+    if expr.startswith('py:'):
+        expr = expr[3:].lstrip(' \t')
+        if expr.startswith('\n') or expr.startswith('\r'):
+            expr = expr.lstrip('\r\n')
+            if '\r' in expr:
+                expr = expr.replace('\r\n', '\n')
+                expr = expr.replace('\r', '')
+            expr += '\n'
+        else:
+            if '\n' in expr:
+                raise TemplateError(
+                    'Multi-line py blocks must start with a newline',
+                    position=pos, name=name)
+        return ('py', pos, expr), tokens[1:]
+    elif expr in ('continue', 'break'):
+        if 'for' not in context:
+            raise TemplateError(
+                '%s outside of for loop' % expr,
+                position=pos, name=name)
+        return (expr, pos), tokens[1:]
+    elif expr.startswith('if '):
+        return parse_cond(tokens, name, context)
+    elif (expr.startswith('elif ')
+          or expr == 'else'):
+        raise TemplateError(
+            '%s outside of an if block' % expr.split()[0],
+            position=pos, name=name)
+    elif expr in ('if', 'elif', 'for'):
+        raise TemplateError(
+            '%s with no expression' % expr,
+            position=pos, name=name)
+    elif expr in ('endif', 'endfor', 'enddef'):
+        raise TemplateError(
+            'Unexpected %s' % expr,
+            position=pos, name=name)
+    elif expr.startswith('for '):
+        return parse_for(tokens, name, context)
+    elif expr.startswith('default '):
+        return parse_default(tokens, name, context)
+    elif expr.startswith('inherit '):
+        return parse_inherit(tokens, name, context)
+    elif expr.startswith('def '):
+        return parse_def(tokens, name, context)
+    elif expr.startswith('#'):
+        return ('comment', pos, tokens[0][0]), tokens[1:]
+    return ('expr', pos, tokens[0][0]), tokens[1:]
+
+
+def parse_cond(tokens, name, context):
+    start = tokens[0][1]
+    pieces = []
+    context = context + ('if',)
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'Missing {{endif}}',
+                position=start, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endif'):
+            return ('cond', start) + tuple(pieces), tokens[1:]
+        next_chunk, tokens = parse_one_cond(tokens, name, context)
+        pieces.append(next_chunk)
+
+
+def parse_one_cond(tokens, name, context):
+    (first, pos), tokens = tokens[0], tokens[1:]
+    content = []
+    if first.endswith(':'):
+        first = first[:-1]
+    if first.startswith('if '):
+        part = ('if', pos, first[3:].lstrip(), content)
+    elif first.startswith('elif '):
+        part = ('elif', pos, first[5:].lstrip(), content)
+    elif first == 'else':
+        part = ('else', pos, None, content)
+    else:
+        assert 0, "Unexpected token %r at %s" % (first, pos)
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'No {{endif}}',
+                position=pos, name=name)
+        if (isinstance(tokens[0], tuple)
+            and (tokens[0][0] == 'endif'
+                 or tokens[0][0].startswith('elif ')
+                 or tokens[0][0] == 'else')):
+            return part, tokens
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_for(tokens, name, context):
+    first, pos = tokens[0]
+    tokens = tokens[1:]
+    context = ('for',) + context
+    content = []
+    assert first.startswith('for ')
+    if first.endswith(':'):
+        first = first[:-1]
+    first = first[3:].strip()
+    match = in_re.search(first)
+    if not match:
+        raise TemplateError(
+            'Bad for (no "in") in %r' % first,
+            position=pos, name=name)
+    vars = first[:match.start()]
+    if '(' in vars:
+        raise TemplateError(
+            'You cannot have () in the variable section of a for loop (%r)'
+            % vars, position=pos, name=name)
+    vars = tuple([
+        v.strip() for v in first[:match.start()].split(',')
+        if v.strip()])
+    expr = first[match.end():]
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'No {{endfor}}',
+                position=pos, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endfor'):
+            return ('for', pos, vars, expr, content), tokens[1:]
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_default(tokens, name, context):
+    first, pos = tokens[0]
+    assert first.startswith('default ')
+    first = first.split(None, 1)[1]
+    parts = first.split('=', 1)
+    if len(parts) == 1:
+        raise TemplateError(
+            "Expression must be {{default var=value}}; no = found in %r" %
+            first, position=pos, name=name)
+    var = parts[0].strip()
+    if ',' in var:
+        raise TemplateError(
+            "{{default x, y = ...}} is not supported",
+            position=pos, name=name)
+    if not var_re.search(var):
+        raise TemplateError(
+            "Not a valid variable name for {{default}}: %r"
+            % var, position=pos, name=name)
+    expr = parts[1].strip()
+    return ('default', pos, var, expr), tokens[1:]
+
+
+def parse_inherit(tokens, name, context):
+    first, pos = tokens[0]
+    assert first.startswith('inherit ')
+    expr = first.split(None, 1)[1]
+    return ('inherit', pos, expr), tokens[1:]
+
+
+def parse_def(tokens, name, context):
+    first, start = tokens[0]
+    tokens = tokens[1:]
+    assert first.startswith('def ')
+    first = first.split(None, 1)[1]
+    if first.endswith(':'):
+        first = first[:-1]
+    if '(' not in first:
+        func_name = first
+        sig = ((), None, None, {})
+    elif not first.endswith(')'):
+        raise TemplateError("Function definition doesn't end with ): %s" %
+                            first, position=start, name=name)
+    else:
+        first = first[:-1]
+        func_name, sig_text = first.split('(', 1)
+        sig = parse_signature(sig_text, name, start)
+    context = context + ('def',)
+    content = []
+    while 1:
+        if not tokens:
+            raise TemplateError(
+                'Missing {{enddef}}',
+                position=start, name=name)
+        if (isinstance(tokens[0], tuple) and tokens[0][0] == 'enddef'):
+            return ('def', start, func_name, sig, content), tokens[1:]
+        next_chunk, tokens = parse_expr(tokens, name, context)
+        content.append(next_chunk)
+
+
+def parse_signature(sig_text, name, pos):
+    tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
+    sig_args = []
+    var_arg = None
+    var_kw = None
+    defaults = {}
+
+    def get_token(pos=False):
+        try:
+            tok_type, tok_string, (srow, scol), (erow, ecol), line = next(
+                tokens)
+        except StopIteration:
+            return tokenize.ENDMARKER, ''
+        if pos:
+            return tok_type, tok_string, (srow, scol), (erow, ecol)
+        else:
+            return tok_type, tok_string
+    while 1:
+        var_arg_type = None
+        tok_type, tok_string = get_token()
+        if tok_type == tokenize.ENDMARKER:
+            break
+        if tok_type == tokenize.OP and (
+                tok_string == '*' or tok_string == '**'):
+            var_arg_type = tok_string
+            tok_type, tok_string = get_token()
+        if tok_type != tokenize.NAME:
+            raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                position=pos, name=name)
+        var_name = tok_string
+        tok_type, tok_string = get_token()
+        if tok_type == tokenize.ENDMARKER or (
+                tok_type == tokenize.OP and tok_string == ','):
+            if var_arg_type == '*':
+                var_arg = var_name
+            elif var_arg_type == '**':
+                var_kw = var_name
+            else:
+                sig_args.append(var_name)
+            if tok_type == tokenize.ENDMARKER:
+                break
+            continue
+        if var_arg_type is not None:
+            raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                position=pos, name=name)
+        if tok_type == tokenize.OP and tok_string == '=':
+            nest_type = None
+            unnest_type = None
+            nest_count = 0
+            start_pos = end_pos = None
+            parts = []
+            while 1:
+                tok_type, tok_string, s, e = get_token(True)
+                if tok_type == tokenize.ENDMARKER and nest_count:
+                    raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                        position=pos, name=name)
+                if (not nest_count and
+                    (tok_type == tokenize.ENDMARKER or
+                        (tok_type == tokenize.OP and tok_string == ','))):
+                    default_expr = isolate_expression(
+                        sig_text, start_pos, end_pos)
+                    defaults[var_name] = default_expr
+                    sig_args.append(var_name)
+                    break
+                parts.append((tok_type, tok_string))
+                # Track the span only for tokens inside the default value so
+                # that the terminating ',' is not captured in the expression.
+                if start_pos is None:
+                    start_pos = s
+                end_pos = e
+                if nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == nest_type:
+                    nest_count += 1
+                elif nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == unnest_type:
+                    nest_count -= 1
+                    if not nest_count:
+                        nest_type = unnest_type = None
+                elif not nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string in ('(', '[', '{'):
+                    nest_type = tok_string
+                    nest_count = 1
+                    unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
+    return sig_args, var_arg, var_kw, defaults
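+
+# Illustrative sketch: the parsed signature is a tuple of (positional
+# argument names, *args name, **kwargs name, defaults), with default
+# values kept as source text for later evaluation:
+#
+#     >>> parse_signature('a, b=1, *args, **kw', None, None)
+#     (['a', 'b'], 'args', 'kw', {'b': '1'})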
+
+
+def isolate_expression(string, start_pos, end_pos):
+    srow, scol = start_pos
+    srow -= 1
+    erow, ecol = end_pos
+    erow -= 1
+    lines = string.splitlines(True)
+    if srow == erow:
+        return lines[srow][scol:ecol]
+    parts = [lines[srow][scol:]]
+    parts.extend(lines[srow + 1:erow])
+    if erow < len(lines):
+        # It'll sometimes give (end_row_past_finish, 0)
+        parts.append(lines[erow][:ecol])
+    return ''.join(parts)
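+
+# Illustrative sketch: rows are 1-based and columns 0-based, matching
+# tokenize conventions:
+#
+#     >>> isolate_expression('x = dict(a=1)', (1, 4), (1, 13))
+#     'dict(a=1)'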
+
+_fill_command_usage = """\
+%prog [OPTIONS] TEMPLATE arg=value
+
+Use py:arg=value to set a Python value; otherwise all values are
+strings.
+"""
+
+
+def fill_command(args=None):
+    import sys
+    import optparse
+    import pkg_resources
+    import os
+    if args is None:
+        args = sys.argv[1:]
+    dist = pkg_resources.get_distribution('Paste')
+    parser = optparse.OptionParser(
+        version=coerce_text(dist),
+        usage=_fill_command_usage)
+    parser.add_option(
+        '-o', '--output',
+        dest='output',
+        metavar="FILENAME",
+        help="File to write output to (default stdout)")
+    parser.add_option(
+        '--html',
+        dest='use_html',
+        action='store_true',
+        help="Use HTML style filling (including automatic HTML quoting)")
+    parser.add_option(
+        '--env',
+        dest='use_env',
+        action='store_true',
+        help="Put the environment in as top-level variables")
+    options, args = parser.parse_args(args)
+    if len(args) < 1:
+        print('You must give a template filename')
+        sys.exit(2)
+    template_name = args[0]
+    args = args[1:]
+    vars = {}
+    if options.use_env:
+        vars.update(os.environ)
+    for value in args:
+        if '=' not in value:
+            print('Bad argument: %r' % value)
+            sys.exit(2)
+        name, value = value.split('=', 1)
+        if name.startswith('py:'):
+            name = name[3:]
+            value = eval(value)
+        vars[name] = value
+    if template_name == '-':
+        template_content = sys.stdin.read()
+        template_name = '<stdin>'
+    else:
+        f = open(template_name, 'rb')
+        template_content = f.read()
+        f.close()
+    if options.use_html:
+        TemplateClass = HTMLTemplate
+    else:
+        TemplateClass = Template
+    template = TemplateClass(template_content, name=template_name)
+    result = template.substitute(vars)
+    if options.output:
+        f = open(options.output, 'wb')
+        f.write(result)
+        f.close()
+    else:
+        sys.stdout.write(result)
+
+if __name__ == '__main__':
+    fill_command()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/_looper.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/_looper.py
new file mode 100644
index 0000000..4413a5b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/_looper.py
@@ -0,0 +1,163 @@
+"""
+Helper for looping over sequences, particularly in templates.
+
+Often in a template loop it's handy to know what comes next, what came
+before, and whether this is the first or last item in the sequence.
+These things are awkward to track in a plain Python loop, but the looper
+keeps that context for you.  Use like::
+
+    >>> for loop, item in looper(['a', 'b', 'c']):
+    ...     print loop.number, item
+    ...     if not loop.last:
+    ...         print '---'
+    1 a
+    ---
+    2 b
+    ---
+    3 c
+
+"""
+
+import sys
+from .compat3 import basestring_
+
+__all__ = ['looper']
+
+
+class looper(object):
+    """
+    Helper for looping (particularly in templates)
+
+    Use this like::
+
+        for loop, item in looper(seq):
+            if loop.first:
+                ...
+    """
+
+    def __init__(self, seq):
+        self.seq = seq
+
+    def __iter__(self):
+        return looper_iter(self.seq)
+
+    def __repr__(self):
+        return '<%s for %r>' % (
+            self.__class__.__name__, self.seq)
+
+
+class looper_iter(object):
+
+    def __init__(self, seq):
+        self.seq = list(seq)
+        self.pos = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.pos >= len(self.seq):
+            raise StopIteration
+        result = loop_pos(self.seq, self.pos), self.seq[self.pos]
+        self.pos += 1
+        return result
+
+    if sys.version < "3":
+        next = __next__
+
+
+class loop_pos(object):
+
+    def __init__(self, seq, pos):
+        self.seq = seq
+        self.pos = pos
+
+    def __repr__(self):
+        return '<loop pos=%r at %r>' % (
+            self.seq[self.pos], self.pos)
+
+    def index(self):
+        return self.pos
+    index = property(index)
+
+    def number(self):
+        return self.pos + 1
+    number = property(number)
+
+    def item(self):
+        return self.seq[self.pos]
+    item = property(item)
+
+    def __next__(self):
+        try:
+            return self.seq[self.pos + 1]
+        except IndexError:
+            return None
+    __next__ = property(__next__)
+
+    if sys.version < "3":
+        next = __next__
+
+    def previous(self):
+        if self.pos == 0:
+            return None
+        return self.seq[self.pos - 1]
+    previous = property(previous)
+
+    def odd(self):
+        return not self.pos % 2
+    odd = property(odd)
+
+    def even(self):
+        return self.pos % 2
+    even = property(even)
+
+    def first(self):
+        return self.pos == 0
+    first = property(first)
+
+    def last(self):
+        return self.pos == len(self.seq) - 1
+    last = property(last)
+
+    def length(self):
+        return len(self.seq)
+    length = property(length)
+
+    def first_group(self, getter=None):
+        """
+        Returns true if this item is the start of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.first:
+            return True
+        return self._compare_group(self.item, self.previous, getter)
+
+    def last_group(self, getter=None):
+        """
+        Returns true if this item is the last of its group, i.e. the
+        grouping value changes on the next item.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.last:
+            return True
+        return self._compare_group(self.item, self.__next__, getter)
+
+    def _compare_group(self, item, other, getter):
+        if getter is None:
+            return item != other
+        elif (isinstance(getter, basestring_)
+              and getter.startswith('.')):
+            getter = getter[1:]
+            if getter.endswith('()'):
+                getter = getter[:-2]
+                return getattr(item, getter)() != getattr(other, getter)()
+            else:
+                return getattr(item, getter) != getattr(other, getter)
+        elif hasattr(getter, '__call__'):
+            return getter(item) != getter(other)
+        else:
+            return item[getter] != other[getter]
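+
+# Illustrative sketch: first_group/last_group flag the boundaries where the
+# grouping value changes:
+#
+#     >>> [loop.first_group() for loop, x in looper([1, 1, 2])]
+#     [True, False, True]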
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/compat3.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/compat3.py
new file mode 100644
index 0000000..d49412b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/externals/tempita/compat3.py
@@ -0,0 +1,45 @@
+import sys
+
+__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode']
+
+PY3 = sys.version_info[0] == 3
+
+if sys.version_info[0] < 3:
+    b = bytes = str
+    basestring_ = basestring
+else:
+
+    def b(s):
+        if isinstance(s, str):
+            return s.encode('latin1')
+        return bytes(s)
+    basestring_ = (bytes, str)
+    bytes = bytes
+text = str
+
+if sys.version_info[0] < 3:
+
+    def next(obj):
+        return obj.next()
+else:
+    next = next
+
+
+def is_unicode(obj):
+    if sys.version_info[0] < 3:
+        return isinstance(obj, unicode)
+    else:
+        return isinstance(obj, str)
+
+
+def coerce_text(v):
+    if not isinstance(v, basestring_):
+        if sys.version_info[0] < 3:
+            if hasattr(v, '__unicode__'):
+                return unicode(v)  # noqa: only reachable on Python 2
+            return bytes(v)
+        return str(v)  # every object has __str__ on Python 3
+    return v
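+
+# Illustrative sketch: coerce_text passes strings through unchanged and
+# stringifies everything else, so template code can treat values uniformly:
+#
+#     >>> coerce_text('x'), coerce_text(5)
+#     ('x', '5')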
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/filter.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/filter.py
new file mode 100644
index 0000000..8e881d8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/filter.py
@@ -0,0 +1,1571 @@
+"""IIR and FIR filtering functions"""
+
+from .externals.six import string_types, integer_types
+import warnings
+import numpy as np
+from scipy.fftpack import fft, ifftshift, fftfreq
+from copy import deepcopy
+
+from .fixes import get_firwin2, get_filtfilt
+from .time_frequency.multitaper import dpss_windows, _mt_spectra
+from .parallel import parallel_func, check_n_jobs
+from .cuda import (setup_cuda_fft_multiply_repeated, fft_multiply_repeated,
+                   setup_cuda_fft_resample, fft_resample, _smart_pad)
+from .utils import logger, verbose, sum_squared, check_version
+
+
+def is_power2(num):
+    """Test if number is a power of 2
+
+    Parameters
+    ----------
+    num : int
+        Number.
+
+    Returns
+    -------
+    b : bool
+        True if is power of 2.
+
+    Examples
+    --------
+    >>> is_power2(2 ** 3)
+    True
+    >>> is_power2(5)
+    False
+    """
+    num = int(num)
+    return num != 0 and ((num & (num - 1)) == 0)
+
+
+def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
+                        n_jobs=1):
+    """ Filter using overlap-add FFTs.
+
+    Filters the signal x using a filter with the impulse response h.
+    If zero_phase==True, the the filter is applied twice, once in the forward
+    direction and once backward , resulting in a zero-phase filter.
+
+    .. warning:: This operates on the data in-place.
+
+    Parameters
+    ----------
+    x : 2d array
+        Signal to filter.
+    h : 1d array
+        Filter impulse response (FIR filter coefficients).
+    n_fft : int
+        Length of the FFT. If None, the best size is determined automatically.
+    zero_phase : bool
+        If True: the filter is applied in forward and backward direction,
+        resulting in a zero-phase filter.
+    picks : array-like of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+
+    Returns
+    -------
+    xf : 2d array
+        x filtered.
+    """
+    if picks is None:
+        picks = np.arange(x.shape[0])
+
+    # Extend the signal by mirroring the edges to reduce transient filter
+    # response
+    n_h = len(h)
+    if n_h == 1:
+        return x * h ** 2 if zero_phase else x * h
+    if x.shape[1] < len(h):
+        raise ValueError('Overlap add should only be used for signals '
+                         'longer than the requested filter')
+    n_edge = max(min(n_h, x.shape[1]) - 1, 0)
+
+    n_x = x.shape[1] + 2 * n_edge
+
+    # Determine FFT length to use
+    if n_fft is None:
+        min_fft = 2 * n_h - 1
+        max_fft = n_x
+        if max_fft >= min_fft:
+            n_tot = 2 * n_x if zero_phase else n_x
+
+            # cost function based on number of multiplications
+            N = 2 ** np.arange(np.ceil(np.log2(min_fft)),
+                               np.ceil(np.log2(max_fft)) + 1, dtype=int)
+            # if doing zero-phase, h needs to be thought of as ~ twice as long
+            n_h_cost = 2 * n_h - 1 if zero_phase else n_h
+            cost = (np.ceil(n_tot / (N - n_h_cost + 1).astype(np.float)) *
+                    N * (np.log2(N) + 1))
+
+            # add a heuristic term to prevent too-long FFT's which are slow
+            # (not predicted by mult. cost alone, 4e-5 exp. determined)
+            cost += 4e-5 * N * n_tot
+
+            n_fft = N[np.argmin(cost)]
+        else:
+            # Use only a single block
+            n_fft = 2 ** int(np.ceil(np.log2(n_x + n_h - 1)))
+
+    if zero_phase and n_fft <= 2 * n_h - 1:
+        raise ValueError("n_fft is too short, has to be at least "
+                         "2 * len(h) - 1 if zero_phase == True")
+    elif not zero_phase and n_fft <= n_h:
+        raise ValueError("n_fft is too short, has to be at least "
+                         "len(h) if zero_phase == False")
+
+    if not is_power2(n_fft):
+        warnings.warn("FFT length is not a power of 2. Can be slower.")
+
+    # Filter in frequency domain
+    h_fft = fft(np.concatenate([h, np.zeros(n_fft - n_h, dtype=h.dtype)]))
+    assert(len(h_fft) == n_fft)
+
+    if zero_phase:
+        # Zero-phase filtering is done in one pass by taking the squared
+        # magnitude of h_fft. This gives results equivalent to the old
+        # two-pass method but theoretically doubles the speed for long FFT
+        # lengths. To compensate, overlapping must be done both before and
+        # after each segment. When zero_phase == False it is only needed
+        # after.
+        h_fft = (h_fft * h_fft.conj()).real
+        # equivalent to convolving h(t) and h(-t) in the time domain
+
+    # Figure out if we should use CUDA
+    n_jobs, cuda_dict, h_fft = setup_cuda_fft_multiply_repeated(n_jobs, h_fft)
+
+    # Process each row separately
+    if n_jobs == 1:
+        for p in picks:
+            x[p] = _1d_overlap_filter(x[p], h_fft, n_h, n_edge, zero_phase,
+                                      cuda_dict)
+    else:
+        parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs)
+        data_new = parallel(p_fun(x[p], h_fft, n_h, n_edge, zero_phase,
+                                  cuda_dict)
+                            for p in picks)
+        for pp, p in enumerate(picks):
+            x[p] = data_new[pp]
+
+    return x
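+
+# Minimal usage sketch (illustrative values only): smoothing two channels
+# with a short FIR kernel; the signal must be longer than the filter, and
+# the data are modified in place:
+#
+#     x = np.random.randn(2, 1000)
+#     h = np.ones(11) / 11.  # moving-average FIR kernel
+#     x = _overlap_add_filter(x, h, zero_phase=True)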
+
+
+def _1d_overlap_filter(x, h_fft, n_h, n_edge, zero_phase, cuda_dict):
+    """Do one-dimensional overlap-add FFT FIR filtering"""
+    # pad to reduce ringing
+    if cuda_dict['use_cuda']:
+        n_fft = cuda_dict['x'].size  # account for CUDA's modification of h_fft
+    else:
+        n_fft = len(h_fft)
+    x_ext = _smart_pad(x, n_edge)
+    n_x = len(x_ext)
+    x_filtered = np.zeros_like(x_ext)
+
+    if zero_phase:
+        # Segment length for signal x (convolving twice)
+        n_seg = n_fft - 2 * (n_h - 1) - 1
+
+        # Number of segments (including fractional segments)
+        n_segments = int(np.ceil(n_x / float(n_seg)))
+
+        # padding parameters to ensure filtering is done properly
+        pre_pad = n_h - 1
+        post_pad = n_fft - (n_h - 1)
+    else:
+        n_seg = n_fft - n_h + 1
+        n_segments = int(np.ceil(n_x / float(n_seg)))
+        pre_pad = 0
+        post_pad = n_fft
+
+    # Now the actual filtering step is identical for zero-phase (filtfilt-like)
+    # or single-pass
+    for seg_idx in range(n_segments):
+        start = seg_idx * n_seg
+        stop = (seg_idx + 1) * n_seg
+        seg = x_ext[start:stop]
+        seg = np.concatenate([np.zeros(pre_pad), seg,
+                              np.zeros(post_pad - len(seg))])
+
+        prod = fft_multiply_repeated(h_fft, seg, cuda_dict)
+
+        start_filt = max(0, start - pre_pad)
+        stop_filt = min(start - pre_pad + n_fft, n_x)
+        start_prod = max(0, pre_pad - start)
+        stop_prod = start_prod + stop_filt - start_filt
+        x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod]
+
+    # Remove mirrored edges that we added and cast
+    if n_edge > 0:
+        x_filtered = x_filtered[n_edge:-n_edge]
+    x_filtered = x_filtered.astype(x.dtype)
+    return x_filtered
+
+
+def _filter_attenuation(h, freq, gain):
+    """Compute minimum attenuation at stop frequency"""
+    from scipy.signal import freqz
+    _, filt_resp = freqz(h.ravel(), worN=np.pi * freq)
+    filt_resp = np.abs(filt_resp)  # use amplitude response
+    filt_resp /= np.max(filt_resp)
+    filt_resp[np.where(gain == 1)] = 0
+    idx = np.argmax(filt_resp)
+    att_db = -20 * np.log10(filt_resp[idx])
+    att_freq = freq[idx]
+    return att_db, att_freq
+
+
+def _1d_fftmult_ext(x, B, extend_x, cuda_dict):
+    """Helper to parallelize FFT FIR, with extension if necessary"""
+    # extend, if necessary
+    if extend_x is True:
+        x = np.r_[x, x[-1]]
+
+    # do Fourier transforms
+    xf = fft_multiply_repeated(B, x, cuda_dict)
+
+    # put back to original size and type
+    if extend_x is True:
+        xf = xf[:-1]
+
+    xf = xf.astype(x.dtype)
+    return xf
+
+
+def _prep_for_filtering(x, copy, picks=None):
+    """Set up array as 2D for filtering ease"""
+    if x.dtype != np.float64:
+        raise TypeError("Arrays passed for filtering must have a dtype of "
+                        "np.float64")
+    if copy is True:
+        x = x.copy()
+    orig_shape = x.shape
+    x = np.atleast_2d(x)
+    x.shape = (np.prod(x.shape[:-1]), x.shape[-1])
+    if picks is None:
+        picks = np.arange(x.shape[0])
+    elif len(orig_shape) == 3:
+        n_epochs, n_channels, n_times = orig_shape
+        offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels),
+                           len(picks))
+        picks = np.tile(picks, n_epochs) + offset
+    elif len(orig_shape) > 3:
+        raise ValueError('picks argument is not supported for data with more'
+                         ' than three dimensions')
+
+    return x, orig_shape, picks
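+
+# For example (illustrative shapes): epochs data of shape (2, 3, 100) with
+# picks=[1] are flattened to a (6, 100) array, and picks is expanded to
+# address channel 1 within each epoch:
+#
+#     >>> x2d, orig_shape, picks = _prep_for_filtering(
+#     ...     np.zeros((2, 3, 100)), True, [1])
+#     >>> x2d.shape, picks
+#     ((6, 100), array([1, 4]))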
+
+
+def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
+            copy=True):
+    """Filter signal using gain control points in the frequency domain.
+
+    The filter impulse response is constructed from a Hamming window (window
+    used in "firwin2" function) to avoid ripples in the frequency response
+    (windowing is a smoothing in frequency domain). The filter is zero-phase.
+
+    If x is multi-dimensional, this operates along the last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    freq : 1d array
+        Frequency sampling points in Hz.
+    gain : 1d array
+        Filter gain at frequency sampling points.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None, or if len(x) < filter_length,
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+    picks : array-like of int | None
+        Indices to filter. If None all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+    """
+    firwin2 = get_firwin2()
+    # set up array for filtering, reshape to 2D, operate on last axis
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+
+    # issue a warning if attenuation is less than this
+    min_att_db = 20
+
+    # normalize frequencies
+    freq = np.array(freq) / (Fs / 2.)
+    gain = np.array(gain)
+    filter_length = _get_filter_length(filter_length, Fs, len_x=x.shape[1])
+    n_jobs = check_n_jobs(n_jobs, allow_cuda=True)
+
+    if filter_length is None or x.shape[1] <= filter_length:
+        # Use direct FFT filtering for short signals
+
+        Norig = x.shape[1]
+
+        extend_x = False
+        if (gain[-1] == 0.0 and Norig % 2 == 1) \
+                or (gain[-1] == 1.0 and Norig % 2 != 1):
+            # Gain at Nyquist freq: 1: make x EVEN, 0: make x ODD
+            extend_x = True
+
+        N = x.shape[1] + (extend_x is True)
+
+        h = firwin2(N, freq, gain)[np.newaxis, :]
+
+        att_db, att_freq = _filter_attenuation(h, freq, gain)
+        if att_db < min_att_db:
+            att_freq *= Fs / 2
+            warnings.warn('Attenuation at stop frequency %0.1fHz is only '
+                          '%0.1fdB.' % (att_freq, att_db))
+
+        # Make zero-phase filter function
+        B = np.abs(fft(h)).ravel()
+
+        # Figure out if we should use CUDA
+        n_jobs, cuda_dict, B = setup_cuda_fft_multiply_repeated(n_jobs, B)
+
+        if n_jobs == 1:
+            for p in picks:
+                x[p] = _1d_fftmult_ext(x[p], B, extend_x, cuda_dict)
+        else:
+            parallel, p_fun, _ = parallel_func(_1d_fftmult_ext, n_jobs)
+            data_new = parallel(p_fun(x[p], B, extend_x, cuda_dict)
+                                for p in picks)
+            for pp, p in enumerate(picks):
+                x[p] = data_new[pp]
+    else:
+        # Use overlap-add filter with a fixed length
+        N = filter_length
+
+        if (gain[-1] == 0.0 and N % 2 == 1) \
+                or (gain[-1] == 1.0 and N % 2 != 1):
+            # Gain at Nyquist freq: 1: make N EVEN, 0: make N ODD
+            N += 1
+
+        # construct filter with gain resulting from forward-backward filtering
+        h = firwin2(N, freq, gain, window='hann')
+
+        att_db, att_freq = _filter_attenuation(h, freq, gain)
+        att_db += 6  # the filter is applied twice (zero phase)
+        if att_db < min_att_db:
+            att_freq *= Fs / 2
+            warnings.warn('Attenuation at stop frequency %0.1fHz is only '
+                          '%0.1fdB. Increase filter_length for higher '
+                          'attenuation.' % (att_freq, att_db))
+
+        # reconstruct filter, this time with appropriate gain for fwd-bkwd
+        gain = np.sqrt(gain)
+        h = firwin2(N, freq, gain, window='hann')
+        x = _overlap_add_filter(x, h, zero_phase=True, picks=picks,
+                                n_jobs=n_jobs)
+
+    x.shape = orig_shape
+    return x
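+
+# Minimal usage sketch (illustrative values only): a 1-40 Hz band-pass
+# expressed as gain control points at Fs = 100 Hz, with 0.5 Hz transition
+# bands around the cut-offs:
+#
+#     x = np.random.randn(1, 10000)
+#     freq = [0., 0.5, 1., 40., 40.5, 50.]  # Hz, from DC to Nyquist
+#     gain = [0., 0., 1., 1., 0., 0.]
+#     xf = _filter(x, 100., freq, gain)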
+
+
+def _check_coefficients(b, a):
+    """Check for filter stability"""
+    from scipy.signal import tf2zpk
+    z, p, k = tf2zpk(b, a)
+    if np.any(np.abs(p) > 1.0):
+        raise RuntimeError('Filter poles outside unit circle, filter will be '
+                           'unstable. Consider using different filter '
+                           'coefficients.')
+
+
+def _filtfilt(x, b, a, padlen, picks, n_jobs, copy):
+    """Helper to more easily call filtfilt"""
+    # set up array for filtering, reshape to 2D, operate on last axis
+    filtfilt = get_filtfilt()
+    n_jobs = check_n_jobs(n_jobs)
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+    _check_coefficients(b, a)
+    if n_jobs == 1:
+        for p in picks:
+            x[p] = filtfilt(b, a, x[p], padlen=padlen)
+    else:
+        parallel, p_fun, _ = parallel_func(filtfilt, n_jobs)
+        data_new = parallel(p_fun(b, a, x[p], padlen=padlen)
+                            for p in picks)
+        for pp, p in enumerate(picks):
+            x[p] = data_new[pp]
+    x.shape = orig_shape
+    return x
+
+
+def _estimate_ringing_samples(b, a):
+    """Helper function for determining IIR padding"""
+    from scipy.signal import lfilter
+    x = np.zeros(1000)
+    x[0] = 1
+    h = lfilter(b, a, x)
+    return np.where(np.abs(h) > 0.001 * np.max(np.abs(h)))[0][-1]
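+
+# Illustrative sketch: for a one-pole smoother y[n] = 0.9 * y[n-1] + x[n],
+# the impulse response 0.9 ** n stays above 0.1% of its peak until n = 65:
+#
+#     >>> _estimate_ringing_samples([1., 0.], [1., -0.9])
+#     65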
+
+
+def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
+                         f_pass=None, f_stop=None, sfreq=None, btype=None,
+                         return_copy=True):
+    """Use IIR parameters to get filtering coefficients
+
+    This function works like a wrapper for iirdesign and iirfilter in
+    scipy.signal to make filter coefficients for IIR filtering. It also
+    estimates the number of padding samples based on the filter ringing.
+    It creates a new iir_params dict (or updates the one passed to the
+    function) with the filter coefficients ('b' and 'a') and an estimate
+    of the padding necessary ('padlen') so IIR filtering can be performed.
+
+    Parameters
+    ----------
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+        If iir_params['b'] and iir_params['a'] exist, these will be used
+        as coefficients to perform IIR filtering. Otherwise, if
+        iir_params['order'] and iir_params['ftype'] exist, these will be
+        used with scipy.signal.iirfilter to make a filter. Otherwise, if
+        iir_params['gpass'] and iir_params['gstop'] exist, these will be
+        used with scipy.signal.iirdesign to design a filter.
+        iir_params['padlen'] defines the number of samples to pad (and
+        an estimate will be calculated if it is not given). See Notes for
+        more details.
+    f_pass : float or list of float
+        Frequency for the pass-band. Low-pass and high-pass filters should
+        be a float, band-pass should be a 2-element list of float.
+    f_stop : float or list of float
+        Stop-band frequency (same size as f_pass). Not used if 'order' is
+        specified in iir_params.
+    sfreq : float | None
+        The sample rate.
+    btype : str
+        Type of filter. Should be 'lowpass', 'highpass', or 'bandpass'
+        (or analogous string representations known to scipy.signal).
+    return_copy : bool
+        If False, the 'b', 'a', and 'padlen' entries in iir_params will be
+        set inplace (if they weren't already). Otherwise, a new iir_params
+        instance will be created and returned with these entries.
+
+    Returns
+    -------
+    iir_params : dict
+        Updated iir_params dict, with the entries (set only if they didn't
+        exist before) for 'b', 'a', and 'padlen' for IIR filtering.
+
+    Notes
+    -----
+    This function triages calls to scipy.signal.iirfilter and iirdesign
+    based on the input arguments (see descriptions of these functions
+    and scipy's scipy.signal.filter_design documentation for details).
+
+    Examples
+    --------
+    iir_params can have several forms. Consider constructing a low-pass
+    filter at 40 Hz with 1000 Hz sampling rate.
+
+    In the most basic (2-parameter) form of iir_params, the order of the
+    filter 'N' and the type of filtering 'ftype' are specified. To get
+    coefficients for a 4th-order Butterworth filter, this would be:
+
+    >>> iir_params = dict(order=4, ftype='butter')
+    >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False)
+    >>> print((len(iir_params['b']), len(iir_params['a']), iir_params['padlen']))
+    (5, 5, 82)
+
+    Filters can also be constructed using filter design methods. To get a
+    40 Hz Chebyshev type 1 lowpass with specific gain characteristics in the
+    pass and stop bands (assuming the desired stop band is at 45 Hz), this
+    would be a filter with much longer ringing:
+
+    >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20)
+    >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low')
+    >>> print((len(iir_params['b']), len(iir_params['a']), iir_params['padlen']))
+    (6, 6, 439)
+
+    Padding and/or filter coefficients can also be manually specified. For
+    a 10-sample moving window with no padding during filtering, for example,
+    one can just do:
+
+    >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0)
+    >>> iir_params = construct_iir_filter(iir_params, return_copy=False)
+    >>> print((iir_params['b'], iir_params['a'], iir_params['padlen']))
+    (array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]), [1, 0], 0)
+
+    """  # noqa
+    from scipy.signal import iirfilter, iirdesign
+    known_filters = ('bessel', 'butter', 'butterworth', 'cauer', 'cheby1',
+                     'cheby2', 'chebyshev1', 'chebyshev2', 'chebyshevi',
+                     'chebyshevii', 'ellip', 'elliptic')
+    a = None
+    b = None
+    # if the filter has been designed, we're good to go
+    if 'a' in iir_params and 'b' in iir_params:
+        [b, a] = [iir_params['b'], iir_params['a']]
+    else:
+        # ensure we have a valid ftype
+        if 'ftype' not in iir_params:
+            raise RuntimeError("ftype must be an entry in iir_params if 'b' "
+                               "and 'a' are not specified")
+        ftype = iir_params['ftype']
+        if ftype not in known_filters:
+            raise RuntimeError('ftype must be in filter_dict from '
+                               'scipy.signal (e.g., butter, cheby1, etc.) not '
+                               '%s' % ftype)
+
+        # use order-based design
+        Wp = np.asanyarray(f_pass) / (float(sfreq) / 2)
+        if 'order' in iir_params:
+            [b, a] = iirfilter(iir_params['order'], Wp, btype=btype,
+                               ftype=ftype)
+        else:
+            # use gpass / gstop design
+            Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)
+            if 'gpass' not in iir_params or 'gstop' not in iir_params:
+                raise ValueError("iir_params must have at least 'gstop' and"
+                                 " 'gpass' (or 'N') entries")
+            [b, a] = iirdesign(Wp, Ws, iir_params['gpass'],
+                               iir_params['gstop'], ftype=ftype)
+
+    if a is None or b is None:
+        raise RuntimeError('coefficients could not be created from iir_params')
+
+    # now deal with padding
+    if 'padlen' not in iir_params:
+        padlen = _estimate_ringing_samples(b, a)
+    else:
+        padlen = iir_params['padlen']
+
+    if return_copy:
+        iir_params = deepcopy(iir_params)
+
+    iir_params.update(dict(b=b, a=a, padlen=padlen))
+    return iir_params
+
+
+def _check_method(method, iir_params, extra_types):
+    """Helper to parse method arguments"""
+    allowed_types = ['iir', 'fft'] + extra_types
+    if not isinstance(method, string_types):
+        raise TypeError('method must be a string')
+    if method not in allowed_types:
+        raise ValueError('method must be one of %s, not "%s"'
+                         % (allowed_types, method))
+    if method == 'iir':
+        if iir_params is None:
+            iir_params = dict(order=4, ftype='butter')
+        if not isinstance(iir_params, dict):
+            raise ValueError('iir_params must be a dict')
+    elif iir_params is not None:
+        raise ValueError('iir_params must be None if method != "iir"')
+    return iir_params
+
+
+@verbose
+def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
+                     l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
+                     method='fft', iir_params=None,
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Bandpass filter for the signal x.
+
+    Applies a zero-phase bandpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp1 : float
+        Low cut-off frequency in Hz.
+    Fp2 : float
+        High cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
+    l_trans_bandwidth : float
+        Width of the transition band at the low cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
+    h_trans_bandwidth : float
+        Width of the transition band at the high cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    picks : array-like of int | None
+        Indices to filter. If None, all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    See Also
+    --------
+    low_pass_filter, high_pass_filter
+
+    Notes
+    -----
+    The frequency response is (approximately) given by::
+
+                     ----------
+                   /|         | \
+                  / |         |  \
+                 /  |         |   \
+                /   |         |    \
+      ----------    |         |     -----------------
+                    |         |
+              Fs1  Fp1       Fp2   Fs2
+
+    Where:
+
+        Fs1 = Fp1 - l_trans_bandwidth in Hz
+        Fs2 = Fp2 + h_trans_bandwidth in Hz
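+
+    Examples
+    --------
+    A minimal sketch (illustrative values): keep the 1-40 Hz band of one
+    second of noise sampled at 1000 Hz:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> xf = band_pass_filter(x, Fs=1000., Fp1=1., Fp2=40.)  # doctest: +SKIP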
+    """
+    iir_params = _check_method(method, iir_params, [])
+
+    Fs = float(Fs)
+    Fp1 = float(Fp1)
+    Fp2 = float(Fp2)
+    Fs1 = Fp1 - l_trans_bandwidth if method == 'fft' else Fp1
+    Fs2 = Fp2 + h_trans_bandwidth if method == 'fft' else Fp2
+    if Fs2 > Fs / 2:
+        raise ValueError('Effective band-stop frequency (%s) is too high '
+                         '(maximum based on Nyquist is %s)' % (Fs2, Fs / 2.))
+
+    if Fs1 <= 0:
+        raise ValueError('Filter specification invalid: Lower stop frequency '
+                         'too low (%0.1fHz). Increase Fp1 or reduce '
+                         'transition bandwidth (l_trans_bandwidth)' % Fs1)
+
+    if method == 'fft':
+        freq = [0, Fs1, Fp1, Fp2, Fs2, Fs / 2]
+        gain = [0, 0, 1, 1, 0, 0]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, [Fp1, Fp2],
+                                          [Fs1, Fs2], Fs, 'bandpass')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
+                     l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
+                     method='fft', iir_params=None,
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Bandstop filter for the signal x.
+
+    Applies a zero-phase bandstop filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp1 : float | array of float
+        Low cut-off frequency in Hz.
+    Fp2 : float | array of float
+        High cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
+    l_trans_bandwidth : float
+        Width of the transition band at the low cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
+    h_trans_bandwidth : float
+        Width of the transition band at the high cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    picks : array-like of int | None
+        Indices to filter. If None, all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by::
+
+      ----------                   ----------
+               |\                 /|
+               | \               / |
+               |  \             /  |
+               |   \           /   |
+               |    -----------    |
+               |    |         |    |
+              Fp1  Fs1       Fs2  Fp2
+
+    Where:
+
+        Fs1 = Fp1 + l_trans_bandwidth in Hz
+        Fs2 = Fp2 - h_trans_bandwidth in Hz
+
+    Note that multiple stop bands can be specified using arrays.
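+
+    Examples
+    --------
+    A minimal sketch (illustrative values): suppress the 48-52 Hz band of a
+    signal sampled at 1000 Hz:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> xf = band_stop_filter(x, Fs=1000., Fp1=48., Fp2=52.)  # doctest: +SKIP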
+    """
+    iir_params = _check_method(method, iir_params, [])
+
+    Fp1 = np.atleast_1d(Fp1)
+    Fp2 = np.atleast_1d(Fp2)
+    if len(Fp1) != len(Fp2):
+        raise ValueError('Fp1 and Fp2 must be the same length')
+
+    Fs = float(Fs)
+    Fp1 = Fp1.astype(float)
+    Fp2 = Fp2.astype(float)
+    Fs1 = Fp1 + l_trans_bandwidth if method == 'fft' else Fp1
+    Fs2 = Fp2 - h_trans_bandwidth if method == 'fft' else Fp2
+
+    if np.any(Fs1 <= 0):
+        raise ValueError('Filter specification invalid: Lower stop frequency '
+                         'too low (%0.1fHz). Increase Fp1 or reduce '
+                         'transition bandwidth (l_trans_bandwidth)'
+                         % np.min(Fs1))  # report the offending (lowest) edge
+
+    if method == 'fft':
+        freq = np.r_[0, Fp1, Fs1, Fs2, Fp2, Fs / 2]
+        gain = np.r_[1, np.ones_like(Fp1), np.zeros_like(Fs1),
+                     np.zeros_like(Fs2), np.ones_like(Fp2), 1]
+        order = np.argsort(freq)
+        freq = freq[order]
+        gain = gain[order]
+        if np.any(np.abs(np.diff(gain, 2)) > 1):
+            raise ValueError('Stop bands are not sufficiently separated.')
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        # apply the stop bands cumulatively; filtering the original x on
+        # every pass would keep only the last stop band
+        xf = x
+        for fp_1, fp_2, fs_1, fs_2 in zip(Fp1, Fp2, Fs1, Fs2):
+            iir_params_new = construct_iir_filter(iir_params, [fp_1, fp_2],
+                                                  [fs_1, fs_2], Fs, 'bandstop')
+            padlen = min(iir_params_new['padlen'], len(xf))
+            xf = _filtfilt(xf, iir_params_new['b'], iir_params_new['a'],
+                           padlen, picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
+                    method='fft', iir_params=None,
+                    picks=None, n_jobs=1, copy=True, verbose=None):
+    """Lowpass filter for the signal x.
+
+    Applies a zero-phase lowpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp : float
+        Cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
+    trans_bandwidth : float
+        Width of the transition band in Hz. Not used if 'order' is specified
+        in iir_params.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    picks : array-like of int | None
+        Indices to filter. If None, all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    See Also
+    --------
+    resample
+    band_pass_filter, high_pass_filter
+
+    Notes
+    -----
+    The frequency response is (approximately) given by::
+
+      -------------------------
+                              | \
+                              |  \
+                              |   \
+                              |    \
+                              |     -----------------
+                              |
+                              Fp  Fp+trans_bandwidth
+
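+    Examples
+    --------
+    A minimal sketch (illustrative values): low-pass a signal sampled at
+    1000 Hz at 40 Hz:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> xf = low_pass_filter(x, Fs=1000., Fp=40.)  # doctest: +SKIP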
+    """
+    iir_params = _check_method(method, iir_params, [])
+    Fs = float(Fs)
+    Fp = float(Fp)
+    Fstop = Fp + trans_bandwidth if method == 'fft' else Fp
+    if Fstop > Fs / 2.:
+        raise ValueError('Effective stop frequency (%s) is too high '
+                         '(maximum based on Nyquist is %s)' % (Fstop, Fs / 2.))
+
+    if method == 'fft':
+        freq = [0, Fp, Fstop, Fs / 2]
+        gain = [1, 1, 0, 0]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, Fp, Fstop, Fs, 'low')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def high_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
+                     method='fft', iir_params=None,
+                     picks=None, n_jobs=1, copy=True, verbose=None):
+    """Highpass filter for the signal x.
+
+    Applies a zero-phase highpass filter to the signal x, operating on the
+    last dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    Fp : float
+        Cut-off frequency in Hz.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
+    trans_bandwidth : float
+        Width of the transition band in Hz. Not used if 'order' is
+        specified in iir_params.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    picks : array-like of int | None
+        Indices to filter. If None, all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    See Also
+    --------
+    low_pass_filter, band_pass_filter
+
+    Notes
+    -----
+    The frequency response is (approximately) given by::
+
+                       -----------------------
+                     /|
+                    / |
+                   /  |
+                  /   |
+        ----------    |
+                      |
+               Fstop  Fp
+
+    Where Fstop = Fp - trans_bandwidth.
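+
+    Examples
+    --------
+    A minimal sketch (illustrative values): high-pass a signal sampled at
+    1000 Hz at 1 Hz:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> xf = high_pass_filter(x, Fs=1000., Fp=1.)  # doctest: +SKIP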
+    """
+    iir_params = _check_method(method, iir_params, [])
+    Fs = float(Fs)
+    Fp = float(Fp)
+
+    Fstop = Fp - trans_bandwidth if method == 'fft' else Fp
+    if Fstop <= 0:
+        raise ValueError('Filter specification invalid: Stop frequency too low'
+                         '(%0.1fHz). Increase Fp or reduce transition '
+                         'bandwidth (trans_bandwidth)' % Fstop)
+
+    if method == 'fft':
+        freq = [0, Fstop, Fp, Fs / 2]
+        gain = [0, 0, 1, 1]
+        xf = _filter(x, Fs, freq, gain, filter_length, picks, n_jobs, copy)
+    else:
+        iir_params = construct_iir_filter(iir_params, Fp, Fstop, Fs, 'high')
+        padlen = min(iir_params['padlen'], len(x))
+        xf = _filtfilt(x, iir_params['b'], iir_params['a'], padlen,
+                       picks, n_jobs, copy)
+
+    return xf
+
+
+@verbose
+def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
+                 trans_bandwidth=1, method='fft',
+                 iir_params=None, mt_bandwidth=None,
+                 p_value=0.05, picks=None, n_jobs=1, copy=True, verbose=None):
+    """Notch filter for the signal x.
+
+    Applies a zero-phase notch filter to the signal x, operating on the last
+    dimension.
+
+    Parameters
+    ----------
+    x : array
+        Signal to filter.
+    Fs : float
+        Sampling rate in Hz.
+    freqs : float | array of float | None
+        Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60).
+        None can only be used with the mode 'spectrum_fit', where an F
+        test is used to find sinusoidal components.
+    filter_length : str (Default: '10s') | int | None
+        Length of the filter to use. If None or "len(x) < filter_length",
+        the filter length used is len(x). Otherwise, if int, overlap-add
+        filtering with a filter of the specified length (in samples) is
+        used (faster for long signals). If str, a human-readable time in
+        units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+        to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
+    notch_widths : float | array of float | None
+        Width of the stop band (centred at each freq in freqs) in Hz.
+        If None, freqs / 200 is used.
+    trans_bandwidth : float
+        Width of the transition band in Hz. Not used if 'order' is
+        specified in iir_params.
+    method : str
+        'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt). 'spectrum_fit' will
+        use multi-taper estimation of sinusoidal components. If freqs=None
+        and method='spectrum_fit', significant sinusoidal components
+        are detected using an F test, and noted by logging.
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'spectrum_fit' mode.
+    p_value : float
+        p-value to use in F-test thresholding to determine significant
+        sinusoidal components to remove when method='spectrum_fit' and
+        freqs=None. Note that this will be Bonferroni corrected for the
+        number of frequencies, so large p-values may be justified.
+    picks : array-like of int | None
+        Indices to filter. If None, all indices will be filtered.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly, CUDA is initialized, and method='fft'.
+    copy : bool
+        If True, a copy of x, filtered, is returned. Otherwise, it operates
+        on x in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x filtered.
+
+    Notes
+    -----
+    The frequency response is (approximately) given by::
+
+      ----------         -----------
+               |\       /|
+               | \     / |
+               |  \   /  |
+               |   \ /   |
+               |    -    |
+               |    |    |
+              Fp1 freq  Fp2
+
+    For each freq in freqs, where:
+
+        Fp1 = freq - trans_bandwidth / 2 in Hz
+        Fp2 = freq + trans_bandwidth / 2 in Hz
+
+    References
+    ----------
+    Multi-taper removal is inspired by code from the Chronux toolbox, see
+    www.chronux.org and the book "Observed Brain Dynamics" by Partha Mitra
+    & Hemant Bokil, Oxford University Press, New York, 2008. Please
+    cite this in publications if method 'spectrum_fit' is used.
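+
+    Examples
+    --------
+    A minimal sketch (illustrative values): remove 60 Hz and its harmonics
+    from a signal sampled at 1000 Hz:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> xf = notch_filter(x, Fs=1000., freqs=np.arange(60, 181, 60))  # doctest: +SKIP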
+    """
+    iir_params = _check_method(method, iir_params, ['spectrum_fit'])
+
+    if freqs is not None:
+        freqs = np.atleast_1d(freqs)
+    elif method != 'spectrum_fit':
+        raise ValueError('freqs=None can only be used with method '
+                         'spectrum_fit')
+
+    # Only have to deal with notch_widths for non-autodetect
+    if freqs is not None:
+        if notch_widths is None:
+            notch_widths = freqs / 200.0
+        elif np.any(notch_widths < 0):
+            raise ValueError('notch_widths must be >= 0')
+        else:
+            notch_widths = np.atleast_1d(notch_widths)
+            if len(notch_widths) == 1:
+                notch_widths = notch_widths[0] * np.ones_like(freqs)
+            elif len(notch_widths) != len(freqs):
+                raise ValueError('notch_widths must be None, scalar, or the '
+                                 'same length as freqs')
+
+    if method in ['fft', 'iir']:
+        # Speed this up by computing the fourier coefficients once
+        tb_2 = trans_bandwidth / 2.0
+        lows = [freq - nw / 2.0 - tb_2
+                for freq, nw in zip(freqs, notch_widths)]
+        highs = [freq + nw / 2.0 + tb_2
+                 for freq, nw in zip(freqs, notch_widths)]
+        xf = band_stop_filter(x, Fs, lows, highs, filter_length, tb_2, tb_2,
+                              method, iir_params, picks, n_jobs, copy)
+    elif method == 'spectrum_fit':
+        xf = _mt_spectrum_proc(x, Fs, freqs, notch_widths, mt_bandwidth,
+                               p_value, picks, n_jobs, copy)
+
+    return xf
+
+
+def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,
+                      p_value, picks, n_jobs, copy):
+    """Helper to more easily call _mt_spectrum_remove"""
+    from scipy import stats
+    # set up array for filtering, reshape to 2D, operate on last axis
+    n_jobs = check_n_jobs(n_jobs)
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+
+    # XXX need to implement the moving window version for raw files
+    n_times = x.shape[1]
+
+    # max taper size chosen because it has a max error < 1e-3:
+    # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0]))
+    # 0.00099972447657578449
+    # so we use 1000 because it's the first "nice" number bigger than 953:
+    dpss_n_times_max = 1000
+
+    # figure out what tapers to use
+    if mt_bandwidth is not None:
+        half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+    else:
+        half_nbw = 4
+
+    # compute dpss windows
+    n_tapers_max = int(2 * half_nbw)
+    window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                       low_bias=False,
+                                       interp_from=min(n_times,
+                                                       dpss_n_times_max))
+    # F-stat of 1-p point
+    threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2)
+
+    if n_jobs == 1:
+        freq_list = list()
+        for ii, x_ in enumerate(x):
+            if ii in picks:
+                x[ii], f = _mt_spectrum_remove(x_, sfreq, line_freqs,
+                                               notch_widths, window_fun,
+                                               threshold)
+                freq_list.append(f)
+    else:
+        parallel, p_fun, _ = parallel_func(_mt_spectrum_remove, n_jobs)
+        data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths,
+                                  window_fun, threshold)
+                            for xi, x_ in enumerate(x)
+                            if xi in picks)
+        freq_list = [d[1] for d in data_new]
+        data_new = np.array([d[0] for d in data_new])
+        x[picks, :] = data_new
+
+    # report found frequencies
+    for rm_freqs in freq_list:
+        if line_freqs is None:
+            if len(rm_freqs) > 0:
+                logger.info('Detected notch frequencies:\n%s'
+                            % ', '.join([str(rm_f) for rm_f in rm_freqs]))
+            else:
+                logger.info('Detected notch frequencies:\nNone')
+
+    x.shape = orig_shape
+    return x
+
+
+def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
+                        window_fun, threshold):
+    """Use MT-spectrum to remove line frequencies
+
+    Based on Chronux. If line_freqs is specified, all freqs within notch_width
+    of each line_freq are set to zero.
+    """
+    # drop the even tapers
+    n_tapers = len(window_fun)
+    tapers_odd = np.arange(0, n_tapers, 2)
+    tapers_even = np.arange(1, n_tapers, 2)
+    tapers_use = window_fun[tapers_odd]
+
+    # sum tapers for (used) odd prolates across time (n_tapers, 1)
+    H0 = np.sum(tapers_use, axis=1)
+
+    # sum of squares across tapers (1, )
+    H0_sq = sum_squared(H0)
+
+    # make "time" vector
+    rads = 2 * np.pi * (np.arange(x.size) / float(sfreq))
+
+    # compute mt_spectrum (returning n_ch, n_tapers, n_freq)
+    x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq)
+
+    # sum of the product of x_p and H0 across tapers (1, n_freqs)
+    x_p_H0 = np.sum(x_p[:, tapers_odd, :] *
+                    H0[np.newaxis, :, np.newaxis], axis=1)
+
+    # resulting calculated amplitudes for all freqs
+    A = x_p_H0 / H0_sq
+
+    if line_freqs is None:
+        # figure out which freqs to remove using F stat
+
+        # estimated coefficient
+        x_hat = A * H0[:, np.newaxis]
+
+        # numerator for F-statistic
+        num = (n_tapers - 1) * (A * A.conj()).real * H0_sq
+        # denominator for F-statistic
+        den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) +
+               np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1))
+        den[den == 0] = np.inf
+        f_stat = num / den
+
+        # find frequencies to remove
+        indices = np.where(f_stat > threshold)[1]
+        rm_freqs = freqs[indices]
+    else:
+        # specify frequencies
+        indices_1 = np.unique([np.argmin(np.abs(freqs - lf))
+                               for lf in line_freqs])
+        # use a local copy; in-place division would shrink the caller's
+        # array on every call
+        notch_widths = notch_widths / 2.0
+        indices_2 = [np.logical_and(freqs > lf - nw, freqs < lf + nw)
+                     for lf, nw in zip(line_freqs, notch_widths)]
+        indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0]
+        indices = np.unique(np.r_[indices_1, indices_2])
+        rm_freqs = freqs[indices]
+
+    fits = list()
+    for ind in indices:
+        c = 2 * A[0, ind]
+        fit = np.abs(c) * np.cos(freqs[ind] * rads + np.angle(c))
+        fits.append(fit)
+
+    if len(fits) == 0:
+        datafit = 0.0
+    else:
+        # fitted sinusoids are summed, and subtracted from data
+        datafit = np.sum(np.atleast_2d(fits), axis=0)
+
+    return x - datafit, rm_freqs
+
+
+@verbose
+def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
+             verbose=None):
+    """Resample the array x
+
+    Operates along the last dimension of the array.
+
+    Parameters
+    ----------
+    x : n-d array
+        Signal to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+    npad : integer
+        Number of samples to use at the beginning and end for padding.
+    axis : int
+        Axis along which to resample (default is the last axis).
+    window : string or tuple
+        See scipy.signal.resample for description.
+    n_jobs : int | str
+        Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+        is installed properly and CUDA is initialized.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    xf : array
+        x resampled.
+
+    Notes
+    -----
+    This uses (hopefully) intelligent edge padding and frequency-domain
+    windowing to improve scipy.signal.resample's resampling method, which
+    we have adapted for our use here. Choices of npad and window have
+    important consequences, and the default choices should work well
+    for most natural signals.
+
+    Resampling arguments are broken into "up" and "down" components for future
+    compatibility in case we decide to use an upfirdn implementation. The
+    current implementation is functionally equivalent to passing
+    up=up/down and down=1.
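+
+    Examples
+    --------
+    A minimal sketch (illustrative values): downsample by a factor of two.
+    With the default npad=100, 1000 input samples yield 500 output samples:
+
+    >>> x = np.random.randn(1000)  # doctest: +SKIP
+    >>> resample(x, up=1., down=2.).shape  # doctest: +SKIP
+    (500,)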
+    """
+    from scipy.signal import get_window
+    # check explicitly for backwards compatibility
+    if not isinstance(axis, int):
+        err = ("The axis parameter needs to be an integer (got %s). "
+               "The axis parameter was missing from this function for a "
+               "period of time, you might be intending to specify the "
+               "subsequent window parameter." % repr(axis))
+        raise TypeError(err)
+
+    # make sure our arithmetic will work
+    x = np.asanyarray(x)
+    ratio = float(up) / down
+    if axis < 0:
+        axis = x.ndim + axis
+    orig_last_axis = x.ndim - 1
+    if axis != orig_last_axis:
+        x = x.swapaxes(axis, orig_last_axis)
+    orig_shape = x.shape
+    x_len = orig_shape[-1]
+    if x_len == 0:
+        warnings.warn('x has zero length along last axis, returning a copy of '
+                      'x')
+        return x.copy()
+
+    # prep for resampling now
+    x_flat = x.reshape((-1, x_len))
+    orig_len = x_len + 2 * npad  # length after padding
+    new_len = int(round(ratio * orig_len))  # length after resampling
+    to_remove = np.round(ratio * npad).astype(int)
+
+    # figure out windowing function
+    if window is not None:
+        if callable(window):
+            W = window(fftfreq(orig_len))
+        elif isinstance(window, np.ndarray) and \
+                window.shape == (orig_len,):
+            W = window
+        else:
+            W = ifftshift(get_window(window, orig_len))
+    else:
+        W = np.ones(orig_len)
+    W *= (float(new_len) / float(orig_len))
+    W = W.astype(np.complex128)
+
+    # figure out if we should use CUDA
+    n_jobs, cuda_dict, W = setup_cuda_fft_resample(n_jobs, W, new_len)
+
+    # do the resampling using an adaptation of scipy's FFT-based resample()
+    # use of the 'flat' window is recommended for minimal ringing
+    if n_jobs == 1:
+        y = np.zeros((len(x_flat), new_len - 2 * to_remove), dtype=x.dtype)
+        for xi, x_ in enumerate(x_flat):
+            y[xi] = fft_resample(x_, W, new_len, npad, to_remove,
+                                 cuda_dict)
+    else:
+        parallel, p_fun, _ = parallel_func(fft_resample, n_jobs)
+        y = parallel(p_fun(x_, W, new_len, npad, to_remove, cuda_dict)
+                     for x_ in x_flat)
+        y = np.array(y)
+
+    # Restore the original array shape (modified for resampling)
+    y.shape = orig_shape[:-1] + (y.shape[1],)
+    if axis != orig_last_axis:
+        y = y.swapaxes(axis, orig_last_axis)
+
+    return y
+
+
+def _resample_stim_channels(stim_data, up, down):
+    """Resample stim channels, carefully.
+
+    Parameters
+    ----------
+    stim_data : 1D array, shape (n_samples,) |
+                2D array, shape (n_stim_channels, n_samples)
+        Stim channels to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+
+    Returns
+    -------
+    stim_resampled : 2D array, shape (n_stim_channels, n_samples_resampled)
+        The resampled stim channels
+
+    Notes
+    -----
+    The approach taken here is equivalent to the approach in the C-code.
+    See the decimate_stimch function in MNE/mne_browse_raw/save.c
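+
+    Example (a sketch): when downsampling by two, the first non-zero value
+    in each window is kept, so no events are lost:
+
+    >>> _resample_stim_channels([0, 0, 1, 0, 0, 0, 2, 0], 1, 2)  # doctest: +SKIP
+    array([[ 0.,  1.,  0.,  2.]])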
+    """
+    stim_data = np.atleast_2d(stim_data)
+    n_stim_channels, n_samples = stim_data.shape
+
+    ratio = float(up) / down
+    resampled_n_samples = int(round(n_samples * ratio))
+
+    stim_resampled = np.zeros((n_stim_channels, resampled_n_samples))
+
+    # Figure out which points in old data to subsample; protect against
+    # out-of-bounds, which can happen (having one sample more than
+    # expected) due to padding
+    sample_picks = np.minimum(
+        (np.arange(resampled_n_samples) / ratio).astype(int),
+        n_samples - 1
+    )
+
+    # Create windows starting from sample_picks[i], ending at sample_picks[i+1]
+    windows = zip(sample_picks, np.r_[sample_picks[1:], n_samples])
+
+    # Use the first non-zero value in each window
+    for window_i, window in enumerate(windows):
+        for stim_num, stim in enumerate(stim_data):
+            nonzero = stim[window[0]:window[1]].nonzero()[0]
+            if len(nonzero) > 0:
+                val = stim[window[0] + nonzero[0]]
+            else:
+                val = stim[window[0]]
+            stim_resampled[stim_num, window_i] = val
+
+    return stim_resampled
+
+
+def detrend(x, order=1, axis=-1):
+    """Detrend the array x.
+
+    Parameters
+    ----------
+    x : n-d array
+        Signal to detrend.
+    order : int
+        Fit order. Currently must be 0 or 1.
+    axis : integer
+        Axis of the array to operate on.
+
+    Returns
+    -------
+    xf : array
+        x detrended.
+
+    Examples
+    --------
+    As in scipy.signal.detrend:
+        >>> randgen = np.random.RandomState(9)
+        >>> npoints = int(1e3)
+        >>> noise = randgen.randn(npoints)
+        >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
+        >>> (detrend(x) - noise).max() < 0.01
+        True
+    """
+    from scipy.signal import detrend
+    if axis >= x.ndim:
+        raise ValueError('x does not have axis %d' % axis)
+    if order == 0:
+        fit = 'constant'
+    elif order == 1:
+        fit = 'linear'
+    else:
+        raise ValueError('order must be 0 or 1')
+
+    y = detrend(x, axis=axis, type=fit)
+
+    return y
+
+
+def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
+    """Helper to determine a reasonable filter length"""
+    if not isinstance(min_length, int):
+        raise ValueError('min_length must be an int')
+    if isinstance(filter_length, string_types):
+        # parse time values
+        if filter_length[-2:].lower() == 'ms':
+            mult_fact = 1e-3
+            filter_length = filter_length[:-2]
+        elif filter_length[-1].lower() == 's':
+            mult_fact = 1
+            filter_length = filter_length[:-1]
+        else:
+            raise ValueError('filter_length, if a string, must be a '
+                             'human-readable time (e.g., "10s"), not '
+                             '"%s"' % filter_length)
+        # now get the number
+        try:
+            filter_length = float(filter_length)
+        except ValueError:
+            raise ValueError('filter_length, if a string, must be a '
+                             'human-readable time (e.g., "10s"), not '
+                             '"%s"' % filter_length)
+        filter_length = 2 ** int(np.ceil(np.log2(filter_length *
+                                                 mult_fact * sfreq)))
+        # shouldn't make filter longer than length of x
+        if filter_length >= len_x:
+            filter_length = len_x
+        # only need to check min_length if the filter is shorter than len_x
+        elif filter_length < min_length:
+            filter_length = min_length
+            warnings.warn('filter_length was too short, using filter of '
+                          'length %d samples ("%0.1fs")'
+                          % (filter_length, filter_length / float(sfreq)))
+
+    if filter_length is not None:
+        if not isinstance(filter_length, integer_types):
+            raise ValueError('filter_length must be str, int, or None')
+    return filter_length
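+
+# Example (a sketch): _get_filter_length('10s', sfreq=1000.) rounds
+# 10 s * 1000 Hz = 10000 samples up to the next power of two,
+# i.e. 2 ** 14 = 16384 samples.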
+
+
+class FilterMixin(object):
+    """Object for Epoch/Evoked filtering"""
+
+    def savgol_filter(self, h_freq):
+        """Filter the data using Savitzky-Golay polynomial method
+
+        Parameters
+        ----------
+        h_freq : float
+            Approximate high cut-off frequency in Hz. Note that this
+            is not an exact cutoff, since Savitzky-Golay filtering [1]_ is
+            done using polynomial fits instead of FIR/IIR filtering.
+            This parameter is thus used to determine the length of the
+            window over which a 5th-order polynomial smoothing is used.
+
+        See Also
+        --------
+        mne.io.Raw.filter
+
+        Notes
+        -----
+        Data are modified in-place.
+
+        For Savitzky-Golay low-pass approximation, see:
+
+            https://gist.github.com/Eric89GXL/bbac101d50176611136b
+
+
+        .. versionadded:: 0.9.0
+
+        Examples
+        --------
+        >>> import mne
+        >>> from os import path as op
+        >>> evoked_fname = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis-ave.fif')  # doctest:+SKIP
+        >>> evoked = mne.read_evokeds(evoked_fname, baseline=(None, 0))[0]  # doctest:+SKIP
+        >>> evoked.savgol_filter(10.)  # low-pass at around 10 Hz # doctest:+SKIP
+        >>> evoked.plot()  # doctest:+SKIP
+
+        References
+        ----------
+        .. [1] Savitzky, A., Golay, M.J.E. (1964). "Smoothing and
+               Differentiation of Data by Simplified Least Squares
+               Procedures". Analytical Chemistry 36 (8): 1627-39.
+        """  # noqa
+        from .evoked import Evoked
+        from .epochs import _BaseEpochs
+        if isinstance(self, Evoked):
+            data = self.data
+            axis = 1
+        elif isinstance(self, _BaseEpochs):
+            if not self.preload:
+                raise RuntimeError('data must be preloaded to filter')
+            data = self._data
+            axis = 2
+
+        h_freq = float(h_freq)
+        if h_freq >= self.info['sfreq'] / 2.:
+            raise ValueError('h_freq must be less than half the sample rate')
+
+        # savitzky-golay filtering
+        if not check_version('scipy', '0.14'):
+            raise RuntimeError('scipy >= 0.14 must be installed for savgol')
+        from scipy.signal import savgol_filter
+        window_length = (int(np.round(self.info['sfreq'] /
+                                      h_freq)) // 2) * 2 + 1
+        data[...] = savgol_filter(data, axis=axis, polyorder=5,
+                                  window_length=window_length)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/fixes.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/fixes.py
new file mode 100644
index 0000000..d8ceec7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/fixes.py
@@ -0,0 +1,888 @@
+"""Compatibility fixes for older version of python, numpy and scipy
+
+If you add content to this file, please give the version of the package
+at which the fix is no longer needed.
+
+# XXX : copied from scikit-learn
+
+"""
+# Authors: Emmanuelle Gouillart <emmanuelle.gouillart at normalesup.org>
+#          Gael Varoquaux <gael.varoquaux at normalesup.org>
+#          Fabian Pedregosa <fpedregosa at acm.org>
+#          Lars Buitinck <L.J.Buitinck at uva.nl>
+# License: BSD
+
+from __future__ import division
+import collections
+from operator import itemgetter
+import inspect
+
+import warnings
+import numpy as np
+import scipy
+from scipy import linalg, sparse
+from math import ceil, log
+from numpy.fft import irfft
+from distutils.version import LooseVersion
+from functools import partial
+from .externals import six
+from .externals.six.moves import copyreg, xrange
+from gzip import GzipFile
+
+
+###############################################################################
+# Misc
+
+class gzip_open(GzipFile):  # python2.6's GzipFile lacks context management
+
+    def __enter__(self):
+        if hasattr(GzipFile, '__enter__'):
+            return GzipFile.__enter__(self)
+        else:
+            return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if hasattr(GzipFile, '__exit__'):
+            return GzipFile.__exit__(self, exc_type, exc_value, traceback)
+        else:
+            return self.close()
+
+
+class _Counter(collections.defaultdict):
+    """Partial replacement for Python 2.7 collections.Counter."""
+    def __init__(self, iterable=(), **kwargs):
+        super(_Counter, self).__init__(int, **kwargs)
+        self.update(iterable)
+
+    def most_common(self):
+        return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
+
+    def update(self, other):
+        """Adds counts for elements in other"""
+        if isinstance(other, self.__class__):
+            for x, n in six.iteritems(other):
+                self[x] += n
+        else:
+            for x in other:
+                self[x] += 1
+
+try:
+    Counter = collections.Counter
+except AttributeError:
+    Counter = _Counter
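+
+# Example (a sketch): both implementations agree on basic counting,
+# e.g. Counter('abracadabra').most_common()[0] == ('a', 5).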
+
+
+def _unique(ar, return_index=False, return_inverse=False):
+    """A replacement for the np.unique that appeared in numpy 1.4.
+
+    While np.unique existed long before, keyword return_inverse was
+    only added in 1.4.
+    """
+    try:
+        ar = ar.flatten()
+    except AttributeError:
+        if not return_inverse and not return_index:
+            items = sorted(set(ar))
+            return np.asarray(items)
+        else:
+            ar = np.asarray(ar).flatten()
+
+    if ar.size == 0:
+        if return_inverse and return_index:
+            return ar, np.empty(0, np.bool), np.empty(0, np.bool)
+        elif return_inverse or return_index:
+            return ar, np.empty(0, np.bool)
+        else:
+            return ar
+
+    if return_inverse or return_index:
+        perm = ar.argsort()
+        aux = ar[perm]
+        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
+        if return_inverse:
+            iflag = np.cumsum(flag) - 1
+            iperm = perm.argsort()
+            if return_index:
+                return aux[flag], perm[flag], iflag[iperm]
+            else:
+                return aux[flag], iflag[iperm]
+        else:
+            return aux[flag], perm[flag]
+
+    else:
+        ar.sort()
+        flag = np.concatenate(([True], ar[1:] != ar[:-1]))
+        return ar[flag]
+
+if LooseVersion(np.__version__) < LooseVersion('1.5'):
+    unique = _unique
+else:
+    unique = np.unique
+
+
+def _bincount(X, weights=None, minlength=None):
+    """Replacing np.bincount in numpy < 1.6 to provide minlength."""
+    result = np.bincount(X, weights)
+    if minlength is None or len(result) >= minlength:
+        return result
+    out = np.zeros(minlength, np.int)
+    out[:len(result)] = result
+    return out
+
+if LooseVersion(np.__version__) < LooseVersion('1.6'):
+    bincount = _bincount
+else:
+    bincount = np.bincount
+
+
+def _copysign(x1, x2):
+    """Slow replacement for np.copysign, which was introduced in numpy 1.4"""
+    return np.abs(x1) * np.sign(x2)
+
+if not hasattr(np, 'copysign'):
+    copysign = _copysign
+else:
+    copysign = np.copysign
+
+
+def _in1d(ar1, ar2, assume_unique=False, invert=False):
+    """Replacement for in1d that is provided for numpy >= 1.4"""
+    # Ravel both arrays, behavior for the first array could be different
+    ar1 = np.asarray(ar1).ravel()
+    ar2 = np.asarray(ar2).ravel()
+
+    # This code is significantly faster when the condition is satisfied.
+    if len(ar2) < 10 * len(ar1) ** 0.145:
+        if invert:
+            mask = np.ones(len(ar1), dtype=np.bool)
+            for a in ar2:
+                mask &= (ar1 != a)
+        else:
+            mask = np.zeros(len(ar1), dtype=np.bool)
+            for a in ar2:
+                mask |= (ar1 == a)
+        return mask
+
+    # Otherwise use sorting
+    if not assume_unique:
+        ar1, rev_idx = unique(ar1, return_inverse=True)
+        ar2 = np.unique(ar2)
+
+    ar = np.concatenate((ar1, ar2))
+    # We need this to be a stable sort, so always use 'mergesort'
+    # here. The values from the first array should always come before
+    # the values from the second array.
+    order = ar.argsort(kind='mergesort')
+    sar = ar[order]
+    if invert:
+        bool_ar = (sar[1:] != sar[:-1])
+    else:
+        bool_ar = (sar[1:] == sar[:-1])
+    flag = np.concatenate((bool_ar, [invert]))
+    indx = order.argsort(kind='mergesort')[:len(ar1)]
+
+    if assume_unique:
+        return flag[indx]
+    else:
+        return flag[indx][rev_idx]
+
+
+if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
+    in1d = _in1d
+else:
+    in1d = np.in1d
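+
+# Example (a sketch): in1d([1, 2, 3], [2, 4]) returns
+# array([False,  True, False]).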
+
+
+def _digitize(x, bins, right=False):
+    """Replacement for digitize with right kwarg (numpy < 1.7).
+
+    Notes
+    -----
+    This fix is only meant for integer arrays. If ``right==True`` but either
+    ``x`` or ``bins`` are of a different type, a NotImplementedError will be
+    raised.
+    """
+    if right:
+        x = np.asarray(x)
+        bins = np.asarray(bins)
+        if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
+            raise NotImplementedError("Only implemented for integer input")
+        return np.digitize(x - 1e-5, bins)
+    else:
+        return np.digitize(x, bins)
+
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    digitize = _digitize
+else:
+    digitize = np.digitize
+
+
+def _tril_indices(n, k=0):
+    """Replacement for tril_indices that is provided for numpy >= 1.4"""
+    mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
+    indices = np.where(mask)
+
+    return indices
+
+if not hasattr(np, 'tril_indices'):
+    tril_indices = _tril_indices
+else:
+    tril_indices = np.tril_indices
+
+
+def _unravel_index(indices, dims):
+    """Add support for multiple indices in unravel_index that is provided
+    for numpy >= 1.4"""
+    indices_arr = np.asarray(indices)
+    if indices_arr.size == 1:
+        return np.unravel_index(indices, dims)
+    else:
+        if indices_arr.ndim != 1:
+            raise ValueError('indices should be one dimensional')
+
+        ndims = len(dims)
+        unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
+        for coord, idx in zip(unraveled_coords, indices_arr):
+            coord[:] = np.unravel_index(idx, dims)
+        return tuple(unraveled_coords.T)
+
+
+if LooseVersion(np.__version__) < LooseVersion('1.4'):
+    unravel_index = _unravel_index
+else:
+    unravel_index = np.unravel_index
+
+
+def _qr_economic_old(A, **kwargs):
+    """
+    Compat function for the QR decomposition in economic mode.
+    Scipy 0.9 changed the keyword econ=True to mode='economic'.
+    """
+    with warnings.catch_warnings(record=True):
+        return linalg.qr(A, econ=True, **kwargs)
+
+
+def _qr_economic_new(A, **kwargs):
+    return linalg.qr(A, mode='economic', **kwargs)
+
+
+if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
+    qr_economic = _qr_economic_old
+else:
+    qr_economic = _qr_economic_new
+
+
+def savemat(file_name, mdict, oned_as="column", **kwargs):
+    """MATLAB-format output routine that is compatible with SciPy 0.7's.
+
+    0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
+    value. It issues a warning if this is not provided, stating that "This will
+    change to 'row' in future versions."
+    """
+    import scipy.io
+    try:
+        return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
+    except TypeError:
+        return scipy.io.savemat(file_name, mdict, **kwargs)
+
+if hasattr(np, 'count_nonzero'):
+    from numpy import count_nonzero
+else:
+    def count_nonzero(X):
+        return len(np.flatnonzero(X))
+
+# little dance to see if np.copy has an 'order' keyword argument
+if 'order' in inspect.getargspec(np.copy)[0]:
+    def safe_copy(X):
+        # Copy, but keep the order
+        return np.copy(X, order='K')
+else:
+    # Before an 'order' argument was introduced, numpy wouldn't muck with
+    # the ordering
+    safe_copy = np.copy
+
+
+def _meshgrid(*xi, **kwargs):
+    """
+    Return coordinate matrices from coordinate vectors.
+
+    Make N-D coordinate arrays for vectorized evaluations of
+    N-D scalar/vector fields over N-D grids, given
+    one-dimensional coordinate arrays x1, x2,..., xn.
+
+    .. versionchanged:: 1.9
+       1-D and 0-D cases are allowed.
+
+    Parameters
+    ----------
+    x1, x2,..., xn : array_like
+        1-D arrays representing the coordinates of a grid.
+    indexing : {'xy', 'ij'}, optional
+        Cartesian ('xy', default) or matrix ('ij') indexing of output.
+        See Notes for more details.
+
+        .. versionadded:: 1.7.0
+    sparse : bool, optional
+        If True a sparse grid is returned in order to conserve memory.
+        Default is False.
+
+        .. versionadded:: 1.7.0
+    copy : bool, optional
+        If False, views into the original arrays are returned in order to
+        conserve memory.  Default is True.  Please note that
+        ``sparse=False, copy=False`` will likely return non-contiguous
+        arrays.  Furthermore, more than one element of a broadcast array
+        may refer to a single memory location.  If you need to write to the
+        arrays, make copies first.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    X1, X2,..., XN : ndarray
+        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni = len(xi)``,
+        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
+        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
+        with the elements of `xi` repeated to fill the matrix along
+        the first dimension for `x1`, the second for `x2` and so on.
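+
+    Examples
+    --------
+    A minimal sketch: with the default 'xy' indexing, the first two axes
+    are swapped, so vectors of lengths 3 and 2 yield (2, 3) outputs.
+
+    >>> xx, yy = _meshgrid([1, 2, 3], [4, 5])  # doctest: +SKIP
+    >>> xx.shape, yy.shape  # doctest: +SKIP
+    ((2, 3), (2, 3))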
+    """
+    ndim = len(xi)
+
+    copy_ = kwargs.pop('copy', True)
+    sparse = kwargs.pop('sparse', False)
+    indexing = kwargs.pop('indexing', 'xy')
+
+    if kwargs:
+        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
+                        % (list(kwargs)[0],))
+
+    if indexing not in ['xy', 'ij']:
+        raise ValueError(
+            "Valid values for `indexing` are 'xy' and 'ij'.")
+
+    s0 = (1,) * ndim
+    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
+              for i, x in enumerate(xi)]
+
+    shape = [x.size for x in output]
+
+    if indexing == 'xy' and ndim > 1:
+        # switch first and second axis
+        output[0].shape = (1, -1) + (1,) * (ndim - 2)
+        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
+        shape[0], shape[1] = shape[1], shape[0]
+
+    if sparse:
+        if copy_:
+            return [x.copy() for x in output]
+        else:
+            return output
+    else:
+        # Return the full N-D matrix (not only the 1-D vector)
+        if copy_:
+            mult_fact = np.ones(shape, dtype=int)
+            return [x * mult_fact for x in output]
+        else:
+            return np.broadcast_arrays(*output)
+
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    meshgrid = _meshgrid
+else:
+    meshgrid = np.meshgrid
+
+
+###############################################################################
+# Back porting firwin2 for older scipy
+
+# Original version of firwin2 from scipy ticket #457, submitted by "tash".
+#
+# Rewritten by Warren Weckesser, 2010.
+
+
+def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
+    """FIR filter design using the window method.
+
+    From the given frequencies `freq` and corresponding gains `gain`,
+    this function constructs an FIR filter with linear phase and
+    (approximately) the given frequency response.
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter.  `numtaps` must be less than
+        `nfreqs`.  If the gain at the Nyquist rate, `gain[-1]`, is not 0,
+        then `numtaps` must be odd.
+
+    freq : array-like, 1D
+        The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
+        Nyquist.  The Nyquist frequency can be redefined with the argument
+        `nyq`.
+
+        The values in `freq` must be nondecreasing.  A value can be repeated
+        once to implement a discontinuity.  The first value in `freq` must
+        be 0, and the last value must be `nyq`.
+
+    gain : array-like
+        The filter gains at the frequency sampling points.
+
+    nfreqs : int, optional
+        The size of the interpolation mesh used to construct the filter.
+        For most efficient behavior, this should be a power of 2 plus 1
+        (e.g, 129, 257, etc).  The default is one more than the smallest
+        power of 2 that is not less than `numtaps`.  `nfreqs` must be greater
+        than `numtaps`.
+
+    window : string or (string, float) or float, or None, optional
+        Window function to use. Default is "hamming".  See
+        `scipy.signal.get_window` for the complete list of possible values.
+        If None, no window function is applied.
+
+    nyq : float
+        Nyquist frequency.  Each frequency in `freq` must be between 0 and
+        `nyq` (inclusive).
+
+    Returns
+    -------
+    taps : numpy 1D array of length `numtaps`
+        The filter coefficients of the FIR filter.
+
+    Examples
+    --------
+    A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
+    that decreases linearly on [0.5, 1.0] from 1 to 0:
+
+    >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])  # doctest: +SKIP
+    >>> print(taps[72:78])  # doctest: +SKIP
+    [-0.02286961 -0.06362756  0.57310236  0.57310236 -0.06362756 -0.02286961]
+
+    See also
+    --------
+    scipy.signal.firwin
+
+    Notes
+    -----
+
+    From the given set of frequencies and gains, the desired response is
+    constructed in the frequency domain.  The inverse FFT is applied to the
+    desired response to create the associated convolution kernel, and the
+    first `numtaps` coefficients of this kernel, scaled by `window`, are
+    returned.
+
+    The FIR filter will have linear phase.  The filter is Type I if `numtaps`
+    is odd and Type II if `numtaps` is even.  Because Type II filters always
+    have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
+    is not zero.
+
+    .. versionadded:: 0.9.0
+
+    References
+    ----------
+    .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
+       Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
+       (See, for example, Section 7.4.)
+
+    .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
+       Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
+
+    """
+
+    if len(freq) != len(gain):
+        raise ValueError('freq and gain must be of same length.')
+
+    if nfreqs is not None and numtaps >= nfreqs:
+        raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
+                         'called with ntaps=%d and nfreqs=%s'
+                         % (numtaps, nfreqs))
+
+    if freq[0] != 0 or freq[-1] != nyq:
+        raise ValueError('freq must start with 0 and end with `nyq`.')
+    d = np.diff(freq)
+    if (d < 0).any():
+        raise ValueError('The values in freq must be nondecreasing.')
+    d2 = d[:-1] + d[1:]
+    if (d2 == 0).any():
+        raise ValueError('A value in freq must not occur more than twice.')
+
+    if numtaps % 2 == 0 and gain[-1] != 0.0:
+        raise ValueError("A filter with an even number of coefficients must "
+                         "have zero gain at the Nyquist rate.")
+
+    if nfreqs is None:
+        nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
+
+    # Tweak any repeated values in freq so that interp works.
+    eps = np.finfo(float).eps
+    for k in range(len(freq)):
+        if k < len(freq) - 1 and freq[k] == freq[k + 1]:
+            freq[k] = freq[k] - eps
+            freq[k + 1] = freq[k + 1] + eps
+
+    # Linearly interpolate the desired response on a uniform mesh `x`.
+    x = np.linspace(0.0, nyq, nfreqs)
+    fx = np.interp(x, freq, gain)
+
+    # Adjust the phases of the coefficients so that the first `ntaps` of the
+    # inverse FFT are the desired filter coefficients.
+    shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
+    fx2 = fx * shift
+
+    # Use irfft to compute the inverse FFT.
+    out_full = irfft(fx2)
+
+    if window is not None:
+        # Create the window to apply to the filter coefficients.
+        from scipy.signal.signaltools import get_window
+        wind = get_window(window, numtaps, fftbins=False)
+    else:
+        wind = 1
+
+    # Keep only the first `numtaps` coefficients in `out`, and multiply by
+    # the window.
+    out = out_full[:numtaps] * wind
+
+    return out
+
+
+def get_firwin2():
+    """Helper to get firwin2"""
+    try:
+        from scipy.signal import firwin2
+    except ImportError:
+        firwin2 = _firwin2
+    return firwin2
+
+
+def _filtfilt(*args, **kwargs):
+    """wrap filtfilt, excluding padding arguments"""
+    from scipy.signal import filtfilt
+    # cut out filter args
+    if len(args) > 4:
+        args = args[:4]
+    if 'padlen' in kwargs:
+        del kwargs['padlen']
+    return filtfilt(*args, **kwargs)
+
+
+def get_filtfilt():
+    """Helper to get filtfilt from scipy"""
+    from scipy.signal import filtfilt
+
+    if 'padlen' in inspect.getargspec(filtfilt)[0]:
+        return filtfilt
+
+    return _filtfilt
+
+
+def _get_argrelmax():
+    try:
+        from scipy.signal import argrelmax
+    except ImportError:
+        argrelmax = _argrelmax
+    return argrelmax
+
+
+def _argrelmax(data, axis=0, order=1, mode='clip'):
+    """Calculate the relative maxima of `data`.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative maxima.
+    axis : int, optional
+        Axis over which to select from `data`.  Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n, n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated.
+        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
+        as the same as the last (or first) element).
+        Default 'clip'.  See `numpy.take`.
+
+    Returns
+    -------
+    extrema : tuple of ndarrays
+        Indices of the maxima in arrays of integers.  ``extrema[k]`` is
+        the array of indices of axis `k` of `data`.  Note that the
+        return value is a tuple even when `data` is one-dimensional.
+    """
+    comparator = np.greater
+    if (int(order) != order) or (order < 1):
+        raise ValueError('Order must be an int >= 1')
+    datalen = data.shape[axis]
+    locs = np.arange(0, datalen)
+    results = np.ones(data.shape, dtype=bool)
+    main = data.take(locs, axis=axis, mode=mode)
+    for shift in xrange(1, order + 1):
+        plus = data.take(locs + shift, axis=axis, mode=mode)
+        minus = data.take(locs - shift, axis=axis, mode=mode)
+        results &= comparator(main, plus)
+        results &= comparator(main, minus)
+        if not results.any():
+            return results
+    return np.where(results)
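+
+
+# A minimal usage sketch of the backport: find the lone local maximum of a
+# toy signal (index 2, since 3 exceeds both of its neighbors):
+#
+#     >>> argrelmax = _get_argrelmax()
+#     >>> argrelmax(np.array([1, 2, 3, 2, 1]))
+#     (array([2]),)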
+
+
+###############################################################################
+# Back porting matrix_rank for numpy <= 1.7.1
+
+
+def _matrix_rank(M, tol=None):
+    """ Return matrix rank of array using SVD method
+
+    Rank of the array is the number of SVD singular values of the array that
+    are greater than `tol`.
+
+    Parameters
+    ----------
+    M : {(M,), (M, N)} array_like
+        array of <=2 dimensions
+    tol : {None, float}, optional
+       threshold below which SVD values are considered zero. If `tol` is
+       None, and ``S`` is an array with singular values for `M`, and
+       ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+       set to ``S.max() * max(M.shape) * eps``.
+
+    Notes
+    -----
+    The default threshold to detect rank deficiency is a test on the magnitude
+    of the singular values of `M`. By default, we identify singular values less
+    than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
+    the symbols defined above). This is the algorithm MATLAB uses [1]. It also
+    appears in *Numerical recipes* in the discussion of SVD solutions for
+    linear least squares [2].
+
+    This default threshold is designed to detect rank deficiency accounting
+    for the numerical errors of the SVD computation. Imagine that there is a
+    column in `M` that is an exact (in floating point) linear combination of
+    other columns in `M`. Computing the SVD on `M` will not produce a
+    singular value exactly equal to 0 in general: any difference of the
+    smallest SVD value from 0 will be caused by numerical imprecision in the
+    calculation of the SVD. Our threshold for small SVD values takes this
+    numerical imprecision into account, and the default threshold will detect
+    such numerical rank deficiency. The threshold may declare a matrix `M`
+    rank deficient even if the linear combination of some columns of `M` is
+    not exactly equal to another column of `M` but only numerically very
+    close to another column of `M`.
+
+    We chose our default threshold because it is in wide use. Other
+    thresholds are possible. For example, elsewhere in the 2007 edition of
+    *Numerical recipes* there is an alternative threshold of ``S.max() *
+    np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
+    this threshold as being based on "expected roundoff error" (p 71).
+
+    The thresholds above deal with floating point roundoff error in the
+    calculation of the SVD. However, you may have more information about the
+    sources of error in `M` that would make you consider other tolerance
+    values to detect *effective* rank deficiency. The most useful measure of
+    the tolerance depends on the operations you intend to use on your matrix.
+    For example, if your data come from uncertain measurements with
+    uncertainties greater than floating point epsilon, choosing a tolerance
+    near that uncertainty may be preferable. The tolerance may be absolute if
+    the uncertainties are absolute rather than relative.
+
+    References
+    ----------
+    .. [1] MATLAB reference documentation, "Rank"
+           http://www.mathworks.com/help/techdoc/ref/rank.html
+    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
+           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
+           page 795.
+
+    Examples
+    --------
+    >>> from numpy.linalg import matrix_rank
+    >>> matrix_rank(np.eye(4)) # Full rank matrix
+    4
+    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
+    >>> matrix_rank(I)
+    3
+    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
+    1
+    >>> matrix_rank(np.zeros((4,)))
+    0
+    """
+    M = np.asarray(M)
+    if M.ndim > 2:
+        raise TypeError('array should have 2 or fewer dimensions')
+    if M.ndim < 2:
+        return np.int(not all(M == 0))
+    S = np.linalg.svd(M, compute_uv=False)
+    if tol is None:
+        tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
+    return np.sum(S > tol)
+
+if LooseVersion(np.__version__) > '1.7.1':
+    from numpy.linalg import matrix_rank
+else:
+    matrix_rank = _matrix_rank
+
+
+def _reconstruct_partial(func, args, kwargs):
+    """Helper to pickle partial functions"""
+    return partial(func, *args, **(kwargs or {}))
+
+
+def _reduce_partial(p):
+    """Helper to pickle partial functions"""
+    return _reconstruct_partial, (p.func, p.args, p.keywords)
+
+# Registering a reducer adds pickling support for partial functions
+# (needed on Python 2.6). Please always import partial from here.
+copyreg.pickle(partial, _reduce_partial)
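+
+# A minimal sketch of what the registration enables (operator.add is used
+# because the wrapped callable itself must be picklable):
+#
+#     >>> import pickle, operator
+#     >>> add3 = partial(operator.add, 3)
+#     >>> pickle.loads(pickle.dumps(add3))(4)
+#     7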
+
+
+def normalize_colors(vmin, vmax, clip=False):
+    """Helper to handle matplotlib API"""
+    import matplotlib.pyplot as plt
+    try:
+        return plt.Normalize(vmin, vmax, clip=clip)
+    except AttributeError:
+        return plt.normalize(vmin, vmax, clip=clip)
+
+
+def assert_true(expr, msg='False is not True'):
+    """Fake assert_true without message"""
+    if not expr:
+        raise AssertionError(msg)
+
+
+def assert_is(expr1, expr2, msg=None):
+    """Fake assert_is without message"""
+    assert_true(expr1 is expr2, msg)
+
+
+def assert_is_not(expr1, expr2, msg=None):
+    """Fake assert_is_not without message"""
+    assert_true(expr1 is not expr2, msg)
+
+
+def _sparse_block_diag(mats, format=None, dtype=None):
+    """An implementation of scipy.sparse.block_diag since old versions of
+    scipy don't have it. Forms a sparse matrix by stacking matrices in block
+    diagonal form.
+
+    Parameters
+    ----------
+    mats : list of matrices
+        Input matrices.
+    format : str, optional
+        The sparse format of the result (e.g. "csr"). If not given, the
+        matrix is returned in "coo" format.
+    dtype : dtype specifier, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of blocks.
+
+    Returns
+    -------
+    res : sparse matrix
+    """
+    nmat = len(mats)
+    rows = []
+    for ia, a in enumerate(mats):
+        row = [None] * nmat
+        row[ia] = a
+        rows.append(row)
+    return sparse.bmat(rows, format=format, dtype=dtype)
+
+try:
+    from scipy.sparse import block_diag as sparse_block_diag
+except Exception:
+    sparse_block_diag = _sparse_block_diag
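+
+# A minimal sketch: blocks of shapes (3, 3) and (2, 2) stack into a (5, 5)
+# block-diagonal sparse matrix, whichever implementation was selected:
+#
+#     >>> a = sparse.csr_matrix(np.eye(3))
+#     >>> b = sparse.csr_matrix(np.eye(2))
+#     >>> sparse_block_diag([a, b]).shape
+#     (5, 5)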
+
+
+def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+    """
+    Returns a boolean array where two arrays are element-wise equal within a
+    tolerance.
+
+    The tolerance values are positive, typically very small numbers.  The
+    relative difference (`rtol` * abs(`b`)) and the absolute difference
+    `atol` are added together to compare against the absolute difference
+    between `a` and `b`.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    rtol : float
+        The relative tolerance parameter (see Notes).
+    atol : float
+        The absolute tolerance parameter (see Notes).
+    equal_nan : bool
+        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
+        considered equal to NaN's in `b` in the output array.
+
+    Returns
+    -------
+    y : array_like
+        Returns a boolean array of where `a` and `b` are equal within the
+        given tolerance. If both `a` and `b` are scalars, returns a single
+        boolean value.
+
+    See Also
+    --------
+    allclose
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    For finite values, isclose uses the following equation to test whether
+    two floating point values are equivalent.
+
+     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+    The above equation is not symmetric in `a` and `b`, so that
+    `isclose(a, b)` might be different from `isclose(b, a)` in
+    some rare cases.
+
+    Examples
+    --------
+    >>> isclose([1e10,1e-7], [1.00001e10,1e-8])
+    array([ True, False], dtype=bool)
+    >>> isclose([1e10,1e-8], [1.00001e10,1e-9])
+    array([ True,  True], dtype=bool)
+    >>> isclose([1e10,1e-8], [1.0001e10,1e-9])
+    array([False,  True], dtype=bool)
+    >>> isclose([1.0, np.nan], [1.0, np.nan])
+    array([ True, False], dtype=bool)
+    >>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+    array([ True,  True], dtype=bool)
+    """
+    def within_tol(x, y, atol, rtol):
+        with np.errstate(invalid='ignore'):
+            result = np.less_equal(abs(x - y), atol + rtol * abs(y))
+        if np.isscalar(a) and np.isscalar(b):
+            result = bool(result)
+        return result
+
+    x = np.array(a, copy=False, subok=True, ndmin=1)
+    y = np.array(b, copy=False, subok=True, ndmin=1)
+
+    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+    # This will cause casting of x later. Also, make sure to allow subclasses
+    # (e.g., for numpy.ma).
+    dt = np.core.multiarray.result_type(y, 1.)
+    y = np.array(y, dtype=dt, copy=False, subok=True)
+
+    xfin = np.isfinite(x)
+    yfin = np.isfinite(y)
+    if np.all(xfin) and np.all(yfin):
+        return within_tol(x, y, atol, rtol)
+    else:
+        finite = xfin & yfin
+        cond = np.zeros_like(finite, subok=True)
+        # Because we're using boolean indexing, x & y must be the same shape.
+        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
+        # lib.stride_tricks, though, so we can't import it here.
+        x = x * np.ones_like(cond)
+        y = y * np.ones_like(cond)
+        # Avoid subtraction with infinite/nan values...
+        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
+        # Check for equality of infinite values...
+        cond[~finite] = (x[~finite] == y[~finite])
+        if equal_nan:
+            # Make NaN == NaN
+            both_nan = np.isnan(x) & np.isnan(y)
+            cond[both_nan] = both_nan[both_nan]
+        return cond
+
+
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    isclose = _isclose
+else:
+    isclose = np.isclose
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/__init__.py
new file mode 100644
index 0000000..b2da0c0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/__init__.py
@@ -0,0 +1,19 @@
+from .forward import (Forward, read_forward_solution, write_forward_solution,
+                      is_fixed_orient, _read_forward_meas_info,
+                      write_forward_meas_info,
+                      compute_orient_prior, compute_depth_prior,
+                      apply_forward, apply_forward_raw,
+                      restrict_forward_to_stc, restrict_forward_to_label,
+                      do_forward_solution, average_forward_solutions,
+                      _restrict_gain_matrix, _stc_src_sel,
+                      _fill_measurement_info, _apply_forward,
+                      _subject_from_forward, convert_forward_solution,
+                      _to_fixed_ori, prepare_bem_model, _merge_meg_eeg_fwds)
+from ._make_forward import (make_forward_solution, _prepare_for_forward,
+                            _prep_meg_channels, _prep_eeg_channels,
+                            _to_forward_dict, _create_meg_coils)
+from ._compute_forward import (_magnetic_dipole_field_vec, _compute_forwards,
+                               _concatenate_coils)
+from ._field_interpolation import (_make_surface_mapping, make_field_map,
+                                   _as_meg_type_evoked, _map_meg_channels)
+from . import _lead_dots  # for testing purposes
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_compute_forward.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_compute_forward.py
new file mode 100644
index 0000000..583f0bb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_compute_forward.py
@@ -0,0 +1,863 @@
+# -*- coding: utf-8 -*-
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larsoner at uw.edu>
+#          Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+#
+# Many of the idealized equations behind these calculations can be found in:
+# 1) Realistic conductivity geometry model of the human head for interpretation
+#        of neuromagnetic data. Hamalainen and Sarvas, 1989. Specific to MNE
+# 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and
+#        Lewis, 1999. Generalized discussion of forward solutions.
+
+import numpy as np
+from copy import deepcopy
+
+from ..surface import (fast_cross_3d, _find_nearest_tri_pt, _get_tri_supp_geom,
+                       _triangle_coords)
+from ..io.constants import FIFF
+from ..transforms import apply_trans
+from ..utils import logger, verbose
+from ..parallel import parallel_func
+from ..io.compensator import get_current_comp, make_compensator
+from ..io.pick import pick_types
+
+
+# #############################################################################
+# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX
+
+def _dup_coil_set(coils, coord_frame, t):
+    """Make a duplicate."""
+    if t is not None and coord_frame != t['from']:
+        raise RuntimeError('transformation frame does not match the coil set')
+    coils = deepcopy(coils)
+    if t is not None:
+        coord_frame = t['to']
+        for coil in coils:
+            coil['r0'] = apply_trans(t['trans'], coil['r0'])
+            coil['ex'] = apply_trans(t['trans'], coil['ex'], False)
+            coil['ey'] = apply_trans(t['trans'], coil['ey'], False)
+            coil['ez'] = apply_trans(t['trans'], coil['ez'], False)
+            coil['rmag'] = apply_trans(t['trans'], coil['rmag'])
+            coil['cosmag'] = apply_trans(t['trans'], coil['cosmag'], False)
+            coil['coord_frame'] = t['to']
+    return coils, coord_frame
+
+
+def _check_coil_frame(coils, coord_frame, bem):
+    """Check to make sure the coils are in the correct coordinate frame."""
+    if coord_frame != FIFF.FIFFV_COORD_MRI:
+        if coord_frame == FIFF.FIFFV_COORD_HEAD:
+            # Make a transformed duplicate
+            coils, coord_frame = _dup_coil_set(coils, coord_frame,
+                                               bem['head_mri_t'])
+        else:
+            raise RuntimeError('Bad coil coordinate frame %s' % coord_frame)
+    return coils, coord_frame
+
+
+def _lin_field_coeff(surf, mult, rmags, cosmags, ws, n_int, n_jobs):
+    """Parallel wrapper for _do_lin_field_coeff to compute linear coefficients.
+
+    Parameters
+    ----------
+    surf : dict
+        Dict containing information for one surface of the BEM
+    mult : float
+        Multiplier for particular BEM surface (Iso Skull Approach discussed in
+        Mosher et al., 1999 and Hamalainen and Sarvas, 1989 Section III?)
+    rmags : ndarray, shape (n_integration_pts, 3)
+        3D positions of MEG coil integration points (from coil['rmag'])
+    cosmags : ndarray, shape (n_integration_pts, 3)
+        Direction of the MEG coil integration points (from coil['cosmag'])
+    ws : ndarray, shape (n_sensor_pts,)
+        Weights for MEG coil integration points
+    n_int : ndarray, shape (n_MEG_sensors,)
+        Number of integration points for each MEG sensor
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    coeff : list
+        Linear coefficients with lead fields for each BEM vertex on each sensor
+        (?)
+    """
+    parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs)
+    nas = np.array_split
+    coeffs = parallel(p_fun(surf['rr'], t, tn, ta, rmags, cosmags, ws, n_int)
+                      for t, tn, ta in zip(nas(surf['tris'], n_jobs),
+                                           nas(surf['tri_nn'], n_jobs),
+                                           nas(surf['tri_area'], n_jobs)))
+    return mult * np.sum(coeffs, axis=0)
+
+
+def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, n_int):
+    """Compute field coefficients (parallel-friendly).
+
+    See section IV of Mosher et al., 1999 (specifically equation 35).
+
+    Parameters
+    ----------
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        Positions on one BEM surface in 3-space. 2562 BEM vertices for BEM with
+        5120 triangles (ico-4)
+    tris : ndarray, shape (n_BEM_vertices, 3)
+        Vertex indices for each triangle (referring to bem_rr)
+    tn : ndarray, shape (n_BEM_vertices, 3)
+        Triangle unit normal vectors
+    ta : ndarray, shape (n_BEM_vertices,)
+        Triangle areas
+    rmags : ndarray, shape (n_sensor_pts, 3)
+        3D positions of MEG coil integration points (from coil['rmag'])
+    cosmags : ndarray, shape (n_sensor_pts, 3)
+        Direction of the MEG coil integration points (from coil['cosmag'])
+    ws : ndarray, shape (n_sensor_pts,)
+        Weights for MEG coil integration points
+    n_int : ndarray, shape (n_MEG_sensors,)
+        Number of integration points for each MEG sensor
+
+    Returns
+    -------
+    coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices)
+        Linear coefficients with effect of each BEM vertex on each sensor (?)
+    """
+    coeff = np.zeros((len(n_int), len(bem_rr)))
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+    for tri, tri_nn, tri_area in zip(tris, tn, ta):
+        # Accumulate the coefficients for each triangle node and add to the
+        # corresponding coefficient matrix
+        tri_rr = bem_rr[tri]
+
+        # The following is equivalent to:
+        # for j, coil in enumerate(coils['coils']):
+        #     x = func(coil['rmag'], coil['cosmag'],
+        #              tri_rr, tri_nn, tri_area)
+        #     res = np.sum(coil['w'][np.newaxis, :] * x, axis=1)
+        #     coeff[j][tri + off] += mult * res
+
+        # Simple version (bem_lin_field_coeffs_simple)
+        zz = []
+        for trr in tri_rr:
+            diff = rmags - trr
+            dl = np.sum(diff * diff, axis=1)
+            c = fast_cross_3d(diff, tri_nn[np.newaxis, :])
+            x = tri_area * np.sum(c * cosmags, axis=1) / \
+                (3.0 * dl * np.sqrt(dl))
+            zz += [np.bincount(bins, weights=x * ws, minlength=len(n_int))]
+        coeff[:, tri] += np.array(zz).T
+    return coeff
+
+
+def _concatenate_coils(coils):
+    """Helper to concatenate MEG coil parameters."""
+    rmags = np.concatenate([coil['rmag'] for coil in coils])
+    cosmags = np.concatenate([coil['cosmag'] for coil in coils])
+    ws = np.concatenate([coil['w'] for coil in coils])
+    n_int = np.array([len(coil['rmag']) for coil in coils])
+    return rmags, cosmags, ws, n_int
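+
+# Shape sketch (hypothetical input): two coils with 2 and 3 integration
+# points give rmags/cosmags of shape (5, 3), ws of shape (5,) and
+# n_int == array([2, 3]); np.repeat(np.arange(2), n_int) then maps each
+# integration point back to its coil as [0, 0, 1, 1, 1].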
+
+
+def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs):
+    """Set up for computing the solution at a set of MEG coils.
+
+    Parameters
+    ----------
+    bem : dict
+        BEM information
+    coils : list of dict, len(n_MEG_sensors)
+        MEG sensor information dicts
+    coord_frame : int
+        Class constant identifying coordinate frame
+    mults : ndarray, shape (1, n_BEM_vertices)
+        Multiplier for every vertex in BEM
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    sol : ndarray, shape (n_MEG_sensors, n_BEM_vertices)
+        MEG solution
+    """
+    # Make sure MEG coils are in MRI coordinate frame to match BEM coords
+    coils, coord_frame = _check_coil_frame(coils, coord_frame, bem)
+
+    # leaving this in in case we want to easily add in the future
+    # if method != 'simple':  # in ['ferguson', 'urankar']:
+    #     raise NotImplementedError
+
+    # Compute the weighting factors to obtain the magnetic field in the linear
+    # potential approximation
+
+    # Process each of the surfaces
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]])
+    coeff = np.empty((len(n_int), lens[-1]))  # shape(n_coils, n_BEM_verts)
+
+    # Compute coeffs for each surface, one at a time
+    for o1, o2, surf, mult in zip(lens[:-1], lens[1:],
+                                  bem['surfs'], bem['field_mult']):
+        coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags, ws,
+                                           n_int, n_jobs)
+    # put through the bem
+    sol = np.dot(coeff, bem['solution'])
+    sol *= mults
+    return sol
+
+
+def _bem_specify_els(bem, els, mults):
+    """Set up for computing the solution at a set of EEG electrodes.
+
+    Parameters
+    ----------
+    bem : dict
+        BEM information
+    els : list of dict, len(n_EEG_sensors)
+        List of EEG sensor information dicts
+    mults : ndarray, shape (1, n_BEM_vertices)
+        Multiplier for every vertex in BEM
+
+    Returns
+    -------
+    sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices)
+        EEG solution
+    """
+    sol = np.zeros((len(els), bem['solution'].shape[1]))
+    scalp = bem['surfs'][0]
+    # Get supplementary geometry information for tris and rr
+    scalp['geom'] = _get_tri_supp_geom(scalp['tris'], scalp['rr'])
+    inds = np.arange(len(scalp['tris']))  # Inds of every BEM vertex
+
+    # Iterate over all electrodes
+    # In principle this could be parallelized, but pickling overhead is huge
+    # (makes it slower than non-parallel)
+    for k, el in enumerate(els):
+        # Get electrode and reference position in head coords
+        el_r = apply_trans(bem['head_mri_t']['trans'], el['rmag'])
+        # Iterate over all integration points
+        for elw, r in zip(el['w'], el_r):
+            # Get index of closest tri on scalp BEM to electrode position
+            best = _find_nearest_tri_pt(inds, r, scalp['geom'], True)[2]
+            # Calculate a linear interpolation between the vertex values
+            tri = scalp['tris'][best]  # Get 3 vertex indices of closest tri
+            # Get coords of pt projected onto closest triangle
+            x, y, z = _triangle_coords(r, scalp['geom'], best)
+            w = elw * np.array([(1.0 - x - y), x, y])
+            amt = np.dot(w, bem['solution'][tri])
+            sol[k] += amt
+    sol *= mults
+    return sol
+
+
+# #############################################################################
+# COMPENSATION
+
+def _make_ctf_comp_coils(info, coils):
+    """Get the correct compensator for CTF coils."""
+    # adapted from mne_make_ctf_comp() from mne_ctf_comp.c
+    logger.info('Setting up compensation data...')
+    comp_num = get_current_comp(info)
+    if comp_num is None or comp_num == 0:
+        logger.info('    No compensation set. Nothing more to do.')
+        return None
+
+    # Need to meaningfully populate comp['set'] dict a.k.a. compset
+    n_comp_ch = sum([c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
+    logger.info('    %d out of %d channels have the compensation set.'
+                % (n_comp_ch, len(coils)))
+
+    # Find the desired compensation data matrix
+    compensator = make_compensator(info, 0, comp_num, True)
+    logger.info('    Desired compensation data (%s) found.' % comp_num)
+    logger.info('    All compensation channels found.')
+    logger.info('    Preselector created.')
+    logger.info('    Compensation data matrix created.')
+    logger.info('    Postselector created.')
+    return compensator
+
+
+# #############################################################################
+# BEM COMPUTATION
+
+_MAG_FACTOR = 1e-7  # μ_0 / (4π)
+
+# def _bem_inf_pot(rd, Q, rp):
+#     """The infinite medium potential in one direction. See Eq. (8) in
+#     Mosher, 1999"""
+#     NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation
+#     diff = rp - rd  # (Observation point position) - (Source position)
+#     diff2 = np.sum(diff * diff, axis=1)  # Squared magnitude of diff
+#     # (Dipole moment) dot (diff) / (magnitude ^ 3)
+#     return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2))
+
+
+def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None):
+    """Compute the infinite medium potential in all 3 directions.
+
+    Parameters
+    ----------
+    mri_rr : ndarray, shape (n_dipole_vertices, 3)
+        Chunk of 3D dipole positions in MRI coordinates
+    bem_rr: ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for one BEM surface
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+
+    Returns
+    -------
+    ndarray : shape(n_dipole_vertices, 3, n_BEM_vertices)
+    """
+    # NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation
+    # Get position difference vector between BEM vertex and dipole
+    diff = bem_rr.T[np.newaxis, :, :] - mri_rr[:, :, np.newaxis]
+    diff_norm = np.sum(diff * diff, axis=1)
+    diff_norm *= np.sqrt(diff_norm)  # Position difference magnitude cubed
+    diff_norm[diff_norm == 0] = 1  # avoid nans
+    if mri_Q is None:  # save time when mri_Q=np.eye(3) (e.g., MEG sensors)
+        return diff / diff_norm[:, np.newaxis, :]
+    else:  # get components in each direction (e.g., EEG sensors)
+        return np.einsum('ijk,mj->imk', diff, mri_Q) / diff_norm[:,
+                                                                 np.newaxis, :]
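+
+# Index sketch for the einsum above: entry [i, m, k] equals
+# np.dot(mri_Q[m], diff[i, :, k]) / |bem_rr[k] - mri_rr[i]| ** 3, i.e. the
+# infinite-medium potential at BEM vertex k of a unit dipole at mri_rr[i]
+# pointing along MRI basis direction m.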
+
+
+# This function has been refactored to process all points simultaneously
+# def _bem_inf_field(rd, Q, rp, d):
+# """Infinite-medium magnetic field. See (7) in Mosher, 1999"""
+#     # Get vector from source to sensor integration point
+#     diff = rp - rd
+#     diff2 = np.sum(diff * diff, axis=1)  # Get magnitude of diff
+#
+#     # Compute cross product between diff and dipole to get magnetic field at
+#     # integration point
+#     x = fast_cross_3d(Q[np.newaxis, :], diff)
+#
+#     # Take magnetic field dotted by integration point normal to get magnetic
+#     # field threading the current loop. Divide by R^3 (equivalently, R^2 * R)
+#     return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2))
+
+
+def _bem_inf_fields(rr, rmag, cosmag):
+    """Compute infinite-medium magnetic field at one MEG sensor from all
+    dipoles in all 3 basis directions.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_source_points, 3)
+        3D dipole source positions
+    rmag : ndarray, shape (n_sensor_pts, 3)
+        3D positions of 1 MEG coil's integration points (from coil['rmag'])
+    cosmag : ndarray, shape (n_sensor_pts, 3)
+        Direction of 1 MEG coil's integration points (from coil['cosmag'])
+
+    Returns
+    -------
+    ndarray, shape (n_dipoles, 3, n_integration_pts)
+        Magnetic field from all dipoles at each MEG sensor integration point
+    """
+    # rr, rmag refactored according to Equation (19) in Mosher, 1999
+    # Knowing that we're doing all directions, refactor above function:
+
+    diff = rmag.T[np.newaxis, :, :] - rr[:, :, np.newaxis]
+    diff_norm = np.sum(diff * diff, axis=1)
+    diff_norm *= np.sqrt(diff_norm)  # Get magnitude of distance cubed
+    diff_norm[diff_norm == 0] = 1  # avoid nans
+
+    # This is the result of cross-prod calcs with basis vectors,
+    # as if we had taken (Q=np.eye(3)), then multiplied by cosmags
+    # factor, and then summed across directions
+    x = np.array([diff[:, 1] * cosmag[:, 2] - diff[:, 2] * cosmag[:, 1],
+                  diff[:, 2] * cosmag[:, 0] - diff[:, 0] * cosmag[:, 2],
+                  diff[:, 0] * cosmag[:, 1] - diff[:, 1] * cosmag[:, 0]])
+    return np.rollaxis(x / diff_norm, 1)
+
+
+def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
+                      coil_type):
+    """Calculate the magnetic field or electric potential forward solution.
+
+    The code is very similar between EEG and MEG potentials, so combine them.
+    This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field")
+    and "fwd_bem_pot_els" in MNE-C.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions
+    mri_rr : ndarray, shape (n_dipoles, 3)
+        3D source positions in MRI coordinates
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+    coils : list of dict, len(sensors)
+        List of sensors where each element contains sensor specific information
+    solution : ndarray, shape (n_sensors, n_BEM_rr)
+        Comes from _bem_specify_coils
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for all surfaces in the BEM
+    n_jobs : int
+        Number of jobs to run in parallel
+    coil_type : str
+        'meg' or 'eeg'
+
+    Returns
+    -------
+    B : ndarray, shape (n_dipoles * 3, n_sensors)
+        Forward solution for a set of sensors
+    """
+    # Both MEG and EEG have the infinite-medium potentials
+    # This could be just vectorized, but eats too much memory, so instead we
+    # reduce memory by chunking within _do_inf_pots and parallelize, too:
+    parallel, p_fun, _ = parallel_func(_do_inf_pots, n_jobs)
+    nas = np.array_split
+    B = np.sum(parallel(p_fun(mri_rr, sr.copy(), mri_Q, sol.copy())
+                        for sr, sol in zip(nas(bem_rr, n_jobs),
+                                           nas(solution.T, n_jobs))), axis=0)
+    # The copy()s above should make it so the whole objects don't need to be
+    # pickled...
+
+    # Only MEG coils are sensitive to the primary current distribution.
+    if coil_type == 'meg':
+        # Primary current contribution (can be calc. in coil/dipole coords)
+        parallel, p_fun, _ = parallel_func(_do_prim_curr, n_jobs)
+        pcc = np.concatenate(parallel(p_fun(rr, c)
+                                      for c in nas(coils, n_jobs)), axis=1)
+        B += pcc
+        B *= _MAG_FACTOR
+    return B
+
+
+def _do_prim_curr(rr, coils):
+    """Calculate primary currents in a set of MEG coils.
+
+    See Mosher et al., 1999 Section II for discussion of primary vs. volume
+    currents.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in head coordinates
+    coils : list of dict
+        List of MEG coils where each element contains coil specific information
+
+    Returns
+    -------
+    pc : ndarray, shape (n_sources, n_MEG_sensors)
+        Primary current for set of MEG coils due to all sources
+    """
+    pc = np.empty((len(rr) * 3, len(coils)))
+    for ci, c in enumerate(coils):
+        # For all integration points, multiply by weights, sum across pts
+        # and then flatten
+        pc[:, ci] = np.sum(c['w'] * _bem_inf_fields(rr, c['rmag'],
+                                                    c['cosmag']), 2).ravel()
+    return pc
+
+
+def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol):
+    """Calculate infinite potentials for MEG or EEG sensors using chunks.
+
+    Parameters
+    ----------
+    mri_rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in MRI coordinates
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for all surfaces in the BEM
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+    sol : ndarray, shape (n_sensors_subset, n_BEM_vertices_subset)
+        Comes from _bem_specify_coils
+
+    Returns
+    -------
+    B : ndarray, (n_dipoles * 3, n_sensors)
+        Forward solution for sensors due to volume currents
+    """
+
+    # Doing work of 'fwd_bem_pot_calc' in MNE-C
+    # The following code is equivalent to this, but saves memory
+    # v0s = _bem_inf_pots(rr, bem_rr, Q)  # n_rr x 3 x n_bem_rr
+    # v0s.shape = (len(rr) * 3, v0s.shape[2])
+    # B = np.dot(v0s, sol)
+
+    # We chunk the source mri_rr's in order to save memory
+    bounds = np.r_[np.arange(0, len(mri_rr), 1000), len(mri_rr)]
+    B = np.empty((len(mri_rr) * 3, sol.shape[1]))
+    for bi in range(len(bounds) - 1):
+        # v0 in Hamalainen et al., 1989 == v_inf in Mosher, et al., 1999
+        v0s = _bem_inf_pots(mri_rr[bounds[bi]:bounds[bi + 1]], bem_rr, mri_Q)
+        v0s.shape = (v0s.shape[0] * 3, v0s.shape[2])
+        B[3 * bounds[bi]:3 * bounds[bi + 1]] = np.dot(v0s, sol)
+    return B
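+
+# Chunking sketch: with len(mri_rr) == 2500 the bounds above are
+# [0, 1000, 2000, 2500], so at most 1000 sources (3000 rows of B) have
+# their dense v0s potentials in memory at once.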
+
+
+# #############################################################################
+# SPHERE COMPUTATION
+
+def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, sphere, bem_rr,
+                         n_jobs, coil_type):
+    """Do potential or field for spherical model."""
+    fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field
+    parallel, p_fun, _ = parallel_func(fun, n_jobs)
+    B = np.concatenate(parallel(p_fun(r, coils, sphere)
+                       for r in np.array_split(rr, n_jobs)))
+    return B
+
+
+def _sphere_field(rrs, coils, sphere):
+    """Compute field for spherical model using Jukka Sarvas' field computation.
+
+    Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the
+    biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22.
+
+    The formulas have been manipulated for efficient computation
+    by Matti Hamalainen, February 1990
+    """
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+
+    # Shift to the sphere model coordinates
+    rrs = rrs - sphere['r0']
+
+    B = np.zeros((3 * len(rrs), len(coils)))
+    for ri, rr in enumerate(rrs):
+        # Check for a dipole at the origin
+        if np.sqrt(np.dot(rr, rr)) <= 1e-10:
+            continue
+        this_poss = rmags - sphere['r0']
+
+        # Vector from dipole to the field point
+        a_vec = this_poss - rr
+        a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
+        r = np.sqrt(np.sum(this_poss * this_poss, axis=1))
+        rr0 = np.sum(this_poss * rr, axis=1)
+        ar = (r * r) - rr0
+        ar0 = ar / a
+        F = a * (r * a + ar)
+        gr = (a * a) / r + ar0 + 2.0 * (a + r)
+        g0 = a + 2 * r + ar0
+        # Compute the dot products needed
+        re = np.sum(this_poss * cosmags, axis=1)
+        r0e = np.sum(rr * cosmags, axis=1)
+        g = (g0 * r0e - gr * re) / (F * F)
+        good = (a > 0) | (r > 0) | ((a * r) + 1 > 1e-5)
+        v1 = fast_cross_3d(rr[np.newaxis, :], cosmags)
+        v2 = fast_cross_3d(rr[np.newaxis, :], this_poss)
+        xx = ((good * ws)[:, np.newaxis] *
+              (v1 / F[:, np.newaxis] + v2 * g[:, np.newaxis]))
+        zz = np.array([np.bincount(bins, weights=x,
+                                   minlength=len(n_int)) for x in xx.T])
+        B[3 * ri:3 * ri + 3, :] = zz
+    B *= _MAG_FACTOR
+    return B
+
+
+def _eeg_spherepot_coil(rrs, coils, sphere):
+    """Calculate the EEG in the sphere model."""
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+
+    # Shift to the sphere model coordinates
+    rrs = rrs - sphere['r0']
+
+    B = np.zeros((3 * len(rrs), len(coils)))
+    for ri, rr in enumerate(rrs):
+        # Only process dipoles inside the innermost sphere
+        if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']:
+            continue
+        # fwd_eeg_spherepot_vec
+        vval_one = np.zeros((len(rmags), 3))
+
+        # Make a weighted sum over the equivalence parameters
+        for eq in range(sphere['nfit']):
+            # Scale the dipole position
+            rd = sphere['mu'][eq] * rr
+            rd2 = np.sum(rd * rd)
+            rd2_inv = 1.0 / rd2
+            # Go over all electrodes
+            this_pos = rmags - sphere['r0']
+
+            # Scale location onto the surface of the sphere (not used)
+            # if sphere['scale_pos']:
+            #     pos_len = (sphere['layers'][-1]['rad'] /
+            #                np.sqrt(np.sum(this_pos * this_pos, axis=1)))
+            #     this_pos *= pos_len
+
+            # Vector from dipole to the field point
+            a_vec = this_pos - rd
+
+            # Compute the dot products needed
+            a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
+            a3 = 2.0 / (a * a * a)
+            r2 = np.sum(this_pos * this_pos, axis=1)
+            r = np.sqrt(r2)
+            rrd = np.sum(this_pos * rd, axis=1)
+            ra = r2 - rrd
+            rda = rrd - rd2
+
+            # The main ingredients
+            F = a * (r * a + ra)
+            c1 = a3 * rda + 1.0 / a - 1.0 / r
+            c2 = a3 + (a + r) / (r * F)
+
+            # Mix them together and scale by lambda/(rd*rd)
+            m1 = (c1 - c2 * rrd)
+            m2 = c2 * rd2
+
+            vval_one += (sphere['lambda'][eq] * rd2_inv *
+                         (m1[:, np.newaxis] * rd +
+                          m2[:, np.newaxis] * this_pos))
+
+            # compute total result
+            xx = vval_one * ws[:, np.newaxis]
+            zz = np.array([np.bincount(bins, weights=x,
+                                       minlength=len(n_int)) for x in xx.T])
+            B[3 * ri:3 * ri + 3, :] = zz
+    # finishing by scaling by 1/(4*M_PI)
+    B *= 0.25 / np.pi
+    return B
+
+
+# #############################################################################
+# MAGNETIC DIPOLE (e.g. CHPI)
+
+def _magnetic_dipole_field_vec(rrs, coils):
+    """Compute an MEG forward solution for a set of magnetic dipoles."""
+    # The code below is a more efficient version (~30x) of this:
+    # for ri, rr in enumerate(rrs):
+    #     for k in range(len(coils)):
+    #         this_coil = coils[k]
+    #         # Go through all points
+    #         diff = this_coil['rmag'] - rr
+    #         dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
+    #         dist = np.sqrt(dist2)
+    #         if (dist < 1e-5).any():
+    #             raise RuntimeError('Coil too close')
+    #         dist5 = dist2 * dist2 * dist
+    #         sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'],
+    #                                   axis=1)[:, np.newaxis] -
+    #                 dist2 * this_coil['cosmag']) / dist5
+    #         fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_)
+    if isinstance(coils, tuple):
+        rmags, cosmags, ws, n_int = coils
+    else:
+        rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    del coils
+
+    fwd = np.empty((3 * len(rrs), len(n_int)))
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+    for ri, rr in enumerate(rrs):
+        diff = rmags - rr
+        dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
+        dist = np.sqrt(dist2)
+        if (dist < 1e-5).any():
+            raise RuntimeError('Coil too close (dist = %g m)' % dist.min())
+        sum_ = ws[:, np.newaxis] * (3 * diff * np.sum(diff * cosmags,
+                                                      axis=1)[:, np.newaxis] -
+                                    dist2 * cosmags) / (dist2 * dist2 * dist)
+        for ii in range(3):
+            fwd[3 * ri + ii] = np.bincount(bins, weights=sum_[:, ii],
+                                           minlength=len(n_int))
+    fwd *= 1e-7
+    return fwd
+
+
+# #############################################################################
+# MAIN TRIAGING FUNCTION
+
+@verbose
+def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
+    """Precompute and store some things that are used for both MEG and EEG.
+
+    Calculation includes multiplication factors, coordinate transforms,
+    compensations, and forward solutions. All are stored in modified fwd_data.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in head coordinates
+    bem : dict
+        Boundary Element Model information
+    fwd_data : dict
+        Dict containing sensor information. Gets updated here with BEM and
+        sensor information for later forward calculations
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+    """
+
+    bem_rr = mults = mri_Q = head_mri_t = None
+    if not bem['is_sphere']:
+        if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
+            raise RuntimeError('only linear collocation supported')
+        # Store (and apply soon) μ_0/(4π) factor before source computations
+        mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
+                          [len(s['rr']) for s in bem['surfs']])[np.newaxis, :]
+        # Get positions of BEM points for every surface
+        bem_rr = np.concatenate([s['rr'] for s in bem['surfs']])
+
+        # The dipole location and orientation must be transformed
+        head_mri_t = bem['head_mri_t']
+        mri_Q = apply_trans(bem['head_mri_t']['trans'], np.eye(3), False)
+
+    # Compute solution and compensation for each sensor type ('meg', 'eeg')
+    if len(set(fwd_data['coil_types'])) != len(fwd_data['coil_types']):
+        raise RuntimeError('Non-unique sensor types found')
+    compensators, solutions, csolutions = [], [], []
+    for coil_type, coils, ccoils, info in zip(fwd_data['coil_types'],
+                                              fwd_data['coils_list'],
+                                              fwd_data['ccoils_list'],
+                                              fwd_data['infos']):
+        compensator = solution = csolution = None
+        if len(coils) > 0:  # Only proceed if sensors exist
+            if coil_type == 'meg':
+                # Compose a compensation data set if necessary
+                compensator = _make_ctf_comp_coils(info, coils)
+
+            if not bem['is_sphere']:
+                if coil_type == 'meg':
+                    # MEG field computation matrices for BEM
+                    start = 'Composing the field computation matrix'
+                    logger.info('\n' + start + '...')
+                    cf = FIFF.FIFFV_COORD_HEAD
+                    # multiply solution by "mults" here for simplicity
+                    solution = _bem_specify_coils(bem, coils, cf, mults,
+                                                  n_jobs)
+                    if compensator is not None:
+                        logger.info(start + ' (compensation coils)...')
+                        csolution = _bem_specify_coils(bem, ccoils, cf,
+                                                       mults, n_jobs)
+                else:
+                    # Compute solution for EEG sensor
+                    solution = _bem_specify_els(bem, coils, mults)
+            else:
+                solution = bem
+                if coil_type == 'eeg':
+                    logger.info('Using the equivalent source approach in the '
+                                'homogeneous sphere for EEG')
+        compensators.append(compensator)
+        solutions.append(solution)
+        csolutions.append(csolution)
+
+    # Get appropriate forward physics function depending on sphere or BEM model
+    fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field
+
+    # Update fwd_data with
+    #    bem_rr (3D BEM vertex positions)
+    #    mri_Q (3x3 Head->MRI coord transformation applied to identity matrix)
+    #    head_mri_t (head->MRI coord transform dict)
+    #    fun (_bem_pot_or_field if not 'sphere'; otherwise _sph_pot_or_field)
+    #    solutions (len 2 list; [ndarray, shape (n_MEG_sens, n BEM vertices),
+    #                            ndarray, shape (n_EEG_sens, n BEM vertices)])
+    #    csolutions (compensation for solution)
+    fwd_data.update(dict(bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t,
+                         compensators=compensators, solutions=solutions,
+                         csolutions=csolutions, fun=fun))
+
+
+@verbose
+def _compute_forwards_meeg(rr, fd, n_jobs, verbose=None):
+    """Compute MEG and EEG forward solutions for all sensor types.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole positions in head coordinates
+    fd : dict
+        Dict containing forward data after update in _prep_field_computation
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+
+    Returns
+    -------
+    Bs : list
+        Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
+        n_sensors depends on which channel types are requested (MEG and/or EEG)
+    """
+
+    n_jobs = max(min(n_jobs, len(rr)), 1)
+    Bs = list()
+    # The dipole location and orientation must be transformed to mri coords
+    mri_rr = None
+    if fd['head_mri_t'] is not None:
+        mri_rr = apply_trans(fd['head_mri_t']['trans'], rr)
+    mri_Q, bem_rr, fun = fd['mri_Q'], fd['bem_rr'], fd['fun']
+    for ci in range(len(fd['coils_list'])):
+        coils, ccoils = fd['coils_list'][ci], fd['ccoils_list'][ci]
+        if len(coils) == 0:  # nothing to do
+            Bs.append(np.zeros((3 * len(rr), 0)))
+            continue
+
+        coil_type, compensator = fd['coil_types'][ci], fd['compensators'][ci]
+        solution, csolution = fd['solutions'][ci], fd['csolutions'][ci]
+        info = fd['infos'][ci]
+
+        # Do the actual forward calculation for a list of MEG/EEG sensors
+        logger.info('Computing %s at %d source location%s '
+                    '(free orientations)...'
+                    % (coil_type.upper(), len(rr),
+                       '' if len(rr) == 1 else 's'))
+        # Calculate forward solution using spherical or BEM model
+        B = fun(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
+                coil_type)
+
+        # Compensate if needed (only done for MEG systems w/compensation)
+        if compensator is not None:
+            # Compute the field in the compensation sensors
+            work = fun(rr, mri_rr, mri_Q, ccoils, csolution, bem_rr,
+                       n_jobs, coil_type)
+            # Combine solutions so we can do the compensation
+            both = np.zeros((work.shape[0], B.shape[1] + work.shape[1]))
+            picks = pick_types(info, meg=True, ref_meg=False)
+            both[:, picks] = B
+            picks = pick_types(info, meg=False, ref_meg=True)
+            both[:, picks] = work
+            B = np.dot(both, compensator.T)
+        Bs.append(B)
+    return Bs
+
+
+@verbose
+def _compute_forwards(rr, bem, coils_list, ccoils_list, infos, coil_types,
+                      n_jobs, verbose=None):
+    """Compute the MEG and EEG forward solutions.
+
+    This effectively combines compute_forward_meg and compute_forward_eeg
+    from MNE-C.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_sources, 3)
+        3D dipole in head coordinates
+    bem : dict
+        Boundary Element Model information for all surfaces
+    coils_list : list
+        List of MEG and/or EEG sensor information dicts
+    ccoils_list : list
+        Optional list of MEG compensation information
+    infos : list, len(2)
+        infos[0] is MEG info, infos[1] is EEG info
+    coil_types : list of str
+        Sensor types. May contain 'meg' and/or 'eeg'
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    Bs : list of ndarray
+        Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
+        n_sensors depends on which channel types are requested (MEG and/or EEG)
+    """
+
+    # Split calculation into two steps to save (potentially) a lot of time
+    # when e.g. dipole fitting
+    fwd_data = dict(coils_list=coils_list, ccoils_list=ccoils_list,
+                    infos=infos, coil_types=coil_types)
+    _prep_field_computation(rr, bem, fwd_data, n_jobs)
+    Bs = _compute_forwards_meeg(rr, fwd_data, n_jobs)
+    return Bs
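+
+
+# Call-flow sketch (inferred from the imports in forward/__init__.py):
+# make_forward_solution in _make_forward.py assembles coils_list, infos and
+# coil_types via _prep_meg_channels/_prep_eeg_channels, then calls
+# _compute_forwards and gets back one (3 * n_dipoles, n_sensors) block per
+# requested sensor type.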
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_field_interpolation.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_field_interpolation.py
new file mode 100644
index 0000000..88d3802
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_field_interpolation.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+from scipy import linalg
+from copy import deepcopy
+
+from ..io.constants import FIFF
+from ..io.pick import pick_types, pick_info
+from ..surface import get_head_surf, get_meg_helmet_surf
+
+from ..io.proj import _has_eeg_average_ref_proj, make_projector
+from ..transforms import transform_surface_to, read_trans, _find_trans
+from ._make_forward import _create_meg_coils, _create_eeg_els, _read_coil_defs
+from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
+                         _get_legen_lut_fast, _get_legen_lut_accurate,
+                         _do_cross_dots)
+from ..parallel import check_n_jobs
+from ..utils import logger, verbose
+from ..fixes import partial
+
+
+def _is_axial_coil(coil):
+    is_ax = coil['coil_class'] in (FIFF.FWD_COILC_MAG,
+                                   FIFF.FWD_COILC_AXIAL_GRAD,
+                                   FIFF.FWD_COILC_AXIAL_GRAD2)
+    return is_ax
+
+
+def _ad_hoc_noise(coils, ch_type='meg'):
+    v = np.empty(len(coils))
+    if ch_type == 'meg':
+        axs = np.array([_is_axial_coil(coil) for coil in coils], dtype=bool)
+        v[axs] = 4e-28  # 20e-15 ** 2
+        v[np.logical_not(axs)] = 2.5e-25  # 5e-13 ** 2
+    else:
+        v.fill(1e-12)  # 1e-6 ** 2
+    cov = dict(diag=True, data=v, eig=None, eigvec=None)
+    return cov
+
+
+def _setup_dots(mode, coils, ch_type):
+    """Setup dot products"""
+    my_origin = np.array([0.0, 0.0, 0.04])
+    int_rad = 0.06
+    noise = _ad_hoc_noise(coils, ch_type)
+    if mode == 'fast':
+        # Use 50 coefficients with nearest-neighbor interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 50)
+        lut_fun = partial(_get_legen_lut_fast, lut=lut)
+    else:  # 'accurate'
+        # Use 100 coefficients with linear interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 100)
+        lut_fun = partial(_get_legen_lut_accurate, lut=lut)
+
+    return my_origin, int_rad, noise, lut_fun, n_fact
+
+
+def _compute_mapping_matrix(fmd, info):
+    """Do the hairy computations"""
+    logger.info('preparing the mapping matrix...')
+    # assemble a projector and apply it to the data
+    ch_names = fmd['ch_names']
+    projs = info.get('projs', list())
+    proj_op = make_projector(projs, ch_names)[0]
+    proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op))
+
+    noise_cov = fmd['noise']
+    # Whiten
+    if not noise_cov['diag']:
+        raise NotImplementedError  # this shouldn't happen
+    whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel()))
+    whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener))
+
+    # SVD is numerically better than the eigenvalue decomposition even if
+    # mat is supposed to be symmetric and positive definite
+    uu, sing, vv = linalg.svd(whitened_dots, full_matrices=False,
+                              overwrite_a=True)
+
+    # Eigenvalue truncation
+    sumk = np.cumsum(sing)
+    sumk /= sumk[-1]
+    fmd['nest'] = np.where(sumk > (1.0 - fmd['miss']))[0][0]
+    logger.info('Truncate at %d missing %g' % (fmd['nest'], fmd['miss']))
+    sing = 1.0 / sing[:fmd['nest']]
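+    # e.g. with the MEG default miss = 1e-4, the expansion is truncated
+    # where the cumulative singular-value spectrum reaches 99.99 % of its
+    # total, and only the retained components are inverted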
+
+    # Put the inverse together
+    logger.info('Put the inverse together...')
+    inv = np.dot(uu[:, :fmd['nest']] * sing, vv[:fmd['nest']]).T
+
+    # Sandwich with the whitener
+    inv_whitened = np.dot(whitener.T, np.dot(inv, whitener))
+
+    # Take into account that the lead fields used to compute
+    # d->surface_dots were unprojected
+    inv_whitened_proj = (np.dot(inv_whitened.T, proj_op)).T
+
+    # Finally sandwich in the selection matrix
+    # This one picks up the correct lead field projection
+    mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj)
+
+    # Optionally apply the average electrode reference to the final field map
+    if fmd['kind'] == 'eeg':
+        if _has_eeg_average_ref_proj(projs):
+            logger.info('The map will have average electrode reference')
+            mapping_mat -= np.mean(mapping_mat, axis=0)[np.newaxis, :]
+    return mapping_mat
+
+
+def _map_meg_channels(inst, pick_from, pick_to, mode='fast'):
+    """Find mapping from one set of channels to another.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    pick_from : array-like of int
+        The channels from which to interpolate.
+    pick_to : array-like of int
+        The channels to which to interpolate.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+
+    Returns
+    -------
+    mapping : array
+        A mapping matrix of shape len(pick_to) x len(pick_from).
+    """
+    info_from = pick_info(inst.info, pick_from, copy=True)
+    info_to = pick_info(inst.info, pick_to, copy=True)
+
+    # no need to apply trans because both from and to coils are in device
+    # coordinates
+    templates = _read_coil_defs()
+    coils_from = _create_meg_coils(info_from['chs'], 'normal',
+                                   info_from['dev_head_t'], templates)
+    coils_to = _create_meg_coils(info_to['chs'], 'normal',
+                                 info_to['dev_head_t'], templates)
+    miss = 1e-4  # Smoothing criterion for MEG
+
+    #
+    # Step 2. Calculate the dot products
+    #
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from,
+                                                             'meg')
+    logger.info('Computing dot products for %i coils...' % (len(coils_from)))
+    self_dots = _do_self_dots(int_rad, False, coils_from, my_origin, 'meg',
+                              lut_fun, n_fact, n_jobs=1)
+    logger.info('Computing cross products for %i x %i coils...'
+                % (len(coils_from), len(coils_to)))
+    cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to,
+                                my_origin, 'meg', lut_fun, n_fact).T
+
+    ch_names = [c['ch_name'] for c in info_from['chs']]
+    fmd = dict(kind='meg', ch_names=ch_names,
+               origin=my_origin, noise=noise, self_dots=self_dots,
+               surface_dots=cross_dots, int_rad=int_rad, miss=miss)
+    logger.info('Field mapping data ready')
+
+    #
+    # Step 3. Compute the mapping matrix
+    #
+    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
+
+    return fmd['data']
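+
+
+# Hedged usage sketch (assumes `inst` is a preloaded Raw/Epochs/Evoked,
+# pick_from/pick_to are valid MEG channel indices and `data` is the
+# instance's (n_channels, n_times) array):
+#
+#     >>> mapping = _map_meg_channels(inst, pick_from, pick_to)  # doctest: +SKIP
+#     >>> virtual = np.dot(mapping, data[pick_from])  # doctest: +SKIP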
+
+
+def _as_meg_type_evoked(evoked, ch_type='grad', mode='fast'):
+    """Compute virtual evoked using interpolated fields in mag/grad channels.
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked object.
+    ch_type : str
+        The destination channel type. It can be 'mag' or 'grad'.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+
+    Returns
+    -------
+    evoked : instance of mne.Evoked
+        The transformed evoked object containing only virtual channels.
+    """
+    evoked = evoked.copy()
+
+    if ch_type not in ['mag', 'grad']:
+        raise ValueError('ch_type must be "mag" or "grad", not "%s"'
+                         % ch_type)
+    # pick the original and destination channels
+    pick_from = pick_types(evoked.info, meg=True, eeg=False,
+                           ref_meg=False)
+    pick_to = pick_types(evoked.info, meg=ch_type, eeg=False,
+                         ref_meg=False)
+
+    if len(pick_to) == 0:
+        raise ValueError('No channels matching the destination channel type'
+                         ' found in info. Please pass an evoked containing'
+                         ' both the original and destination channels. Only'
+                         ' the locations of the destination channels will be'
+                         ' used for interpolation.')
+
+    mapping = _map_meg_channels(evoked, pick_from, pick_to, mode=mode)
+
+    # compute evoked data by multiplying by the 'gain matrix' from
+    # original sensors to virtual sensors
+    data = np.dot(mapping, evoked.data[pick_from])
+
+    # keep only the destination channel types
+    evoked.pick_types(meg=ch_type, eeg=False, ref_meg=False)
+    evoked.data = data
+
+    # change channel names to emphasize they contain interpolated data
+    for ch in evoked.info['chs']:
+        ch['ch_name'] += '_virtual'
+    evoked.info['ch_names'] = [ch['ch_name'] for ch in evoked.info['chs']]
+
+    return evoked
+
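+# A hypothetical usage sketch, assuming `evoked` contains both mag and
+# grad channels:
+#
+#     virt = _as_meg_type_evoked(evoked, ch_type='mag')
+#     virt.ch_names[0]  # e.g. 'MEG 0111_virtual'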
+
+@verbose
+def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
+                          n_jobs=1, verbose=None):
+    """Re-map M/EEG data to a surface
+
+    Parameters
+    ----------
+    info : instance of io.meas_info.Info
+        Measurement info.
+    surf : dict
+        The surface to map the data to. The required fields are `'rr'`,
+        `'nn'`, and `'coord_frame'`. Must be in head coordinates.
+    ch_type : str
+        Must be either `'meg'` or `'eeg'`, determines the type of field.
+    trans : None | dict
+        If None, no transformation is applied. Otherwise, a head<->MRI
+        transformation should be provided.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+    n_jobs : int
+        Number of jobs to run in parallel (requires joblib package).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fmd : dict
+        The field mapping data, including an n_vertices x n_sensors
+        'data' array that remaps the MEG or EEG data, as
+        `new_data = np.dot(fmd['data'], data)`.
+    """
+    if not all(key in surf for key in ['rr', 'nn']):
+        raise KeyError('surf must have both "rr" and "nn"')
+    if 'coord_frame' not in surf:
+        raise KeyError('The surface coordinate frame must be specified '
+                       'in surf["coord_frame"]')
+    if mode not in ['accurate', 'fast']:
+        raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)
+
+    # deal with coordinate frames here -- always go to "head" (easiest)
+    surf = transform_surface_to(deepcopy(surf), 'head', trans)
+
+    n_jobs = check_n_jobs(n_jobs)
+
+    #
+    # Step 1. Prepare the coil definitions
+    # Do the dot products, assume surf in head coords
+    #
+    if ch_type not in ('meg', 'eeg'):
+        raise ValueError('unknown coil type "%s"' % ch_type)
+    if ch_type == 'meg':
+        picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
+        logger.info('Prepare MEG mapping...')
+    else:
+        picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
+        logger.info('Prepare EEG mapping...')
+    if len(picks) == 0:
+        raise RuntimeError('cannot map, no channels found')
+    chs = pick_info(info, picks, copy=True)['chs']
+
+    # create coil defs in head coordinates
+    if ch_type == 'meg':
+        # Put them in head coordinates
+        coils = _create_meg_coils(chs, 'normal', info['dev_head_t'])
+        type_str = 'coils'
+        miss = 1e-4  # Smoothing criterion for MEG
+    else:  # EEG
+        coils = _create_eeg_els(chs)
+        type_str = 'electrodes'
+        miss = 1e-3  # Smoothing criterion for EEG
+
+    #
+    # Step 2. Calculate the dot products
+    #
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils,
+                                                             ch_type)
+    logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
+    self_dots = _do_self_dots(int_rad, False, coils, my_origin, ch_type,
+                              lut_fun, n_fact, n_jobs)
+    sel = np.arange(len(surf['rr']))  # eventually we should do sub-selection
+    logger.info('Computing dot products for %i surface locations...'
+                % len(sel))
+    surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel,
+                                    my_origin, ch_type, lut_fun, n_fact,
+                                    n_jobs)
+
+    #
+    # Step 3. Compute the mapping matrix and return the result
+    #
+    ch_names = [c['ch_name'] for c in chs]
+    fmd = dict(kind=ch_type, surf=surf, ch_names=ch_names, coils=coils,
+               origin=my_origin, noise=noise, self_dots=self_dots,
+               surface_dots=surface_dots, int_rad=int_rad, miss=miss)
+    logger.info('Field mapping data ready')
+
+    fmd['data'] = _compute_mapping_matrix(fmd, info)
+
+    # Remove some unnecessary fields
+    del fmd['self_dots']
+    del fmd['surface_dots']
+    del fmd['int_rad']
+    del fmd['miss']
+    return fmd
+
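+# A hedged sketch of applying the returned mapping (`surf`, `trans`, and
+# `picks` are assumed to exist and match the MEG channels in the info):
+#
+#     fmd = _make_surface_mapping(evoked.info, surf, 'meg', trans)
+#     field = np.dot(fmd['data'], evoked.data[picks])  # (n_vertices, n_times)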
+
+def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None,
+                   ch_type=None, mode='fast', meg_surf='helmet',
+                   n_jobs=1):
+    """Compute surface maps used for field display in 3D
+
+    Parameters
+    ----------
+    evoked : Evoked | Epochs | Raw
+        The measurement file. Must have an info attribute.
+    trans : str | 'auto' | None
+        The full path to the `*-trans.fif` file produced during
+        coregistration. If present, or found using 'auto', the maps
+        will be in MRI coordinates. If None, the map for EEG data
+        will not be available.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT. If None, map for EEG data will not be available.
+    subjects_dir : str
+        The path to the FreeSurfer subjects reconstructions.
+        It corresponds to the FreeSurfer environment variable SUBJECTS_DIR.
+    ch_type : None | 'eeg' | 'meg'
+        If None, a map for each available channel type will be returned.
+        Else only the specified type will be used.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+    meg_surf : str
+        Should be ``'helmet'`` or ``'head'`` to specify on which surface
+        to compute the MEG field map. The default value is ``'helmet'``.
+    n_jobs : int
+        The number of jobs to run in parallel.
+
+    Returns
+    -------
+    surf_maps : list
+        The surface maps to be used for field plots. The list contains
+        separate ones for MEG and EEG (if both MEG and EEG are present).
+    """
+    info = evoked.info
+
+    if ch_type is None:
+        types = [t for t in ['eeg', 'meg'] if t in evoked]
+    else:
+        if ch_type not in ['eeg', 'meg']:
+            raise ValueError("ch_type should be 'eeg' or 'meg' (got %s)"
+                             % ch_type)
+        types = [ch_type]
+
+    if trans == 'auto':
+        # let's try to do this in MRI coordinates so they're easy to plot
+        trans = _find_trans(subject, subjects_dir)
+
+    if 'eeg' in types and trans is None:
+        logger.info('No trans file available. EEG data ignored.')
+        types.remove('eeg')
+
+    if len(types) == 0:
+        raise RuntimeError('No data available for mapping.')
+
+    if trans is not None:
+        trans = read_trans(trans)
+
+    if meg_surf not in ['helmet', 'head']:
+        raise ValueError('Surface to plot MEG fields must be '
+                         '"helmet" or "head"')
+
+    surfs = []
+    for this_type in types:
+        if this_type == 'meg' and meg_surf == 'helmet':
+            surf = get_meg_helmet_surf(info, trans)
+        else:
+            surf = get_head_surf(subject, subjects_dir=subjects_dir)
+        surfs.append(surf)
+
+    surf_maps = list()
+
+    for this_type, this_surf in zip(types, surfs):
+        this_map = _make_surface_mapping(evoked.info, this_surf, this_type,
+                                         trans, n_jobs=n_jobs)
+        this_map['surf'] = this_surf  # XXX : a bit weird...
+        surf_maps.append(this_map)
+
+    return surf_maps
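+
+
+# A hypothetical usage sketch (file and subject names are placeholders);
+# the returned maps are typically passed on to mne.viz.plot_evoked_field:
+#
+#     maps = make_field_map(evoked, trans='sample-trans.fif',
+#                           subject='sample', subjects_dir=subjects_dir)
+#     mne.viz.plot_evoked_field(evoked, maps, time=0.1)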
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_lead_dots.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_lead_dots.py
new file mode 100644
index 0000000..f0f4d15
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_lead_dots.py
@@ -0,0 +1,521 @@
+# Authors: Eric Larson <larsoner at uw.edu>
+#          Mainak Jas <mainak.jas at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+
+import numpy as np
+from numpy.polynomial import legendre
+
+from ..parallel import parallel_func
+from ..utils import logger, _get_extra_data_path
+
+
+##############################################################################
+# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE
+
+def _next_legen_der(n, x, p0, p01, p0d, p0dd):
+    """Compute the next Legendre polynomial and its derivatives"""
+    # only good for n > 1 !
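+    # Bonnet's recursion and its derivatives are used:
+    #   P_n(x)   = ((2n - 1) x P_{n-1}(x) - (n - 1) P_{n-2}(x)) / n
+    #   P_n'(x)  = n P_{n-1}(x) + x P_{n-1}'(x)
+    #   P_n''(x) = (n + 1) P_{n-1}'(x) + x P_{n-1}''(x)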
+    help_ = p0
+    helpd = p0d
+    p0 = ((2 * n - 1) * x * help_ - (n - 1) * p01) / n
+    p0d = n * help_ + x * helpd
+    p0dd = (n + 1) * helpd + x * p0dd
+    p01 = help_
+    return p0, p0d, p0dd
+
+
+def _get_legen(x, n_coeff=100):
+    """Get Legendre polynomials expanded about x"""
+    return legendre.legvander(x, n_coeff - 1)
+
+
+def _get_legen_der(xx, n_coeff=100):
+    """Get Legendre polynomial derivatives expanded about x"""
+    coeffs = np.empty((len(xx), n_coeff, 3))
+    for c, x in zip(coeffs, xx):
+        p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2]
+        p0s[:2] = [1.0, x]
+        p0ds[:2] = [0.0, 1.0]
+        p0dds[:2] = [0.0, 0.0]
+        for n in range(2, n_coeff):
+            p0s[n], p0ds[n], p0dds[n] = _next_legen_der(
+                n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1])
+    return coeffs
+
+
+def _get_legen_table(ch_type, volume_integral=False, n_coeff=100,
+                     n_interp=20000, force_calc=False):
+    """Return a (generated) LUT of Legendre (derivative) polynomial coeffs"""
+    if n_interp % 2 != 0:
+        raise RuntimeError('n_interp must be even')
+    fname = op.join(_get_extra_data_path(), 'tables')
+    if not op.isdir(fname):
+        # Updated due to API change (GH 1167)
+        os.makedirs(fname)
+    if ch_type == 'meg':
+        fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp))
+        leg_fun = _get_legen_der
+        extra_str = ' derivative'
+        lut_shape = (n_interp + 1, n_coeff, 3)
+    else:  # 'eeg'
+        fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp))
+        leg_fun = _get_legen
+        extra_str = ''
+        lut_shape = (n_interp + 1, n_coeff)
+    if not op.isfile(fname) or force_calc:
+        n_out = (n_interp // 2)
+        logger.info('Generating Legendre%s table...' % extra_str)
+        x_interp = np.arange(-n_out, n_out + 1, dtype=np.float64) / n_out
+        lut = leg_fun(x_interp, n_coeff).astype(np.float32)
+        if not force_calc:
+            with open(fname, 'wb') as fid:
+                fid.write(lut.tostring())
+    else:
+        logger.info('Reading Legendre%s table...' % extra_str)
+        with open(fname, 'rb', buffering=0) as fid:
+            lut = np.fromfile(fid, np.float32)
+    lut.shape = lut_shape
+
+    # we need this for the integration step
+    n_fact = np.arange(1, n_coeff, dtype=float)
+    if ch_type == 'meg':
+        n_facts = list()  # multn, then mult, then multn * (n + 1)
+        if volume_integral:
+            n_facts.append(n_fact / ((2.0 * n_fact + 1.0) *
+                                     (2.0 * n_fact + 3.0)))
+        else:
+            n_facts.append(n_fact / (2.0 * n_fact + 1.0))
+        n_facts.append(n_facts[0] / (n_fact + 1.0))
+        n_facts.append(n_facts[0] * (n_fact + 1.0))
+        # skip the first set of coefficients because they are not used
+        lut = lut[:, 1:, [0, 1, 1, 2]]  # for multiplicative convenience later
+        # reshape this for convenience, too
+        n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T
+        n_facts = np.ascontiguousarray(n_facts)
+        n_fact = n_facts
+    else:  # 'eeg'
+        n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact
+        # skip the first set of coefficients because they are not used
+        lut = lut[:, 1:].copy()
+    return lut, n_fact
+
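+# A small shape sketch, following the defaults above (the first call may
+# generate and cache the table on disk):
+#
+#     lut, n_fact = _get_legen_table('eeg')
+#     lut.shape    # (20001, 99): the unused first coefficient is dropped
+#     lut, n_fact = _get_legen_table('meg')
+#     lut.shape    # (20001, 99, 4) after the [0, 1, 1, 2] re-indexing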
+
+def _get_legen_lut_fast(x, lut):
+    """Return Legendre coefficients for given x values in -1<=x<=1"""
+    # map into table vals (works for both vals and deriv tables)
+    n_interp = (lut.shape[0] - 1.0)
+    # equiv to "((x + 1.0) / 2.0) * n_interp" but faster
+    mm = x * (n_interp / 2.0) + 0.5 * n_interp
+    # nearest-neighbor version (could be decent enough...)
+    idx = np.round(mm).astype(int)
+    vals = lut[idx]
+    return vals
+
+
+def _get_legen_lut_accurate(x, lut):
+    """Return Legendre coefficients for given x values in -1<=x<=1"""
+    # map into table vals (works for both vals and deriv tables)
+    n_interp = (lut.shape[0] - 1.0)
+    # equiv to "((x + 1.0) / 2.0) * n_interp" but faster
+    mm = x * (n_interp / 2.0) + 0.5 * n_interp
+    # slower, more accurate interpolation version
+    mm = np.minimum(mm, n_interp - 0.0000000001)
+    idx = np.floor(mm).astype(int)
+    w2 = mm - idx
+    w2.shape += tuple([1] * (lut.ndim - w2.ndim))  # expand to correct size
+    vals = (1 - w2) * lut[idx] + w2 * lut[idx + 1]
+    return vals
+
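+# Both accessors map x in [-1, 1] onto table rows; a hypothetical sanity
+# check of their agreement (the tolerance is a guess, not a tested bound):
+#
+#     lut, _ = _get_legen_table('eeg')
+#     x = np.array([0.3])
+#     np.allclose(_get_legen_lut_fast(x, lut),
+#                 _get_legen_lut_accurate(x, lut), atol=1e-3)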
+
+def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
+    """Lead field dot products using Legendre polynomial (P_n) series"""
+    # Compute the sum occurring in the evaluation.
+    # The result is
+    #   sums[:]    (2n+1)^2/n beta^n P_n
+    coeffs = lut_fun(ctheta)
+    betans = np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
+                        axis=1)
+    s0 = np.dot(coeffs * betans, n_fact)  # == weighted sum across cols
+    return s0
+
+
+def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
+    """Lead field dot products using Legendre polynomial (P_n) series.
+
+    Parameters
+    ----------
+    beta : array, shape (n_points * n_points, 1)
+        Coefficients of the integration.
+    ctheta : array, shape (n_points * n_points, 1)
+        Cosine of the angle between the sensor integration points.
+    lut_fun : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    volume_integral : bool
+        If True, compute volume integral.
+
+    Returns
+    -------
+    sums : array, shape (4, n_points * n_points)
+        The results.
+    """
+    # Compute the sums occurring in the evaluation.
+    # Two point magnetometers on the xz plane are assumed.
+    # The four sums are:
+    #  * sums[:, 0]    n(n+1)/(2n+1) beta^(n+1) P_n
+    #  * sums[:, 1]    n/(2n+1) beta^(n+1) P_n'
+    #  * sums[:, 2]    n/((2n+1)(n+1)) beta^(n+1) P_n'
+    #  * sums[:, 3]    n/((2n+1)(n+1)) beta^(n+1) P_n''
+    coeffs = lut_fun(ctheta)
+    beta = (np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
+                       axis=1) * beta[:, np.newaxis])
+    # This is equivalent, but slower:
+    # sums = np.sum(beta[:, :, np.newaxis] * n_fact * coeffs, axis=1)
+    # sums = np.rollaxis(sums, 2)
+    sums = np.einsum('ij,jk,ijk->ki', beta, n_fact, coeffs)
+    return sums
+
+
+###############################################################################
+# SPHERE DOTS
+
+def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
+                        w1, w2, volume_integral, lut, n_fact, ch_type):
+    """Lead field dot product computation for M/EEG in the sphere model.
+
+    Parameters
+    ----------
+    r : float
+        The integration radius. It is used to calculate beta as:
+        beta = (r * r) / (lr1 * lr2).
+    rr1 : array, shape (n_points x 3)
+        Normalized position vectors of integrations points in first sensor.
+    rr2 : array, shape (n_points x 3)
+        Normalized position vector of integration points in second sensor.
+    lr1 : array, shape (n_points x 1)
+        Magnitude of position vector of integration points in first sensor.
+    lr2 : array, shape (n_points x 1)
+        Magnitude of position vector of integration points in second sensor.
+    cosmags1 : array, shape (n_points x 1)
+        Direction of integration points in first sensor.
+    cosmags2 : array, shape (n_points x 1)
+        Direction of integration points in second sensor.
+    w1 : array, shape (n_points x 1)
+        Weights of integration points in the first sensor.
+    w2 : array, shape (n_points x 1)
+        Weights of integration points in the second sensor.
+    volume_integral : bool
+        If True, compute volume integral.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+
+    Returns
+    -------
+    result : float
+        The integration sum.
+    """
+    ct = np.einsum('ik,jk->ij', rr1, rr2)  # outer product, sum over coords
+
+    # expand axes
+    rr1 = rr1[:, np.newaxis, :]  # (n_rr1, n_rr2, n_coord) e.g. 4x4x3
+    rr2 = rr2[np.newaxis, :, :]
+    lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]
+
+    beta = (r * r) / lr1lr2
+    if ch_type == 'meg':
+        sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,
+                              volume_integral)
+        sums.shape = (4,) + beta.shape
+
+        # Accumulate the result, a little bit streamlined version
+        # cosmags1 = cosmags1[:, np.newaxis, :]
+        # cosmags2 = cosmags2[np.newaxis, :, :]
+        # n1c1 = np.sum(cosmags1 * rr1, axis=2)
+        # n1c2 = np.sum(cosmags1 * rr2, axis=2)
+        # n2c1 = np.sum(cosmags2 * rr1, axis=2)
+        # n2c2 = np.sum(cosmags2 * rr2, axis=2)
+        # n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
+        n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
+        n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
+        n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
+        n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2)
+        n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2)
+        part1 = ct * n1c1 * n2c2
+        part2 = n1c1 * n2c1 + n1c2 * n2c2
+
+        result = (n1c1 * n2c2 * sums[0] +
+                  (2.0 * part1 - part2) * sums[1] +
+                  (n1n2 + part1 - part2) * sums[2] +
+                  (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])
+
+        # Give it a finishing touch!
+        const = 4e-14 * np.pi  # This is \mu_0^2/4\pi
+        result *= (const / lr1lr2)
+        if volume_integral:
+            result *= r
+    else:  # 'eeg'
+        sums = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)
+        sums.shape = beta.shape
+
+        # Give it a finishing touch!
+        eeg_const = 1.0 / (4.0 * np.pi)
+        result = eeg_const * sums / lr1lr2
+    # now we add them all up with weights
+    if w1 is None:  # operating on surface, treat independently
+        # result = np.sum(w2[np.newaxis, :] * result, axis=1)
+        result = np.dot(result, w2)
+    else:
+        # result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
+        result = np.einsum('i,j,ij', w1, w2, result)
+    return result
+
+
+def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
+    """Perform the lead field dot product integrations.
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2).
+    volume : bool
+        If True, perform volume integral.
+    coils : list of dict
+        The coils.
+    r0 : array, shape (3 x 1)
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    products : array, shape (n_coils, n_coils)
+        The integration products.
+    """
+    if ch_type == 'eeg':
+        intrad *= 0.7
+    # convert to normalized distances from expansion center
+    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
+    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
+    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
+    cosmags = [coil['cosmag'] for coil in coils]
+    ws = [coil['w'] for coil in coils]
+    parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs)
+    prods = parallel(p_fun(intrad, rmags, rlens, cosmags,
+                           ws, volume, lut, n_fact, ch_type, idx)
+                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
+    products = np.sum(prods, axis=0)
+    return products
+
+
+def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
+                         n_fact, ch_type, idx):
+    """Helper for parallelization"""
+    # all possible combinations of two magnetometers
+    products = np.zeros((len(rmags), len(rmags)))
+    for ci1 in idx:
+        for ci2 in range(0, ci1 + 1):
+            res = _fast_sphere_dot_r0(intrad, rmags[ci1], rmags[ci2],
+                                      rlens[ci1], rlens[ci2],
+                                      cosmags[ci1], cosmags[ci2],
+                                      ws[ci1], ws[ci2], volume, lut,
+                                      n_fact, ch_type)
+            products[ci1, ci2] = res
+            products[ci2, ci1] = res
+    return products
+
+
+def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type,
+                   lut, n_fact):
+    """Compute lead field dot product integrations between two coil sets.
+
+    The code is a direct translation of MNE-C code found in
+    `mne_map_data/lead_dots.c`.
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2).
+    volume : bool
+        If True, compute volume integral.
+    coils1 : list of dict
+        The original coils.
+    coils2 : list of dict
+        The coils to which data is being mapped.
+    r0 : array, shape (3 x 1).
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+
+    Returns
+    -------
+    products : array, shape (n_coils1, n_coils2)
+        The integration products.
+    """
+    rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1]
+    rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2]
+
+    rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1]
+    rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2]
+
+    rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)]
+    rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)]
+
+    ws1 = [coil['w'] for coil in coils1]
+    ws2 = [coil['w'] for coil in coils2]
+
+    cosmags1 = [coil['cosmag'] for coil in coils1]
+    cosmags2 = [coil['cosmag'] for coil in coils2]
+
+    products = np.zeros((len(rmags1), len(rmags2)))
+    for ci1 in range(len(coils1)):
+        for ci2 in range(len(coils2)):
+            res = _fast_sphere_dot_r0(intrad, rmags1[ci1], rmags2[ci2],
+                                      rlens1[ci1], rlens2[ci2], cosmags1[ci1],
+                                      cosmags2[ci2], ws1[ci1], ws2[ci2],
+                                      volume, lut, n_fact, ch_type)
+            products[ci1, ci2] = res
+    return products
+
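+# An unverified consistency check: for identical MEG coil sets the cross
+# products should reduce to the self products, up to rounding:
+#
+#     np.allclose(_do_cross_dots(intrad, False, coils, coils, r0, 'meg',
+#                                lut, n_fact),
+#                 _do_self_dots(intrad, False, coils, r0, 'meg', lut,
+#                               n_fact, n_jobs=1))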
+
+def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
+                     lut, n_fact, n_jobs):
+    """Compute the map construction products
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2)
+    volume : bool
+        If True, compute a volume integral.
+    coils : list of dict
+        The coils.
+    surf : dict
+        The surface on which the field is interpolated.
+    sel : array
+        Indices of the surface vertices to select.
+    r0 : array, shape (3 x 1)
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    lut : callable
+        Look-up table for Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    products : array, shape (n_vertices, n_coils)
+        The integration products.
+    """
+    # convert to normalized distances from expansion center
+    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
+    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
+    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
+    cosmags = [coil['cosmag'] for coil in coils]
+    ws = [coil['w'] for coil in coils]
+    rref = None
+    refl = None
+    # virt_ref = False
+    if ch_type == 'eeg':
+        intrad *= 0.7
+        # The virtual ref code is untested and unused, so it is
+        # commented out for now
+        # if virt_ref:
+        #     rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
+        #     refl = np.sqrt(np.sum(rref * rref, axis=1))
+        #     rref /= refl[:, np.newaxis]
+
+    rsurf = surf['rr'][sel] - r0[np.newaxis, :]
+    lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))
+    rsurf /= lsurf[:, np.newaxis]
+    this_nn = surf['nn'][sel]
+
+    # loop over the coils
+    parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)
+    prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
+                           this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
+                           idx)
+                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
+    products = np.sum(prods, axis=0)
+    return products
+
+
+def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
+                            this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
+                            idx):
+    """Helper for parallelization.
+
+    Parameters
+    ----------
+    refl : array | None
+        If ch_type is 'eeg', the magnitude of position vector of the
+        virtual reference (never used).
+    lsurf : array
+        Magnitude of position vector of the surface points.
+    rlens : list of arrays of length n_coils
+        Magnitude of position vector.
+    this_nn : array, shape (n_vertices, 3)
+        Surface normals.
+    cosmags : list of array.
+        Direction of the integration points in the coils.
+    ws : list of array
+        Integration weights of the coils.
+    volume : bool
+        If True, compute volume integral.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    ch_type : str
+        'meg' or 'eeg'
+    idx : array of int
+        Indices of the coils to process in this subset.
+
+    Returns
+    -------
+    products : array, shape (n_vertices, n_coils)
+        The integration products.
+    """
+    products = np.zeros((len(rsurf), len(rmags)))
+    for ci in idx:
+        res = _fast_sphere_dot_r0(intrad, rsurf, rmags[ci],
+                                  lsurf, rlens[ci],
+                                  this_nn, cosmags[ci],
+                                  None, ws[ci], volume, lut,
+                                  n_fact, ch_type)
+        if rref is not None:
+            raise NotImplementedError  # never used and not tested
+            # vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
+            #                            refl, rlens[ci],
+            #                            this_nn, cosmags[ci],
+            #                            None, ws[ci], volume, lut,
+            #                            n_fact, ch_type)
+            # products[:, ci] = res - vres
+        else:
+            products[:, ci] = res
+    return products
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_make_forward.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_make_forward.py
new file mode 100644
index 0000000..2d96811
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/_make_forward.py
@@ -0,0 +1,584 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larsoner at uw.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+import numpy as np
+
+from .. import pick_types, pick_info
+from ..io.pick import _has_kit_refs
+from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc
+from ..io.meas_info import Info
+from ..io.constants import FIFF
+from .forward import Forward, write_forward_solution, _merge_meg_eeg_fwds
+from ._compute_forward import _compute_forwards
+from ..transforms import (_ensure_trans, transform_surface_to, apply_trans,
+                          _get_mri_head_t, _print_coord_trans,
+                          _coord_frame_name, Transform)
+from ..utils import logger, verbose
+from ..source_space import _ensure_src, _filter_source_spaces
+from ..surface import _normalize_vectors
+from ..bem import read_bem_solution, _bem_find_surface, ConductorModel
+from ..externals.six import string_types
+
+
+_accuracy_dict = dict(normal=FIFF.FWD_COIL_ACCURACY_NORMAL,
+                      accurate=FIFF.FWD_COIL_ACCURACY_ACCURATE)
+
+
+@verbose
+def _read_coil_defs(fname=None, elekta_defs=False, verbose=None):
+    """Read a coil definition file.
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file from which coil definitions are read.
+    elekta_defs : bool
+        If true, use Elekta's coil definitions for numerical integration
+        (from Abramowitz and Stegun section 25.4.62).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Returns
+    -------
+    res : dict
+        A dict with a 'coils' key containing the list of coil
+        definitions. Each coil definition is a dict with valid keys:
+        'cosmag' | 'coil_class' | 'coord_frame' | 'rmag' | 'type' |
+        'chname' | 'accuracy'.
+        cosmag contains the direction of the coils and rmag contains the
+        position vector.
+    """
+    if fname is None:
+        if not elekta_defs:
+            fname = op.join(op.split(__file__)[0], '..', 'data',
+                            'coil_def.dat')
+        else:
+            fname = op.join(op.split(__file__)[0], '..', 'data',
+                            'coil_def_Elekta.dat')
+    big_val = 0.5
+    with open(fname, 'r') as fid:
+        lines = fid.readlines()
+        res = dict(coils=list())
+        lines = lines[::-1]
+        while len(lines) > 0:
+            line = lines.pop()
+            if line[0] != '#':
+                vals = np.fromstring(line, sep=' ')
+                assert len(vals) in (6, 7)  # newer numpy can truncate comment
+                start = line.find('"')
+                end = len(line.strip()) - 1
+                assert line.strip()[end] == '"'
+                desc = line[start:end]
+                npts = int(vals[3])
+                coil = dict(coil_type=vals[1], coil_class=vals[0], desc=desc,
+                            accuracy=vals[2], size=vals[4], base=vals[5])
+                # get parameters of each component
+                rmag = list()
+                cosmag = list()
+                w = list()
+                for p in range(npts):
+                    # get next non-comment line
+                    line = lines.pop()
+                    while line[0] == '#':
+                        line = lines.pop()
+                    vals = np.fromstring(line, sep=' ')
+                    assert len(vals) == 7
+                    # Read and verify data for each integration point
+                    w.append(vals[0])
+                    rmag.append(vals[[1, 2, 3]])
+                    cosmag.append(vals[[4, 5, 6]])
+                w = np.array(w)
+                rmag = np.array(rmag)
+                cosmag = np.array(cosmag)
+                size = np.sqrt(np.sum(cosmag ** 2, axis=1))
+                if np.any(np.sqrt(np.sum(rmag ** 2, axis=1)) > big_val):
+                    raise RuntimeError('Unreasonable integration point')
+                if np.any(size <= 0):
+                    raise RuntimeError('Unreasonable normal')
+                cosmag /= size[:, np.newaxis]
+                coil.update(dict(w=w, cosmag=cosmag, rmag=rmag))
+                res['coils'].append(coil)
+    logger.info('%d coil definitions read', len(res['coils']))
+    return res
+
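+# A short illustrative sketch: with no arguments, the stock definitions
+# shipped in mne/data/coil_def.dat are read:
+#
+#     templates = _read_coil_defs()
+#     len(templates['coils'])        # number of coil definitions
+#     sorted(templates['coils'][0])  # 'accuracy', 'base', 'coil_class', ...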
+
+def _create_meg_coil(coilset, ch, acc, t):
+    """Create a coil definition using templates, transform if necessary"""
+    # Also change the coordinate frame if so desired
+    if t is None:
+        t = Transform('meg', 'meg', np.eye(4))  # identity, no change
+
+    if ch['kind'] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
+        raise RuntimeError('%s is not a MEG channel' % ch['ch_name'])
+
+    # Simple linear search from the coil definitions
+    for coil in coilset['coils']:
+        if coil['coil_type'] == (ch['coil_type'] & 0xFFFF) and \
+                coil['accuracy'] == acc:
+            break
+    else:
+        raise RuntimeError('Desired coil definition not found '
+                           '(type = %d acc = %d)' % (ch['coil_type'], acc))
+
+    # Apply a coordinate transformation if so desired
+    coil_trans = np.dot(t['trans'], _loc_to_coil_trans(ch['loc']))
+
+    # Create the result
+    res = dict(chname=ch['ch_name'], coil_class=coil['coil_class'],
+               accuracy=coil['accuracy'], base=coil['base'], size=coil['size'],
+               type=ch['coil_type'], w=coil['w'], desc=coil['desc'],
+               coord_frame=t['to'], rmag=apply_trans(coil_trans, coil['rmag']),
+               cosmag=apply_trans(coil_trans, coil['cosmag'], False))
+    res.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1],
+               ez=coil_trans[:3, 2], r0=coil_trans[:3, 3])
+    return res
+
+
+def _create_eeg_el(ch, t=None):
+    """Create an electrode definition, transform coords if necessary"""
+    if ch['kind'] != FIFF.FIFFV_EEG_CH:
+        raise RuntimeError('%s is not an EEG channel. Cannot create an '
+                           'electrode definition.' % ch['ch_name'])
+    if t is None:
+        t = Transform('head', 'head', np.eye(4))  # identity, no change
+    if t.from_str != 'head':
+        raise RuntimeError('Inappropriate coordinate transformation')
+
+    r0ex = _loc_to_eeg_loc(ch['loc'])
+    if r0ex.shape[1] == 1:  # no reference
+        w = np.array([1.])
+    else:  # has reference
+        w = np.array([1., -1.])
+
+    # Optional coordinate transformation
+    r0ex = apply_trans(t['trans'], r0ex.T)
+
+    # The electrode location
+    cosmag = r0ex.copy()
+    _normalize_vectors(cosmag)
+    res = dict(chname=ch['ch_name'], coil_class=FIFF.FWD_COILC_EEG, w=w,
+               accuracy=_accuracy_dict['normal'], type=ch['coil_type'],
+               coord_frame=t['to'], rmag=r0ex, cosmag=cosmag)
+    return res
+
+
+def _create_meg_coils(chs, acc=None, t=None, coilset=None):
+    """Create a set of MEG or EEG coils in the head coordinate frame"""
+    acc = _accuracy_dict[acc] if isinstance(acc, string_types) else acc
+    coilset = _read_coil_defs(verbose=False) if coilset is None else coilset
+    coils = [_create_meg_coil(coilset, ch, acc, t) for ch in chs]
+    return coils
+
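+# A hedged sketch of building head-coordinate coil definitions from a
+# measurement info (`info` is assumed to contain MEG channels):
+#
+#     picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
+#     megchs = pick_info(info, picks)['chs']
+#     coils = _create_meg_coils(megchs, 'accurate', info['dev_head_t'])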
+
+def _create_eeg_els(chs):
+    """Create a set of MEG or EEG coils in the head coordinate frame"""
+    return [_create_eeg_el(ch) for ch in chs]
+
+
+@verbose
+def _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=None):
+    """Set up a BEM for forward computation"""
+    logger.info('')
+    if isinstance(bem, string_types):
+        logger.info('Setting up the BEM model using %s...\n' % bem_extra)
+        bem = read_bem_solution(bem)
+    if not isinstance(bem, ConductorModel):
+        raise TypeError('bem must be a string or ConductorModel')
+    if bem['is_sphere']:
+        logger.info('Using the sphere model.\n')
+        if len(bem['layers']) == 0:
+            raise RuntimeError('Spherical model has zero layers')
+        if bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
+            raise RuntimeError('Spherical model is not in head coordinates')
+    else:
+        if neeg > 0 and len(bem['surfs']) == 1:
+            raise RuntimeError('Cannot use a homogeneous model in EEG '
+                               'calculations')
+        logger.info('Employing the head->MRI coordinate transform with the '
+                    'BEM model.')
+        # fwd_bem_set_head_mri_t: Set the coordinate transformation
+        bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri')
+        logger.info('BEM model %s is now set up' % op.split(bem_extra)[1])
+        logger.info('')
+    return bem
+
+
+@verbose
+def _prep_meg_channels(info, accurate=True, exclude=(), ignore_ref=False,
+                       elekta_defs=False, verbose=None):
+    """Prepare MEG coil definitions for forward calculation
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement information dictionary
+    accurate : bool
+        If true (default) then use `accurate` coil definitions (more
+        integration points)
+    exclude : list of str | str
+        List of channels to exclude. If 'bads', exclude channels in
+        info['bads']
+    ignore_ref : bool
+        If true, ignore compensation coils
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Returns
+    -------
+    megcoils : list of dict
+        Information for each prepped MEG coil
+    compcoils : list of dict
+        Information for each prepped MEG compensation coil
+    megnames : list of str
+        Name of each prepped MEG coil
+    meginfo : Info
+        Information subselected for just the set of MEG coils
+    """
+
+    accuracy = 'accurate' if accurate else 'normal'
+    info_extra = 'info'
+    meg_info = None
+    megnames, megcoils, compcoils = [], [], []
+
+    # Find MEG channels
+    picks = pick_types(info, meg=True, eeg=False, ref_meg=False,
+                       exclude=exclude)
+
+    # Make sure MEG coils exist
+    nmeg = len(picks)
+    if nmeg <= 0:
+        raise RuntimeError('Could not find any MEG channels')
+
+    # Get channel info and names for MEG channels
+    megchs = pick_info(info, picks)['chs']
+    megnames = [info['ch_names'][p] for p in picks]
+    logger.info('Read %3d MEG channels from %s'
+                % (len(picks), info_extra))
+
+    # Get MEG compensation channels
+    if not ignore_ref:
+        picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude)
+        ncomp = len(picks)
+        if ncomp > 0:
+            compchs = pick_info(info, picks)['chs']
+            logger.info('Read %3d MEG compensation channels from %s'
+                        % (ncomp, info_extra))
+            # We need to check to make sure these are NOT KIT refs
+            if _has_kit_refs(info, picks):
+                err = ('Cannot create forward solution with KIT reference '
+                       'channels. Consider using "ignore_ref=True" in '
+                       'calculation')
+                raise NotImplementedError(err)
+    else:
+        ncomp = 0
+
+    _print_coord_trans(info['dev_head_t'])
+
+    # Make info structure to allow making compensator later
+    ncomp_data = len(info['comps'])
+    ref_meg = not ignore_ref
+    picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=exclude)
+    meg_info = pick_info(info, picks) if nmeg > 0 else None
+
+    # Create coil descriptions with transformation to head or MRI frame
+    templates = _read_coil_defs(elekta_defs=elekta_defs)
+
+    megcoils = _create_meg_coils(megchs, accuracy, info['dev_head_t'],
+                                 templates)
+    if ncomp > 0:
+        logger.info('%d compensation data sets in %s' % (ncomp_data,
+                                                         info_extra))
+        compcoils = _create_meg_coils(compchs, 'normal', info['dev_head_t'],
+                                      templates)
+    logger.info('Head coordinate MEG coil definitions created.')
+
+    return megcoils, compcoils, megnames, meg_info
+
+
+@verbose
+def _prep_eeg_channels(info, exclude=(), verbose=None):
+    """Prepare EEG electrode definitions for forward calculation
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement information dictionary
+    exclude : list of str | str
+        List of channels to exclude. If 'bads', exclude channels in
+        info['bads']
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Returns
+    -------
+    eegels : list of dict
+        Information for each prepped EEG electrode
+    eegnames : list of str
+        Name of each prepped EEG electrode
+    """
+    eegnames, eegels = [], []
+    info_extra = 'info'
+
+    # Find EEG electrodes
+    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                       exclude=exclude)
+
+    # Make sure EEG electrodes exist
+    neeg = len(picks)
+    if neeg <= 0:
+        raise RuntimeError('Could not find any EEG channels')
+
+    # Get channel info and names for EEG channels
+    eegchs = pick_info(info, picks)['chs']
+    eegnames = [info['ch_names'][p] for p in picks]
+    logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra))
+
+    # Create EEG electrode descriptions
+    eegels = _create_eeg_els(eegchs)
+    logger.info('Head coordinate electrode definitions created.')
+
+    return eegels, eegnames
+
+
+@verbose
+def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs,
+                         bem_extra='', trans='', info_extra='',
+                         meg=True, eeg=True, ignore_ref=False, fname=None,
+                         overwrite=False, verbose=None):
+    """Helper to prepare for forward computation"""
+
+    # Read the source locations
+    logger.info('')
+    # let's make a copy in case we modify something
+    src = _ensure_src(src).copy()
+    nsource = sum(s['nuse'] for s in src)
+    if nsource == 0:
+        raise RuntimeError('No sources are active in these source spaces. '
+                           '"do_all" option should be used.')
+    logger.info('Read %d source spaces with a total of %d active source '
+                'locations' % (len(src), nsource))
+    # Delete some keys to clean up the source space:
+    for key in ['working_dir', 'command_line']:
+        if key in src.info:
+            del src.info[key]
+
+    # Read the MRI -> head coordinate transformation
+    logger.info('')
+    _print_coord_trans(mri_head_t)
+
+    # make a new dict with the relevant information
+    arg_list = [info_extra, trans, src, bem_extra, fname, meg, eeg,
+                mindist, overwrite, n_jobs, verbose]
+    cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list]))
+    mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
+    info = Info(nchan=info['nchan'], chs=info['chs'], comps=info['comps'],
+                ch_names=info['ch_names'], dev_head_t=info['dev_head_t'],
+                mri_file=trans, mri_id=mri_id, meas_file=info_extra,
+                meas_id=None, working_dir=os.getcwd(),
+                command_line=cmd, bads=info['bads'], mri_head_t=mri_head_t)
+    logger.info('')
+
+    megcoils, compcoils, megnames, meg_info = [], [], [], []
+    eegels, eegnames = [], []
+
+    if meg and len(pick_types(info, ref_meg=False, exclude=[])) > 0:
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, ignore_ref=ignore_ref)
+    if eeg and len(pick_types(info, meg=False, eeg=True, ref_meg=False,
+                              exclude=[])) > 0:
+        eegels, eegnames = _prep_eeg_channels(info)
+
+    # Check that some channels were found
+    if len(megcoils + eegels) == 0:
+        raise RuntimeError('No MEG or EEG channels found.')
+
+    # pick out final info
+    info = pick_info(info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False,
+                                      exclude=[]))
+
+    # Transform the source spaces into the appropriate coordinates
+    # (will either be HEAD or MRI)
+    for s in src:
+        transform_surface_to(s, 'head', mri_head_t)
+    logger.info('Source spaces are now in %s coordinates.'
+                % _coord_frame_name(s['coord_frame']))
+
+    # Prepare the BEM model
+    bem = _setup_bem(bem, bem_extra, len(eegnames), mri_head_t)
+
+    # Circumvent numerical problems by excluding points too close to the skull
+    if not bem['is_sphere']:
+        inner_skull = _bem_find_surface(bem, 'inner_skull')
+        _filter_source_spaces(inner_skull, mindist, mri_head_t, src, n_jobs)
+        logger.info('')
+
+    rr = np.concatenate([s['rr'][s['vertno']] for s in src])
+
+    # deal with free orientations:
+    source_nn = np.tile(np.eye(3), (len(rr), 1))
+    update_kwargs = dict(nchan=len(info['ch_names']), nsource=len(rr),
+                         info=info, src=src, source_nn=source_nn,
+                         source_rr=rr, surf_ori=False, mri_head_t=mri_head_t)
+    return megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, \
+        info, update_kwargs, bem
+
+
+@verbose
+def make_forward_solution(info, trans, src, bem, fname=None, meg=True,
+                          eeg=True, mindist=0.0, ignore_ref=False,
+                          overwrite=False, n_jobs=1, verbose=None):
+    """Calculate a forward solution for a subject
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info | str
+        If str, then it should be a filename to a Raw, Epochs, or Evoked
+        file with measurement information. Otherwise, it should be an
+        info dict (such as one from Raw, Epochs, or Evoked).
+    trans : dict | str | None
+        Either a transformation filename (usually made using mne_analyze)
+        or a transformation dict (usually opened using read_trans()).
+        If string, an ending of `.fif` or `.fif.gz` will be assumed to
+        be in FIF format, any other ending will be assumed to be a text
+        file with a 4x4 transformation matrix (like the `--trans` MNE-C
+        option). Can be None to use the identity transform.
+    src : str | instance of SourceSpaces
+        If string, should be a source space filename. Can also be an
+        instance of loaded or generated SourceSpaces.
+    bem : dict | str
+        Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
+        use, or a loaded sphere model (dict).
+    fname : str | None
+        Destination forward solution filename. If None, the solution
+        will not be saved.
+    meg : bool
+        If True (Default), include MEG computations.
+    eeg : bool
+        If True (Default), include EEG computations.
+    mindist : float
+        Minimum distance of sources from inner skull surface (in mm).
+    ignore_ref : bool
+        If True, do not include reference channels in compensation. This
+        option should be True for KIT files, since forward computation
+        with reference channels is not currently supported.
+    overwrite : bool
+        If True, the destination file (if it exists) will be overwritten.
+        If False (default), an error will be raised if the file exists.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : instance of Forward
+        The forward solution.
+
+    See Also
+    --------
+    do_forward_solution
+
+    Notes
+    -----
+    Some of the forward solution calculation options from the C code
+    (e.g., `--grad`, `--fixed`) are not implemented here. For those,
+    consider using the C command line tools or the Python wrapper
+    `do_forward_solution`.
+    """
+    # Currently not supported:
+    # 1. --grad option (gradients of the field, not used much)
+    # 2. --fixed option (can be computed post-hoc)
+    # 3. --mricoord option (probably not necessary)
+
+    # read the transformation from MRI to HEAD coordinates
+    # (could also be HEAD to MRI)
+    mri_head_t, trans = _get_mri_head_t(trans)
+    bem_extra = 'dict' if isinstance(bem, dict) else bem
+    if fname is not None and op.isfile(fname) and not overwrite:
+        raise IOError('file "%s" exists, consider using overwrite=True'
+                      % fname)
+    if not isinstance(info, (dict, string_types)):
+        raise TypeError('info should be a dict or string')
+    if isinstance(info, string_types):
+        info_extra = op.split(info)[1]
+        info = read_info(info, verbose=False)
+    else:
+        info_extra = 'info dict'
+
+    # Report the setup
+    logger.info('Source space                 : %s' % src)
+    logger.info('MRI -> head transform source : %s' % trans)
+    logger.info('Measurement data             : %s' % info_extra)
+    if isinstance(bem, dict) and bem['is_sphere']:
+        logger.info('Sphere model                 : origin at %s mm'
+                    % (bem['r0'],))
+        logger.info('Standard field computations')
+    else:
+        logger.info('BEM model                    : %s' % bem_extra)
+        logger.info('Accurate field computations')
+    logger.info('Do computations in %s coordinates',
+                _coord_frame_name(FIFF.FIFFV_COORD_HEAD))
+    logger.info('Free source orientations')
+    logger.info('Destination for the solution : %s' % fname)
+
+    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
+        update_kwargs, bem = _prepare_for_forward(
+            src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans,
+            info_extra, meg, eeg, ignore_ref, fname, overwrite)
+    del (src, mri_head_t, trans, info_extra, bem_extra, mindist,
+         meg, eeg, ignore_ref)
+
+    # Time to do the heavy lifting: MEG first, then EEG
+    coil_types = ['meg', 'eeg']
+    coils = [megcoils, eegels]
+    ccoils = [compcoils, None]
+    infos = [meg_info, None]
+    megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils,
+                                       infos, coil_types, n_jobs)
+
+    # merge forwards
+    fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames),
+                              _to_forward_dict(eegfwd, eegnames),
+                              verbose=False)
+    logger.info('')
+
+    # Don't transform the source spaces back into MRI coordinates (which is
+    # done in the C code) because mne-python assumes forward solution source
+    # spaces are in head coords.
+    fwd.update(**update_kwargs)
+    if fname is not None:
+        logger.info('writing %s...', fname)
+        write_forward_solution(fname, fwd, overwrite, verbose=False)
+
+    logger.info('Finished.')
+    return fwd
+
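+# A hypothetical end-to-end sketch (all file names are placeholders):
+#
+#     fwd = make_forward_solution('sample_raw.fif', 'sample-trans.fif',
+#                                 'sample-oct-6-src.fif',
+#                                 'sample-5120-bem-sol.fif',
+#                                 meg=True, eeg=True, mindist=5.0)
+#     print(fwd)  # <Forward | MEG channels: ... | EEG channels: ...>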
+
+def _to_forward_dict(fwd, names, fwd_grad=None,
+                     coord_frame=FIFF.FIFFV_COORD_HEAD,
+                     source_ori=FIFF.FIFFV_MNE_FREE_ORI):
+    """Convert forward solution matrices to dicts"""
+    assert names is not None
+    if len(fwd) == 0:
+        return None
+    sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0],
+               row_names=names, col_names=[])
+    fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
+                  coord_frame=coord_frame, sol_grad=None,
+                  nchan=sol['nrow'], _orig_source_ori=source_ori,
+                  _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
+    if fwd_grad is not None:
+        sol_grad = dict(data=fwd_grad.T, nrow=fwd_grad.shape[1],
+                        ncol=fwd_grad.shape[0], row_names=names,
+                        col_names=[])
+        fwd.update(dict(sol_grad=sol_grad),
+                   _orig_sol_grad=sol_grad['data'].copy())
+    return fwd
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/forward.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/forward.py
new file mode 100644
index 0000000..c937c5b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/forward.py
@@ -0,0 +1,1670 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six import string_types
+from time import time
+import warnings
+from copy import deepcopy
+import re
+
+import numpy as np
+from scipy import linalg, sparse
+
+import shutil
+import os
+from os import path as op
+import tempfile
+
+from ..fixes import sparse_block_diag
+from ..io import RawArray
+from ..io.constants import FIFF
+from ..io.open import fiff_open
+from ..io.tree import dir_tree_find
+from ..io.tag import find_tag, read_tag
+from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
+                         write_named_matrix)
+from ..io.meas_info import read_bad_channels, Info
+from ..io.pick import (pick_channels_forward, pick_info, pick_channels,
+                       pick_types)
+from ..io.write import (write_int, start_block, end_block,
+                        write_coord_trans, write_ch_info, write_name_list,
+                        write_string, start_file, end_file, write_id)
+from ..io.base import _BaseRaw
+from ..evoked import Evoked, write_evokeds, EvokedArray
+from ..epochs import Epochs
+from ..source_space import (_read_source_spaces_from_tree,
+                            find_source_space_hemi,
+                            _write_source_spaces_to_fid)
+from ..source_estimate import VolSourceEstimate
+from ..transforms import (transform_surface_to, invert_transform,
+                          write_trans)
+from ..utils import (_check_fname, get_subjects_dir, has_mne_c,
+                     run_subprocess, check_fname, logger, verbose)
+
+
+class Forward(dict):
+    """Forward class to represent info from forward solution
+    """
+
+    def __repr__(self):
+        """Summarize forward info instead of printing all"""
+
+        entr = '<Forward'
+
+        nchan = len(pick_types(self['info'], meg=True, eeg=False))
+        entr += ' | ' + 'MEG channels: %d' % nchan
+        nchan = len(pick_types(self['info'], meg=False, eeg=True))
+        entr += ' | ' + 'EEG channels: %d' % nchan
+
+        src_types = np.array([src['type'] for src in self['src']])
+        if (src_types == 'surf').all():
+            entr += (' | Source space: Surface with %d vertices'
+                     % self['nsource'])
+        elif (src_types == 'vol').all():
+            entr += (' | Source space: Volume with %d grid points'
+                     % self['nsource'])
+        elif (src_types == 'discrete').all():
+            entr += (' | Source space: Discrete with %d dipoles'
+                     % self['nsource'])
+        else:
+            count_string = ''
+            if (src_types == 'surf').any():
+                count_string += '%d surface, ' % (src_types == 'surf').sum()
+            if (src_types == 'vol').any():
+                count_string += '%d volume, ' % (src_types == 'vol').sum()
+            if (src_types == 'discrete').any():
+                count_string += '%d discrete, ' \
+                                % (src_types == 'discrete').sum()
+            count_string = count_string.rstrip(', ')
+            entr += (' | Source space: Mixed (%s) with %d vertices'
+                     % (count_string, self['nsource']))
+
+        if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
+            entr += (' | Source orientation: Unknown')
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
+            entr += (' | Source orientation: Fixed')
+        elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            entr += (' | Source orientation: Free')
+
+        entr += '>'
+
+        return entr
+
+
+def prepare_bem_model(bem, sol_fname=None, method='linear'):
+    """Wrapper for the mne_prepare_bem_model command line utility
+
+    Parameters
+    ----------
+    bem : str
+        The name of the file containing the triangulations of the BEM surfaces
+        and the conductivities of the compartments. The standard ending for
+        this file is -bem.fif and it is produced either with the utility
+        mne_surf2bem or the convenience script mne_setup_forward_model.
+    sol_fname : None | str
+        The output file. None (the default) will employ the standard naming
+        scheme. To conform with the standard naming conventions the filename
+        should start with the subject name and end in "-bem-sol.fif".
+    method : 'linear' | 'constant'
+        The BEM approach.
+    """
+    cmd = ['mne_prepare_bem_model', '--bem', bem, '--method', method]
+    if sol_fname is not None:
+        cmd.extend(('--sol', sol_fname))
+    run_subprocess(cmd)
+
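+# Hypothetical usage sketch for the wrapper above (the file name is made
+# up, and the MNE-C command-line tools must be on the PATH):
+#
+#     >>> prepare_bem_model('sample-5120-5120-5120-bem.fif')  # doctest: +SKIP
+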
+
+def _block_diag(A, n):
+    """Constructs a block diagonal from a packed structure
+
+    Try it on a small matrix to see what it does; a commented sketch
+    follows this function.
+
+    If A is not sparse, then a sparse block diagonal "bd" is returned,
+    diagonalized from the elements in "A". "A" is ma x na, comprising
+    bdn=(na/"n") blocks of submatrices. Each submatrix is ma x "n", and
+    these submatrices are placed down the diagonal of the matrix.
+
+    If A is already sparse, then the operation is reversed, yielding a
+    block row matrix, where each set of n columns corresponds to a block
+    element from the block diagonal.
+
+    Parameters
+    ----------
+    A : array
+        The matrix
+    n : int
+        The block size
+    Returns
+    -------
+    bd : sparse matrix
+        The block diagonal matrix
+    """
+    if sparse.issparse(A):  # then make block sparse
+        raise NotImplementedError('sparse reversal not implemented yet')
+    ma, na = A.shape
+    bdn = na // int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError('Width of matrix must be a multiple of n')
+
+    tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int)[:, None]
+    jj = jj.T.ravel()  # column indices for each sparse bd
+
+    bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
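+# A commented sketch of what ``_block_diag`` produces (illustration only,
+# not executed at import time):
+#
+#     >>> A = np.arange(12.).reshape(2, 6)  # two 2 x 3 submatrices
+#     >>> bd = _block_diag(A, 3)
+#     >>> bd.shape
+#     (4, 6)
+#
+# Here bd[0:2, 0:3] equals A[:, 0:3] and bd[2:4, 3:6] equals A[:, 3:6],
+# i.e. each submatrix is placed down the diagonal.
+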
+
+def _inv_block_diag(A, n):
+    """Constructs an inverse block diagonal from a packed structure
+
+    Try it on a small matrix to see what it does.
+
+    "A" is ma x na, comprising bdn=(na/"n") blocks of submatrices.
+    Each submatrix is ma x "n", and the inverses of these submatrices
+    are placed down the diagonal of the matrix.
+
+    Parameters
+    ----------
+    A : array
+        The matrix.
+    n : int
+        The block size.
+    Returns
+    -------
+    bd : sparse matrix
+        The block diagonal matrix.
+    """
+    ma, na = A.shape
+    bdn = na // int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError('Width of matrix must be a multiple of n')
+
+    # invert each n-column sub-block of a copy of A (requires ma == n)
+    A = A.copy()
+    for start in range(0, na, n):
+        A[:, start:start + n] = linalg.inv(A[:, start:start + n])
+
+    tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int)[:, None]
+    jj = jj.T.ravel()  # column indices for each sparse bd
+
+    bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
+
+def _get_tag_int(fid, node, name, id_):
+    """Helper to check we have an appropriate tag"""
+    tag = find_tag(fid, node, id_)
+    if tag is None:
+        fid.close()
+        raise ValueError(name + ' tag not found')
+    return int(tag.data)
+
+
+def _read_one(fid, node):
+    """Read all interesting stuff for one forward solution
+    """
+    # This function assumes the fid is open as a context manager
+    if node is None:
+        return None
+
+    one = Forward()
+    one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
+                                     FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+    one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
+                                      FIFF.FIFF_MNE_COORD_FRAME)
+    one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
+                                  FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
+                                FIFF.FIFF_NCHAN)
+    try:
+        one['sol'] = _read_named_matrix(fid, node,
+                                        FIFF.FIFF_MNE_FORWARD_SOLUTION)
+        one['sol'] = _transpose_named_matrix(one['sol'], copy=False)
+        one['_orig_sol'] = one['sol']['data'].copy()
+    except Exception:
+        logger.error('Forward solution data not found')
+        raise
+
+    try:
+        fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
+        one['sol_grad'] = _read_named_matrix(fid, node, fwd_type)
+        one['sol_grad'] = _transpose_named_matrix(one['sol_grad'], copy=False)
+        one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
+    except Exception:
+        one['sol_grad'] = None
+
+    if one['sol']['data'].shape[0] != one['nchan'] or \
+            (one['sol']['data'].shape[1] != one['nsource'] and
+             one['sol']['data'].shape[1] != 3 * one['nsource']):
+        raise ValueError('Forward solution matrix has wrong dimensions')
+
+    if one['sol_grad'] is not None:
+        if one['sol_grad']['data'].shape[0] != one['nchan'] or \
+                (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
+                 one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
+            raise ValueError('Forward solution gradient matrix has '
+                             'wrong dimensions')
+
+    return one
+
+
+def _read_forward_meas_info(tree, fid):
+    """Read light measurement info from forward operator
+
+    Parameters
+    ----------
+    tree : tree
+        FIF tree structure.
+    fid : file id
+        The file id.
+
+    Returns
+    -------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    """
+    # This function assumes fid is being used as a context manager
+    info = Info()
+
+    # Information from the MRI file
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        raise ValueError('No parent MRI information found in operator')
+    parent_mri = parent_mri[0]
+
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
+    info['mri_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
+    info['mri_id'] = tag.data if tag is not None else None
+
+    # Information from the MEG file
+    parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    if len(parent_meg) == 0:
+        raise ValueError('No parent MEG information found in operator')
+    parent_meg = parent_meg[0]
+
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
+    info['meas_file'] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
+    info['meas_id'] = tag.data if tag is not None else None
+
+    # Add channel information
+    chs = list()
+    for k in range(parent_meg['nent']):
+        kind = parent_meg['directory'][k].kind
+        pos = parent_meg['directory'][k].pos
+        if kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+    info['chs'] = chs
+
+    info['ch_names'] = [c['ch_name'] for c in chs]
+    info['nchan'] = len(chs)
+
+    #   Get the MRI <-> head coordinate transformation
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    coord_head = FIFF.FIFFV_COORD_HEAD
+    coord_mri = FIFF.FIFFV_COORD_MRI
+    coord_device = FIFF.FIFFV_COORD_DEVICE
+    coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+    if tag is None:
+        raise ValueError('MRI/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_mri and cand['to'] == coord_head:
+        info['mri_head_t'] = cand
+    else:
+        raise ValueError('MRI/head coordinate transformation not found')
+
+    #   Get the MEG device <-> head coordinate transformation
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        raise ValueError('MEG/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_device and cand['to'] == coord_head:
+        info['dev_head_t'] = cand
+    elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
+        info['ctf_head_t'] = cand
+    else:
+        raise ValueError('MEG/head coordinate transformation not found')
+
+    info['bads'] = read_bad_channels(fid, parent_meg)
+    # clean up our bad list, old versions could have non-existent bads
+    info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]
+
+    # Check if a custom reference has been applied
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_CUSTOM_REF)
+    info['custom_ref_applied'] = bool(tag.data) if tag is not None else False
+    info._check_consistency()
+    return info
+
+
+def _subject_from_forward(forward):
+    """Get subject id from inverse operator"""
+    return forward['src'][0].get('subject_his_id', None)
+
+
+@verbose
+def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):
+    """Merge loaded MEG and EEG forward dicts into one dict"""
+    if megfwd is not None and eegfwd is not None:
+        if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or
+                megfwd['source_ori'] != eegfwd['source_ori'] or
+                megfwd['nsource'] != eegfwd['nsource'] or
+                megfwd['coord_frame'] != eegfwd['coord_frame']):
+            raise ValueError('The MEG and EEG forward solutions do not match')
+
+        fwd = megfwd
+        fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]
+        fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]
+        fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']
+
+        fwd['sol']['row_names'] = (fwd['sol']['row_names'] +
+                                   eegfwd['sol']['row_names'])
+        if fwd['sol_grad'] is not None:
+            fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],
+                                            eegfwd['sol_grad']['data']]
+            fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],
+                                          eegfwd['_orig_sol_grad']]
+            fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +
+                                       eegfwd['sol_grad']['nrow'])
+            fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +
+                                            eegfwd['sol_grad']['row_names'])
+
+        fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']
+        logger.info('    MEG and EEG forward solutions combined')
+    elif megfwd is not None:
+        fwd = megfwd
+    else:
+        fwd = eegfwd
+    return fwd
+
+
+@verbose
+def read_forward_solution(fname, force_fixed=False, surf_ori=False,
+                          include=[], exclude=[], verbose=None):
+    """Read a forward solution a.k.a. lead field
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -fwd.fif or -fwd.fif.gz.
+    force_fixed : bool, optional (default False)
+        Force fixed source orientation mode?
+    surf_ori : bool, optional (default False)
+        Use surface-based source coordinate system? Note that force_fixed=True
+        implies surf_ori=True.
+    include : list, optional
+        List of names of channels to include. If empty all channels
+        are included.
+    exclude : list, optional
+        List of names of channels to exclude. If empty, no channels
+        are excluded.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : instance of Forward
+        The forward solution.
+
+    See Also
+    --------
+    write_forward_solution, make_forward_solution
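+
+    Examples
+    --------
+    A minimal usage sketch (the file name is hypothetical)::
+
+        >>> fwd = read_forward_solution('sample-fwd.fif',
+        ...                             surf_ori=True)  # doctest: +SKIP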
+    """
+    check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
+
+    #   Open the file, create directory
+    logger.info('Reading forward solution from %s...' % fname)
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        #   Find all forward solutions
+        fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        if len(fwds) == 0:
+            raise ValueError('No forward solutions in %s' % fname)
+
+        #   Parent MRI data
+        parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            raise ValueError('No parent MRI information in %s' % fname)
+        parent_mri = parent_mri[0]
+
+        src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)
+        for s in src:
+            s['id'] = find_source_space_hemi(s)
+
+        fwd = None
+
+        #   Locate and read the forward solutions
+        megnode = None
+        eegnode = None
+        for k in range(len(fwds)):
+            tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
+            if tag is None:
+                raise ValueError('Methods not listed for one of the forward '
+                                 'solutions')
+
+            if tag.data == FIFF.FIFFV_MNE_MEG:
+                megnode = fwds[k]
+            elif tag.data == FIFF.FIFFV_MNE_EEG:
+                eegnode = fwds[k]
+
+        megfwd = _read_one(fid, megnode)
+        if megfwd is not None:
+            if is_fixed_orient(megfwd):
+                ori = 'fixed'
+            else:
+                ori = 'free'
+            logger.info('    Read MEG forward solution (%d sources, '
+                        '%d channels, %s orientations)'
+                        % (megfwd['nsource'], megfwd['nchan'], ori))
+
+        eegfwd = _read_one(fid, eegnode)
+        if eegfwd is not None:
+            if is_fixed_orient(eegfwd):
+                ori = 'fixed'
+            else:
+                ori = 'free'
+            logger.info('    Read EEG forward solution (%d sources, '
+                        '%d channels, %s orientations)'
+                        % (eegfwd['nsource'], eegfwd['nchan'], ori))
+
+        fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)
+
+        #   Get the MRI <-> head coordinate transformation
+        tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+        if tag is None:
+            raise ValueError('MRI/head coordinate transformation not found')
+        mri_head_t = tag.data
+        if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
+                mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
+            mri_head_t = invert_transform(mri_head_t)
+            if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
+                    mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
+                fid.close()
+                raise ValueError('MRI/head coordinate transformation not '
+                                 'found')
+        fwd['mri_head_t'] = mri_head_t
+
+        #
+        # get parent MEG info
+        #
+        fwd['info'] = _read_forward_meas_info(tree, fid)
+
+        # MNE environment
+        parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+        if len(parent_env) > 0:
+            parent_env = parent_env[0]
+            tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
+            if tag is not None:
+                fwd['info']['working_dir'] = tag.data
+            tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
+            if tag is not None:
+                fwd['info']['command_line'] = tag.data
+
+    #   Transform the source spaces to the correct coordinate frame
+    #   if necessary
+
+    # Make sure forward solution is in either the MRI or HEAD coordinate frame
+    if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):
+        raise ValueError('Only forward solutions computed in MRI or head '
+                         'coordinates are acceptable')
+
+    nuse = 0
+
+    # Transform each source space to the HEAD or MRI coordinate frame,
+    # depending on the coordinate frame of the forward solution
+    # NOTE: the function transform_surface_to will also work on discrete and
+    # volume sources
+    for s in src:
+        try:
+            s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
+        except Exception as inst:
+            raise ValueError('Could not transform source space (%s)' % inst)
+
+        nuse += s['nuse']
+
+    # Make sure the number of sources match after transformation
+    if nuse != fwd['nsource']:
+        raise ValueError('Source spaces do not match the forward solution.')
+
+    logger.info('    Source spaces transformed to the forward solution '
+                'coordinate frame')
+    fwd['src'] = src
+
+    #   Handle the source locations and orientations
+    fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]
+                                       for ss in src], axis=0)
+
+    # deal with transformations, storing orig copies so transforms can be done
+    # as necessary later
+    fwd['_orig_source_ori'] = fwd['source_ori']
+    convert_forward_solution(fwd, surf_ori, force_fixed, copy=False)
+    fwd = pick_channels_forward(fwd, include=include, exclude=exclude)
+
+    return Forward(fwd)
+
+
+@verbose
+def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
+                             copy=True, verbose=None):
+    """Convert forward solution between different source orientations
+
+    Parameters
+    ----------
+    fwd : dict
+        The forward solution to modify.
+    surf_ori : bool, optional (default False)
+        Use surface-based source coordinate system? Note that force_fixed=True
+        implies surf_ori=True.
+    force_fixed : bool, optional (default False)
+        Force fixed source orientation mode?
+    copy : bool, optional (default True)
+        If False, operation will be done in-place (modifying the input).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : dict
+        The modified forward solution.
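+
+    Examples
+    --------
+    A sketch, assuming ``fwd`` has already been loaded with
+    read_forward_solution::
+
+        >>> fwd = convert_forward_solution(fwd,
+        ...                                surf_ori=True)  # doctest: +SKIP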
+    """
+    if copy is True:
+        fwd = deepcopy(fwd)
+
+    # We need to change these entries (only):
+    # 1. source_nn
+    # 2. sol['data']
+    # 3. sol['ncol']
+    # 4. sol_grad['data']
+    # 5. sol_grad['ncol']
+    # 6. source_ori
+    if is_fixed_orient(fwd, orig=True) or force_fixed:  # Fixed
+        nuse = 0
+        fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]
+                                           for s in fwd['src']], axis=0)
+
+        #   Modify the forward solution for fixed source orientations
+        if not is_fixed_orient(fwd, orig=True):
+            logger.info('    Changing to fixed-orientation forward '
+                        'solution with surface-based source orientations...')
+            fix_rot = _block_diag(fwd['source_nn'].T, 1)
+            # newer versions of numpy require explicit casting here, so *= no
+            # longer works
+            fwd['sol']['data'] = (fwd['_orig_sol'] *
+                                  fix_rot).astype('float32')
+            fwd['sol']['ncol'] = fwd['nsource']
+            fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
+
+            if fwd['sol_grad'] is not None:
+                x = sparse_block_diag([fix_rot] * 3)
+                fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x  # dot prod
+                fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
+            logger.info('    [done]')
+        fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
+        fwd['surf_ori'] = True
+    elif surf_ori:  # Free, surf-oriented
+        #   Rotate the local source coordinate systems
+        nuse_total = sum([s['nuse'] for s in fwd['src']])
+        fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
+        logger.info('    Converting to surface-based source orientations...')
+        if fwd['src'][0]['patch_inds'] is not None:
+            use_ave_nn = True
+            logger.info('    Average patch normals will be employed in the '
+                        'rotation to the local surface coordinates....')
+        else:
+            use_ave_nn = False
+
+        #   Actually determine the source orientations
+        nuse = 0
+        pp = 0
+        for s in fwd['src']:
+            for p in range(s['nuse']):
+                #  Project out the surface normal and compute SVD
+                if use_ave_nn is True:
+                    nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]
+                    nn = np.sum(nn, axis=0)[:, np.newaxis]
+                    nn /= linalg.norm(nn)
+                else:
+                    nn = s['nn'][s['vertno'][p], :][:, np.newaxis]
+                U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
+                #  Make sure that ez is in the direction of nn
+                if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
+                    U *= -1.0
+                fwd['source_nn'][pp:pp + 3, :] = U.T
+                pp += 3
+            nuse += s['nuse']
+
+        #   Rotate the solution components as well
+        surf_rot = _block_diag(fwd['source_nn'].T, 3)
+        fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot
+        fwd['sol']['ncol'] = 3 * fwd['nsource']
+        if fwd['sol_grad'] is not None:
+            x = sparse_block_diag([surf_rot] * 3)
+            fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x  # dot prod
+            fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
+        logger.info('[done]')
+        fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
+        fwd['surf_ori'] = True
+    else:  # Free, cartesian
+        logger.info('    Cartesian source orientations...')
+        fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
+        fwd['sol']['data'] = fwd['_orig_sol'].copy()
+        fwd['sol']['ncol'] = 3 * fwd['nsource']
+        if fwd['sol_grad'] is not None:
+            fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()
+            fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
+        fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
+        fwd['surf_ori'] = False
+        logger.info('[done]')
+
+    return fwd
+
+
+@verbose
+def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
+    """Write forward solution to a file
+
+    Parameters
+    ----------
+    fname : str
+        File name to save the forward solution to. It should end with -fwd.fif
+        or -fwd.fif.gz.
+    fwd : dict
+        Forward solution.
+    overwrite : bool
+        If True, overwrite destination file (if it exists).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_forward_solution
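+
+    Examples
+    --------
+    A sketch (the file name is hypothetical; ``fwd`` is assumed to be
+    loaded already)::
+
+        >>> write_forward_solution('sample-fwd.fif', fwd,
+        ...                        overwrite=True)  # doctest: +SKIP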
+    """
+    check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
+
+    # check for file existence
+    _check_fname(fname, overwrite)
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    #
+    # MNE env
+    #
+    start_block(fid, FIFF.FIFFB_MNE_ENV)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    data = fwd['info'].get('working_dir', None)
+    if data is not None:
+        write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
+    data = fwd['info'].get('command_line', None)
+    if data is not None:
+        write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
+    end_block(fid, FIFF.FIFFB_MNE_ENV)
+
+    #
+    # Information from the MRI file
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
+    if fwd['info']['mri_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
+    # store the MRI to HEAD transform in MRI file
+    write_coord_trans(fid, fwd['info']['mri_head_t'])
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    # write measurement info
+    write_forward_meas_info(fid, fwd['info'])
+
+    # invert our original source space transform
+    src = list()
+    for s in fwd['src']:
+        s = deepcopy(s)
+        try:
+            # returns source space to original coordinate frame
+            # usually MRI
+            s = transform_surface_to(s, fwd['mri_head_t']['from'],
+                                     fwd['mri_head_t'])
+        except Exception as inst:
+            raise ValueError('Could not transform source space (%s)' % inst)
+        src.append(s)
+
+    #
+    # Write the source spaces (again)
+    #
+    _write_source_spaces_to_fid(fid, src)
+    n_vert = sum([ss['nuse'] for ss in src])
+    n_col = fwd['sol']['data'].shape[1]
+    if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
+        assert n_col == n_vert
+    else:
+        assert n_col == 3 * n_vert
+
+    # Undo surf_ori rotation
+    sol = fwd['sol']['data']
+    if fwd['sol_grad'] is not None:
+        sol_grad = fwd['sol_grad']['data']
+    else:
+        sol_grad = None
+
+    if fwd['surf_ori'] is True:
+        inv_rot = _inv_block_diag(fwd['source_nn'].T, 3)
+        sol = sol * inv_rot
+        if sol_grad is not None:
+            sol_grad = sol_grad * sparse_block_diag([inv_rot] * 3)  # dot prod
+
+    #
+    # MEG forward solution
+    #
+    picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,
+                           exclude=[])
+    picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,
+                           exclude=[])
+    n_meg = len(picks_meg)
+    n_eeg = len(picks_eeg)
+    row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]
+    row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]
+
+    if n_meg > 0:
+        meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,
+                            row_names=row_names_meg, col_names=[])
+        meg_solution = _transpose_named_matrix(meg_solution, copy=False)
+        start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
+        write_int(fid, FIFF.FIFF_NCHAN, n_meg)
+        write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
+        if sol_grad is not None:
+            meg_solution_grad = dict(data=sol_grad[picks_meg],
+                                     nrow=n_meg, ncol=n_col * 3,
+                                     row_names=row_names_meg, col_names=[])
+            meg_solution_grad = _transpose_named_matrix(meg_solution_grad,
+                                                        copy=False)
+            write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
+                               meg_solution_grad)
+        end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+
+    #
+    #  EEG forward solution
+    #
+    if n_eeg > 0:
+        eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,
+                            row_names=row_names_eeg, col_names=[])
+        eeg_solution = _transpose_named_matrix(eeg_solution, copy=False)
+        start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd['source_ori'])
+        write_int(fid, FIFF.FIFF_NCHAN, n_eeg)
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)
+        write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
+        if sol_grad is not None:
+            eeg_solution_grad = dict(data=sol_grad[picks_eeg],
+                                     nrow=n_eeg, ncol=n_col * 3,
+                                     row_names=row_names_eeg, col_names=[])
+            eeg_solution_grad = _transpose_named_matrix(eeg_solution_grad,
+                                                        copy=False)
+            write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
+                               eeg_solution_grad)
+        end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
+
+def _to_fixed_ori(forward):
+    """Helper to convert the forward solution to fixed ori from free"""
+    if not forward['surf_ori'] or is_fixed_orient(forward):
+        raise ValueError('Only surface-oriented, free-orientation forward '
+                         'solutions can be converted to fixed orientation')
+    forward['sol']['data'] = forward['sol']['data'][:, 2::3]
+    forward['sol']['ncol'] = forward['sol']['ncol'] // 3
+    forward['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
+    logger.info('    Converted the forward solution into the '
+                'fixed-orientation mode.')
+    return forward
+
+
+def is_fixed_orient(forward, orig=False):
+    """Has forward operator fixed orientation?
+    """
+    if orig:  # if we want to know about the original version
+        fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
+    else:  # most of the time we want to know about the current version
+        fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
+    return fixed_ori
+
+
+def write_forward_meas_info(fid, info):
+    """Write measurement info stored in forward solution
+
+    Parameters
+    ----------
+    fid : file id
+        The file id
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    """
+    info._check_consistency()
+    #
+    # Information from the MEG file
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+    # get transformation from CTF and DEVICE to HEAD coordinate frame
+    meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
+    if meg_head_t is None:
+        fid.close()
+        raise ValueError('Head<-->sensor transform not found')
+    write_coord_trans(fid, meg_head_t)
+
+    if 'chs' in info:
+        #  Channel information
+        write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))
+        for k, c in enumerate(info['chs']):
+            #   Scan numbers may have been messed up
+            c = deepcopy(c)
+            c['scanno'] = k + 1
+            write_ch_info(fid, c)
+    if 'bads' in info and len(info['bads']) > 0:
+        #   Bad channels
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+
+
+@verbose
+def compute_orient_prior(forward, loose=0.2, verbose=None):
+    """Compute orientation prior
+
+    Parameters
+    ----------
+    forward : dict
+        Forward operator.
+    loose : float in [0, 1] or None
+        The loose orientation parameter.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    orient_prior : array
+        Orientation priors.
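+
+    Examples
+    --------
+    Illustrative only: with ``loose=0.2`` on a free-orientation,
+    surface-oriented operator, the two tangential components of each
+    source get a prior of 0.2 and the surface-normal component keeps 1.0::
+
+        >>> prior = compute_orient_prior(fwd, loose=0.2)  # doctest: +SKIP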
+    """
+    is_fixed_ori = is_fixed_orient(forward)
+    n_sources = forward['sol']['data'].shape[1]
+
+    if loose is not None:
+        if not (0 <= loose <= 1):
+            raise ValueError('loose value should be between 0 and 1 '
+                             '(inclusive), or None to not use loose '
+                             'orientations.')
+
+        if loose < 1 and not forward['surf_ori']:
+            raise ValueError('Forward operator is not oriented in surface '
+                             'coordinates. loose parameter should be None '
+                             'not %s.' % loose)
+
+        if is_fixed_ori:
+            warnings.warn('Ignoring loose parameter for a forward operator '
+                          'with fixed orientation.')
+
+    orient_prior = np.ones(n_sources, dtype=np.float)
+    if (not is_fixed_ori) and (loose is not None) and (loose < 1):
+        logger.info('Applying loose dipole orientations. Loose value '
+                    'of %s.' % loose)
+        orient_prior[np.mod(np.arange(n_sources), 3) != 2] *= loose
+
+    return orient_prior
+
+
+def _restrict_gain_matrix(G, info):
+    """Restrict gain matrix entries for optimal depth weighting"""
+    # Figure out which ones have been used
+    if not (len(info['chs']) == G.shape[0]):
+        raise ValueError("G.shape[0] and length of info['chs'] do not match: "
+                         "%d != %d" % (G.shape[0], len(info['chs'])))
+    sel = pick_types(info, meg='grad', ref_meg=False, exclude=[])
+    if len(sel) > 0:
+        G = G[sel]
+        logger.info('    %d planar channels' % len(sel))
+    else:
+        sel = pick_types(info, meg='mag', ref_meg=False, exclude=[])
+        if len(sel) > 0:
+            G = G[sel]
+            logger.info('    %d magnetometer or axial gradiometer '
+                        'channels' % len(sel))
+        else:
+            sel = pick_types(info, meg=False, eeg=True, exclude=[])
+            if len(sel) > 0:
+                G = G[sel]
+                logger.info('    %d EEG channels' % len(sel))
+            else:
+                logger.warning('Could not find MEG or EEG channels')
+    return G
+
+
+def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
+                        patch_areas=None, limit_depth_chs=False):
+    """Compute weighting for depth prior
+    """
+    logger.info('Creating the depth weighting matrix...')
+
+    # If possible, pick best depth-weighting channels
+    if limit_depth_chs is True:
+        G = _restrict_gain_matrix(G, gain_info)
+
+    # Compute the gain matrix
+    if is_fixed_ori:
+        d = np.sum(G ** 2, axis=0)
+    else:
+        n_pos = G.shape[1] // 3
+        d = np.zeros(n_pos)
+        for k in range(n_pos):
+            Gk = G[:, 3 * k:3 * (k + 1)]
+            d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
+
+    # XXX Currently the fwd solns never have "patch_areas" defined
+    if patch_areas is not None:
+        d /= patch_areas ** 2
+        logger.info('    Patch areas taken into account in the depth '
+                    'weighting')
+
+    w = 1.0 / d
+    ws = np.sort(w)
+    weight_limit = limit ** 2
+    if limit_depth_chs is False:
+        # match old mne-python behavior
+        ind = np.argmin(ws)
+        n_limit = ind
+        limit = ws[ind] * weight_limit
+    else:
+        # match C code behavior
+        limit = ws[-1]
+        n_limit = len(d)
+        if ws[-1] > weight_limit * ws[0]:
+            ind = np.where(ws > weight_limit * ws[0])[0][0]
+            limit = ws[ind]
+            n_limit = ind
+
+    logger.info('    limit = %d/%d = %f'
+                % (n_limit + 1, len(d),
+                   np.sqrt(limit / ws[0])))
+    scale = 1.0 / limit
+    logger.info('    scale = %g exp = %g' % (scale, exp))
+    wpp = np.minimum(w / limit, 1) ** exp
+
+    depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)
+
+    return depth_prior
+
+
+def _stc_src_sel(src, stc):
+    """ Select the vertex indices of a source space using a source estimate
+    """
+    if isinstance(stc, VolSourceEstimate):
+        vertices = [stc.vertices]
+    else:
+        vertices = stc.vertices
+    if not len(src) == len(vertices):
+        raise RuntimeError('Mismatch between number of source spaces (%s) and '
+                           'STC vertices (%s)' % (len(src), len(vertices)))
+    src_sels = []
+    offset = 0
+    for s, v in zip(src, vertices):
+        src_sel = np.intersect1d(s['vertno'], v)
+        src_sel = np.searchsorted(s['vertno'], src_sel)
+        src_sels.append(src_sel + offset)
+        offset += len(s['vertno'])
+    src_sel = np.concatenate(src_sels)
+    return src_sel
+
+
+def _fill_measurement_info(info, fwd, sfreq):
+    """ Fill the measurement info of a Raw or Evoked object
+    """
+    sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])
+    info = pick_info(info, sel)
+    info['bads'] = []
+
+    info['filename'] = None
+    # this is probably correct based on what's done in meas_info.py...
+    info['meas_id'] = fwd['info']['meas_id']
+    info['file_id'] = info['meas_id']
+
+    now = time()
+    sec = np.floor(now)
+    usec = 1e6 * (now - sec)
+
+    info['meas_date'] = np.array([sec, usec], dtype=np.int32)
+    info['highpass'] = 0.0
+    info['lowpass'] = sfreq / 2.0
+    info['sfreq'] = sfreq
+    info['projs'] = []
+
+    return info
+
+
+@verbose
+def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
+    """ Apply forward model and return data, times, ch_names
+    """
+    if not is_fixed_orient(fwd):
+        raise ValueError('Only fixed-orientation forward operators are '
+                         'supported.')
+
+    if np.all(stc.data > 0):
+        warnings.warn('Source estimate only contains currents with positive '
+                      'values. Use pick_ori="normal" when computing the '
+                      'inverse to obtain signed currents, not current '
+                      'magnitudes.')
+
+    max_cur = np.max(np.abs(stc.data))
+    if max_cur > 1e-7:  # 100 nAm threshold for warning
+        warnings.warn('The maximum current magnitude is %0.1f nAm, which is '
+                      'very large. Are you trying to apply the forward model '
+                      'to dSPM values? The result will only be correct if '
+                      'currents are used.' % (1e9 * max_cur))
+
+    src_sel = _stc_src_sel(fwd['src'], stc)
+    if isinstance(stc, VolSourceEstimate):
+        n_src = len(stc.vertices)
+    else:
+        n_src = sum([len(v) for v in stc.vertices])
+    if len(src_sel) != n_src:
+        raise RuntimeError('Only %i of %i SourceEstimate vertices found in '
+                           'fwd' % (len(src_sel), n_src))
+
+    gain = fwd['sol']['data'][:, src_sel]
+
+    logger.info('Projecting source estimate to sensor space...')
+    data = np.dot(gain, stc.data[:, start:stop])
+    logger.info('[done]')
+
+    times = deepcopy(stc.times[start:stop])
+
+    return data, times
+
+
+@verbose
+def apply_forward(fwd, stc, info=None, start=None, stop=None,
+                  verbose=None, evoked_template=None):
+    """
+    Project source space currents to sensor space using a forward operator.
+
+    The sensor space data is computed for all channels present in fwd. Use
+    pick_channels_forward or pick_types_forward to restrict the solution to a
+    subset of channels.
+
+    The function returns an Evoked object, which is constructed using the
+    provided info. The info object should be from the same MEG system on
+    which the original data was acquired. An exception will be raised if
+    the forward operator contains channels that are not present in the
+    info.
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator to use. Has to be fixed-orientation.
+    stc : SourceEstimate
+        The source estimate from which the sensor space data is computed.
+    info : instance of mne.io.meas_info.Info
+        Measurement info to generate the evoked.
+    start : int, optional
+        Index of first time sample (index, not time in seconds).
+    stop : int, optional
+        Index of first time sample not to include (index, not time in
+        seconds).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    evoked_template : Evoked object (deprecated)
+        Evoked object used as template to generate the output argument.
+
+    Returns
+    -------
+    evoked : Evoked
+        Evoked object with computed sensor space data.
+
+    See Also
+    --------
+    apply_forward_raw: Compute sensor space data and return a Raw object.
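+
+    Examples
+    --------
+    A sketch, assuming ``fwd`` (fixed-orientation), ``stc``, and ``info``
+    already exist::
+
+        >>> evoked = apply_forward(fwd, stc, info)  # doctest: +SKIP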
+    """
+    if evoked_template is None and info is None:
+        raise ValueError('You have to provide the info parameter.')
+
+    if evoked_template is not None and not isinstance(evoked_template, Info):
+        warnings.warn('The "evoked_template" parameter is being deprecated '
+                      'and will be removed in MNE-0.11. '
+                      'Please provide info parameter instead',
+                      DeprecationWarning)
+        info = evoked_template.info
+
+    if info is not None and not isinstance(info, Info):
+        warnings.warn('Passing anything other than an Info object as the '
+                      '"info" parameter is being deprecated and will be '
+                      'removed in MNE-0.11. Please provide the measurement '
+                      'info (e.g., evoked.info) instead', DeprecationWarning)
+        info = info.info
+
+    # make sure info contains all channels in fwd
+    for ch_name in fwd['sol']['row_names']:
+        if ch_name not in info['ch_names']:
+            raise ValueError('Channel %s of forward operator not present in '
+                             'info.' % ch_name)
+
+    # project the source estimate to the sensor space
+    data, times = _apply_forward(fwd, stc, start, stop)
+
+    # fill the measurement info
+    sfreq = float(1.0 / stc.tstep)
+    info_out = _fill_measurement_info(info, fwd, sfreq)
+
+    evoked = EvokedArray(data, info_out, times[0], nave=1)
+
+    evoked.times = times
+    evoked.first = int(np.round(evoked.times[0] * sfreq))
+    evoked.last = evoked.first + evoked.data.shape[1] - 1
+
+    return evoked
+
+
+@verbose
+def apply_forward_raw(fwd, stc, info, start=None, stop=None,
+                      verbose=None):
+    """Project source space currents to sensor space using a forward operator
+
+    The sensor space data is computed for all channels present in fwd. Use
+    pick_channels_forward or pick_types_forward to restrict the solution to a
+    subset of channels.
+
+    The function returns a Raw object, which is constructed using provided
+    info. The info object should be from the same MEG system on which the
+    original data was acquired. An exception will be raised if the forward
+    operator contains channels that are not present in the info.
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator to use. Has to be fixed-orientation.
+    stc : SourceEstimate
+        The source estimate from which the sensor space data is computed.
+    info : Instance of mne.io.meas_info.Info
+        The measurement info.
+    start : int, optional
+        Index of first time sample (index, not time in seconds).
+    stop : int, optional
+        Index of first time sample not to include (index, not time in
+        seconds).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Raw object
+        Raw object with computed sensor space data.
+
+    See Also
+    --------
+    apply_forward: Compute sensor space data and return an Evoked object.
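+
+    Examples
+    --------
+    A sketch under the same assumptions as apply_forward::
+
+        >>> raw = apply_forward_raw(fwd, stc, info)  # doctest: +SKIP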
+    """
+    if isinstance(info, _BaseRaw):
+        warnings.warn('The "Raw_template" parameter is being deprecated '
+                      'and will be removed in MNE-0.11. '
+                      'Please provide info parameter instead',
+                      DeprecationWarning)
+        info = info.info
+
+    # make sure info contains all channels in fwd
+    for ch_name in fwd['sol']['row_names']:
+        if ch_name not in info['ch_names']:
+            raise ValueError('Channel %s of forward operator not present in '
+                             'info.' % ch_name)
+
+    # project the source estimate to the sensor space
+    data, times = _apply_forward(fwd, stc, start, stop)
+
+    sfreq = 1.0 / stc.tstep
+    info = _fill_measurement_info(info, fwd, sfreq)
+    info['projs'] = []
+    # store sensor data in Raw object using the info
+    raw = RawArray(data, info)
+    raw.preload = True
+
+    raw._first_samps = np.array([int(np.round(times[0] * sfreq))])
+    raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])
+    raw._projector = None
+    raw._update_times()
+    return raw
+
+
+def restrict_forward_to_stc(fwd, stc):
+    """Restricts forward operator to active sources in a source estimate
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator.
+    stc : SourceEstimate
+        Source estimate.
+
+    Returns
+    -------
+    fwd_out : dict
+        Restricted forward operator.
+
+    See Also
+    --------
+    restrict_forward_to_label
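+
+    Examples
+    --------
+    A sketch (``fwd`` and ``stc`` are assumed to exist and to share the
+    same source space)::
+
+        >>> fwd_out = restrict_forward_to_stc(fwd, stc)  # doctest: +SKIP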
+    """
+
+    fwd_out = deepcopy(fwd)
+    src_sel = _stc_src_sel(fwd['src'], stc)
+
+    fwd_out['source_rr'] = fwd['source_rr'][src_sel]
+    fwd_out['nsource'] = len(src_sel)
+
+    if is_fixed_orient(fwd):
+        idx = src_sel
+    else:
+        idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+
+    fwd_out['source_nn'] = fwd['source_nn'][idx]
+    fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]
+    fwd_out['sol']['ncol'] = len(idx)
+
+    for i in range(2):
+        fwd_out['src'][i]['vertno'] = stc.vertices[i]
+        fwd_out['src'][i]['nuse'] = len(stc.vertices[i])
+        fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
+        fwd_out['src'][i]['inuse'].fill(0)
+        fwd_out['src'][i]['inuse'][stc.vertices[i]] = 1
+        fwd_out['src'][i]['use_tris'] = np.array([], int)
+        fwd_out['src'][i]['nuse_tri'] = np.array([0])
+
+    return fwd_out
+
+
+def restrict_forward_to_label(fwd, labels):
+    """Restricts forward operator to labels
+
+    Parameters
+    ----------
+    fwd : dict
+        Forward operator.
+    labels : label object | list
+        Label object or list of label objects.
+
+    Returns
+    -------
+    fwd_out : dict
+        Restricted forward operator.
+
+    See Also
+    --------
+    restrict_forward_to_stc
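+
+    Examples
+    --------
+    Restricting to a single label (``label`` is assumed to be an
+    mne.Label read elsewhere)::
+
+        >>> fwd_out = restrict_forward_to_label(fwd, label)  # doctest: +SKIP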
+    """
+
+    if not isinstance(labels, list):
+        labels = [labels]
+
+    fwd_out = deepcopy(fwd)
+    fwd_out['source_rr'] = np.zeros((0, 3))
+    fwd_out['nsource'] = 0
+    fwd_out['source_nn'] = np.zeros((0, 3))
+    fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))
+    fwd_out['sol']['ncol'] = 0
+
+    for i in range(2):
+        fwd_out['src'][i]['vertno'] = np.array([], int)
+        fwd_out['src'][i]['nuse'] = 0
+        fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
+        fwd_out['src'][i]['inuse'].fill(0)
+        fwd_out['src'][i]['use_tris'] = np.array([], int)
+        fwd_out['src'][i]['nuse_tri'] = np.array([0])
+
+    for label in labels:
+        if label.hemi == 'lh':
+            i = 0
+            src_sel = np.intersect1d(fwd['src'][0]['vertno'], label.vertices)
+            src_sel = np.searchsorted(fwd['src'][0]['vertno'], src_sel)
+        else:
+            i = 1
+            src_sel = np.intersect1d(fwd['src'][1]['vertno'], label.vertices)
+            src_sel = (np.searchsorted(fwd['src'][1]['vertno'], src_sel) +
+                       len(fwd['src'][0]['vertno']))
+
+        fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
+                                          fwd['source_rr'][src_sel]])
+        fwd_out['nsource'] += len(src_sel)
+
+        fwd_out['src'][i]['vertno'] = np.r_[fwd_out['src'][i]['vertno'],
+                                            src_sel]
+        fwd_out['src'][i]['nuse'] += len(src_sel)
+        fwd_out['src'][i]['inuse'][src_sel] = 1
+
+        if is_fixed_orient(fwd):
+            idx = src_sel
+        else:
+            idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
+
+        fwd_out['source_nn'] = np.vstack([fwd_out['source_nn'],
+                                          fwd['source_nn'][idx]])
+        fwd_out['sol']['data'] = np.hstack([fwd_out['sol']['data'],
+                                            fwd['sol']['data'][:, idx]])
+        fwd_out['sol']['ncol'] += len(idx)
+
+    return fwd_out
+
+
+@verbose
+def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
+                        mindist=None, bem=None, mri=None, trans=None,
+                        eeg=True, meg=True, fixed=False, grad=False,
+                        mricoord=False, overwrite=False, subjects_dir=None,
+                        verbose=None):
+    """Calculate a forward solution for a subject using MNE-C routines
+
+    This function wraps to mne_do_forward_solution, so the mne
+    command-line tools must be installed and accessible from Python.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject.
+    meas : Raw | Epochs | Evoked | str
+        If Raw or Epochs, a temporary evoked file will be created and
+        saved to a temporary directory. If str, then it should be a
+        filename to a file with measurement information the mne
+        command-line tools can understand (i.e., raw or evoked).
+    fname : str | None
+        Destination forward solution filename. If None, the solution
+        will be created in a temporary directory, loaded, and deleted.
+    src : str | None
+        Source space name. If None, the MNE default is used.
+    spacing : str
+        The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
+        recursively subdivided icosahedron, or ``'oct#'`` for a recursively
+        subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
+    mindist : float | str | None
+        Minimum distance of sources from inner skull surface (in mm).
+        If None, the MNE default value is used. If string, 'all'
+        indicates to include all points.
+    bem : str | None
+        Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
+        (Default), the MNE default will be used.
+    mri : str | None
+        The name of the trans file in FIF format.
+        If None, trans must not be None.
+    trans : dict | str | None
+        File name of the trans file in text format.
+        If None, mri must not be None.
+    eeg : bool
+        If True (Default), include EEG computations.
+    meg : bool
+        If True (Default), include MEG computations.
+    fixed : bool
+        If True, make a fixed-orientation forward solution (Default:
+        False). Note that fixed-orientation inverses can still be
+        created from free-orientation forward solutions.
+    grad : bool
+        If True, compute the gradient of the field with respect to the
+        dipole coordinates as well (Default: False).
+    mricoord : bool
+        If True, calculate in MRI coordinates (Default: False).
+    overwrite : bool
+        If True, the destination file (if it exists) will be overwritten.
+        If False (default), an error will be raised if the file exists.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fwd : dict
+        The generated forward solution.
+
+    See Also
+    --------
+    forward.make_forward_solution
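+
+    Examples
+    --------
+    A hypothetical call (subject, measurement, and trans file names are
+    made up; requires the MNE-C command-line tools)::
+
+        >>> fwd = do_forward_solution('sample', 'sample_audvis_raw.fif',
+        ...                           trans='sample-trans.txt',
+        ...                           bem='sample-5120')  # doctest: +SKIP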
+    """
+    if not has_mne_c():
+        raise RuntimeError('mne command line tools could not be found')
+
+    # check for file existence
+    temp_dir = tempfile.mkdtemp()
+    if fname is None:
+        fname = op.join(temp_dir, 'temp-fwd.fif')
+    _check_fname(fname, overwrite)
+
+    if not isinstance(subject, string_types):
+        raise ValueError('subject must be a string')
+
+    # check for meas to exist as string, or try to make evoked
+    meas_data = None
+    if isinstance(meas, string_types):
+        if not op.isfile(meas):
+            raise IOError('measurement file "%s" could not be found' % meas)
+    elif isinstance(meas, _BaseRaw):
+        events = np.array([[0, 0, 1]], dtype=np.int)
+        end = 1. / meas.info['sfreq']
+        meas_data = Epochs(meas, events, 1, 0, end, proj=False).average()
+    elif isinstance(meas, Epochs):
+        meas_data = meas.average()
+    elif isinstance(meas, Evoked):
+        meas_data = meas
+    else:
+        raise ValueError('meas must be string, Raw, Epochs, or Evoked')
+
+    if meas_data is not None:
+        meas = op.join(temp_dir, 'evoked.fif')
+        write_evokeds(meas, meas_data)
+
+    # deal with trans/mri
+    if mri is not None and trans is not None:
+        raise ValueError('trans and mri cannot both be specified')
+    if mri is None and trans is None:
+        # MNE allows this to default to a trans/mri in the subject's dir,
+        # but let's be safe here and force the user to pass us a trans/mri
+        raise ValueError('Either trans or mri must be specified')
+
+    if trans is not None:
+        if not isinstance(trans, string_types):
+            raise ValueError('trans must be a string')
+        if not op.isfile(trans):
+            raise IOError('trans file "%s" not found' % trans)
+    if mri is not None:
+        # deal with trans
+        if not isinstance(mri, string_types):
+            if isinstance(mri, dict):
+                mri_data = deepcopy(mri)
+                mri = op.join(temp_dir, 'mri-trans.fif')
+                try:
+                    write_trans(mri, mri_data)
+                except Exception:
+                    raise IOError('mri was a dict, but could not be '
+                                  'written to disk as a transform file')
+            else:
+                raise ValueError('mri must be a string or dict (trans)')
+        if not op.isfile(mri):
+            raise IOError('mri file "%s" could not be found' % mri)
+
+    # deal with meg/eeg
+    if not meg and not eeg:
+        raise ValueError('meg or eeg (or both) must be True')
+
+    path, fname = op.split(fname)
+    if not op.splitext(fname)[1] == '.fif':
+        raise ValueError('Forward name does not end with .fif')
+    path = op.abspath(path)
+
+    # deal with mindist
+    if mindist is not None:
+        if isinstance(mindist, string_types):
+            if not mindist.lower() == 'all':
+                raise ValueError('mindist, if string, must be "all"')
+            mindist = ['--all']
+        else:
+            mindist = ['--mindist', '%g' % mindist]
+
+    # src, spacing, bem
+    if src is not None:
+        if not isinstance(src, string_types):
+            raise ValueError('src must be a string or None')
+    if spacing is not None:
+        if not isinstance(spacing, string_types):
+            raise ValueError('spacing must be a string or None')
+    if bem is not None:
+        if not isinstance(bem, string_types):
+            raise ValueError('bem must be a string or None')
+
+    # put together the actual call
+    cmd = ['mne_do_forward_solution',
+           '--subject', subject,
+           '--meas', meas,
+           '--fwd', fname,
+           '--destdir', path]
+    if src is not None:
+        cmd += ['--src', src]
+    if spacing is not None:
+        if spacing.isdigit():
+            pass  # spacing in mm
+        else:
+            # allow both "ico4" and "ico-4" style values
+            match = re.match(r"(oct|ico)-?(\d+)$", spacing)
+            if match is None:
+                raise ValueError("Invalid spacing parameter: %r" % spacing)
+            spacing = '-'.join(match.groups())
+        cmd += ['--spacing', spacing]
+    if mindist is not None:
+        cmd += mindist
+    if bem is not None:
+        cmd += ['--bem', bem]
+    if mri is not None:
+        cmd += ['--mri', '%s' % mri]
+    if trans is not None:
+        cmd += ['--trans', '%s' % trans]
+    if not meg:
+        cmd.append('--eegonly')
+    if not eeg:
+        cmd.append('--megonly')
+    if fixed:
+        cmd.append('--fixed')
+    if grad:
+        cmd.append('--grad')
+    if mricoord:
+        cmd.append('--mricoord')
+    if overwrite:
+        cmd.append('--overwrite')
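+
+    # the assembled command looks like, e.g. (illustrative values only):
+    #   mne_do_forward_solution --subject sample --meas evoked.fif \
+    #       --fwd sample-fwd.fif --destdir /abs/path --spacing ico-4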
+
+    env = os.environ.copy()
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    try:
+        logger.info('Running forward solution generation command with '
+                    'subjects_dir %s' % subjects_dir)
+        run_subprocess(cmd, env=env)
+        fwd = read_forward_solution(op.join(path, fname), verbose=False)
+    finally:
+        shutil.rmtree(temp_dir, ignore_errors=True)
+    return fwd
+
+
+ at verbose
+def average_forward_solutions(fwds, weights=None):
+    """Average forward solutions
+
+    Parameters
+    ----------
+    fwds : list of dict
+        Forward solutions to average. Each entry (dict) should be a
+        forward solution.
+    weights : array | None
+        Weights to apply to each forward solution in averaging. If None,
+        forward solutions will be equally weighted. Weights must be
+        non-negative, and will be adjusted to sum to one.
+
+    Returns
+    -------
+    fwd : dict
+        The averaged forward solution.
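+
+    Examples
+    --------
+    A minimal sketch (the file names are placeholders, not run here)::
+
+        >>> fwds = [read_forward_solution(f)
+        ...         for f in ('a-fwd.fif', 'b-fwd.fif')]
+        >>> fwd_ave = average_forward_solutions(fwds, [0.75, 0.25])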
+    """
+    # check for fwds being a list
+    if not isinstance(fwds, list):
+        raise TypeError('fwds must be a list')
+    if not len(fwds) > 0:
+        raise ValueError('fwds must not be empty')
+
+    # check weights
+    if weights is None:
+        weights = np.ones(len(fwds))
+    weights = np.asanyarray(weights)  # in case it's a list, convert it
+    if not np.all(weights >= 0):
+        raise ValueError('weights must be non-negative')
+    if not len(weights) == len(fwds):
+        raise ValueError('weights must be None or the same length as fwds')
+    w_sum = np.sum(weights)
+    if not w_sum > 0:
+        raise ValueError('weights cannot all be zero')
+    weights /= w_sum
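+    # e.g. weights=[1, 3] normalizes to [0.25, 0.75]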
+
+    # check our forward solutions
+    for fwd in fwds:
+        # check to make sure it's a forward solution
+        if not isinstance(fwd, dict):
+            raise TypeError('Each entry in fwds must be a dict')
+        # check to make sure the dict is actually a fwd
+        check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',
+                      'source_rr', 'source_ori', 'surf_ori', 'coord_frame',
+                      'mri_head_t', 'nsource']
+        if not all(key in fwd for key in check_keys):
+            raise KeyError('forward solution dict does not have all standard '
+                           'entries, cannot compute average.')
+
+    # check forward solution compatibility
+    if any(fwd['sol'][k] != fwds[0]['sol'][k]
+           for fwd in fwds[1:] for k in ['nrow', 'ncol']):
+        raise ValueError('Forward solutions have incompatible dimensions')
+    if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]
+           for k in ['source_ori', 'surf_ori', 'coord_frame']):
+        raise ValueError('Forward solutions have incompatible orientations')
+
+    # actually average them (solutions and gradients)
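+    # i.e. fwd_ave = sum_i weights[i] * fwds[i] ('sol' and 'sol_grad' alike)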
+    fwd_ave = deepcopy(fwds[0])
+    fwd_ave['sol']['data'] *= weights[0]
+    fwd_ave['_orig_sol'] *= weights[0]
+    for fwd, w in zip(fwds[1:], weights[1:]):
+        fwd_ave['sol']['data'] += w * fwd['sol']['data']
+        fwd_ave['_orig_sol'] += w * fwd['_orig_sol']
+    if fwd_ave['sol_grad'] is not None:
+        fwd_ave['sol_grad']['data'] *= weights[0]
+        fwd_ave['_orig_sol_grad'] *= weights[0]
+        for fwd, w in zip(fwds[1:], weights[1:]):
+            fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']
+            fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']
+    return fwd_ave
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_field_interpolation.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_field_interpolation.py
new file mode 100644
index 0000000..43fbc35
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_field_interpolation.py
@@ -0,0 +1,223 @@
+import numpy as np
+from os import path as op
+from numpy.polynomial import legendre
+from numpy.testing import (assert_allclose, assert_array_equal,
+                           assert_array_almost_equal)
+from nose.tools import assert_raises, assert_true, assert_equal
+
+from mne.forward import _make_surface_mapping, make_field_map
+from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
+                                    _get_legen_table,
+                                    _get_legen_lut_fast,
+                                    _get_legen_lut_accurate,
+                                    _do_cross_dots)
+from mne.forward._make_forward import _create_meg_coils
+from mne.forward._field_interpolation import _setup_dots
+from mne.surface import get_meg_helmet_surf, get_head_surf
+from mne.datasets import testing
+from mne import read_evokeds
+from mne.fixes import partial
+from mne.externals.six.moves import zip
+from mne.utils import run_tests_if_main, slow_test
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+
+data_path = testing.data_path(download=False)
+trans_fname = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+subjects_dir = op.join(data_path, 'subjects')
+
+
+def test_legendre_val():
+    """Test Legendre polynomial (derivative) equivalence
+    """
+    # check table equiv
+    xs = np.linspace(-1., 1., 1000)
+    n_terms = 100
+
+    # True, numpy
+    vals_np = legendre.legvander(xs, n_terms - 1)
+
+    # Table approximation
+    for fun, nc in zip([_get_legen_lut_fast, _get_legen_lut_accurate],
+                       [100, 50]):
+        lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
+        vals_i = fun(xs, lut)
+        # Need a "1:" here because we omit the first coefficient in our table!
+        assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
+                        rtol=1e-2, atol=5e-3)
+
+        # Now let's look at our sums
+        ctheta = np.random.rand(20, 30) * 2.0 - 1.0
+        beta = np.random.rand(20, 30) * 0.8
+        lut_fun = partial(fun, lut=lut)
+        c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
+        c1.shape = beta.shape
+
+        # compare to numpy
+        n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
+        coeffs = np.zeros((n_terms,) + beta.shape)
+        coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
+                      (2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
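+        # i.e. evaluate sum_{n>=1} beta**n * (2n+1)**2 / n * P_n(ctheta)
+        # directly via numpy's legval for comparison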
+        # can't use tensor=False here b/c it isn't in old numpy
+        c2 = np.empty((20, 30))
+        for ci1 in range(20):
+            for ci2 in range(30):
+                c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
+                                               coeffs[:, ci1, ci2])
+        assert_allclose(c1, c2, 1e-2, 1e-3)  # close enough...
+
+    # compare fast and slow for MEG
+    ctheta = np.random.rand(20 * 30) * 2.0 - 1.0
+    beta = np.random.rand(20 * 30) * 0.8
+    lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
+    fun = partial(_get_legen_lut_fast, lut=lut)
+    coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
+    lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
+    fun = partial(_get_legen_lut_accurate, lut=lut)
+    coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
+
+
+def test_legendre_table():
+    """Test Legendre table calculation
+    """
+    # double-check our table generation
+    n = 10
+    for ch_type in ['eeg', 'meg']:
+        lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
+        lut1 = lut1[:, :n - 1].copy()
+        n_fact1 = n_fact1[:n - 1].copy()
+        lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
+        assert_allclose(lut1, lut2)
+        assert_allclose(n_fact1, n_fact2)
+
+
+ at testing.requires_testing_data
+def test_make_field_map_eeg():
+    """Test interpolation of EEG field onto head
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    evoked.info['bads'] = ['MEG 2443', 'EEG 053']  # add some bads
+    surf = get_head_surf('sample', subjects_dir=subjects_dir)
+    # we must have trans if surface is in MRI coords
+    assert_raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
+
+    evoked.pick_types(meg=False, eeg=True)
+    fmd = make_field_map(evoked, trans_fname,
+                         subject='sample', subjects_dir=subjects_dir)
+
+    # trans is necessary for EEG only
+    assert_raises(RuntimeError, make_field_map, evoked, None,
+                  subject='sample', subjects_dir=subjects_dir)
+
+    fmd = make_field_map(evoked, trans_fname,
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (642, 59))  # maps data onto surf
+    assert_equal(len(fmd[0]['ch_names']), 59)
+
+
+ at testing.requires_testing_data
+ at slow_test
+def test_make_field_map_meg():
+    """Test interpolation of MEG field onto helmet | head
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    info = evoked.info
+    surf = get_meg_helmet_surf(info)
+    # let's reduce the number of channels by a bunch to speed it up
+    info['bads'] = info['ch_names'][:200]
+    # bad ch_type
+    assert_raises(ValueError, _make_surface_mapping, info, surf, 'foo')
+    # bad mode
+    assert_raises(ValueError, _make_surface_mapping, info, surf, 'meg',
+                  mode='foo')
+    # no picks
+    evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
+    assert_raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
+                  surf, 'meg')
+    # bad surface def
+    nn = surf['nn']
+    del surf['nn']
+    assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
+    surf['nn'] = nn
+    cf = surf['coord_frame']
+    del surf['coord_frame']
+    assert_raises(KeyError, _make_surface_mapping, info, surf, 'meg')
+    surf['coord_frame'] = cf
+
+    # now do it with make_field_map
+    evoked.pick_types(meg=True, eeg=False)
+    fmd = make_field_map(evoked, None,
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (304, 106))  # maps data onto surf
+    assert_equal(len(fmd[0]['ch_names']), 106)
+
+    assert_raises(ValueError, make_field_map, evoked, ch_type='foobar')
+
+    # now test the make_field_map on head surf for MEG
+    evoked.pick_types(meg=True, eeg=False)
+    fmd = make_field_map(evoked, trans_fname, meg_surf='head',
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (642, 106))  # maps data onto surf
+    assert_equal(len(fmd[0]['ch_names']), 106)
+
+    assert_raises(ValueError, make_field_map, evoked, meg_surf='foobar',
+                  subjects_dir=subjects_dir, trans=trans_fname)
+
+
+def _setup_args(info):
+    """Helper to test_as_meg_type_evoked."""
+    coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots('fast',
+                                                             coils,
+                                                             'meg')
+    args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
+                     ch_type='meg', lut=lut_fun, n_fact=n_fact)
+    return args_dict
+
+
+ at testing.requires_testing_data
+def test_as_meg_type_evoked():
+    """Test interpolation of data on to virtual channels."""
+
+    # validation tests
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    assert_raises(ValueError, evoked.as_type, 'meg')
+    assert_raises(ValueError, evoked.copy().pick_types(meg='grad').as_type,
+                  'meg')
+
+    # channel names
+    ch_names = evoked.info['ch_names']
+    virt_evoked = evoked.pick_channels(ch_names=ch_names[:10:1],
+                                       copy=True).as_type('mag')
+    assert_true(all('_virtual' in ch for ch in virt_evoked.info['ch_names']))
+
+    # pick from and to channels
+    evoked_from = evoked.pick_channels(ch_names=ch_names[2:10:3], copy=True)
+    evoked_to = evoked.pick_channels(ch_names=ch_names[0:10:3], copy=True)
+
+    info_from, info_to = evoked_from.info, evoked_to.info
+
+    # set up things
+    args1, args2 = _setup_args(info_from), _setup_args(info_to)
+    args1.update(coils2=args2['coils1'])
+    args2.update(coils2=args1['coils1'])
+
+    # test cross dots
+    cross_dots1 = _do_cross_dots(**args1)
+    cross_dots2 = _do_cross_dots(**args2)
+
+    assert_array_almost_equal(cross_dots1, cross_dots2.T)
+
+    # correlation test
+    evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
+    data1 = evoked.pick_types(meg='grad').data.ravel()
+    data2 = evoked.as_type('grad').data.ravel()
+    assert_true(np.corrcoef(data1, data2)[0, 1] > 0.95)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_forward.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_forward.py
new file mode 100644
index 0000000..eee1cdb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_forward.py
@@ -0,0 +1,331 @@
+import os
+import os.path as op
+import warnings
+import gc
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_array_equal, assert_allclose)
+
+from mne.datasets import testing
+from mne import (read_forward_solution, apply_forward, apply_forward_raw,
+                 average_forward_solutions, write_forward_solution,
+                 convert_forward_solution)
+from mne import SourceEstimate, pick_types_forward, read_evokeds
+from mne.label import read_label
+from mne.utils import (requires_mne, run_subprocess, _TempDir,
+                       run_tests_if_main, slow_test)
+from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
+                         Forward)
+
+data_path = testing.data_path(download=False)
+fname_meeg = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
+                          'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
+
+fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+
+fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                       'data', 'test-ave.fif')
+fname_mri = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-trans.fif')
+subjects_dir = os.path.join(data_path, 'subjects')
+fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
+
+
+def compare_forwards(f1, f2):
+    """Helper to compare two potentially converted forward solutions"""
+    assert_allclose(f1['sol']['data'], f2['sol']['data'])
+    assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
+    assert_allclose(f1['source_nn'], f2['source_nn'])
+    if f1['sol_grad'] is not None:
+        assert_true(f2['sol_grad'] is not None)
+        assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
+        assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
+    else:
+        assert_true(f2['sol_grad'] is None)
+    assert_equal(f1['source_ori'], f2['source_ori'])
+    assert_equal(f1['surf_ori'], f2['surf_ori'])
+
+
+ at testing.requires_testing_data
+def test_convert_forward():
+    """Test converting forward solution between different representations
+    """
+    fwd = read_forward_solution(fname_meeg_grad)
+    assert_true(repr(fwd))
+    assert_true(isinstance(fwd, Forward))
+    # look at surface orientation
+    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
+    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
+    compare_forwards(fwd_surf, fwd_surf_io)
+    del fwd_surf_io
+    gc.collect()
+    # go back
+    fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
+    assert_true(repr(fwd_new))
+    assert_true(isinstance(fwd_new, Forward))
+    compare_forwards(fwd, fwd_new)
+    # now go to fixed
+    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
+                                         force_fixed=True)
+    del fwd_surf
+    gc.collect()
+    assert_true(repr(fwd_fixed))
+    assert_true(isinstance(fwd_fixed, Forward))
+    fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
+                                         force_fixed=True)
+    compare_forwards(fwd_fixed, fwd_fixed_io)
+    del fwd_fixed_io
+    gc.collect()
+    # now go back to cartesian (original condition)
+    fwd_new = convert_forward_solution(fwd_fixed)
+    assert_true(repr(fwd_new))
+    assert_true(isinstance(fwd_new, Forward))
+    compare_forwards(fwd, fwd_new)
+    del fwd, fwd_new, fwd_fixed
+    gc.collect()
+
+
+ at slow_test
+ at testing.requires_testing_data
+def test_io_forward():
+    """Test IO for forward solutions
+    """
+    temp_dir = _TempDir()
+    # do extensive tests with MEEG + grad
+    n_channels, n_src = 366, 108
+    fwd = read_forward_solution(fname_meeg_grad)
+    assert_true(isinstance(fwd, Forward))
+    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
+    leadfield = fwd['sol']['data']
+    assert_equal(leadfield.shape, (n_channels, n_src))
+    assert_equal(len(fwd['sol']['row_names']), n_channels)
+    fname_temp = op.join(temp_dir, 'test-fwd.fif')
+    write_forward_solution(fname_temp, fwd, overwrite=True)
+
+    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
+    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
+    leadfield = fwd_read['sol']['data']
+    assert_equal(leadfield.shape, (n_channels, n_src))
+    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
+    assert_equal(len(fwd_read['info']['chs']), n_channels)
+    assert_true('dev_head_t' in fwd_read['info'])
+    assert_true('mri_head_t' in fwd_read)
+    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
+
+    fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
+    leadfield = fwd['sol']['data']
+    assert_equal(leadfield.shape, (n_channels, n_src // 3))
+    assert_equal(len(fwd['sol']['row_names']), n_channels)
+    assert_equal(len(fwd['info']['chs']), n_channels)
+    assert_true('dev_head_t' in fwd['info'])
+    assert_true('mri_head_t' in fwd)
+    assert_true(fwd['surf_ori'])
+
+    # test warnings on bad filenames
+    fwd = read_forward_solution(fname_meeg_grad)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
+        write_forward_solution(fwd_badname, fwd)
+        read_forward_solution(fwd_badname)
+    assert_true(len(w) == 2)
+
+    fwd = read_forward_solution(fname_meeg)
+    write_forward_solution(fname_temp, fwd, overwrite=True)
+    fwd_read = read_forward_solution(fname_temp)
+    compare_forwards(fwd, fwd_read)
+
+
+ at testing.requires_testing_data
+def test_apply_forward():
+    """Test projection of source space data to sensor space
+    """
+    start = 0
+    stop = 5
+    n_times = stop - start - 1
+    sfreq = 10.0
+    t_start = 0.123
+
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+    assert_true(isinstance(fwd, Forward))
+
+    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    gain_sum = np.sum(fwd['sol']['data'], axis=1)
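+    # with unit-amplitude sources, each sensor's data summed over time should
+    # equal n_times * (row sum of the gain matrix); asserted below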
+
+    # Evoked
+    with warnings.catch_warnings(record=True) as w:
+        evoked = read_evokeds(fname_evoked, condition=0)
+        evoked.pick_types(meg=True)
+        evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
+        assert_equal(len(w), 2)
+        data = evoked.data
+        times = evoked.times
+
+        # do some tests
+        assert_array_almost_equal(evoked.info['sfreq'], sfreq)
+        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
+        assert_array_almost_equal(times[0], t_start)
+        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
+
+        # Raw
+        raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start,
+                                     stop=stop)
+        data, times = raw_proj[:, :]
+
+        # do some tests
+        assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
+        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
+        atol = 1. / sfreq
+        assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
+        assert_allclose(raw_proj.last_samp / sfreq,
+                        t_start + (n_times - 1) / sfreq, atol=atol)
+
+
+ at testing.requires_testing_data
+def test_restrict_forward_to_stc():
+    """Test restriction of source space to source SourceEstimate
+    """
+    start = 0
+    stop = 5
+    n_times = stop - start - 1
+    sfreq = 10.0
+    t_start = 0.123
+
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    fwd_out = restrict_forward_to_stc(fwd, stc)
+    assert_true(isinstance(fwd_out, Forward))
+
+    assert_equal(fwd_out['sol']['ncol'], 20)
+    assert_equal(fwd_out['src'][0]['nuse'], 15)
+    assert_equal(fwd_out['src'][1]['nuse'], 5)
+    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
+    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
+
+    fwd = read_forward_solution(fname_meeg, force_fixed=False)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
+    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
+    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
+
+    fwd_out = restrict_forward_to_stc(fwd, stc)
+
+    assert_equal(fwd_out['sol']['ncol'], 60)
+    assert_equal(fwd_out['src'][0]['nuse'], 15)
+    assert_equal(fwd_out['src'][1]['nuse'], 5)
+    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
+    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
+
+
+ at testing.requires_testing_data
+def test_restrict_forward_to_label():
+    """Test restriction of source space to label
+    """
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
+    labels = ['Aud-lh', 'Vis-rh']
+    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
+    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
+
+    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
+
+    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
+    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
+
+    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
+    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
+                  len(fwd['src'][0]['vertno']))
+
+    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
+    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
+    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
+
+    fwd = read_forward_solution(fname_meeg, force_fixed=False)
+    fwd = pick_types_forward(fwd, meg=True)
+
+    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
+    labels = ['Aud-lh', 'Vis-rh']
+    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
+    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
+
+    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
+
+    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
+    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
+
+    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
+    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
+                  len(fwd['src'][0]['vertno']))
+
+    assert_equal(fwd_out['sol']['ncol'],
+                 3 * (len(src_sel_lh) + len(src_sel_rh)))
+    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
+    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
+    assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
+    assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
+
+
+ at testing.requires_testing_data
+ at requires_mne
+def test_average_forward_solution():
+    """Test averaging forward solutions
+    """
+    temp_dir = _TempDir()
+    fwd = read_forward_solution(fname_meeg)
+    # input not a list
+    assert_raises(TypeError, average_forward_solutions, 1)
+    # list is too short
+    assert_raises(ValueError, average_forward_solutions, [])
+    # negative weights
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
+    # all zero weights
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
+    # weights not same length
+    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
+    # list contains entries that are not dict
+    assert_raises(TypeError, average_forward_solutions, [1, fwd])
+
+    # try an easy case
+    fwd_copy = average_forward_solutions([fwd])
+    assert_true(isinstance(fwd_copy, Forward))
+    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
+
+    # modify a fwd solution, save it, use MNE to average with old one
+    fwd_copy['sol']['data'] *= 0.5
+    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
+    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
+    cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
+           fname_copy, '--out', fname_copy)
+    run_subprocess(cmd)
+
+    # now let's actually do it, with one filename and one fwd
+    fwd_ave = average_forward_solutions([fwd, fwd_copy])
+    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
+    # fwd_ave_mne = read_forward_solution(fname_copy)
+    # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
+
+    # with gradient
+    fwd = read_forward_solution(fname_meeg_grad)
+    fwd_ave = average_forward_solutions([fwd, fwd])
+    compare_forwards(fwd, fwd_ave)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_make_forward.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_make_forward.py
new file mode 100644
index 0000000..dba5d58
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/forward/tests/test_make_forward.py
@@ -0,0 +1,356 @@
+from __future__ import print_function
+
+import os
+import os.path as op
+from subprocess import CalledProcessError
+import warnings
+
+from nose.tools import assert_raises, assert_true
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose)
+
+from mne.datasets import testing
+from mne.io import Raw, read_raw_kit, read_raw_bti, read_info
+from mne.io.constants import FIFF
+from mne import (read_forward_solution, make_forward_solution,
+                 do_forward_solution, read_trans,
+                 convert_forward_solution, setup_volume_source_space,
+                 read_source_spaces, make_sphere_model,
+                 pick_types_forward, pick_info, pick_types, Transform)
+from mne.utils import (requires_mne, requires_nibabel, _TempDir,
+                       run_tests_if_main, slow_test, run_subprocess)
+from mne.forward._make_forward import _create_meg_coils
+from mne.forward._compute_forward import _magnetic_dipole_field_vec
+from mne.forward import Forward
+from mne.source_space import (get_volume_labels_from_aseg,
+                              _compare_source_spaces, setup_source_space)
+
+data_path = testing.data_path(download=False)
+fname_meeg = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                       'data', 'test-ave.fif')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+subjects_dir = os.path.join(data_path, 'subjects')
+fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
+fname_bem = op.join(subjects_dir, 'sample', 'bem',
+                    'sample-1280-1280-1280-bem-sol.fif')
+fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
+                        'sample-1280-bem-sol.fif')
+
+
+def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
+                      meg_rtol=1e-4, meg_atol=1e-9,
+                      eeg_rtol=1e-3, eeg_atol=1e-3):
+    """Helper to test forwards"""
+    # check source spaces
+    assert_equal(len(fwd['src']), len(fwd_py['src']))
+    _compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
+    for surf_ori in [False, True]:
+        if surf_ori:
+            # use copy here to leave our originals unmodified
+            fwd = convert_forward_solution(fwd, surf_ori, copy=True)
+            fwd_py = convert_forward_solution(fwd_py, surf_ori, copy=True)
+
+        for key in ['nchan', 'source_nn', 'source_rr', 'source_ori',
+                    'surf_ori', 'coord_frame', 'nsource']:
+            print(key)
+            assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7)
+        assert_allclose(fwd_py['mri_head_t']['trans'],
+                        fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
+
+        assert_equal(fwd_py['sol']['data'].shape, (n_sensors, n_src))
+        assert_equal(len(fwd['sol']['row_names']), n_sensors)
+        assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
+
+        # check MEG
+        assert_allclose(fwd['sol']['data'][:306],
+                        fwd_py['sol']['data'][:306],
+                        rtol=meg_rtol, atol=meg_atol,
+                        err_msg='MEG mismatch')
+        # check EEG
+        if fwd['sol']['data'].shape[0] > 306:
+            assert_allclose(fwd['sol']['data'][306:],
+                            fwd_py['sol']['data'][306:],
+                            rtol=eeg_rtol, atol=eeg_atol,
+                            err_msg='EEG mismatch')
+
+
+def test_magnetic_dipole():
+    """Test basic magnetic dipole forward calculation
+    """
+    trans = Transform('mri', 'head', np.eye(4))
+    info = read_info(fname_raw)
+    picks = pick_types(info, meg=True, eeg=False, exclude=[])
+    info = pick_info(info, picks[:12])
+    coils = _create_meg_coils(info['chs'], 'normal', trans)
+    # magnetic dipole at device origin
+    r0 = np.array([0., 13., -6.])
+    for ch, coil in zip(info['chs'], coils):
+        rr = (ch['loc'][:3] + r0) / 2.
+        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
+        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
+        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # grad vs mag
+        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
+
+
+ at testing.requires_testing_data
+ at requires_mne
+def test_make_forward_solution_kit():
+    """Test making fwd using KIT, BTI, and CTF (compensated) files
+    """
+    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
+                      'tests', 'data')
+    sqd_path = op.join(kit_dir, 'test.sqd')
+    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
+    elp_path = op.join(kit_dir, 'test_elp.txt')
+    hsp_path = op.join(kit_dir, 'test_hsp.txt')
+    trans_path = op.join(kit_dir, 'trans-sample.fif')
+    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
+
+    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
+                      'tests', 'data')
+    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
+    bti_config = op.join(bti_dir, 'test_config_linux')
+    bti_hs = op.join(bti_dir, 'test_hs_linux')
+    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
+
+    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                            'data', 'test_ctf_comp_raw.fif')
+
+    # first set up a small testing source space
+    temp_dir = _TempDir()
+    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
+    src = setup_source_space('sample', fname_src_small, 'oct2',
+                             subjects_dir=subjects_dir, add_dist=False)
+    n_src = 108  # this is the resulting # of verts in fwd
+
+    # first use mne-C: convert file, make forward solution
+    fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
+                              bem=fname_bem_meg, mri=trans_path,
+                              eeg=False, meg=True, subjects_dir=subjects_dir)
+    assert_true(isinstance(fwd, Forward))
+
+    # now let's use python with the same raw file
+    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
+                                   fname_bem_meg, eeg=False, meg=True)
+    _compare_forwards(fwd, fwd_py, 157, n_src)
+    assert_true(isinstance(fwd_py, Forward))
+
+    # now let's use mne-python all the way
+    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
+    # without ignore_ref=True, this should throw an error:
+    assert_raises(NotImplementedError, make_forward_solution, raw_py.info,
+                  src=src, eeg=False, meg=True,
+                  bem=fname_bem_meg, trans=trans_path)
+
+    # check that asking for eeg channels (even if they don't exist) is handled
+    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
+                                                      eeg=False))
+    fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
+                                   bem=fname_bem_meg, trans=trans_path,
+                                   ignore_ref=True)
+    _compare_forwards(fwd, fwd_py, 157, n_src,
+                      meg_rtol=1e-3, meg_atol=1e-7)
+
+    # BTI python end-to-end versus C
+    fwd = do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
+                              bem=fname_bem_meg, mri=trans_path,
+                              eeg=False, meg=True, subjects_dir=subjects_dir)
+    raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs)
+    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
+                                   bem=fname_bem_meg, trans=trans_path)
+    _compare_forwards(fwd, fwd_py, 248, n_src)
+
+    # now let's test CTF w/compensation
+    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
+                                   fname_bem_meg, eeg=False, meg=True)
+
+    fwd = do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
+                              src=fname_src_small, bem=fname_bem_meg,
+                              eeg=False, meg=True, subjects_dir=subjects_dir)
+    _compare_forwards(fwd, fwd_py, 274, n_src)
+
+    # CTF with compensation changed in python
+    ctf_raw = Raw(fname_ctf_raw, compensation=2)
+
+    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
+                                   fname_bem_meg, eeg=False, meg=True)
+    with warnings.catch_warnings(record=True):
+        fwd = do_forward_solution('sample', ctf_raw, mri=fname_trans,
+                                  src=fname_src_small, bem=fname_bem_meg,
+                                  eeg=False, meg=True,
+                                  subjects_dir=subjects_dir)
+    _compare_forwards(fwd, fwd_py, 274, n_src)
+
+
+ at slow_test
+ at testing.requires_testing_data
+def test_make_forward_solution():
+    """Test making M-EEG forward solution from python
+    """
+    fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
+                                   fname_bem, mindist=5.0, eeg=True, meg=True)
+    assert_true(isinstance(fwd_py, Forward))
+    fwd = read_forward_solution(fname_meeg)
+    assert_true(isinstance(fwd, Forward))
+    _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
+
+
+ at testing.requires_testing_data
+ at requires_mne
+def test_make_forward_solution_sphere():
+    """Test making a forward solution with a sphere model"""
+    temp_dir = _TempDir()
+    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
+    src = setup_source_space('sample', fname_src_small, 'oct2',
+                             subjects_dir=subjects_dir, add_dist=False)
+    out_name = op.join(temp_dir, 'tmp-fwd.fif')
+    run_subprocess(['mne_forward_solution', '--meg', '--eeg',
+                    '--meas', fname_raw, '--src', fname_src_small,
+                    '--mri', fname_trans, '--fwd', out_name])
+    fwd = read_forward_solution(out_name)
+    sphere = make_sphere_model(verbose=True)
+    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
+                                   meg=True, eeg=True, verbose=True)
+    _compare_forwards(fwd, fwd_py, 366, 108,
+                      meg_rtol=5e-1, meg_atol=1e-6,
+                      eeg_rtol=5e-1, eeg_atol=5e-1)
+    # Since the above is pretty lax, let's check a different way
+    for meg, eeg in zip([True, False], [False, True]):
+        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
+        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
+        assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
+                                    fwd_py_['sol']['data'].ravel())[0, 1],
+                        1.0, rtol=1e-3)
+
+
+ at testing.requires_testing_data
+ at requires_mne
+def test_do_forward_solution():
+    """Test wrapping forward solution from python
+    """
+    temp_dir = _TempDir()
+    existing_file = op.join(temp_dir, 'test.fif')
+    with open(existing_file, 'w') as fid:
+        fid.write('aoeu')
+
+    mri = read_trans(fname_trans)
+    fname_fake = op.join(temp_dir, 'no_have.fif')
+
+    # ## Error checks
+    # bad subject
+    assert_raises(ValueError, do_forward_solution, 1, fname_raw,
+                  subjects_dir=subjects_dir)
+    # bad meas
+    assert_raises(ValueError, do_forward_solution, 'sample', 1,
+                  subjects_dir=subjects_dir)
+    # meas doesn't exist
+    assert_raises(IOError, do_forward_solution, 'sample', fname_fake,
+                  subjects_dir=subjects_dir)
+    # don't specify trans and meas
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  subjects_dir=subjects_dir)
+    # specify both trans and meas
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  trans='me', mri='you', subjects_dir=subjects_dir)
+    # specify non-existent trans
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  trans=fname_fake, subjects_dir=subjects_dir)
+    # specify non-existent mri
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_fake, subjects_dir=subjects_dir)
+    # specify non-string mri
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=1, subjects_dir=subjects_dir)
+    # specify non-string trans
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  trans=1, subjects_dir=subjects_dir)
+    # test specifying an actual trans in python space -- this should work but
+    # the transform I/O reduces our accuracy -- so we'll just hack a test here
+    # by making it bomb with eeg=False and meg=False
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=mri, eeg=False, meg=False, subjects_dir=subjects_dir)
+    # mindist as non-integer
+    assert_raises(TypeError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_trans, mindist=dict(), subjects_dir=subjects_dir)
+    # mindist as string but not 'all'
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_trans, eeg=False, mindist='yall',
+                  subjects_dir=subjects_dir)
+    # src, spacing, and bem as non-str
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_trans, src=1, subjects_dir=subjects_dir)
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_trans, spacing=1, subjects_dir=subjects_dir)
+    assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
+                  mri=fname_trans, bem=1, subjects_dir=subjects_dir)
+    # no overwrite flag
+    assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
+                  existing_file, mri=fname_trans, subjects_dir=subjects_dir)
+    # let's catch an MNE error, this time about trans being wrong
+    assert_raises(CalledProcessError, do_forward_solution, 'sample',
+                  fname_raw, existing_file, trans=fname_trans, overwrite=True,
+                  spacing='oct6', subjects_dir=subjects_dir)
+
+    # No need to actually calculate and check here, since it's effectively
+    # done in previous tests.
+
+
+ at slow_test
+ at testing.requires_testing_data
+ at requires_nibabel(False)
+def test_forward_mixed_source_space():
+    """Test making the forward solution for a mixed source space
+    """
+    temp_dir = _TempDir()
+    # get the surface source space
+    surf = read_source_spaces(fname_src)
+
+    # setup two volume source spaces
+    label_names = get_volume_labels_from_aseg(fname_aseg)
+    vol_labels = [label_names[int(np.random.rand() * len(label_names))]
+                  for _ in range(2)]
+    vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
+                                     mri=fname_aseg,
+                                     volume_label=vol_labels[0],
+                                     add_interpolator=False)
+    vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
+                                     mri=fname_aseg,
+                                     volume_label=vol_labels[1],
+                                     add_interpolator=False)
+
+    # merge surfaces and volume
+    src = surf + vol1 + vol2
+
+    # calculate forward solution
+    fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem, None)
+    assert_true(repr(fwd))
+
+    # extract source spaces
+    src_from_fwd = fwd['src']
+
+    # get the coordinate frame of each source space
+    coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
+
+    # assert that all source spaces are in head coordinates
+    assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
+
+    # run tests for SourceSpaces.export_volume
+    fname_img = op.join(temp_dir, 'temp-image.mgz')
+
+    # head coordinates and mri_resolution, but no trans file
+    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
+                  mri_resolution=True, trans=None)
+
+    # head coordinates and mri_resolution, but wrong trans file
+    vox_mri_t = vol1[0]['vox_mri_t']
+    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
+                  mri_resolution=True, trans=vox_mri_t)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/__init__.py
new file mode 100644
index 0000000..f9f66fc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/__init__.py
@@ -0,0 +1,101 @@
+"""Convenience functions for opening GUIs."""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import warnings
+
+from ..utils import _check_mayavi_version
+
+
+def combine_kit_markers():
+    """Create a new KIT marker file by interpolating two marker files
+
+    Notes
+    -----
+    The functionality in this GUI is also part of :func:`kit2fiff`.
+    """
+    _check_mayavi_version()
+    from ._marker_gui import CombineMarkersFrame
+    gui = CombineMarkersFrame()
+    gui.configure_traits()
+    return gui
+
+
+def coregistration(tabbed=False, split=True, scene_width=0o1, inst=None,
+                   subject=None, subjects_dir=None, raw=None):
+    """Coregister an MRI with a subject's head shape
+
+    Parameters
+    ----------
+    tabbed : bool
+        Combine the data source panel and the coregistration panel into a
+        single panel with tabs.
+    split : bool
+        Split the main panels with a movable splitter (good for QT4 but
+        unnecessary for wx backend).
+    scene_width : int
+        Specify a minimum width for the 3d scene (in pixels).
+    inst : None | str
+        Path to an instance file containing the digitizer data. Compatible for
+        Raw, Epochs, and Evoked files.
+    subject : None | str
+        Name of the mri subject.
+    subjects_dir : None | path
+        Override the SUBJECTS_DIR environment variable
+        (sys.environ['SUBJECTS_DIR']).
+    raw : None | str
+        Deprecated; use inst instead.
+
+    Notes
+    -----
+    All parameters are optional, since they can be set through the GUI.
+    Step by step instructions for the coregistrations can be accessed as
+    slides, `for subjects with structural MRI
+    <http://www.slideshare.net/mne-python/mnepython-coregistration>`_ and `for
+    subjects for which no MRI is available
+    <http://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
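+
+    Examples
+    --------
+    Typical interactive use (a sketch; the paths are placeholders)::
+
+        >>> import mne
+        >>> mne.gui.coregistration(subject='sample',
+        ...                        subjects_dir='/path/to/subjects')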
+    """
+    _check_mayavi_version()
+    if raw is not None:
+        warnings.warn('The `raw` argument has been deprecated in favor of '
+                      'the `inst` argument and will be removed in 0.11. '
+                      'Use `inst` instead.', DeprecationWarning)
+        inst = raw
+    from ._coreg_gui import CoregFrame, _make_view
+    view = _make_view(tabbed, split, scene_width)
+    gui = CoregFrame(inst, subject, subjects_dir)
+    gui.configure_traits(view=view)
+    return gui
+
+
+def fiducials(subject=None, fid_file=None, subjects_dir=None):
+    """Set the fiducials for an MRI subject
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    fid_file : None | str
+        Load a fiducials file different from the subject's default
+        ("{subjects_dir}/{subject}/bem/{subject}-fiducials.fif").
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+
+    Notes
+    -----
+    All parameters are optional, since they can be set through the GUI.
+    The functionality in this GUI is also part of :func:`coregistration`.
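+
+    Examples
+    --------
+    A sketch (the subject name is a placeholder)::
+
+        >>> import mne
+        >>> mne.gui.fiducials(subject='sample')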
+    """
+    _check_mayavi_version()
+    from ._fiducials_gui import FiducialsFrame
+    gui = FiducialsFrame(subject, subjects_dir, fid_file=fid_file)
+    gui.configure_traits()
+    return gui
+
+
+def kit2fiff():
+    """Convert KIT files to the fiff format
+    """
+    _check_mayavi_version()
+    from ._kit2fiff_gui import Kit2FiffFrame
+    gui = Kit2FiffFrame()
+    gui.configure_traits()
+    return gui
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_coreg_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_coreg_gui.py
new file mode 100644
index 0000000..3a9493d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_coreg_gui.py
@@ -0,0 +1,1383 @@
+"""Traits-based GUI for head-MRI coregistration"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from ..externals.six.moves import queue
+import re
+from threading import Thread
+import warnings
+
+import numpy as np
+from scipy.spatial.distance import cdist
+
+# allow import without traits
+try:
+    from mayavi.core.ui.mayavi_scene import MayaviScene
+    from mayavi.tools.mlab_scene_model import MlabSceneModel
+    from pyface.api import (error, confirm, warning, OK, YES, information,
+                            FileDialog, GUI)
+    from traits.api import (Bool, Button, cached_property, DelegatesTo,
+                            Directory, Enum, Float, HasTraits,
+                            HasPrivateTraits, Instance, Int, on_trait_change,
+                            Property, Str)
+    from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
+                              EnumEditor, Handler, Label, TextEditor)
+    from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
+    from tvtk.pyface.scene_editor import SceneEditor
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = Handler = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
+        Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
+        Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
+        EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
+        NoButtons = SceneEditor = trait_wraith
+
+
+from ..coreg import bem_fname, trans_fname
+from ..forward import prepare_bem_model
+from ..transforms import (write_trans, read_trans, apply_trans, rotation,
+                          translation, scaling, rotation_angles, Transform)
+from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
+                     _point_cloud_error)
+from ..utils import get_subjects_dir, logger
+from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
+from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
+                           SubjectSelectorPanel)
+from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
+                      _testing_mode)
+
+
+laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
+
+
+class CoregModel(HasPrivateTraits):
+    """Traits object for estimating the head mri transform.
+
+    Notes
+    -----
+    Transform from head to mri space is modelled with the following steps:
+
+     * move the head shape to its nasion position
+     * rotate the head shape with user defined rotation around its nasion
+     * move the head shape by user defined translation
+     * move the head shape origin to the mri nasion
+
+    If MRI scaling is enabled,
+
+     * the MRI is scaled relative to its origin center (prior to any
+       transformation of the digitizer head)
+
+
+    Don't sync transforms to anything to prevent them from being recomputed
+    upon every parameter change.
+    """
+    # data sources
+    mri = Instance(MRIHeadWithFiducialsModel, ())
+    hsp = Instance(InstSource, ())
+
+    # parameters
+    grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
+                      "head outwards to compensate for hair on the digitizer "
+                      "head shape")
+    n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
+                          "subject's head shape (a new MRI subject will be "
+                          "created with a name specified upon saving)")
+    scale_x = Float(1, label="Right (X)")
+    scale_y = Float(1, label="Anterior (Y)")
+    scale_z = Float(1, label="Superior (Z)")
+    rot_x = Float(0, label="Right (X)")
+    rot_y = Float(0, label="Anterior (Y)")
+    rot_z = Float(0, label="Superior (Z)")
+    trans_x = Float(0, label="Right (X)")
+    trans_y = Float(0, label="Anterior (Y)")
+    trans_z = Float(0, label="Superior (Z)")
+
+    prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
+                             "after scaling the MRI")
+
+    # secondary to parameters
+    scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
+                                 'scale_z'])
+    has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
+                            desc="Required fiducials data is present.")
+    has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])
+
+    # MRI dependent
+    mri_origin = Property(depends_on=['mri.nasion', 'scale'],
+                          desc="Coordinates of the scaled MRI's nasion.")
+
+    # target transforms
+    mri_scale_trans = Property(depends_on=['scale'])
+    head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
+                                          'rot_z', 'trans_x', 'trans_y',
+                                          'trans_z', 'mri_origin'],
+                              desc="Transformaiton of the head shape to "
+                              "match the scaled MRI.")
+
+    # info
+    subject_has_bem = DelegatesTo('mri')
+    lock_fiducials = DelegatesTo('mri')
+    can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
+                                                       'subject_has_bem'])
+    can_save = Property(Bool, depends_on=['head_mri_trans'])
+    raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
+                           "based on the raw file name.")
+
+    # transformed geometry
+    processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
+    transformed_mri_points = Property(depends_on=['processed_mri_points',
+                                                  'mri_scale_trans'])
+    transformed_hsp_points = Property(depends_on=['hsp.points',
+                                                  'head_mri_trans'])
+    transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
+    transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
+    transformed_mri_nasion = Property(depends_on=['mri.nasion',
+                                                  'mri_scale_trans'])
+    transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
+                                                  'head_mri_trans'])
+    transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
+    transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])
+
+    # fit properties
+    lpa_distance = Property(depends_on=['transformed_mri_lpa',
+                                        'transformed_hsp_lpa'])
+    nasion_distance = Property(depends_on=['transformed_mri_nasion',
+                                           'transformed_hsp_nasion'])
+    rpa_distance = Property(depends_on=['transformed_mri_rpa',
+                                        'transformed_hsp_rpa'])
+    point_distance = Property(depends_on=['transformed_mri_points',
+                                          'transformed_hsp_points'])
+
+    # fit property info strings
+    fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
+                                        'rpa_distance'])
+    points_eval_str = Property(depends_on='point_distance')
+
+    @cached_property
+    def _get_can_prepare_bem_model(self):
+        return self.subject_has_bem and self.n_scale_params > 0
+
+    @cached_property
+    def _get_can_save(self):
+        return np.any(self.head_mri_trans != np.eye(4))
+
+    @cached_property
+    def _get_has_pts_data(self):
+        has = (np.any(self.mri.points) and np.any(self.hsp.points))
+        return has
+
+    @cached_property
+    def _get_has_fid_data(self):
+        has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
+        return has
+
+    @cached_property
+    def _get_scale(self):
+        if self.n_scale_params == 0:
+            return np.array(1)
+        elif self.n_scale_params == 1:
+            return np.array(self.scale_x)
+        else:
+            return np.array([self.scale_x, self.scale_y, self.scale_z])
+
+    @cached_property
+    def _get_mri_scale_trans(self):
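+        # Note: ``scaling`` and ``translation`` (imported from ..transforms)
+        # build 4 x 4 homogeneous matrices; scaling(2., 2., 2.) is
+        # equivalent to np.diag([2., 2., 2., 1.]).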
+        if np.isscalar(self.scale) or self.scale.ndim == 0:
+            if self.scale == 1:
+                return np.eye(4)
+            else:
+                s = self.scale
+                return scaling(s, s, s)
+        else:
+            return scaling(*self.scale)
+
+    @cached_property
+    def _get_mri_origin(self):
+        if ((np.isscalar(self.scale) or self.scale.ndim == 0) and
+                self.scale == 1):
+            return self.mri.nasion
+        else:
+            return self.mri.nasion * self.scale
+
+    @cached_property
+    def _get_head_mri_trans(self):
+        if not self.has_fid_data:
+            return np.eye(4)
+
+        # move hsp so that its nasion becomes the origin
+        x, y, z = -self.hsp.nasion[0]
+        trans = translation(x, y, z)
+
+        # rotate hsp by rotation parameters
+        rot = rotation(self.rot_x, self.rot_y, self.rot_z)
+        trans = np.dot(rot, trans)
+
+        # move hsp by translation parameters
+        transl = translation(self.trans_x, self.trans_y, self.trans_z)
+        trans = np.dot(transl, trans)
+
+        # move the hsp origin(/nasion) to the MRI's nasion
+        x, y, z = self.mri_origin[0]
+        tgt_mri_trans = translation(x, y, z)
+        trans = np.dot(tgt_mri_trans, trans)
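+        # Net transform, applied right-to-left to homogeneous coordinates:
+        #   trans = T(mri_origin) * T(trans_x/y/z) * R(rot_x/y/z) * T(-nasion)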
+
+        return trans
+
+    @cached_property
+    def _get_processed_mri_points(self):
+        if self.grow_hair:
+            if len(self.mri.norms):
+                if self.n_scale_params == 0:
+                    scaled_hair_dist = self.grow_hair / 1000
+                else:
+                    scaled_hair_dist = self.grow_hair / self.scale / 1000
+
+                points = self.mri.points.copy()
+                hair = points[:, 2] > points[:, 1]
+                points[hair] += self.mri.norms[hair] * scaled_hair_dist
+                return points
+            else:
+                error(None, "Norms missing form bem, can't grow hair")
+                self.grow_hair = 0
+        return self.mri.points
+
+    @cached_property
+    def _get_transformed_mri_points(self):
+        points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
+        return points
+
+    @cached_property
+    def _get_transformed_mri_lpa(self):
+        return apply_trans(self.mri_scale_trans, self.mri.lpa)
+
+    @cached_property
+    def _get_transformed_mri_nasion(self):
+        return apply_trans(self.mri_scale_trans, self.mri.nasion)
+
+    @cached_property
+    def _get_transformed_mri_rpa(self):
+        return apply_trans(self.mri_scale_trans, self.mri.rpa)
+
+    @cached_property
+    def _get_transformed_hsp_points(self):
+        return apply_trans(self.head_mri_trans, self.hsp.points)
+
+    @cached_property
+    def _get_transformed_hsp_lpa(self):
+        return apply_trans(self.head_mri_trans, self.hsp.lpa)
+
+    @cached_property
+    def _get_transformed_hsp_nasion(self):
+        return apply_trans(self.head_mri_trans, self.hsp.nasion)
+
+    @cached_property
+    def _get_transformed_hsp_rpa(self):
+        return apply_trans(self.head_mri_trans, self.hsp.rpa)
+
+    @cached_property
+    def _get_lpa_distance(self):
+        d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
+        return np.sqrt(np.dot(d, d))
+
+    @cached_property
+    def _get_nasion_distance(self):
+        d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
+        return np.sqrt(np.dot(d, d))
+
+    @cached_property
+    def _get_rpa_distance(self):
+        d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
+        return np.sqrt(np.dot(d, d))
+
+    @cached_property
+    def _get_point_distance(self):
+        if (len(self.transformed_hsp_points) == 0 or
+                len(self.transformed_mri_points) == 0):
+            return
+        dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
+                      'euclidean')
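+        # cdist returns an (n_hsp, n_mri) matrix of pairwise distances; the
+        # row-wise minimum below is each head shape point's distance to its
+        # nearest MRI point.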
+        dists = np.min(dists, 1)
+        return dists
+
+    @cached_property
+    def _get_fid_eval_str(self):
+        d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
+             self.rpa_distance * 1000)
+        txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
+        return txt
+
+    @cached_property
+    def _get_points_eval_str(self):
+        if self.point_distance is None:
+            return ""
+        av_dist = np.mean(self.point_distance)
+        return "Average Points Error: %.1f mm" % (av_dist * 1000)
+
+    def _get_raw_subject(self):
+        # subject name guessed based on the inst file name
+        if '_' in self.hsp.inst_fname:
+            subject, _ = self.hsp.inst_fname.split('_', 1)
+            if not subject:
+                subject = None
+        else:
+            subject = None
+        return subject
+
+    @on_trait_change('raw_subject')
+    def _on_raw_subject_change(self, subject):
+        if subject in self.mri.subject_source.subjects:
+            self.mri.subject = subject
+        elif 'fsaverage' in self.mri.subject_source.subjects:
+            self.mri.subject = 'fsaverage'
+
+    def omit_hsp_points(self, distance=0, reset=False):
+        """Exclude head shape points that are far away from the MRI head
+
+        Parameters
+        ----------
+        distance : float
+            Exclude all points that are further away from the MRI head than
+            this distance. Previously excluded points are still excluded unless
+            reset=True is specified. A value of distance <= 0 excludes nothing.
+        reset : bool
+            Reset the filter before calculating new omission (default is
+            False).
+        """
+        distance = float(distance)
+        if reset:
+            logger.info("Coregistration: Reset excluded head shape points")
+            with warnings.catch_warnings(record=True):  # Traits None comp
+                self.hsp.points_filter = None
+
+        if distance <= 0:
+            return
+
+        # find the new filter
+        hsp_pts = self.transformed_hsp_points
+        mri_pts = self.transformed_mri_points
+        point_distance = _point_cloud_error(hsp_pts, mri_pts)
+        new_sub_filter = point_distance <= distance
+        n_excluded = np.sum(new_sub_filter == False)  # noqa
+        logger.info("Coregistration: Excluding %i head shape points with "
+                    "distance >= %.3f m.", n_excluded, distance)
+
+        # combine the new filter with the previous filter
+        old_filter = self.hsp.points_filter
+        if old_filter is None:
+            new_filter = new_sub_filter
+        else:
+            new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
+            new_filter[old_filter] = new_sub_filter
+
+        # set the filter
+        with warnings.catch_warnings(record=True):  # comp to None in Traits
+            self.hsp.points_filter = new_filter
+
+    def fit_auricular_points(self):
+        "Find rotation to fit LPA and RPA"
+        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
+        src_fid -= self.hsp.nasion
+
+        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
+        tgt_fid -= self.mri.nasion
+        tgt_fid *= self.scale
+        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
+
+        x0 = (self.rot_x, self.rot_y, self.rot_z)
+        rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
+                                 translate=False, x0=x0, out='params')
+
+        self.rot_x, self.rot_y, self.rot_z = rot
+
+    def fit_fiducials(self):
+        "Find rotation and translation to fit all 3 fiducials"
+        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
+        src_fid -= self.hsp.nasion
+
+        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
+        tgt_fid -= self.mri.nasion
+        tgt_fid *= self.scale
+
+        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
+              self.trans_z)
+        est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
+
+        self.rot_x, self.rot_y, self.rot_z = est[:3]
+        self.trans_x, self.trans_y, self.trans_z = est[3:]
+
+    def fit_hsp_points(self):
+        "Find rotation to fit head shapes"
+        src_pts = self.hsp.points - self.hsp.nasion
+
+        tgt_pts = self.processed_mri_points - self.mri.nasion
+        tgt_pts *= self.scale
+        tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
+
+        x0 = (self.rot_x, self.rot_y, self.rot_z)
+        rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
+                              x0=x0)
+
+        self.rot_x, self.rot_y, self.rot_z = rot
+
+    def fit_scale_auricular_points(self):
+        "Find rotation and MRI scaling based on LPA and RPA"
+        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
+        src_fid -= self.hsp.nasion
+
+        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
+        tgt_fid -= self.mri.nasion
+        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
+
+        x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
+        x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
+                               scale=1, x0=x0, out='params')
+
+        self.scale_x = 1. / x[3]
+        self.rot_x, self.rot_y, self.rot_z = x[:3]
+
+    def fit_scale_fiducials(self):
+        "Find translation, rotation and scaling based on the three fiducials"
+        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
+        src_fid -= self.hsp.nasion
+
+        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
+        tgt_fid -= self.mri.nasion
+
+        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
+              self.trans_z, 1. / self.scale_x,)
+        est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
+                                 scale=1, x0=x0, out='params')
+
+        self.scale_x = 1. / est[6]
+        self.rot_x, self.rot_y, self.rot_z = est[:3]
+        self.trans_x, self.trans_y, self.trans_z = est[3:6]
+
+    def fit_scale_hsp_points(self):
+        "Find MRI scaling and rotation to match head shape points"
+        src_pts = self.hsp.points - self.hsp.nasion
+
+        tgt_pts = self.processed_mri_points - self.mri.nasion
+
+        if self.n_scale_params == 1:
+            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
+            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
+                                  translate=False, scale=1, x0=x0)
+
+            self.scale_x = 1. / est[3]
+        else:
+            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
+                  1. / self.scale_y, 1. / self.scale_z)
+            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
+                                  translate=False, scale=3, x0=x0)
+            self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
+
+        self.rot_x, self.rot_y, self.rot_z = est[:3]
+
+    def get_scaling_job(self, subject_to):
+        desc = 'Scaling %s' % subject_to
+        func = scale_mri
+        args = (self.mri.subject, subject_to, self.scale)
+        kwargs = dict(overwrite=True, subjects_dir=self.mri.subjects_dir)
+        return (desc, func, args, kwargs)
+
+    def get_prepare_bem_model_job(self, subject_to):
+        subjects_dir = self.mri.subjects_dir
+        subject_from = self.mri.subject
+
+        bem_name = 'inner_skull-bem'
+        bem_file = bem_fname.format(subjects_dir=subjects_dir,
+                                    subject=subject_from, name=bem_name)
+        if not os.path.exists(bem_file):
+            pattern = bem_fname.format(subjects_dir=subjects_dir,
+                                       subject=subject_to, name='(.+-bem)')
+            bem_dir, bem_file = os.path.split(pattern)
+            m = None
+            bem_file_pattern = re.compile(bem_file)
+            for name in os.listdir(bem_dir):
+                m = bem_file_pattern.match(name)
+                if m is not None:
+                    break
+
+            if m is None:
+                pattern = bem_fname.format(subjects_dir=subjects_dir,
+                                           subject=subject_to, name='*-bem')
+                err = ("No bem file found; looking for files matching "
+                       "%s" % pattern)
+                error(None, err)
+
+            bem_name = m.group(1)
+
+        bem_file = bem_fname.format(subjects_dir=subjects_dir,
+                                    subject=subject_to, name=bem_name)
+
+        # job
+        desc = 'mne_prepare_bem_model for %s' % subject_to
+        func = prepare_bem_model
+        args = (bem_file,)
+        kwargs = {}
+        return (desc, func, args, kwargs)
+
+    def load_trans(self, fname):
+        """Load the head-mri transform from a fif file
+
+        Parameters
+        ----------
+        fname : str
+            File path.
+        """
+        info = read_trans(fname)
+        head_mri_trans = info['trans']
+        self.set_trans(head_mri_trans)
+
+    def reset(self):
+        """Reset all the parameters affecting the coregistration"""
+        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
+                           'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
+                           'trans_x', 'trans_y', 'trans_z'))
+
+    def set_trans(self, head_mri_trans):
+        """Set rotation and translation parameters from a transformation matrix
+
+        Parameters
+        ----------
+        head_mri_trans : array, shape (4, 4)
+            Transformation matrix from head to MRI space.
+        """
+        x, y, z = -self.mri_origin[0]
+        mri_tgt_trans = translation(x, y, z)
+        head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
+
+        x, y, z = self.hsp.nasion[0]
+        src_hsp_trans = translation(x, y, z)
+        src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
+
+        rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
+        x, y, z = src_tgt_trans[:3, 3]
+
+        self.rot_x = rot_x
+        self.rot_y = rot_y
+        self.rot_z = rot_z
+        self.trans_x = x
+        self.trans_y = y
+        self.trans_z = z
+
+    def save_trans(self, fname):
+        """Save the head-mri transform as a fif file
+
+        Parameters
+        ----------
+        fname : str
+            Target file path.
+        """
+        if not self.can_save:
+            raise RuntimeError("Not enough information for saving transform")
+        write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
+
+
+class CoregFrameHandler(Handler):
+    """Handler that checks for unfinished processes before closing its window
+    """
+    def close(self, info, is_ok):
+        if info.object.queue.unfinished_tasks:
+            information(None, "Can not close the window while saving is still "
+                        "in progress. Please wait until all MRIs are "
+                        "processed.", "Saving Still in Progress")
+            return False
+        else:
+            return True
+
+
+class CoregPanel(HasPrivateTraits):
+    model = Instance(CoregModel)
+
+    # parameters
+    reset_params = Button(label='Reset')
+    grow_hair = DelegatesTo('model')
+    n_scale_params = DelegatesTo('model')
+    scale_step = Float(1.01)
+    scale_x = DelegatesTo('model')
+    scale_x_dec = Button('-')
+    scale_x_inc = Button('+')
+    scale_y = DelegatesTo('model')
+    scale_y_dec = Button('-')
+    scale_y_inc = Button('+')
+    scale_z = DelegatesTo('model')
+    scale_z_dec = Button('-')
+    scale_z_inc = Button('+')
+    rot_step = Float(0.01)
+    rot_x = DelegatesTo('model')
+    rot_x_dec = Button('-')
+    rot_x_inc = Button('+')
+    rot_y = DelegatesTo('model')
+    rot_y_dec = Button('-')
+    rot_y_inc = Button('+')
+    rot_z = DelegatesTo('model')
+    rot_z_dec = Button('-')
+    rot_z_inc = Button('+')
+    trans_step = Float(0.001)
+    trans_x = DelegatesTo('model')
+    trans_x_dec = Button('-')
+    trans_x_inc = Button('+')
+    trans_y = DelegatesTo('model')
+    trans_y_dec = Button('-')
+    trans_y_inc = Button('+')
+    trans_z = DelegatesTo('model')
+    trans_z_dec = Button('-')
+    trans_z_inc = Button('+')
+
+    # fitting
+    has_fid_data = DelegatesTo('model')
+    has_pts_data = DelegatesTo('model')
+    # fitting with scaling
+    fits_hsp_points = Button(label='Fit Head Shape')
+    fits_fid = Button(label='Fit Fiducials')
+    fits_ap = Button(label='Fit LPA/RPA')
+    # fitting without scaling
+    fit_hsp_points = Button(label='Fit Head Shape')
+    fit_fid = Button(label='Fit Fiducials')
+    fit_ap = Button(label='Fit LPA/RPA')
+
+    # fit info
+    fid_eval_str = DelegatesTo('model')
+    points_eval_str = DelegatesTo('model')
+
+    # saving
+    can_prepare_bem_model = DelegatesTo('model')
+    can_save = DelegatesTo('model')
+    prepare_bem_model = DelegatesTo('model')
+    save = Button(label="Save As...")
+    load_trans = Button
+    queue = Instance(queue.Queue, ())
+    queue_feedback = Str('')
+    queue_current = Str('')
+    queue_len = Int(0)
+    queue_len_str = Property(Str, depends_on=['queue_len'])
+    error = Str('')
+
+    view = View(VGroup(Item('grow_hair', show_label=True),
+                       Item('n_scale_params', label='MRI Scaling',
+                            style='custom', show_label=True,
+                            editor=EnumEditor(values={0: '1:No Scaling',
+                                                      1: '2:1 Param',
+                                                      3: '3:3 Params'},
+                                              cols=3)),
+                       VGrid(Item('scale_x', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Scale along "
+                                  "right-left axis",
+                                  enabled_when='n_scale_params > 0'),
+                             Item('scale_x_dec',
+                                  enabled_when='n_scale_params > 0'),
+                             Item('scale_x_inc',
+                                  enabled_when='n_scale_params > 0'),
+                             Item('scale_step', tooltip="Scaling step",
+                                  enabled_when='n_scale_params > 0'),
+                             Item('scale_y', editor=laggy_float_editor,
+                                  show_label=True,
+                                  enabled_when='n_scale_params > 1',
+                                  tooltip="Scale along anterior-posterior "
+                                  "axis"),
+                             Item('scale_y_dec',
+                                  enabled_when='n_scale_params > 1'),
+                             Item('scale_y_inc',
+                                  enabled_when='n_scale_params > 1'),
+                             Label('(Step)'),
+                             Item('scale_z', editor=laggy_float_editor,
+                                  show_label=True,
+                                  enabled_when='n_scale_params > 1',
+                                  tooltip="Scale along anterior-posterior "
+                                  "axis"),
+                             Item('scale_z_dec',
+                                  enabled_when='n_scale_params > 1'),
+                             Item('scale_z_inc',
+                                  enabled_when='n_scale_params > 1'),
+                             show_labels=False, columns=4),
+                       HGroup(Item('fits_hsp_points',
+                                   enabled_when='n_scale_params',
+                                   tooltip="Rotate the digitizer head shape "
+                                   "and scale the MRI so as to minimize the "
+                                   "distance from each digitizer point to the "
+                                   "closest MRI point"),
+                              Item('fits_ap',
+                                   enabled_when='n_scale_params == 1',
+                                   tooltip="While leaving the nasion in "
+                                   "place, rotate the digitizer head shape "
+                                   "and scale the MRI so as to minimize the "
+                                   "distance of the two auricular points"),
+                              Item('fits_fid',
+                                   enabled_when='n_scale_params == 1',
+                                   tooltip="Move and rotate the digitizer "
+                                   "head shape, and scale the MRI so as to "
+                                   "minimize the distance of the three "
+                                   "fiducials."),
+                              show_labels=False),
+                       '_',
+                       Label("Translation:"),
+                       VGrid(Item('trans_x', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Move along "
+                                  "right-left axis"),
+                             'trans_x_dec', 'trans_x_inc',
+                             Item('trans_step', tooltip="Movement step"),
+                             Item('trans_y', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Move along "
+                                  "anterior-posterior axis"),
+                             'trans_y_dec', 'trans_y_inc',
+                             Label('(Step)'),
+                             Item('trans_z', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Move along "
+                                  "anterior-posterior axis"),
+                             'trans_z_dec', 'trans_z_inc',
+                             show_labels=False, columns=4),
+                       Label("Rotation:"),
+                       VGrid(Item('rot_x', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Rotate along "
+                                  "right-left axis"),
+                             'rot_x_dec', 'rot_x_inc',
+                             Item('rot_step', tooltip="Rotation step"),
+                             Item('rot_y', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Rotate along "
+                                  "anterior-posterior axis"),
+                             'rot_y_dec', 'rot_y_inc',
+                             Label('(Step)'),
+                             Item('rot_z', editor=laggy_float_editor,
+                                  show_label=True, tooltip="Rotate along "
+                                  "anterior-posterior axis"),
+                             'rot_z_dec', 'rot_z_inc',
+                             show_labels=False, columns=4),
+                       # buttons
+                       HGroup(Item('fit_hsp_points',
+                                   enabled_when='has_pts_data',
+                                   tooltip="Rotate the head shape (around the "
+                                   "nasion) so as to minimize the distance "
+                                   "from each head shape point to its closest "
+                                   "MRI point"),
+                              Item('fit_ap', enabled_when='has_fid_data',
+                                   tooltip="Try to match the LPA and the RPA, "
+                                   "leaving the Nasion in place"),
+                              Item('fit_fid', enabled_when='has_fid_data',
+                                   tooltip="Move and rotate the head shape so "
+                                   "as to minimize the distance between the "
+                                   "MRI and head shape fiducials"),
+                              Item('load_trans', enabled_when='has_fid_data'),
+                              show_labels=False),
+                       '_',
+                       Item('fid_eval_str', style='readonly'),
+                       Item('points_eval_str', style='readonly'),
+                       '_',
+                       HGroup(Item('prepare_bem_model'),
+                              Label("Run mne_prepare_bem_model"),
+                              show_labels=False,
+                              enabled_when='can_prepare_bem_model'),
+                       HGroup(Item('save', enabled_when='can_save',
+                                   tooltip="Save the trans file and (if "
+                                   "scaling is enabled) the scaled MRI"),
+                              Item('reset_params', tooltip="Reset all "
+                                   "coregistration parameters"),
+                              show_labels=False),
+                       Item('queue_feedback', style='readonly'),
+                       Item('queue_current', style='readonly'),
+                       Item('queue_len_str', style='readonly'),
+                       show_labels=False),
+                kind='panel', buttons=[UndoButton])
+
+    def __init__(self, *args, **kwargs):
+        super(CoregPanel, self).__init__(*args, **kwargs)
+
+        # setup save worker
+        def worker():
+            while True:
+                desc, cmd, args, kwargs = self.queue.get()
+
+                self.queue_len -= 1
+                self.queue_current = 'Processing: %s' % desc
+
+                # task
+                try:
+                    cmd(*args, **kwargs)
+                except Exception as err:
+                    self.error = str(err)
+                    res = "Error in %s"
+                else:
+                    res = "Done: %s"
+
+                # finalize
+                self.queue_current = ''
+                self.queue_feedback = res % desc
+                self.queue.task_done()
+
+        t = Thread(target=worker)
+        t.daemon = True
+        t.start()
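+        # Each queued job is a (desc, func, args, kwargs) tuple (see
+        # CoregModel.get_scaling_job); daemon=True ensures the worker does
+        # not keep the interpreter alive on exit.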
+
+    @cached_property
+    def _get_queue_len_str(self):
+        if self.queue_len:
+            return "Queue length: %i" % self.queue_len
+        else:
+            return ''
+
+    @cached_property
+    def _get_rotation(self):
+        rot = np.array([self.rot_x, self.rot_y, self.rot_z])
+        return rot
+
+    @cached_property
+    def _get_src_pts(self):
+        return self.hsp_pts - self.hsp_fid[0]
+
+    @cached_property
+    def _get_src_fid(self):
+        return self.hsp_fid - self.hsp_fid[0]
+
+    @cached_property
+    def _get_tgt_origin(self):
+        return self.mri_fid[0] * self.scale
+
+    @cached_property
+    def _get_tgt_pts(self):
+        pts = self.mri_pts * self.scale
+        pts -= self.tgt_origin
+        return pts
+
+    @cached_property
+    def _get_tgt_fid(self):
+        fid = self.mri_fid * self.scale
+        fid -= self.tgt_origin
+        return fid
+
+    @cached_property
+    def _get_translation(self):
+        trans = np.array([self.trans_x, self.trans_y, self.trans_z])
+        return trans
+
+    def _fit_ap_fired(self):
+        GUI.set_busy()
+        self.model.fit_auricular_points()
+        GUI.set_busy(False)
+
+    def _fit_fid_fired(self):
+        GUI.set_busy()
+        self.model.fit_fiducials()
+        GUI.set_busy(False)
+
+    def _fit_hsp_points_fired(self):
+        GUI.set_busy()
+        self.model.fit_hsp_points()
+        GUI.set_busy(False)
+
+    def _fits_ap_fired(self):
+        GUI.set_busy()
+        self.model.fit_scale_auricular_points()
+        GUI.set_busy(False)
+
+    def _fits_fid_fired(self):
+        GUI.set_busy()
+        self.model.fit_scale_fiducials()
+        GUI.set_busy(False)
+
+    def _fits_hsp_points_fired(self):
+        GUI.set_busy()
+        self.model.fit_scale_hsp_points()
+        GUI.set_busy(False)
+
+    def _n_scale_params_changed(self, new):
+        if not new:
+            return
+
+        # Make sure that MNE_ROOT environment variable is set
+        if not set_mne_root(True):
+            err = ("MNE_ROOT environment variable could not be set. "
+                   "You will be able to scale MRIs, but the "
+                   "mne_prepare_bem_model tool will fail. Please install "
+                   "MNE.")
+            warning(None, err, "MNE_ROOT Not Set")
+
+    def _reset_params_fired(self):
+        self.model.reset()
+
+    def _rot_x_dec_fired(self):
+        self.rot_x -= self.rot_step
+
+    def _rot_x_inc_fired(self):
+        self.rot_x += self.rot_step
+
+    def _rot_y_dec_fired(self):
+        self.rot_y -= self.rot_step
+
+    def _rot_y_inc_fired(self):
+        self.rot_y += self.rot_step
+
+    def _rot_z_dec_fired(self):
+        self.rot_z -= self.rot_step
+
+    def _rot_z_inc_fired(self):
+        self.rot_z += self.rot_step
+
+    def _load_trans_fired(self):
+        # find trans file destination
+        raw_dir = os.path.dirname(self.model.hsp.file)
+        subject = self.model.mri.subject
+        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
+        dlg = FileDialog(action="open", wildcard=trans_wildcard,
+                         default_path=trans_file)
+        dlg.open()
+        if dlg.return_code != OK:
+            return
+        trans_file = dlg.path
+        self.model.load_trans(trans_file)
+
+    def _save_fired(self):
+        if self.n_scale_params:
+            subjects_dir = self.model.mri.subjects_dir
+            subject_from = self.model.mri.subject
+            subject_to = self.model.raw_subject or self.model.mri.subject
+        else:
+            subject_to = self.model.mri.subject
+
+        # ask for target subject
+        if self.n_scale_params:
+            mridlg = NewMriDialog(subjects_dir=subjects_dir,
+                                  subject_from=subject_from,
+                                  subject_to=subject_to)
+            ui = mridlg.edit_traits(kind='modal')
+            if ui.result != True:  # noqa
+                return
+            subject_to = mridlg.subject_to
+
+        # find bem file to run mne_prepare_bem_model
+        if self.can_prepare_bem_model and self.prepare_bem_model:
+            bem_job = self.model.get_prepare_bem_model_job(subject_to)
+        else:
+            bem_job = None
+
+        # find trans file destination
+        raw_dir = os.path.dirname(self.model.hsp.file)
+        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
+        dlg = FileDialog(action="save as", wildcard=trans_wildcard,
+                         default_path=trans_file)
+        dlg.open()
+        if dlg.return_code != OK:
+            return
+        trans_file = dlg.path
+        if not trans_file.endswith('.fif'):
+            trans_file = trans_file + '.fif'
+            if os.path.exists(trans_file):
+                answer = confirm(None, "The file %r already exists. Should it "
+                                 "be replaced?", "Overwrite File?")
+                if answer != YES:
+                    return
+
+        # save the trans file
+        try:
+            self.model.save_trans(trans_file)
+        except Exception as e:
+            error(None, str(e), "Error Saving Trans File")
+            return
+
+        # save the scaled MRI
+        if self.n_scale_params:
+            job = self.model.get_scaling_job(subject_to)
+            self.queue.put(job)
+            self.queue_len += 1
+
+            if bem_job is not None:
+                self.queue.put(bem_job)
+                self.queue_len += 1
+
+    def _scale_x_dec_fired(self):
+        step = 1. / self.scale_step
+        self.scale_x *= step
+
+    def _scale_x_inc_fired(self):
+        self.scale_x *= self.scale_step
+
+    def _scale_x_changed(self, old, new):
+        if self.n_scale_params == 1:
+            self.scale_y = new
+            self.scale_z = new
+
+    def _scale_y_dec_fired(self):
+        step = 1. / self.scale_step
+        self.scale_y *= step
+
+    def _scale_y_inc_fired(self):
+        self.scale_y *= self.scale_step
+
+    def _scale_z_dec_fired(self):
+        step = 1. / self.scale_step
+        self.scale_z *= step
+
+    def _scale_z_inc_fired(self):
+        self.scale_z *= self.scale_step
+
+    def _trans_x_dec_fired(self):
+        self.trans_x -= self.trans_step
+
+    def _trans_x_inc_fired(self):
+        self.trans_x += self.trans_step
+
+    def _trans_y_dec_fired(self):
+        self.trans_y -= self.trans_step
+
+    def _trans_y_inc_fired(self):
+        self.trans_y += self.trans_step
+
+    def _trans_z_dec_fired(self):
+        self.trans_z -= self.trans_step
+
+    def _trans_z_inc_fired(self):
+        self.trans_z += self.trans_step
+
+
+class NewMriDialog(HasPrivateTraits):
+    """Dialog to determine target subject name for a scaled MRI"""
+    subjects_dir = Directory
+    subject_to = Str
+    subject_from = Str
+    subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
+    subject_to_exists = Property(Bool, depends_on='subject_to_dir')
+
+    feedback = Str(' ' * 100)
+    can_overwrite = Bool
+    overwrite = Bool
+    can_save = Bool
+
+    view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
+                     "new folder with this name will be created in the "
+                     "current subjects_dir for the scaled MRI files"),
+                Item('feedback', show_label=False, style='readonly'),
+                Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
+                     "subject with the chosen name exists, delete the old "
+                     "subject"),
+                width=500,
+                buttons=[CancelButton,
+                         Action(name='OK', enabled_when='can_save')])
+
+    def _can_overwrite_changed(self, new):
+        if not new:
+            self.overwrite = False
+
+    @cached_property
+    def _get_subject_to_dir(self):
+        return os.path.join(self.subjects_dir, self.subject_to)
+
+    @cached_property
+    def _get_subject_to_exists(self):
+        if not self.subject_to:
+            return False
+        elif os.path.exists(self.subject_to_dir):
+            return True
+        else:
+            return False
+
+    @on_trait_change('subject_to_dir,overwrite')
+    def update_dialog(self):
+        if not self.subject_to:
+            self.feedback = "No subject specified..."
+            self.can_save = False
+            self.can_overwrite = False
+        elif self.subject_to == self.subject_from:
+            self.feedback = "Must be different from MRI source subject..."
+            self.can_save = False
+            self.can_overwrite = False
+        elif self.subject_to_exists:
+            if self.overwrite:
+                self.feedback = "%s will be overwritten." % self.subject_to
+                self.can_save = True
+                self.can_overwrite = True
+            else:
+                self.feedback = "Subject already exists..."
+                self.can_save = False
+                self.can_overwrite = True
+        else:
+            self.feedback = "Name ok."
+            self.can_save = True
+            self.can_overwrite = False
+
+
+def _make_view(tabbed=False, split=False, scene_width=-1):
+    """Create a view for the CoregFrame
+
+    Parameters
+    ----------
+    tabbed : bool
+        Combine the data source panel and the coregistration panel into a
+        single panel with tabs.
+    split : bool
+        Split the main panels with a movable splitter (good for Qt4 but
+        unnecessary for the wx backend).
+    scene_width : int
+        Specify a minimum width for the 3d scene (in pixels).
+
+    Returns
+    -------
+    view : traits View
+        View object for the CoregFrame.
+    """
+    view_options = VGroup(Item('headview', style='custom'), 'view_options',
+                          show_border=True, show_labels=False, label='View')
+
+    scene = VGroup(Item('scene', show_label=False,
+                        editor=SceneEditor(scene_class=MayaviScene),
+                        dock='vertical', width=500),
+                   view_options)
+
+    data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
+                               label="MRI Subject", show_border=True,
+                               show_labels=False),
+                        VGroup(Item('lock_fiducials', style='custom',
+                                    editor=EnumEditor(cols=2,
+                                                      values={False: '2:Edit',
+                                                              True: '1:Lock'}),
+                                    enabled_when='fid_ok'),
+                               HGroup('hsp_always_visible',
+                                      Label("Always Show Head Shape Points"),
+                                      show_labels=False),
+                               Item('fid_panel', style='custom'),
+                               label="MRI Fiducials", show_border=True,
+                               show_labels=False),
+                        VGroup(Item('raw_src', style="custom"),
+                               HGroup(Item('distance', show_label=True),
+                                      'omit_points', 'reset_omit_points',
+                                      show_labels=False),
+                               Item('omitted_info', style='readonly',
+                                    show_label=False),
+                               label='Head Shape Source (Raw/Epochs/Evoked)',
+                               show_border=True, show_labels=False),
+                        show_labels=False, label="Data Source")
+
+    coreg_panel = VGroup(Item('coreg_panel', style='custom'),
+                         label="Coregistration", show_border=True,
+                         show_labels=False,
+                         enabled_when="fid_panel.locked")
+
+    if split:
+        main_layout = 'split'
+    else:
+        main_layout = 'normal'
+
+    if tabbed:
+        main = HGroup(scene,
+                      Group(data_panel, coreg_panel, show_labels=False,
+                            layout='tabbed'),
+                      layout=main_layout)
+    else:
+        main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
+                      layout=main_layout)
+
+    view = View(main, resizable=True, handler=CoregFrameHandler(),
+                buttons=NoButtons)
+    return view
+
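+# The View returned here is installed as the class-level default of
+# CoregFrame below; a customized layout can also be supplied explicitly,
+# e.g. (sketch): CoregFrame().configure_traits(view=_make_view(split=True)).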
+
+class ViewOptionsPanel(HasTraits):
+    mri_obj = Instance(SurfaceObject)
+    hsp_obj = Instance(PointObject)
+    view = View(VGroup(Item('mri_obj', style='custom',  # show_border=True,
+                            label="MRI Head Surface"),
+                       Item('hsp_obj', style='custom',  # show_border=True,
+                            label="Head Shape Points")),
+                title="View Options")
+
+
+class CoregFrame(HasTraits):
+    """GUI for head-MRI coregistration
+    """
+    model = Instance(CoregModel, ())
+
+    scene = Instance(MlabSceneModel, ())
+    headview = Instance(HeadViewController)
+
+    subject_panel = Instance(SubjectSelectorPanel)
+    fid_panel = Instance(FiducialsPanel)
+    coreg_panel = Instance(CoregPanel)
+    raw_src = DelegatesTo('model', 'hsp')
+
+    # Omit Points
+    distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
+                     "head shape points from MRI in mm")
+    omit_points = Button(label='Omit Points', desc="Omit head shape points "
+                         "for the purpose of the automatic coregistration "
+                         "procedure.")
+    reset_omit_points = Button(label='Reset Omission', desc="Reset the "
+                               "omission of head shape points to include all.")
+    omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
+
+    fid_ok = DelegatesTo('model', 'mri.fid_ok')
+    lock_fiducials = DelegatesTo('model')
+    hsp_always_visible = Bool(False, label="Always Show Head Shape")
+
+    # visualization
+    hsp_obj = Instance(PointObject)
+    mri_obj = Instance(SurfaceObject)
+    lpa_obj = Instance(PointObject)
+    nasion_obj = Instance(PointObject)
+    rpa_obj = Instance(PointObject)
+    hsp_lpa_obj = Instance(PointObject)
+    hsp_nasion_obj = Instance(PointObject)
+    hsp_rpa_obj = Instance(PointObject)
+    hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
+
+    view_options = Button(label="View Options")
+
+    picker = Instance(object)
+
+    view_options_panel = Instance(ViewOptionsPanel)
+
+    # Processing
+    queue = DelegatesTo('coreg_panel')
+
+    view = _make_view()
+
+    def _subject_panel_default(self):
+        return SubjectSelectorPanel(model=self.model.mri.subject_source)
+
+    def _fid_panel_default(self):
+        panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
+        return panel
+
+    def _coreg_panel_default(self):
+        panel = CoregPanel(model=self.model)
+        return panel
+
+    def _headview_default(self):
+        return HeadViewController(scene=self.scene, system='RAS')
+
+    def __init__(self, raw=None, subject=None, subjects_dir=None):
+        super(CoregFrame, self).__init__()
+
+        subjects_dir = get_subjects_dir(subjects_dir)
+        if (subjects_dir is not None) and os.path.isdir(subjects_dir):
+            self.model.mri.subjects_dir = subjects_dir
+
+        if subject is not None:
+            self.model.mri.subject = subject
+
+        if raw is not None:
+            self.model.hsp.file = raw
+
+    @on_trait_change('scene.activated')
+    def _init_plot(self):
+        self.scene.disable_render = True
+
+        lpa_color = defaults['lpa_color']
+        nasion_color = defaults['nasion_color']
+        rpa_color = defaults['rpa_color']
+
+        # MRI scalp
+        color = defaults['mri_color']
+        self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
+                                     color=color, tri=self.model.mri.tris,
+                                     scene=self.scene)
+        # on_trait_change was unreliable, so link it another way:
+        self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
+        self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
+                              mutual=False)
+        self.fid_panel.hsp_obj = self.mri_obj
+
+        # MRI Fiducials
+        point_scale = defaults['mri_fid_scale']
+        self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
+                                   point_scale=point_scale)
+        self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
+        self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
+
+        self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
+                                      point_scale=point_scale)
+        self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
+                                  mutual=False)
+        self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
+
+        self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
+                                   point_scale=point_scale)
+        self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
+        self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
+
+        # Digitizer Head Shape
+        color = defaults['hsp_point_color']
+        point_scale = defaults['hsp_points_scale']
+        p = PointObject(view='cloud', scene=self.scene, color=color,
+                        point_scale=point_scale, resolution=5)
+        self.hsp_obj = p
+        self.model.hsp.sync_trait('points', p, mutual=False)
+        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
+        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
+
+        # Digitizer Fiducials
+        point_scale = defaults['hsp_fid_scale']
+        opacity = defaults['hsp_fid_opacity']
+        p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
+                        point_scale=point_scale)
+        self.hsp_lpa_obj = p
+        self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
+        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
+        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
+
+        p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
+                        point_scale=point_scale)
+        self.hsp_nasion_obj = p
+        self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
+        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
+        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
+
+        p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
+                        point_scale=point_scale)
+        self.hsp_rpa_obj = p
+        self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
+        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
+        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
+
+        on_pick = self.scene.mayavi_scene.on_mouse_pick
+        if not _testing_mode():
+            self.picker = on_pick(self.fid_panel._on_pick, type='cell')
+
+        self.headview.left = True
+        self.scene.disable_render = False
+
+        self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
+                                                   hsp_obj=self.hsp_obj)
+
+    @cached_property
+    def _get_hsp_visible(self):
+        return self.hsp_always_visible or self.lock_fiducials
+
+    @cached_property
+    def _get_omitted_info(self):
+        if self.model.hsp.n_omitted == 0:
+            return "No points omitted"
+        elif self.model.hsp.n_omitted == 1:
+            return "1 point omitted"
+        else:
+            return "%i points omitted" % self.model.hsp.n_omitted
+
+    def _omit_points_fired(self):
+        distance = self.distance / 1000.
+        self.model.omit_hsp_points(distance)
+
+    def _reset_omit_points_fired(self):
+        self.model.omit_hsp_points(0, True)
+
+    @on_trait_change('model.mri.tris')
+    def _on_mri_src_change(self):
+        if self.mri_obj is None:
+            return
+        if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
+            self.mri_obj.clear()
+            return
+
+        self.mri_obj.points = self.model.mri.points
+        self.mri_obj.tri = self.model.mri.tris
+        self.mri_obj.plot()
+
+    # automatically lock fiducials if a good fiducials file is loaded
+    @on_trait_change('model.mri.fid_file')
+    def _on_fid_file_loaded(self):
+        if self.model.mri.fid_file:
+            self.fid_panel.locked = True
+        else:
+            self.fid_panel.locked = False
+
+    def _view_options_fired(self):
+        self.view_options_panel.edit_traits()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_fiducials_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_fiducials_gui.py
new file mode 100644
index 0000000..e0a2ff2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_fiducials_gui.py
@@ -0,0 +1,453 @@
+"""Mayavi/traits GUI for setting MRI fiducials"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+from glob import glob
+import os
+from ..externals.six.moves import map
+
+# allow import without traits
+try:
+    from mayavi.core.ui.mayavi_scene import MayaviScene
+    from mayavi.tools.mlab_scene_model import MlabSceneModel
+    import numpy as np
+    from pyface.api import confirm, FileDialog, OK, YES
+    from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
+                            cached_property, DelegatesTo, Event, Instance,
+                            Property, Array, Bool, Button, Enum)
+    from traitsui.api import HGroup, Item, VGroup, View
+    from traitsui.menu import NoButtons
+    from tvtk.pyface.scene_editor import SceneEditor
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
+        Array = Bool = Button = DelegatesTo = Enum = Event = Instance = \
+        Property = View = Item = HGroup = VGroup = SceneEditor = \
+        NoButtons = trait_wraith
+
+from ..coreg import fid_fname, fid_fname_general, head_bem_fname
+from ..io import write_fiducials
+from ..io.constants import FIFF
+from ..utils import get_subjects_dir, logger
+from ._file_traits import (BemSource, fid_wildcard, FiducialsSource,
+                           MRISubjectSource, SubjectSelectorPanel)
+from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
+                      headview_borders)
+
+
+class MRIHeadWithFiducialsModel(HasPrivateTraits):
+    """Represent an MRI head shape with fiducials
+
+    Attributes
+    ----------
+    points : array (n_points, 3)
+        MRI head surface points.
+    tris : array (n_tris, 3)
+        Triangles based on points.
+    lpa : array (1, 3)
+        Left peri-auricular point coordinates.
+    nasion : array (1, 3)
+        Nasion coordinates.
+    rpa : array (1, 3)
+        Right peri-auricular point coordinates.
+    """
+    subject_source = Instance(MRISubjectSource, ())
+    bem = Instance(BemSource, ())
+    fid = Instance(FiducialsSource, ())
+
+    fid_file = DelegatesTo('fid', 'file')
+    fid_fname = DelegatesTo('fid', 'fname')
+    fid_points = DelegatesTo('fid', 'points')
+    subjects_dir = DelegatesTo('subject_source')
+    subject = DelegatesTo('subject_source')
+    subject_has_bem = DelegatesTo('subject_source')
+    points = DelegatesTo('bem')
+    norms = DelegatesTo('bem')
+    tris = DelegatesTo('bem')
+    lpa = Array(float, (1, 3))
+    nasion = Array(float, (1, 3))
+    rpa = Array(float, (1, 3))
+
+    reset = Event(desc="Reset fiducials to the file.")
+
+    # info
+    can_save = Property(depends_on=['file', 'can_save_as'])
+    can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
+    can_reset = Property(depends_on=['file', 'fid.points', 'lpa', 'nasion',
+                                     'rpa'])
+    fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'], desc="All points "
+                      "are set")
+    default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
+                                 desc="the default file name for the "
+                                 "fiducials fif file")
+
+    # switch for the GUI (has no effect in the model)
+    lock_fiducials = Bool(False, desc="Used by GIU, has no effect in the "
+                          "model.")
+
+    @on_trait_change('fid_points')
+    def reset_fiducials(self):
+        if self.fid_points is not None:
+            self.lpa = self.fid_points[0:1]
+            self.nasion = self.fid_points[1:2]
+            self.rpa = self.fid_points[2:3]
+
+    def save(self, fname=None):
+        """Save the current fiducials to a file
+
+        Parameters
+        ----------
+        fname : str
+            Destination file path. If None, will use the current fid filename
+            if available, or else use the default pattern.
+        """
+        if fname is None:
+            fname = self.fid_file
+        if not fname:
+            fname = self.default_fid_fname
+
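+        # FIFF convention: kind 1 is FIFFV_POINT_CARDINAL, and idents 1, 2
+        # and 3 denote the LPA, nasion and RPA, respectively.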
+        dig = [{'kind': 1, 'ident': 1, 'r': np.array(self.lpa[0])},
+               {'kind': 1, 'ident': 2, 'r': np.array(self.nasion[0])},
+               {'kind': 1, 'ident': 3, 'r': np.array(self.rpa[0])}]
+        write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
+        self.fid_file = fname
+
+    @cached_property
+    def _get_can_reset(self):
+        if not self.fid_file:
+            return False
+        elif np.any(self.lpa != self.fid.points[0:1]):
+            return True
+        elif np.any(self.nasion != self.fid.points[1:2]):
+            return True
+        elif np.any(self.rpa != self.fid.points[2:3]):
+            return True
+        return False
+
+    @cached_property
+    def _get_can_save_as(self):
+        can = not (np.all(self.nasion == self.lpa) or
+                   np.all(self.nasion == self.rpa) or
+                   np.all(self.lpa == self.rpa))
+        return can
+
+    @cached_property
+    def _get_can_save(self):
+        if not self.can_save_as:
+            return False
+        elif self.fid_file:
+            return True
+        elif self.subjects_dir and self.subject:
+            return True
+        else:
+            return False
+
+    @cached_property
+    def _get_default_fid_fname(self):
+        fname = fid_fname.format(subjects_dir=self.subjects_dir,
+                                 subject=self.subject)
+        return fname
+
+    @cached_property
+    def _get_fid_ok(self):
+        return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))
+
+    def _reset_fired(self):
+        self.reset_fiducials()
+
+    # if subject changed because of a change of subjects_dir this was not
+    # triggered
+    @on_trait_change('subjects_dir,subject')
+    def _subject_changed(self):
+        subject = self.subject
+        subjects_dir = self.subjects_dir
+        if not subjects_dir or not subject:
+            return
+
+        # update bem head
+        path = head_bem_fname.format(subjects_dir=subjects_dir,
+                                     subject=subject)
+        self.bem.file = path
+
+        # find fiducials file
+        path = fid_fname.format(subjects_dir=subjects_dir, subject=subject)
+        if os.path.exists(path):
+            self.fid_file = path
+            self.lock_fiducials = True
+        else:
+            path = fid_fname_general.format(subjects_dir=subjects_dir,
+                                            subject=subject, head='*')
+            fnames = glob(path)
+            if fnames:
+                path = fnames[0]
+                self.fid.file = path
+                self.lock_fiducials = True
+            else:
+                self.fid.reset_traits(['file'])
+                self.lock_fiducials = False
+
+        # does not seem to happen by itself ... so hard code it:
+        self.reset_fiducials()
+
+
+class FiducialsPanel(HasPrivateTraits):
+    """Set fiducials on an MRI surface"""
+    model = Instance(MRIHeadWithFiducialsModel)
+
+    fid_file = DelegatesTo('model')
+    fid_fname = DelegatesTo('model')
+    lpa = DelegatesTo('model')
+    nasion = DelegatesTo('model')
+    rpa = DelegatesTo('model')
+    can_save = DelegatesTo('model')
+    can_save_as = DelegatesTo('model')
+    can_reset = DelegatesTo('model')
+    fid_ok = DelegatesTo('model')
+    locked = DelegatesTo('model', 'lock_fiducials')
+
+    set = Enum('LPA', 'Nasion', 'RPA')
+    current_pos = Array(float, (1, 3))  # for editing
+
+    save_as = Button(label='Save As...')
+    save = Button(label='Save')
+    reset_fid = Button(label="Reset to File")
+
+    headview = Instance(HeadViewController)
+    hsp_obj = Instance(SurfaceObject)
+
+    picker = Instance(object)
+
+    # the layout of the dialog created
+    view = View(VGroup(Item('fid_file', label='Fiducials File'),
+                       Item('fid_fname', show_label=False, style='readonly'),
+                       Item('set', style='custom'),
+                       Item('current_pos', label='Pos'),
+                       HGroup(Item('save', enabled_when='can_save',
+                                   tooltip="If a filename is currently "
+                                   "specified, save to that file, otherwise "
+                                   "save to the default file name"),
+                              Item('save_as', enabled_when='can_save_as'),
+                              Item('reset_fid', enabled_when='can_reset'),
+                              show_labels=False),
+                       enabled_when="locked==False"))
+
+    def __init__(self, *args, **kwargs):
+        super(FiducialsPanel, self).__init__(*args, **kwargs)
+        self.sync_trait('lpa', self, 'current_pos', mutual=True)
+
+    def _reset_fid_fired(self):
+        self.model.reset = True
+
+    def _save_fired(self):
+        self.model.save()
+
+    def _save_as_fired(self):
+        if self.fid_file:
+            default_path = self.fid_file
+        else:
+            default_path = self.model.default_fid_fname
+
+        dlg = FileDialog(action="save as", wildcard=fid_wildcard,
+                         default_path=default_path)
+        dlg.open()
+        if dlg.return_code != OK:
+            return
+
+        path = dlg.path
+        if not path.endswith('.fif'):
+            path = path + '.fif'
+            if os.path.exists(path):
+                answer = confirm(None, "The file %r already exists. Should "
+                                 "it be replaced?" % path, "Overwrite File?")
+                if answer != YES:
+                    return
+
+        self.model.save(path)
+
+    def _on_pick(self, picker):
+        if self.locked:
+            return
+
+        self.picker = picker
+        n_pos = len(picker.picked_positions)
+
+        if n_pos == 0:
+            logger.debug("GUI: picked empty location")
+            return
+
+        if picker.actor is self.hsp_obj.surf.actor.actor:
+            idxs = []
+            idx = None
+            pt = [picker.pick_position]
+        elif self.hsp_obj.surf.actor.actor in picker.actors:
+            idxs = [i for i in range(n_pos) if picker.actors[i] is
+                    self.hsp_obj.surf.actor.actor]
+            idx = idxs[-1]
+            pt = [picker.picked_positions[idx]]
+        else:
+            logger.debug("GUI: picked object other than MRI")
+            return  # pt/idx/idxs are undefined in this case
+
+        def round_(x):
+            return round(x, 3)
+
+        poss = [[round_(c) for c in pos] for pos in picker.picked_positions]
+        pos = [round_(c) for c in picker.pick_position]
+        msg = ["Pick Event: %i picked_positions:" % n_pos]
+
+        line = str(pos)
+        if idx is None:
+            line += " <-pick_position"
+        msg.append(line)
+
+        for i, pos in enumerate(poss):
+            line = str(pos)
+            if i == idx:
+                line += " <- MRI mesh"
+            elif i in idxs:
+                line += " (<- also MRI mesh)"
+            msg.append(line)
+        logger.debug(os.linesep.join(msg))
+
+        if self.set == 'Nasion':
+            self.nasion = pt
+        elif self.set == 'LPA':
+            self.lpa = pt
+        elif self.set == 'RPA':
+            self.rpa = pt
+        else:
+            raise ValueError("set = %r" % self.set)
+
+    @on_trait_change('set')
+    def _on_set_change(self, obj, name, old, new):
+        self.sync_trait(old.lower(), self, 'current_pos', mutual=True,
+                        remove=True)
+        self.sync_trait(new.lower(), self, 'current_pos', mutual=True)
+        if new == 'Nasion':
+            self.headview.front = True
+        elif new == 'LPA':
+            self.headview.left = True
+        elif new == 'RPA':
+            self.headview.right = True
+
+
+# FiducialsPanel view that allows manipulating all coordinates numerically
+view2 = View(VGroup(Item('fid_file', label='Fiducials File'),
+                    Item('fid_fname', show_label=False, style='readonly'),
+                    Item('set', style='custom'), 'lpa', 'nasion', 'rpa',
+                    HGroup(Item('save', enabled_when='can_save'),
+                           Item('save_as', enabled_when='can_save_as'),
+                           Item('reset_fid', enabled_when='can_reset'),
+                           show_labels=False),
+                    enabled_when="locked==False"))
+
+
+class FiducialsFrame(HasTraits):
+    """GUI for interpolating between two KIT marker files
+
+    Parameters
+    ----------
+    subject : None | str
+        Set the subject which is initially selected.
+    subjects_dir : None | str
+        Override the SUBJECTS_DIR environment variable.
+    """
+    model = Instance(MRIHeadWithFiducialsModel, ())
+
+    scene = Instance(MlabSceneModel, ())
+    headview = Instance(HeadViewController)
+
+    spanel = Instance(SubjectSelectorPanel)
+    panel = Instance(FiducialsPanel)
+
+    mri_obj = Instance(SurfaceObject)
+    point_scale = float(defaults['mri_fid_scale'])
+    lpa_obj = Instance(PointObject)
+    nasion_obj = Instance(PointObject)
+    rpa_obj = Instance(PointObject)
+
+    def _headview_default(self):
+        return HeadViewController(scene=self.scene, system='RAS')
+
+    def _panel_default(self):
+        panel = FiducialsPanel(model=self.model, headview=self.headview)
+        panel.trait_view('view', view2)
+        return panel
+
+    def _spanel_default(self):
+        return SubjectSelectorPanel(model=self.model.subject_source)
+
+    view = View(HGroup(Item('scene',
+                            editor=SceneEditor(scene_class=MayaviScene),
+                            dock='vertical'),
+                       VGroup(headview_borders,
+                              VGroup(Item('spanel', style='custom'),
+                                     label="Subject", show_border=True,
+                                     show_labels=False),
+                              VGroup(Item('panel', style="custom"),
+                                     label="Fiducials", show_border=True,
+                                     show_labels=False),
+                              show_labels=False),
+                       show_labels=False),
+                resizable=True,
+                buttons=NoButtons)
+
+    def __init__(self, subject=None, subjects_dir=None, **kwargs):
+        super(FiducialsFrame, self).__init__(**kwargs)
+
+        subjects_dir = get_subjects_dir(subjects_dir)
+        if subjects_dir is not None:
+            self.spanel.subjects_dir = subjects_dir
+
+        if subject is not None:
+            if subject in self.spanel.subjects:
+                self.spanel.subject = subject
+
+    @on_trait_change('scene.activated')
+    def _init_plot(self):
+        self.scene.disable_render = True
+
+        lpa_color = defaults['lpa_color']
+        nasion_color = defaults['nasion_color']
+        rpa_color = defaults['rpa_color']
+
+        # bem
+        color = defaults['mri_color']
+        self.mri_obj = SurfaceObject(points=self.model.points, color=color,
+                                     tri=self.model.tris, scene=self.scene)
+        self.model.on_trait_change(self._on_mri_src_change, 'tris')
+        self.panel.hsp_obj = self.mri_obj
+
+        # fiducials
+        self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
+                                   point_scale=self.point_scale)
+        self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
+        self.sync_trait('point_scale', self.lpa_obj, mutual=False)
+
+        self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
+                                      point_scale=self.point_scale)
+        self.panel.sync_trait('nasion', self.nasion_obj, 'points',
+                              mutual=False)
+        self.sync_trait('point_scale', self.nasion_obj, mutual=False)
+
+        self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
+                                   point_scale=self.point_scale)
+        self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
+        self.sync_trait('point_scale', self.rpa_obj, mutual=False)
+
+        self.headview.left = True
+        self.scene.disable_render = False
+
+        # picker
+        self.scene.mayavi_scene.on_mouse_pick(self.panel._on_pick, type='cell')
+
+    def _on_mri_src_change(self):
+        if (not np.any(self.model.points)) or (not np.any(self.model.tris)):
+            self.mri_obj.clear()
+            return
+
+        self.mri_obj.points = self.model.points
+        self.mri_obj.tri = self.model.tris
+        self.mri_obj.plot()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_file_traits.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_file_traits.py
new file mode 100644
index 0000000..fd59d7d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_file_traits.py
@@ -0,0 +1,509 @@
+"""File data sources for traits GUIs"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+import numpy as np
+from ..externals.six.moves import map
+
+# allow import without traits
+try:
+    from traits.api import (Any, HasTraits, HasPrivateTraits, cached_property,
+                            on_trait_change, Array, Bool, Button, DelegatesTo,
+                            Directory, Enum, Event, File, Instance, Int, List,
+                            Property, Str)
+    from traitsui.api import View, Item, VGroup
+    from pyface.api import (DirectoryDialog, OK, ProgressDialog, error,
+                            information)
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = Any = Array = Bool = Button = \
+        DelegatesTo = Directory = Enum = Event = File = Instance = \
+        Int = List = Property = Str = View = Item = VGroup = trait_wraith
+
+from ..io.constants import FIFF
+from ..io import read_info, read_fiducials
+from ..surface import read_bem_surfaces
+from ..coreg import (_is_mri_subject, _mri_subject_has_bem,
+                     create_default_subject)
+from ..utils import get_config, set_config
+
+
+fid_wildcard = "*.fif"
+trans_wildcard = "*.fif"
+# for wx backend:
+# fid_wildcard = "Fiducials FIFF file (*.fif)|*.fif"
+# trans_wildcard = "Trans File (*.fif)|*.fif"
+
+
+def _expand_path(p):
+    return os.path.abspath(os.path.expandvars(os.path.expanduser(p)))
+
+
+def get_fs_home():
+    """Get the FREESURFER_HOME directory
+
+    Returns
+    -------
+    fs_home : None | str
+        The FREESURFER_HOME path or None if the user cancels.
+
+    Notes
+    -----
+    If FREESURFER_HOME can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    return _get_root_home('FREESURFER_HOME', 'freesurfer', _fs_home_problem)
+
+
+def get_mne_root():
+    """Get the MNE_ROOT directory
+
+    Returns
+    -------
+    mne_root : None | str
+        The MNE_ROOT path or None if the user cancels.
+
+    Notes
+    -----
+    If MNE_ROOT can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    return _get_root_home('MNE_ROOT', 'MNE', _mne_root_problem)
+
+
+def _get_root_home(cfg, name, check_fun):
+    root = get_config(cfg)
+    problem = check_fun(root)
+    while problem:
+        info = ("Please select the %s directory. This is the root "
+                "directory of the %s installation." % (cfg, name))
+        msg = '\n\n'.join((problem, info))
+        information(None, msg, "Select the %s Directory" % cfg)
+        msg = "Please select the %s Directory" % cfg
+        dlg = DirectoryDialog(message=msg, new_directory=False)
+        if dlg.open() == OK:
+            root = dlg.path
+            problem = check_fun(root)
+            if problem is None:
+                set_config(cfg, root)
+        else:
+            return None
+    return root
+
+
+def set_fs_home():
+    """Set the FREESURFER_HOME environment variable
+
+    Returns
+    -------
+    success : bool
+        True if the environment variable could be set, False if FREESURFER_HOME
+        could not be found.
+
+    Notes
+    -----
+    If FREESURFER_HOME can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    fs_home = get_fs_home()
+    if fs_home is None:
+        return False
+    else:
+        os.environ['FREESURFER_HOME'] = fs_home
+        return True
+
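+# A minimal usage sketch (hypothetical, interactive session): if the
+# environment variable is missing, the user is prompted for the directory.
+# >>> if set_fs_home():
+# ...     fs_home = os.environ['FREESURFER_HOME']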
+
+def _fs_home_problem(fs_home):
+    """Check FREESURFER_HOME path
+
+    Return str describing problem or None if the path is okay.
+    """
+    if fs_home is None:
+        return "FREESURFER_HOME is not set."
+    elif not os.path.exists(fs_home):
+        return "FREESURFER_HOME (%s) does not exist." % fs_home
+    else:
+        test_dir = os.path.join(fs_home, 'subjects', 'fsaverage')
+        if not os.path.exists(test_dir):
+            return ("FREESURFER_HOME (%s) does not contain the fsaverage "
+                    "subject." % fs_home)
+
+
+def set_mne_root(set_mne_bin=False):
+    """Set the MNE_ROOT environment variable
+
+    Parameters
+    ----------
+    set_mne_bin : bool
+        Also add the MNE binary directory to the PATH (default: False).
+
+    Returns
+    -------
+    success : bool
+        True if the environment variable could be set, False if MNE_ROOT
+        could not be found.
+
+    Notes
+    -----
+    If MNE_ROOT can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    mne_root = get_mne_root()
+    if mne_root is None:
+        return False
+    else:
+        os.environ['MNE_ROOT'] = mne_root
+        if set_mne_bin:
+            mne_bin = os.path.realpath(os.path.join(mne_root, 'bin'))
+            if mne_bin not in map(_expand_path, os.environ['PATH'].split(':')):
+                os.environ['PATH'] += ':' + mne_bin
+        return True
+
+
+def _mne_root_problem(mne_root):
+    """Check MNE_ROOT path
+
+    Return str describing problem or None if the path is okay.
+    """
+    if mne_root is None:
+        return "MNE_ROOT is not set."
+    elif not os.path.exists(mne_root):
+        return "MNE_ROOT (%s) does not exist." % mne_root
+    else:
+        test_dir = os.path.join(mne_root, 'share', 'mne', 'mne_analyze')
+        if not os.path.exists(test_dir):
+            return ("MNE_ROOT (%s) is missing files. If this is your MNE "
+                    "installation, consider reinstalling." % mne_root)
+
+
+class BemSource(HasTraits):
+    """Expose points and tris of a given BEM file
+
+    Parameters
+    ----------
+    file : File
+        Path to the BEM file (*.fif).
+
+    Attributes
+    ----------
+    points : Array, shape = (n_pts, 3)
+        BEM file points.
+    tris : Array, shape = (n_tri, 3)
+        BEM file triangles.
+
+    Notes
+    -----
+    tris is always updated after points, so downstream objects that depend
+    on both should sync to a change in tris.
+    """
+    file = File(exists=True, filter=['*.fif'])
+    points = Array(shape=(None, 3), value=np.empty((0, 3)))
+    norms = Array
+    tris = Array(shape=(None, 3), value=np.empty((0, 3)))
+
+    @on_trait_change('file')
+    def read_file(self):
+        if os.path.exists(self.file):
+            bem = read_bem_surfaces(self.file)[0]
+            self.points = bem['rr']
+            self.norms = bem['nn']
+            self.tris = bem['tris']
+        else:
+            self.points = np.empty((0, 3))
+            self.norms = np.empty((0, 3))
+            self.tris = np.empty((0, 3))
+
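+# Usage sketch (the path is hypothetical): assigning ``file`` fires
+# ``read_file`` through the trait listener and fills the arrays.
+# >>> bem = BemSource()
+# >>> bem.file = '/data/subjects/sample/bem/sample-head.fif'
+# >>> bem.points.shape, bem.tris.shape  # ((n_pts, 3), (n_tri, 3))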
+
+class FiducialsSource(HasTraits):
+    """Expose points of a given fiducials fif file
+
+    Parameters
+    ----------
+    file : File
+        Path to a fif file with fiducials (*.fif).
+
+    Attributes
+    ----------
+    points : Array, shape = (n_points, 3)
+        Fiducials file points.
+    """
+    file = File(filter=[fid_wildcard])
+    fname = Property(depends_on='file')
+    points = Property(depends_on='file')
+
+    @cached_property
+    def _get_fname(self):
+        fname = os.path.basename(self.file)
+        return fname
+
+    @cached_property
+    def _get_points(self):
+        if not os.path.exists(self.file):
+            return None
+
+        points = np.zeros((3, 3))
+        fids, _ = read_fiducials(self.file)
+        for fid in fids:
+            ident = fid['ident']
+            if ident == FIFF.FIFFV_POINT_LPA:
+                points[0] = fid['r']
+            elif ident == FIFF.FIFFV_POINT_NASION:
+                points[1] = fid['r']
+            elif ident == FIFF.FIFFV_POINT_RPA:
+                points[2] = fid['r']
+        return points
+
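+# Usage sketch (hypothetical path): ``points`` is a (3, 3) array with rows
+# LPA, Nasion, RPA, or None while the file does not exist.
+# >>> fid_src = FiducialsSource(file='/data/sample-fiducials.fif')
+# >>> lpa, nasion, rpa = fid_src.points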
+
+class InstSource(HasPrivateTraits):
+    """Expose measurement information from a inst file
+
+    Parameters
+    ----------
+    file : File
+        Path to the BEM file (*.fif).
+
+    Attributes
+    ----------
+    fid : Array, shape = (3, 3)
+        Each row contains the coordinates for one fiducial point, in the order
+        Nasion, RAP, LAP. If no file is set all values are 0.
+    """
+    file = File(exists=True, filter=['*.fif'])
+
+    inst_fname = Property(Str, depends_on='file')
+    inst_dir = Property(depends_on='file')
+    inst = Property(depends_on='file')
+
+    points_filter = Any(desc="Index to select a subset of the head shape "
+                        "points")
+    n_omitted = Property(Int, depends_on=['points_filter'])
+
+    # head shape
+    inst_points = Property(depends_on='inst', desc="Head shape points in the "
+                           "inst file (n x 3 array)")
+    points = Property(depends_on=['inst_points', 'points_filter'], desc="Head "
+                      "shape points selected by the filter (n x 3 array)")
+
+    # fiducials
+    fid_dig = Property(depends_on='inst', desc="Fiducial points "
+                       "(list of dict)")
+    fid_points = Property(depends_on='fid_dig', desc="Fiducial points "
+                          "{ident: point} dict")
+    lpa = Property(depends_on='fid_points', desc="LPA coordinates (1 x 3 "
+                   "array)")
+    nasion = Property(depends_on='fid_points', desc="Nasion coordinates (1 x "
+                      "3 array)")
+    rpa = Property(depends_on='fid_points', desc="RPA coordinates (1 x 3 "
+                   "array)")
+
+    view = View(VGroup(Item('file'),
+                       Item('inst_fname', show_label=False, style='readonly')))
+
+    @cached_property
+    def _get_n_omitted(self):
+        if self.points_filter is None:
+            return 0
+        else:
+            return np.sum(self.points_filter == False)  # noqa
+
+    @cached_property
+    def _get_inst(self):
+        if self.file:
+            return read_info(self.file)
+
+    @cached_property
+    def _get_inst_dir(self):
+        return os.path.dirname(self.file)
+
+    @cached_property
+    def _get_inst_fname(self):
+        if self.file:
+            return os.path.basename(self.file)
+        else:
+            return '-'
+
+    @cached_property
+    def _get_inst_points(self):
+        if not self.inst:
+            return np.zeros((1, 3))
+
+        points = np.array([d['r'] for d in self.inst['dig']
+                           if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
+        return points
+
+    @cached_property
+    def _get_points(self):
+        if self.points_filter is None:
+            return self.inst_points
+        else:
+            return self.inst_points[self.points_filter]
+
+    @cached_property
+    def _get_fid_dig(self):
+        """Fiducials for info['dig']"""
+        if not self.inst:
+            return []
+        dig = self.inst['dig']
+        dig = [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_CARDINAL]
+        return dig
+
+    @cached_property
+    def _get_fid_points(self):
+        if not self.inst:
+            return {}
+        digs = dict((d['ident'], d) for d in self.fid_dig)
+        return digs
+
+    @cached_property
+    def _get_nasion(self):
+        if self.fid_points:
+            return self.fid_points[FIFF.FIFFV_POINT_NASION]['r'][None, :]
+        else:
+            return np.zeros((1, 3))
+
+    @cached_property
+    def _get_lpa(self):
+        if self.fid_points:
+            return self.fid_points[FIFF.FIFFV_POINT_LPA]['r'][None, :]
+        else:
+            return np.zeros((1, 3))
+
+    @cached_property
+    def _get_rpa(self):
+        if self.fid_points:
+            return self.fid_points[FIFF.FIFFV_POINT_RPA]['r'][None, :]
+        else:
+            return np.zeros((1, 3))
+
+    def _file_changed(self):
+        self.reset_traits(('points_filter',))
+
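+# Usage sketch (hypothetical path): ``points_filter`` takes a boolean mask
+# over the head shape points, e.g. to drop points below the z = 0 plane.
+# >>> inst = InstSource(file='/data/sample_raw.fif')
+# >>> inst.points_filter = inst.inst_points[:, 2] > 0
+# >>> inst.n_omitted  # number of masked-out points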
+
+class MRISubjectSource(HasPrivateTraits):
+    """Find subjects in SUBJECTS_DIR and select one
+
+    Parameters
+    ----------
+    subjects_dir : directory
+        SUBJECTS_DIR.
+    subject : str
+        Subject, corresponding to a folder in SUBJECTS_DIR.
+    """
+    refresh = Event(desc="Refresh the subject list based on the directory "
+                    "structure of subjects_dir.")
+
+    # settings
+    subjects_dir = Directory(exists=True)
+    subjects = Property(List(Str), depends_on=['subjects_dir', 'refresh'])
+    subject = Enum(values='subjects')
+
+    # info
+    can_create_fsaverage = Property(Bool, depends_on=['subjects_dir',
+                                                      'subjects'])
+    subject_has_bem = Property(Bool, depends_on=['subjects_dir', 'subject'],
+                               desc="whether the subject has a file matching "
+                               "the bem file name pattern")
+    bem_pattern = Property(depends_on='mri_dir')
+
+    @cached_property
+    def _get_can_create_fsaverage(self):
+        if not os.path.exists(self.subjects_dir):
+            return False
+        if 'fsaverage' in self.subjects:
+            return False
+        return True
+
+    @cached_property
+    def _get_mri_dir(self):
+        if not self.subject:
+            return
+        elif not self.subjects_dir:
+            return
+        else:
+            return os.path.join(self.subjects_dir, self.subject)
+
+    @cached_property
+    def _get_subjects(self):
+        sdir = self.subjects_dir
+        is_dir = sdir and os.path.isdir(sdir)
+        if is_dir:
+            dir_content = os.listdir(sdir)
+            subjects = [s for s in dir_content if _is_mri_subject(s, sdir)]
+            if len(subjects) == 0:
+                subjects.append('')
+        else:
+            subjects = ['']
+
+        return subjects
+
+    @cached_property
+    def _get_subject_has_bem(self):
+        if not self.subject:
+            return False
+        return _mri_subject_has_bem(self.subject, self.subjects_dir)
+
+    def create_fsaverage(self):
+        if not self.subjects_dir:
+            err = ("No subjects directory is selected. Please specify "
+                   "subjects_dir first.")
+            raise RuntimeError(err)
+
+        mne_root = get_mne_root()
+        if mne_root is None:
+            err = ("MNE contains files that are needed for copying the "
+                   "fsaverage brain. Please install MNE and try again.")
+            raise RuntimeError(err)
+        fs_home = get_fs_home()
+        if fs_home is None:
+            err = ("FreeSurfer contains files that are needed for copying the "
+                   "fsaverage brain. Please install FreeSurfer and try again.")
+            raise RuntimeError(err)
+
+        create_default_subject(mne_root, fs_home,
+                               subjects_dir=self.subjects_dir)
+        self.refresh = True
+        self.subject = 'fsaverage'
+
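+# Usage sketch (hypothetical directory): ``subjects`` lists the folders in
+# subjects_dir that qualify as MRI subjects.
+# >>> src = MRISubjectSource(subjects_dir='/data/subjects')
+# >>> src.subjects  # e.g. ['fsaverage', 'sample']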
+
+class SubjectSelectorPanel(HasPrivateTraits):
+    model = Instance(MRISubjectSource)
+
+    can_create_fsaverage = DelegatesTo('model')
+    subjects_dir = DelegatesTo('model')
+    subject = DelegatesTo('model')
+    subjects = DelegatesTo('model')
+
+    create_fsaverage = Button("Copy FsAverage to Subjects Folder",
+                              desc="Copy the files for the fsaverage subject "
+                              "to the subjects directory.")
+
+    view = View(VGroup(Item('subjects_dir', label='subjects_dir'),
+                       'subject',
+                       Item('create_fsaverage', show_label=False,
+                            enabled_when='can_create_fsaverage')))
+
+    def _create_fsaverage_fired(self):
+        # progress dialog with indefinite progress bar
+        title = "Creating FsAverage ..."
+        message = "Copying fsaverage files ..."
+        prog = ProgressDialog(title=title, message=message)
+        prog.open()
+        prog.update(0)
+
+        try:
+            self.model.create_fsaverage()
+        except Exception as err:
+            msg = str(err)
+            error(None, msg, "Error Creating FsAverage")
+            raise
+        finally:
+            prog.close()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_kit2fiff_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_kit2fiff_gui.py
new file mode 100644
index 0000000..ee07198
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_kit2fiff_gui.py
@@ -0,0 +1,508 @@
+"""Mayavi/traits GUI for converting data from KIT systems"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import numpy as np
+from scipy.linalg import inv
+from threading import Thread
+
+from ..externals.six.moves import queue
+from ..io.meas_info import _read_dig_points, _make_dig_points
+
+
+# allow import without traits
+try:
+    from mayavi.core.ui.mayavi_scene import MayaviScene
+    from mayavi.tools.mlab_scene_model import MlabSceneModel
+    from pyface.api import confirm, error, FileDialog, OK, YES, information
+    from traits.api import (HasTraits, HasPrivateTraits, cached_property,
+                            Instance, Property, Bool, Button, Enum, File, Int,
+                            List, Str, Array, DelegatesTo)
+    from traitsui.api import (View, Item, HGroup, VGroup, spring,
+                              CheckListEditor, EnumEditor, Handler)
+    from traitsui.menu import NoButtons
+    from tvtk.pyface.scene_editor import SceneEditor
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = Handler = object
+    cached_property = MayaviScene = MlabSceneModel = Bool = Button = \
+        DelegatesTo = Enum = File = Instance = Int = List = Property = \
+        Str = Array = spring = View = Item = HGroup = VGroup = EnumEditor = \
+        NoButtons = CheckListEditor = SceneEditor = trait_wraith
+
+from ..io.kit.kit import RawKIT, KIT
+from ..transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
+                          get_ras_to_neuromag_trans, Transform)
+from ..coreg import _decimate_points, fit_matched_points
+from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
+from ._viewer import (HeadViewController, headview_item, PointObject,
+                      _testing_mode)
+
+
+use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
+backend_is_wx = False  # is there a way to determine this?
+if backend_is_wx:
+    # wx backend allows labels for wildcards
+    hsp_points_wildcard = ['Head Shape Points (*.txt)|*.txt']
+    hsp_fid_wildcard = ['Head Shape Fiducials (*.txt)|*.txt']
+    kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
+else:
+    hsp_points_wildcard = ['*.txt']
+    hsp_fid_wildcard = ['*.txt']
+    kit_con_wildcard = ['*.sqd;*.con']
+
+
+class Kit2FiffModel(HasPrivateTraits):
+    """Data Model for Kit2Fiff conversion
+
+     - Markers are transformed into RAS coordinate system (as are the sensor
+       coordinates).
+     - Head shape digitizer data is transformed into neuromag-like space.
+
+    """
+    # Input Traits
+    markers = Instance(CombineMarkersModel, ())
+    sqd_file = File(exists=True, filter=kit_con_wildcard)
+    hsp_file = File(exists=True, filter=hsp_points_wildcard, desc="Digitizer "
+                    "head shape")
+    fid_file = File(exists=True, filter=hsp_fid_wildcard, desc="Digitizer "
+                    "fiducials")
+    stim_chs = Enum(">", "<", "man")
+    stim_chs_manual = Array(int, (8,), range(168, 176))
+    stim_slope = Enum("-", "+")
+    # Marker Points
+    use_mrk = List(list(range(5)), desc="Which marker points to use for the "
+                   "device head coregistration.")
+
+    # Derived Traits
+    mrk = Property(depends_on=('markers.mrk3.points'))
+
+    # Polhemus Fiducials
+    elp_raw = Property(depends_on=['fid_file'])
+    hsp_raw = Property(depends_on=['hsp_file'])
+    polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
+
+    # Polhemus data (in neuromag space)
+    elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
+    fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
+    hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
+
+    # trans
+    dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
+    head_dev_trans = Property(depends_on=['dev_head_trans'])
+
+    # info
+    sqd_fname = Property(Str, depends_on='sqd_file')
+    hsp_fname = Property(Str, depends_on='hsp_file')
+    fid_fname = Property(Str, depends_on='fid_file')
+    can_save = Property(Bool, depends_on=['sqd_file', 'fid', 'elp', 'hsp',
+                                          'dev_head_trans'])
+
+    @cached_property
+    def _get_can_save(self):
+        "Only allow saving when either all or no head shape elements are set."
+        has_sqd = bool(self.sqd_file)
+        if not has_sqd:
+            return False
+
+        has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
+                       np.any(self.elp) and np.any(self.fid))
+        if has_all_hsp:
+            return True
+
+        has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
+        return not has_any_hsp
+
+    @cached_property
+    def _get_dev_head_trans(self):
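+        # The result is a 4 x 4 homogeneous transform estimated by
+        # fit_matched_points from the marker coordinates (device space) to
+        # the digitized marker positions (head space); identity when no
+        # data are loaded.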
+        if (self.mrk is None) or not np.any(self.fid):
+            return np.eye(4)
+
+        src_pts = self.mrk
+        dst_pts = self.elp
+
+        n_use = len(self.use_mrk)
+        if n_use < 3:
+            error(None, "Estimating the device head transform requires at "
+                  "least 3 marker points. Please adjust the markers used.",
+                  "Not Enough Marker Points")
+            return
+        elif n_use < 5:
+            src_pts = src_pts[self.use_mrk]
+            dst_pts = dst_pts[self.use_mrk]
+
+        trans = fit_matched_points(src_pts, dst_pts, out='trans')
+        return trans
+
+    @cached_property
+    def _get_elp(self):
+        if self.elp_raw is None:
+            return np.empty((0, 3))
+        pts = self.elp_raw[3:8]
+        pts = apply_trans(self.polhemus_neuromag_trans, pts)
+        return pts
+
+    @cached_property
+    def _get_elp_raw(self):
+        if not self.fid_file:
+            return
+
+        try:
+            pts = _read_dig_points(self.fid_file)
+            if len(pts) < 8:
+                raise ValueError("File contains %i points, need 8" % len(pts))
+        except Exception as err:
+            error(None, str(err), "Error Reading Fiducials")
+            self.reset_traits(['fid_file'])
+            raise
+        else:
+            return pts
+
+    @cached_property
+    def _get_fid(self):
+        if self.elp_raw is None:
+            return np.empty((0, 3))
+        pts = self.elp_raw[:3]
+        pts = apply_trans(self.polhemus_neuromag_trans, pts)
+        return pts
+
+    @cached_property
+    def _get_fid_fname(self):
+        if self.fid_file:
+            return os.path.basename(self.fid_file)
+        else:
+            return '-'
+
+    @cached_property
+    def _get_head_dev_trans(self):
+        return inv(self.dev_head_trans)
+
+    @cached_property
+    def _get_hsp(self):
+        if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
+            return np.empty((0, 3))
+        else:
+            pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
+            return pts
+
+    @cached_property
+    def _get_hsp_fname(self):
+        if self.hsp_file:
+            return os.path.basename(self.hsp_file)
+        else:
+            return '-'
+
+    @cached_property
+    def _get_hsp_raw(self):
+        fname = self.hsp_file
+        if not fname:
+            return
+
+        try:
+            pts = _read_dig_points(fname)
+            n_pts = len(pts)
+            if n_pts > KIT.DIG_POINTS:
+                msg = ("The selected head shape contains {n_in} points, "
+                       "which is more than the recommended maximum ({n_rec}). "
+                       "The file will be automatically downsampled, which "
+                       "might take a while. A better way to downsample is "
+                       "using FastScan.")
+                msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS)
+                information(None, msg, "Too Many Head Shape Points")
+                pts = _decimate_points(pts, 5)
+
+        except Exception as err:
+            error(None, str(err), "Error Reading Head Shape")
+            self.reset_traits(['hsp_file'])
+            raise
+        else:
+            return pts
+
+    @cached_property
+    def _get_mrk(self):
+        return apply_trans(als_ras_trans, self.markers.mrk3.points)
+
+    @cached_property
+    def _get_polhemus_neuromag_trans(self):
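+        # Compose two steps: Polhemus ALS (mm) -> RAS, then RAS -> the
+        # Neuromag head frame defined by the three cardinal points.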
+        if self.elp_raw is None:
+            return
+        pts = apply_trans(als_ras_trans_mm, self.elp_raw[:3])
+        nasion, lpa, rpa = pts
+        trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+        trans = np.dot(trans, als_ras_trans_mm)
+        return trans
+
+    @cached_property
+    def _get_sqd_fname(self):
+        if self.sqd_file:
+            return os.path.basename(self.sqd_file)
+        else:
+            return '-'
+
+    def clear_all(self):
+        """Clear all specified input parameters"""
+        self.markers.clear = True
+        self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
+
+    def get_event_info(self):
+        """
+        Return a string with the number of events found for each trigger value
+        """
+        if len(self.events) == 0:
+            return "No events found."
+
+        count = ["Events found:"]
+        events = np.array(self.events)
+        for i in np.unique(events):
+            n = np.sum(events == i)
+            count.append('%3i: %i' % (i, n))
+
+        return os.linesep.join(count)
+
+    def get_raw(self, preload=False):
+        """Create a raw object based on the current model settings
+        """
+        if not self.sqd_file:
+            raise ValueError("sqd file not set")
+
+        if self.stim_chs == 'man':
+            stim = self.stim_chs_manual
+        else:
+            stim = self.stim_chs
+
+        raw = RawKIT(self.sqd_file, preload=preload, stim=stim,
+                     slope=self.stim_slope)
+
+        if np.any(self.fid):
+            raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
+                                               self.fid[2], self.elp,
+                                               self.hsp)
+            raw.info['dev_head_t'] = Transform('meg', 'head',
+                                               self.dev_head_trans)
+        return raw
+
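+# Usage sketch (hypothetical file names):
+# >>> model = Kit2FiffModel()
+# >>> model.sqd_file = '/data/recording.sqd'
+# >>> raw = model.get_raw(preload=True)
+# >>> raw.save('/data/recording-raw.fif')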
+
+class Kit2FiffFrameHandler(Handler):
+    """Handler that checks for unfinished processes before closing its window
+    """
+    def close(self, info, is_ok):
+        if info.object.kit2fiff_panel.queue.unfinished_tasks:
+            msg = ("Can not close the window while saving is still in "
+                   "progress. Please wait until all files are processed.")
+            title = "Saving Still in Progress"
+            information(None, msg, title)
+            return False
+        else:
+            return True
+
+
+class Kit2FiffPanel(HasPrivateTraits):
+    """Control panel for kit2fiff conversion"""
+    model = Instance(Kit2FiffModel)
+
+    # model copies for view
+    use_mrk = DelegatesTo('model')
+    sqd_file = DelegatesTo('model')
+    hsp_file = DelegatesTo('model')
+    fid_file = DelegatesTo('model')
+    stim_chs = DelegatesTo('model')
+    stim_chs_manual = DelegatesTo('model')
+    stim_slope = DelegatesTo('model')
+
+    # info
+    can_save = DelegatesTo('model')
+    sqd_fname = DelegatesTo('model')
+    hsp_fname = DelegatesTo('model')
+    fid_fname = DelegatesTo('model')
+
+    # Source Files
+    reset_dig = Button
+
+    # Visualization
+    scene = Instance(MlabSceneModel)
+    fid_obj = Instance(PointObject)
+    elp_obj = Instance(PointObject)
+    hsp_obj = Instance(PointObject)
+
+    # Output
+    save_as = Button(label='Save FIFF...')
+    clear_all = Button(label='Clear All')
+    queue = Instance(queue.Queue, ())
+    queue_feedback = Str('')
+    queue_current = Str('')
+    queue_len = Int(0)
+    queue_len_str = Property(Str, depends_on=['queue_len'])
+    error = Str('')
+
+    view = View(
+        VGroup(VGroup(Item('sqd_file', label="Data"),
+                      Item('sqd_fname', show_label=False,
+                           style='readonly'),
+                      Item('hsp_file', label='Dig Head Shape'),
+                      Item('hsp_fname', show_label=False,
+                           style='readonly'),
+                      Item('fid_file', label='Dig Points'),
+                      Item('fid_fname', show_label=False,
+                           style='readonly'),
+                      Item('reset_dig', label='Clear Digitizer Files',
+                           show_label=False),
+                      Item('use_mrk', editor=use_editor,
+                           style='custom'),
+                      label="Sources", show_border=True),
+               VGroup(Item('stim_slope', label="Event Onset",
+                           style='custom',
+                           editor=EnumEditor(
+                               values={'+': '2:Peak (0 to 5 V)',
+                                       '-': '1:Trough (5 to 0 V)'},
+                               cols=2),
+                           help="Whether events are marked by a decrease "
+                           "(trough) or an increase (peak) in trigger "
+                           "channel values"),
+                      Item('stim_chs', label="Binary Coding",
+                           style='custom',
+                           editor=EnumEditor(values={'>': '1:1 ... 128',
+                                                     '<': '3:128 ... 1',
+                                                     'man': '2:Manual'},
+                                             cols=2),
+                           help="Specifies the bit order in event "
+                           "channels. Assign the first bit (1) to the "
+                           "first or the last trigger channel."),
+                      Item('stim_chs_manual', label='Stim Channels',
+                           style='custom',
+                           visible_when="stim_chs == 'man'"),
+                      label='Events', show_border=True),
+               HGroup(Item('save_as', enabled_when='can_save'), spring,
+                      'clear_all', show_labels=False),
+               Item('queue_feedback', show_label=False, style='readonly'),
+               Item('queue_current', show_label=False, style='readonly'),
+               Item('queue_len_str', show_label=False, style='readonly')
+               )
+    )
+
+    def __init__(self, *args, **kwargs):
+        super(Kit2FiffPanel, self).__init__(*args, **kwargs)
+
+        # setup save worker
+        def worker():
+            while True:
+                raw, fname = self.queue.get()
+                basename = os.path.basename(fname)
+                self.queue_len -= 1
+                self.queue_current = 'Processing: %s' % basename
+
+                # task
+                try:
+                    raw.save(fname, overwrite=True)
+                except Exception as err:
+                    self.error = str(err)
+                    res = "Error saving: %s"
+                else:
+                    res = "Saved: %s"
+
+                # finalize
+                self.queue_current = ''
+                self.queue_feedback = res % basename
+                self.queue.task_done()
+
+        t = Thread(target=worker)
+        t.daemon = True
+        t.start()
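+        # daemon=True lets the interpreter exit even if saves are queued;
+        # Kit2FiffFrameHandler blocks closing the window while
+        # queue.unfinished_tasks is non-zero.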
+
+        # setup mayavi visualization
+        m = self.model
+        self.fid_obj = PointObject(scene=self.scene, color=(25, 225, 25),
+                                   point_scale=5e-3)
+        self.elp_obj = PointObject(scene=self.scene, color=(50, 50, 220),
+                                   point_scale=1e-2, opacity=.2)
+        self.hsp_obj = PointObject(scene=self.scene, color=(200, 200, 200),
+                                   point_scale=2e-3)
+        if not _testing_mode():
+            for name, obj in zip(['fid', 'elp', 'hsp'],
+                                 [self.fid_obj, self.elp_obj, self.hsp_obj]):
+                m.sync_trait(name, obj, 'points', mutual=False)
+                m.sync_trait('head_dev_trans', obj, 'trans', mutual=False)
+            self.scene.camera.parallel_scale = 0.15
+            self.scene.mlab.view(0, 0, .15)
+
+    def _clear_all_fired(self):
+        self.model.clear_all()
+
+    @cached_property
+    def _get_queue_len_str(self):
+        if self.queue_len:
+            return "Queue length: %i" % self.queue_len
+        else:
+            return ''
+
+    def _reset_dig_fired(self):
+        self.reset_traits(['hsp_file', 'fid_file'])
+
+    def _save_as_fired(self):
+        # create raw
+        try:
+            raw = self.model.get_raw()
+        except Exception as err:
+            error(None, str(err), "Error Creating KIT Raw")
+            raise
+
+        # find default path
+        stem, _ = os.path.splitext(self.sqd_file)
+        if not stem.endswith('raw'):
+            stem += '-raw'
+        default_path = stem + '.fif'
+
+        # save as dialog
+        dlg = FileDialog(action="save as",
+                         wildcard="fiff raw file (*.fif)|*.fif",
+                         default_path=default_path)
+        dlg.open()
+        if dlg.return_code != OK:
+            return
+
+        fname = dlg.path
+        if not fname.endswith('.fif'):
+            fname += '.fif'
+            if os.path.exists(fname):
+                answer = confirm(None, "The file %r already exists. Should "
+                                 "it be replaced?" % fname, "Overwrite File?")
+                if answer != YES:
+                    return
+
+        self.queue.put((raw, fname))
+        self.queue_len += 1
+
+
+class Kit2FiffFrame(HasTraits):
+    """GUI for interpolating between two KIT marker files"""
+    model = Instance(Kit2FiffModel, ())
+    scene = Instance(MlabSceneModel, ())
+    headview = Instance(HeadViewController)
+    marker_panel = Instance(CombineMarkersPanel)
+    kit2fiff_panel = Instance(Kit2FiffPanel)
+
+    view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
+                              show_labels=False),
+                       VGroup(Item('scene',
+                                   editor=SceneEditor(scene_class=MayaviScene),
+                                   dock='vertical', show_label=False),
+                              VGroup(headview_item, show_labels=False),
+                              ),
+                       VGroup(Item('kit2fiff_panel', style='custom'),
+                              show_labels=False),
+                       show_labels=False,
+                       ),
+                handler=Kit2FiffFrameHandler(),
+                height=700, resizable=True, buttons=NoButtons)
+
+    def _headview_default(self):
+        return HeadViewController(scene=self.scene, scale=160, system='RAS')
+
+    def _kit2fiff_panel_default(self):
+        return Kit2FiffPanel(scene=self.scene, model=self.model)
+
+    def _marker_panel_default(self):
+        return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
+                                   trans=als_ras_trans)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_marker_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_marker_gui.py
new file mode 100644
index 0000000..835a206
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_marker_gui.py
@@ -0,0 +1,435 @@
+"""Mayavi/traits GUI for averaging two sets of KIT marker points"""
+
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+import numpy as np
+
+# allow import without traits
+try:
+    from mayavi.core.ui.mayavi_scene import MayaviScene
+    from mayavi.tools.mlab_scene_model import MlabSceneModel
+    from pyface.api import confirm, error, FileDialog, OK, YES
+    from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
+                            cached_property, Instance, Property, Array, Bool,
+                            Button, Enum, File, Float, List, Str)
+    from traitsui.api import View, Item, HGroup, VGroup, CheckListEditor
+    from traitsui.menu import NoButtons
+    from tvtk.pyface.scene_editor import SceneEditor
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
+        Array = Bool = Button = Enum = File = Float = Instance = Int = \
+        List = Property = Str = View = Item = HGroup = VGroup = \
+        CheckListEditor = NoButtons = SceneEditor = trait_wraith
+
+from ..transforms import apply_trans, rotation, translation
+from ..coreg import fit_matched_points
+from ..io.kit import read_mrk
+from ..io.meas_info import _write_dig_points
+from ._viewer import HeadViewController, headview_borders, PointObject
+
+
+backend_is_wx = False  # is there a way to determine this?
+if backend_is_wx:
+    mrk_wildcard = ['Supported Files (*.sqd, *.mrk, *.txt, *.pickled)|'
+                    '*.sqd;*.mrk;*.txt;*.pickled',
+                    'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk',
+                    'Text marker file (*.txt)|*.txt',
+                    'Pickled markers (*.pickled)|*.pickled']
+    mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt"]
+else:
+    mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"]
+    mrk_out_wildcard = "*.txt"
+out_ext = '.txt'
+
+
+use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
+use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
+
+mrk_view_editable = View(
+    VGroup('file',
+           Item('name', show_label=False, style='readonly'),
+           HGroup(
+               Item('use', editor=use_editor_v, enabled_when="enabled",
+                    style='custom'),
+               'points',
+           ),
+           HGroup(Item('clear', enabled_when="can_save", show_label=False),
+                  Item('save_as', enabled_when="can_save",
+                       show_label=False)),
+           ))
+
+mrk_view_basic = View(
+    VGroup('file',
+           Item('name', show_label=False, style='readonly'),
+           Item('use', editor=use_editor_h, enabled_when="enabled",
+                style='custom'),
+           HGroup(Item('clear', enabled_when="can_save", show_label=False),
+                  Item('edit', show_label=False),
+                  Item('save_as', enabled_when="can_save",
+                       show_label=False)),
+           ))
+
+mrk_view_edit = View(VGroup('points'))
+
+
+class MarkerPoints(HasPrivateTraits):
+    """Represent 5 marker points"""
+    points = Array(float, (5, 3))
+
+    can_save = Property(depends_on='points')
+    save_as = Button()
+
+    view = View(VGroup('points',
+                       Item('save_as', enabled_when='can_save')))
+
+    @cached_property
+    def _get_can_save(self):
+        return np.any(self.points)
+
+    def _save_as_fired(self):
+        dlg = FileDialog(action="save as", wildcard=mrk_out_wildcard,
+                         default_filename=self.name,
+                         default_directory=self.dir)
+        dlg.open()
+        if dlg.return_code != OK:
+            return
+
+        path, ext = os.path.splitext(dlg.path)
+        if not path.endswith(out_ext) and len(ext) != 0:
+            raise ValueError("The extension '%s' is not supported." % ext)
+        path = path + out_ext
+
+        if os.path.exists(path):
+            answer = confirm(None, "The file %r already exists. Should it "
+                             "be replaced?" % path, "Overwrite File?")
+            if answer != YES:
+                return
+        self.save(path)
+
+    def save(self, path):
+        """Save the marker points
+
+        Parameters
+        ----------
+        path : str
+            Path to the file to write. The kind of file to write is determined
+            based on the extension: '.txt' for tab separated text file,
+            '.pickled' for pickled file.
+        """
+        _write_dig_points(path, self.points)
+
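+# Usage sketch (hypothetical path): save five points as tab separated text.
+# >>> mp = MarkerPoints(points=np.random.rand(5, 3))
+# >>> mp.save('/tmp/markers.txt')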
+
+class MarkerPointSource(MarkerPoints):
+    """MarkerPoints subclass for source files"""
+    file = File(filter=mrk_wildcard, exists=True)
+    name = Property(Str, depends_on='file')
+    dir = Property(Str, depends_on='file')
+
+    use = List(list(range(5)), desc="Which points to use for the interpolated "
+               "marker.")
+    enabled = Property(Bool, depends_on=['points', 'use'])
+    clear = Button(desc="Clear the current marker data")
+    edit = Button(desc="Edit the marker coordinates manually")
+
+    view = mrk_view_basic
+
+    @cached_property
+    def _get_enabled(self):
+        return np.any(self.points)
+
+    @cached_property
+    def _get_dir(self):
+        if self.file:
+            return os.path.dirname(self.file)
+
+    @cached_property
+    def _get_name(self):
+        if self.file:
+            return os.path.basename(self.file)
+
+    @on_trait_change('file')
+    def load(self, fname):
+        if not fname:
+            self.reset_traits(['points'])
+            return
+
+        try:
+            pts = read_mrk(fname)
+        except Exception as err:
+            error(None, str(err), "Error Reading mrk")
+            self.reset_traits(['points'])
+        else:
+            self.points = pts
+
+    def _clear_fired(self):
+        self.reset_traits(['file', 'points', 'use'])
+
+    def _edit_fired(self):
+        self.edit_traits(view=mrk_view_edit)
+
+
+class MarkerPointDest(MarkerPoints):
+    """MarkerPoints subclass that serves for derived points"""
+    src1 = Instance(MarkerPointSource)
+    src2 = Instance(MarkerPointSource)
+
+    name = Property(Str, depends_on='src1.name,src2.name')
+    dir = Property(Str, depends_on='src1.dir,src2.dir')
+
+    points = Property(Array(float, (5, 3)),
+                      depends_on=['method', 'src1.points', 'src1.use',
+                                  'src2.points', 'src2.use'])
+    enabled = Property(Bool, depends_on=['points'])
+
+    method = Enum('Transform', 'Average', desc="Transform: estimate a rotation"
+                  "/translation from mrk1 to mrk2; Average: use the average "
+                  "of the mrk1 and mrk2 coordinates for each point.")
+
+    view = View(VGroup(Item('method', style='custom'),
+                       Item('save_as', enabled_when='can_save',
+                            show_label=False)))
+
+    @cached_property
+    def _get_dir(self):
+        return self.src1.dir
+
+    @cached_property
+    def _get_name(self):
+        n1 = self.src1.name
+        n2 = self.src2.name
+
+        if not n1:
+            if n2:
+                return n2
+            else:
+                return ''
+        elif not n2:
+            return n1
+
+        if n1 == n2:
+            return n1
+
+        i = 0
+        l1 = len(n1) - 1
+        l2 = len(n2) - 1
+        while n1[i] == n2[i]:
+            if i == l1:
+                return n1
+            elif i == l2:
+                return n2
+
+            i += 1
+
+        return n1[:i]
+
+    @cached_property
+    def _get_enabled(self):
+        return np.any(self.points)
+
+    @cached_property
+    def _get_points(self):
+        # in case only one or no source is enabled
+        if not (self.src1 and self.src1.enabled):
+            if (self.src2 and self.src2.enabled):
+                return self.src2.points
+            else:
+                return np.zeros((5, 3))
+        elif not (self.src2 and self.src2.enabled):
+            return self.src1.points
+
+        # Average method
+        if self.method == 'Average':
+            if len(np.union1d(self.src1.use, self.src2.use)) < 5:
+                error(None, "Need at least one source for each point.",
+                      "Marker Average Error")
+                return np.zeros((5, 3))
+
+            pts = (self.src1.points + self.src2.points) / 2.
+            for i in np.setdiff1d(self.src1.use, self.src2.use):
+                pts[i] = self.src1.points[i]
+            for i in np.setdiff1d(self.src2.use, self.src1.use):
+                pts[i] = self.src2.points[i]
+
+            return pts
+
+        # Transform method
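+        # (fit the full mrk1 -> mrk2 rotation/translation on the shared
+        # points, then apply half of it to each side so both marker sets
+        # meet near their common midpoint)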
+        idx = np.intersect1d(self.src1.use, self.src2.use, assume_unique=True)
+        if len(idx) < 3:
+            error(None, "Need at least three shared points for trans"
+                  "formation.", "Marker Interpolation Error")
+            return np.zeros((5, 3))
+
+        src_pts = self.src1.points[idx]
+        tgt_pts = self.src2.points[idx]
+        est = fit_matched_points(src_pts, tgt_pts, out='params')
+        rot = np.array(est[:3]) / 2.
+        tra = np.array(est[3:]) / 2.
+
+        if len(self.src1.use) == 5:
+            trans = np.dot(translation(*tra), rotation(*rot))
+            pts = apply_trans(trans, self.src1.points)
+        elif len(self.src2.use) == 5:
+            trans = np.dot(translation(* -tra), rotation(* -rot))
+            pts = apply_trans(trans, self.src2.points)
+        else:
+            trans1 = np.dot(translation(*tra), rotation(*rot))
+            pts = apply_trans(trans1, self.src1.points)
+            trans2 = np.dot(translation(* -tra), rotation(* -rot))
+            for i in np.setdiff1d(self.src2.use, self.src1.use):
+                pts[i] = apply_trans(trans2, self.src2.points[i])
+
+        return pts
+
+
+class CombineMarkersModel(HasPrivateTraits):
+    mrk1_file = Instance(File)
+    mrk2_file = Instance(File)
+    mrk1 = Instance(MarkerPointSource)
+    mrk2 = Instance(MarkerPointSource)
+    mrk3 = Instance(MarkerPointDest)
+
+    clear = Button(desc="Clear the current marker data")
+
+    # stats
+    distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points'])
+
+    def _clear_fired(self):
+        self.mrk1.clear = True
+        self.mrk2.clear = True
+        self.mrk3.reset_traits(['method'])
+
+    def _mrk1_default(self):
+        mrk = MarkerPointSource()
+        return mrk
+
+    def _mrk1_file_default(self):
+        return self.mrk1.trait('file')
+
+    def _mrk2_default(self):
+        mrk = MarkerPointSource()
+        return mrk
+
+    def _mrk2_file_default(self):
+        return self.mrk2.trait('file')
+
+    def _mrk3_default(self):
+        mrk = MarkerPointDest(src1=self.mrk1, src2=self.mrk2)
+        return mrk
+
+    @cached_property
+    def _get_distance(self):
+        if (self.mrk1 is None or self.mrk2 is None or
+                (not np.any(self.mrk1.points)) or
+                (not np.any(self.mrk2.points))):
+            return ""
+
+        ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1))
+        desc = '\t'.join('%.1f mm' % (d * 1000) for d in ds)
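+        # one entry per marker point, e.g. "2.0 mm\t1.5 mm\t..."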
+        return desc
+
+
+class CombineMarkersPanel(HasTraits):
+    """Has two marker points sources and interpolates to a third one"""
+    model = Instance(CombineMarkersModel, ())
+
+    # model references for UI
+    mrk1 = Instance(MarkerPointSource)
+    mrk2 = Instance(MarkerPointSource)
+    mrk3 = Instance(MarkerPointDest)
+    distance = Str
+
+    # Visualization
+    scene = Instance(MlabSceneModel)
+    scale = Float(5e-3)
+    mrk1_obj = Instance(PointObject)
+    mrk2_obj = Instance(PointObject)
+    mrk3_obj = Instance(PointObject)
+    trans = Array()
+
+    view = View(VGroup(VGroup(Item('mrk1', style='custom'),
+                              Item('mrk1_obj', style='custom'),
+                              show_labels=False,
+                              label="Source Marker 1", show_border=True),
+                       VGroup(Item('mrk2', style='custom'),
+                              Item('mrk2_obj', style='custom'),
+                              show_labels=False,
+                              label="Source Marker 2", show_border=True),
+                       VGroup(Item('distance', style='readonly'),
+                              label='Stats', show_border=True),
+                       VGroup(Item('mrk3', style='custom'),
+                              Item('mrk3_obj', style='custom'),
+                              show_labels=False,
+                              label="New Marker", show_border=True),
+                       ))
+
+    def _mrk1_default(self):
+        return self.model.mrk1
+
+    def _mrk2_default(self):
+        return self.model.mrk2
+
+    def _mrk3_default(self):
+        return self.model.mrk3
+
+    def __init__(self, *args, **kwargs):
+        super(CombineMarkersPanel, self).__init__(*args, **kwargs)
+
+        m = self.model
+        m.sync_trait('distance', self, 'distance', mutual=False)
+
+        self.mrk1_obj = PointObject(scene=self.scene, color=(155, 55, 55),
+                                    point_scale=self.scale)
+        self.sync_trait('trans', self.mrk1_obj, mutual=False)
+        m.mrk1.sync_trait('points', self.mrk1_obj, 'points', mutual=False)
+        m.mrk1.sync_trait('enabled', self.mrk1_obj, 'visible',
+                          mutual=False)
+
+        self.mrk2_obj = PointObject(scene=self.scene, color=(55, 155, 55),
+                                    point_scale=self.scale)
+        self.sync_trait('trans', self.mrk2_obj, mutual=False)
+        m.mrk2.sync_trait('points', self.mrk2_obj, 'points', mutual=False)
+        m.mrk2.sync_trait('enabled', self.mrk2_obj, 'visible',
+                          mutual=False)
+
+        self.mrk3_obj = PointObject(scene=self.scene, color=(150, 200, 255),
+                                    point_scale=self.scale)
+        self.sync_trait('trans', self.mrk3_obj, mutual=False)
+        m.mrk3.sync_trait('points', self.mrk3_obj, 'points', mutual=False)
+        m.mrk3.sync_trait('enabled', self.mrk3_obj, 'visible', mutual=False)
+
+
+class CombineMarkersFrame(HasTraits):
+    """GUI for interpolating between two KIT marker files
+
+    Parameters
+    ----------
+    mrk1, mrk2 : str
+        Path to pre- and post-measurement marker files (*.sqd) or empty string.
+    """
+    model = Instance(CombineMarkersModel, ())
+    scene = Instance(MlabSceneModel, ())
+    headview = Instance(HeadViewController)
+    panel = Instance(CombineMarkersPanel)
+
+    def _headview_default(self):
+        return HeadViewController(scene=self.scene, system='ALS')
+
+    def _panel_default(self):
+        return CombineMarkersPanel(model=self.model, scene=self.scene)
+
+    view = View(HGroup(Item('scene',
+                            editor=SceneEditor(scene_class=MayaviScene),
+                            dock='vertical'),
+                       VGroup(headview_borders,
+                              Item('panel', style="custom"),
+                              show_labels=False),
+                       show_labels=False,
+                       ),
+                width=1100, resizable=True,
+                buttons=NoButtons)
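+
+
+# Illustrative sketch (not part of the original module) of the transform
+# composition used by MarkerPointDest.points above; the offsets are made-up
+# placeholder values.
+if __name__ == '__main__':
+    import numpy as np
+    from mne.transforms import apply_trans, rotation, translation
+
+    pts = np.array([[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]])
+    tra = (0.001, 0., 0.)        # translation in m
+    rot = (0., 0., np.pi / 180)  # rotation in rad
+    trans = np.dot(translation(*tra), rotation(*rot))  # 4 x 4 rigid transform
+    print(apply_trans(trans, pts))  # points moved toward the other marker set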
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_viewer.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_viewer.py
new file mode 100644
index 0000000..f90a219
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/_viewer.py
@@ -0,0 +1,331 @@
+"""Mayavi/traits GUI visualization elements"""
+
+# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import numpy as np
+
+# allow import without traits
+try:
+    from mayavi.mlab import pipeline, text3d
+    from mayavi.modules.glyph import Glyph
+    from mayavi.modules.surface import Surface
+    from mayavi.sources.vtk_data_source import VTKDataSource
+    from mayavi.tools.mlab_scene_model import MlabSceneModel
+    from pyface.api import error
+    from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
+                            cached_property, Instance, Property, Array, Bool,
+                            Button, Color, Enum, Float, Int, List, Range, Str)
+    from traitsui.api import View, Item, Group, HGroup, VGrid, VGroup
+except Exception:
+    from ..utils import trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MlabSceneModel = Array = Bool = \
+        Button = Color = Enum = Float = Instance = Int = List = Property = \
+        Range = Str = View = Item = Group = HGroup = VGrid = VGroup = \
+        Glyph = Surface = VTKDataSource = trait_wraith
+
+from ..transforms import apply_trans
+
+
+headview_item = Item('headview', style='custom', show_label=False)
+headview_borders = VGroup(Item('headview', style='custom', show_label=False),
+                          show_border=True, label='View')
+defaults = {'mri_fid_scale': 1e-2, 'hsp_fid_scale': 3e-2,
+            'hsp_fid_opacity': 0.3, 'hsp_points_scale': 4e-3,
+            'mri_color': (252, 227, 191), 'hsp_point_color': (255, 255, 255),
+            'lpa_color': (255, 0, 0), 'nasion_color': (0, 255, 0),
+            'rpa_color': (0, 0, 255)}
+
+
+def _testing_mode():
+    """Helper to determine if we're running tests"""
+    return (os.getenv('_MNE_GUI_TESTING_MODE', '') == 'true')
+
+
+class HeadViewController(HasTraits):
+    """
+    Set head views for Anterior-Left-Superior coordinate system
+
+    Parameters
+    ----------
+    system : 'RAS' | 'ALS' | 'ARI'
+        Coordinate system described as initials for directions associated with
+        the x, y, and z axes. Relevant terms are: Anterior, Right, Left,
+        Superior, Inferior.
+    """
+    system = Enum("RAS", "ALS", "ARI", desc="Coordinate system: directions of "
+                  "the x, y, and z axis.")
+
+    right = Button()
+    front = Button()
+    left = Button()
+    top = Button()
+
+    scale = Float(0.16)
+
+    scene = Instance(MlabSceneModel)
+
+    view = View(VGrid('0', 'top', '0', Item('scale', label='Scale',
+                                            show_label=True),
+                      'right', 'front', 'left', show_labels=False, columns=4))
+
+    @on_trait_change('scene.activated')
+    def _init_view(self):
+        self.scene.parallel_projection = True
+
+        # apparently scene.activated fires several times
+        if self.scene.renderer:
+            self.sync_trait('scale', self.scene.camera, 'parallel_scale')
+            # and apparently this does not happen by default:
+            self.on_trait_change(self.scene.render, 'scale')
+
+    @on_trait_change('top,left,right,front')
+    def on_set_view(self, view, _):
+        if self.scene is None:
+            return
+
+        system = self.system
+        kwargs = None
+
+        if system == 'ALS':
+            if view == 'front':
+                kwargs = dict(azimuth=0, elevation=90, roll=-90)
+            elif view == 'left':
+                kwargs = dict(azimuth=90, elevation=90, roll=180)
+            elif view == 'right':
+                kwargs = dict(azimuth=-90, elevation=90, roll=0)
+            elif view == 'top':
+                kwargs = dict(azimuth=0, elevation=0, roll=-90)
+        elif system == 'RAS':
+            if view == 'front':
+                kwargs = dict(azimuth=90, elevation=90, roll=180)
+            elif view == 'left':
+                kwargs = dict(azimuth=180, elevation=90, roll=90)
+            elif view == 'right':
+                kwargs = dict(azimuth=0, elevation=90, roll=270)
+            elif view == 'top':
+                kwargs = dict(azimuth=90, elevation=0, roll=180)
+        elif system == 'ARI':
+            if view == 'front':
+                kwargs = dict(azimuth=0, elevation=90, roll=90)
+            elif view == 'left':
+                kwargs = dict(azimuth=-90, elevation=90, roll=180)
+            elif view == 'right':
+                kwargs = dict(azimuth=90, elevation=90, roll=0)
+            elif view == 'top':
+                kwargs = dict(azimuth=0, elevation=180, roll=90)
+        else:
+            raise ValueError("Invalid system: %r" % system)
+
+        if kwargs is None:
+            raise ValueError("Invalid view: %r" % view)
+
+        if not _testing_mode():
+            self.scene.mlab.view(distance=None, reset_roll=True,
+                                 figure=self.scene.mayavi_scene, **kwargs)
+
+
+class Object(HasPrivateTraits):
+    """Represents a 3d object in a mayavi scene"""
+    points = Array(float, shape=(None, 3))
+    trans = Array()
+    name = Str
+
+    scene = Instance(MlabSceneModel, ())
+    src = Instance(VTKDataSource)
+
+    color = Color()
+    rgbcolor = Property(depends_on='color')
+    point_scale = Float(10, label='Point Scale')
+    opacity = Range(low=0., high=1., value=1.)
+    visible = Bool(True)
+
+    @cached_property
+    def _get_rgbcolor(self):
+        if hasattr(self.color, 'Get'):  # wx
+            color = tuple(v / 255. for v in self.color.Get())
+        else:
+            color = self.color.getRgbF()[:3]
+        return color
+
+    @on_trait_change('trans,points')
+    def _update_points(self):
+        """Update the location of the plotted points"""
+        if not hasattr(self.src, 'data'):
+            return
+
+        trans = self.trans
+        if np.any(trans):
+            if trans.ndim == 0 or trans.shape == (3,) or trans.shape == (1, 3):
+                pts = self.points * trans
+            elif trans.shape == (3, 3):
+                pts = np.dot(self.points, trans.T)
+            elif trans.shape == (4, 4):
+                pts = apply_trans(trans, self.points)
+            else:
+                err = ("trans must be a scalar, a length 3 sequence, or an "
+                       "array of shape (1, 3), (3, 3) or (4, 4). "
+                       "Got %s" % str(trans))
+                error(None, err, "Display Error")
+                raise ValueError(err)
+        else:
+            pts = self.points
+
+        self.src.data.points = pts
+
+
+class PointObject(Object):
+    """Represents a group of individual points in a mayavi scene"""
+    label = Bool(False, enabled_when='visible')
+    text3d = List
+
+    glyph = Instance(Glyph)
+    resolution = Int(8)
+
+    def __init__(self, view='points', *args, **kwargs):
+        """
+        Parameters
+        ----------
+        view : 'points' | 'cloud'
+            Whether the view options should be tailored to individual points
+            or a point cloud.
+        """
+        self._view = view
+        super(PointObject, self).__init__(*args, **kwargs)
+
+    def default_traits_view(self):
+        color = Item('color', show_label=False)
+        scale = Item('point_scale', label='Size')
+        if self._view == 'points':
+            visible = Item('visible', label='Show', show_label=True)
+            view = View(HGroup(visible, color, scale, 'label'))
+        elif self._view == 'cloud':
+            visible = Item('visible', show_label=False)
+            view = View(HGroup(visible, color, scale))
+        else:
+            raise ValueError("Invalid view option: %r" % self._view)
+        return view
+
+    @on_trait_change('label')
+    def _show_labels(self, show):
+        self.scene.disable_render = True
+        while self.text3d:
+            text = self.text3d.pop()
+            text.remove()
+
+        if show:
+            fig = self.scene.mayavi_scene
+            for i, pt in enumerate(np.array(self.src.data.points)):
+                x, y, z = pt
+                t = text3d(x, y, z, ' %i' % i, scale=.01, color=self.rgbcolor,
+                           figure=fig)
+                self.text3d.append(t)
+
+        self.scene.disable_render = False
+
+    @on_trait_change('visible')
+    def _on_hide(self):
+        if not self.visible:
+            self.label = False
+
+    @on_trait_change('scene.activated')
+    def _plot_points(self):
+        """Add the points to the mayavi pipeline"""
+#         _scale = self.scene.camera.parallel_scale
+
+        if hasattr(self.glyph, 'remove'):
+            self.glyph.remove()
+        if hasattr(self.src, 'remove'):
+            self.src.remove()
+
+        if not _testing_mode():
+            fig = self.scene.mayavi_scene
+        else:
+            fig = None
+
+        x, y, z = self.points.T
+        scatter = pipeline.scalar_scatter(x, y, z)
+        glyph = pipeline.glyph(scatter, color=self.rgbcolor, figure=fig,
+                               scale_factor=self.point_scale, opacity=1.,
+                               resolution=self.resolution)
+        self.src = scatter
+        self.glyph = glyph
+
+        self.sync_trait('point_scale', self.glyph.glyph.glyph, 'scale_factor')
+        self.sync_trait('rgbcolor', self.glyph.actor.property, 'color',
+                        mutual=False)
+        self.sync_trait('visible', self.glyph)
+        self.sync_trait('opacity', self.glyph.actor.property)
+        self.on_trait_change(self._update_points, 'points')
+
+#         self.scene.camera.parallel_scale = _scale
+
+    def _resolution_changed(self, new):
+        if not self.glyph:
+            return
+
+        self.glyph.glyph.glyph_source.glyph_source.phi_resolution = new
+        self.glyph.glyph.glyph_source.glyph_source.theta_resolution = new
+
+
+class SurfaceObject(Object):
+    """Represents a solid object in a mayavi scene
+
+    Notes
+    -----
+    Doesn't automatically update the plot because an update requires both
+    :attr:`points` and :attr:`tri`. Call :meth:`plot` after updating both
+    attributes.
+
+    """
+    rep = Enum("Surface", "Wireframe")
+    tri = Array(int, shape=(None, 3))
+
+    surf = Instance(Surface)
+
+    view = View(HGroup(Item('visible', show_label=False),
+                       Item('color', show_label=False), Item('opacity')))
+
+    def clear(self):
+        if hasattr(self.src, 'remove'):
+            self.src.remove()
+        if hasattr(self.surf, 'remove'):
+            self.surf.remove()
+        self.reset_traits(['src', 'surf'])
+
+    @on_trait_change('scene.activated')
+    def plot(self):
+        """Add the points to the mayavi pipeline"""
+        _scale = self.scene.camera.parallel_scale if not _testing_mode() else 1
+        self.clear()
+
+        if not np.any(self.tri):
+            return
+
+        fig = self.scene.mayavi_scene
+
+        x, y, z = self.points.T
+
+        if self.rep == 'Wireframe':
+            rep = 'wireframe'
+        else:
+            rep = 'surface'
+
+        src = pipeline.triangular_mesh_source(x, y, z, self.tri, figure=fig)
+        surf = pipeline.surface(src, figure=fig, color=self.rgbcolor,
+                                opacity=self.opacity,
+                                representation=rep, line_width=1)
+
+        self.src = src
+        self.surf = surf
+
+        self.sync_trait('visible', self.surf, 'visible')
+        self.sync_trait('rgbcolor', self.surf.actor.property, 'color',
+                        mutual=False)
+        self.sync_trait('opacity', self.surf.actor.property, 'opacity')
+
+        if not _testing_mode():
+            self.scene.camera.parallel_scale = _scale
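+
+
+# Illustrative sketch (not part of the original module): the ``trans`` shapes
+# accepted by Object._update_points above, reusing the module-level numpy and
+# apply_trans imports.
+if __name__ == '__main__':
+    pts = np.eye(3)
+    print(pts * 2.0)                     # scalar: isotropic scaling
+    print(pts * np.array([1., 2., 3.]))  # shape (3,): per-axis scaling
+    print(np.dot(pts, np.eye(3).T))      # shape (3, 3): linear map
+    print(apply_trans(np.eye(4), pts))   # shape (4, 4): affine transform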
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_coreg_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_coreg_gui.py
new file mode 100644
index 0000000..a82e09d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_coreg_gui.py
@@ -0,0 +1,187 @@
+# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+import numpy as np
+from numpy.testing import assert_allclose
+from nose.tools import (assert_equal, assert_almost_equal, assert_false,
+                        assert_raises, assert_true)
+import warnings
+
+import mne
+from mne.datasets import testing
+from mne.io.kit.tests import data_dir as kit_data_dir
+from mne.utils import (_TempDir, requires_traits, requires_mne,
+                       requires_freesurfer, run_tests_if_main)
+from mne.externals.six import string_types
+
+
+data_path = testing.data_path(download=False)
+raw_path = os.path.join(data_path, 'MEG', 'sample',
+                        'sample_audvis_trunc_raw.fif')
+fname_trans = os.path.join(data_path, 'MEG', 'sample',
+                           'sample_audvis_trunc-trans.fif')
+kit_raw_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
+subjects_dir = os.path.join(data_path, 'subjects')
+warnings.simplefilter('always')
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_coreg_model():
+    """Test CoregModel"""
+    from mne.gui._coreg_gui import CoregModel
+    tempdir = _TempDir()
+    trans_dst = os.path.join(tempdir, 'test-trans.fif')
+
+    model = CoregModel()
+    assert_raises(RuntimeError, model.save_trans, 'blah.fif')
+
+    model.mri.subjects_dir = subjects_dir
+    model.mri.subject = 'sample'
+
+    assert_false(model.mri.fid_ok)
+    model.mri.lpa = [[-0.06, 0, 0]]
+    model.mri.nasion = [[0, 0.05, 0]]
+    model.mri.rpa = [[0.08, 0, 0]]
+    assert_true(model.mri.fid_ok)
+
+    model.hsp.file = raw_path
+    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
+    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
+    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
+    assert_true(model.has_fid_data)
+
+    lpa_distance = model.lpa_distance
+    nasion_distance = model.nasion_distance
+    rpa_distance = model.rpa_distance
+    avg_point_distance = np.mean(model.point_distance)
+
+    model.fit_auricular_points()
+    old_x = lpa_distance ** 2 + rpa_distance ** 2
+    new_x = model.lpa_distance ** 2 + model.rpa_distance ** 2
+    assert_true(new_x < old_x)
+
+    model.fit_fiducials()
+    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
+    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
+             model.nasion_distance ** 2)
+    assert_true(new_x < old_x)
+
+    model.fit_hsp_points()
+    assert_true(np.mean(model.point_distance) < avg_point_distance)
+
+    model.save_trans(trans_dst)
+    trans = mne.read_trans(trans_dst)
+    assert_allclose(trans['trans'], model.head_mri_trans)
+
+    # test restoring trans
+    x, y, z, rot_x, rot_y, rot_z = .1, .2, .05, 1.5, 0.1, -1.2
+    model.trans_x = x
+    model.trans_y = y
+    model.trans_z = z
+    model.rot_x = rot_x
+    model.rot_y = rot_y
+    model.rot_z = rot_z
+    trans = model.head_mri_trans
+    model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
+                        "rot_z"])
+    assert_equal(model.trans_x, 0)
+    model.set_trans(trans)
+    assert_almost_equal(model.trans_x, x)
+    assert_almost_equal(model.trans_y, y)
+    assert_almost_equal(model.trans_z, z)
+    assert_almost_equal(model.rot_x, rot_x)
+    assert_almost_equal(model.rot_y, rot_y)
+    assert_almost_equal(model.rot_z, rot_z)
+
+    # info
+    assert_true(isinstance(model.fid_eval_str, string_types))
+    assert_true(isinstance(model.points_eval_str, string_types))
+
+    model.get_prepare_bem_model_job('sample')
+    model.load_trans(fname_trans)
+
+    from mne.gui._coreg_gui import CoregFrame
+    x = CoregFrame(raw_path, 'sample', subjects_dir)
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits spews warnings
+            warnings.simplefilter('always')
+            x._init_plot()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
+
+
+@testing.requires_testing_data
+@requires_traits
+@requires_mne
+@requires_freesurfer
+def test_coreg_model_with_fsaverage():
+    """Test CoregModel"""
+    tempdir = _TempDir()
+    from mne.gui._coreg_gui import CoregModel
+
+    mne.create_default_subject(subjects_dir=tempdir)
+
+    model = CoregModel()
+    model.mri.subjects_dir = tempdir
+    model.mri.subject = 'fsaverage'
+    assert_true(model.mri.fid_ok)
+
+    model.hsp.file = raw_path
+    lpa_distance = model.lpa_distance
+    nasion_distance = model.nasion_distance
+    rpa_distance = model.rpa_distance
+    avg_point_distance = np.mean(model.point_distance)
+
+    # test hsp point omission
+    model.trans_y = -0.008
+    model.fit_auricular_points()
+    model.omit_hsp_points(0.02)
+    assert_equal(model.hsp.n_omitted, 1)
+    model.omit_hsp_points(reset=True)
+    assert_equal(model.hsp.n_omitted, 0)
+    model.omit_hsp_points(0.02, reset=True)
+    assert_equal(model.hsp.n_omitted, 1)
+
+    # scale with 1 parameter
+    model.n_scale_params = 1
+
+    model.fit_scale_auricular_points()
+    old_x = lpa_distance ** 2 + rpa_distance ** 2
+    new_x = model.lpa_distance ** 2 + model.rpa_distance ** 2
+    assert_true(new_x < old_x)
+
+    model.fit_scale_fiducials()
+    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
+    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
+             model.nasion_distance ** 2)
+    assert_true(new_x < old_x)
+
+    model.fit_scale_hsp_points()
+    avg_point_distance_1param = np.mean(model.point_distance)
+    assert_true(avg_point_distance_1param < avg_point_distance)
+
+    desc, func, args, kwargs = model.get_scaling_job('test')
+    assert_true(isinstance(desc, string_types))
+    assert_equal(args[0], 'fsaverage')
+    assert_equal(args[1], 'test')
+    assert_allclose(args[2], model.scale)
+    assert_equal(kwargs['subjects_dir'], tempdir)
+
+    # scale with 3 parameters
+    model.n_scale_params = 3
+    model.fit_scale_hsp_points()
+    assert_true(np.mean(model.point_distance) < avg_point_distance_1param)
+
+    # test switching raw disables point omission
+    assert_equal(model.hsp.n_omitted, 1)
+    with warnings.catch_warnings(record=True):
+        model.hsp.file = kit_raw_path
+    assert_equal(model.hsp.n_omitted, 0)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_fiducials_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_fiducials_gui.py
new file mode 100644
index 0000000..4eea1f7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_fiducials_gui.py
@@ -0,0 +1,67 @@
+# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+from numpy.testing import assert_array_equal
+from nose.tools import assert_true, assert_false, assert_equal
+
+from mne.datasets import testing
+from mne.utils import _TempDir, requires_traits
+
+sample_path = testing.data_path(download=False)
+subjects_dir = os.path.join(sample_path, 'subjects')
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_mri_model():
+    """Test MRIHeadWithFiducialsModel Traits Model"""
+    from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test-fiducials.fif')
+
+    model = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir)
+    model.subject = 'sample'
+    assert_equal(model.default_fid_fname[-20:], "sample-fiducials.fif")
+    assert_false(model.can_reset)
+    assert_false(model.can_save)
+    model.lpa = [[-1, 0, 0]]
+    model.nasion = [[0, 1, 0]]
+    model.rpa = [[1, 0, 0]]
+    assert_false(model.can_reset)
+    assert_true(model.can_save)
+
+    bem_fname = os.path.basename(model.bem.file)
+    assert_false(model.can_reset)
+    assert_equal(bem_fname, 'sample-head.fif')
+
+    model.save(tgt_fname)
+    assert_equal(model.fid_file, tgt_fname)
+
+    # resetting the file should not affect the model's fiducials
+    model.fid_file = ''
+    assert_array_equal(model.lpa, [[-1, 0, 0]])
+    assert_array_equal(model.nasion, [[0, 1, 0]])
+    assert_array_equal(model.rpa, [[1, 0, 0]])
+
+    # reset model
+    model.lpa = [[0, 0, 0]]
+    model.nasion = [[0, 0, 0]]
+    model.rpa = [[0, 0, 0]]
+    assert_array_equal(model.lpa, [[0, 0, 0]])
+    assert_array_equal(model.nasion, [[0, 0, 0]])
+    assert_array_equal(model.rpa, [[0, 0, 0]])
+
+    # loading the file should assign the model's fiducials
+    model.fid_file = tgt_fname
+    assert_array_equal(model.lpa, [[-1, 0, 0]])
+    assert_array_equal(model.nasion, [[0, 1, 0]])
+    assert_array_equal(model.rpa, [[1, 0, 0]])
+
+    # after changing from file model should be able to reset
+    model.nasion = [[1, 1, 1]]
+    assert_true(model.can_reset)
+    model.reset = True
+    assert_array_equal(model.nasion, [[0, 1, 0]])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_file_traits.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_file_traits.py
new file mode 100644
index 0000000..ea90fb1
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_file_traits.py
@@ -0,0 +1,104 @@
+# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+from numpy import array
+from numpy.testing import assert_allclose
+from nose.tools import assert_equal, assert_false, assert_raises, assert_true
+
+from mne.datasets import testing
+from mne.io.tests import data_dir as fiff_data_dir
+from mne.utils import (_TempDir, requires_mne, requires_freesurfer,
+                       requires_traits)
+
+data_path = testing.data_path(download=False)
+subjects_dir = os.path.join(data_path, 'subjects')
+bem_path = os.path.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem.fif')
+inst_path = os.path.join(data_path, 'MEG', 'sample',
+                         'sample_audvis_trunc_raw.fif')
+fid_path = os.path.join(fiff_data_dir, 'fsaverage-fiducials.fif')
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_bem_source():
+    """Test BemSource"""
+    from mne.gui._file_traits import BemSource
+
+    bem = BemSource()
+    assert_equal(bem.points.shape, (0, 3))
+    assert_equal(bem.tris.shape, (0, 3))
+
+    bem.file = bem_path
+    assert_equal(bem.points.shape, (642, 3))
+    assert_equal(bem.tris.shape, (1280, 3))
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_fiducials_source():
+    """Test FiducialsSource"""
+    from mne.gui._file_traits import FiducialsSource
+
+    fid = FiducialsSource()
+    fid.file = fid_path
+
+    points = array([[-0.08061612, -0.02908875, -0.04131077],
+                    [0.00146763, 0.08506715, -0.03483611],
+                    [0.08436285, -0.02850276, -0.04127743]])
+    assert_allclose(fid.points, points, 1e-6)
+
+    fid.file = ''
+    assert_equal(fid.points, None)
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_inst_source():
+    """Test InstSource"""
+    from mne.gui._file_traits import InstSource
+
+    inst = InstSource()
+    assert_equal(inst.inst_fname, '-')
+
+    inst.file = inst_path
+    assert_equal(inst.inst_dir, os.path.dirname(inst_path))
+
+    lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]])
+    nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]])
+    rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]])
+    assert_allclose(inst.lpa, lpa)
+    assert_allclose(inst.nasion, nasion)
+    assert_allclose(inst.rpa, rpa)
+
+
+@testing.requires_testing_data
+@requires_traits
+def test_subject_source():
+    """Test SubjectSelector"""
+    from mne.gui._file_traits import MRISubjectSource
+
+    mri = MRISubjectSource()
+    mri.subjects_dir = subjects_dir
+    assert_true('sample' in mri.subjects)
+    mri.subject = 'sample'
+
+
+@testing.requires_testing_data
+@requires_traits
+@requires_mne
+@requires_freesurfer
+def test_subject_source_with_fsaverage():
+    """Test SubjectSelector"""
+    from mne.gui._file_traits import MRISubjectSource
+    tempdir = _TempDir()
+
+    mri = MRISubjectSource()
+    assert_false(mri.can_create_fsaverage)
+    assert_raises(RuntimeError, mri.create_fsaverage)
+
+    mri.subjects_dir = tempdir
+    assert_true(mri.can_create_fsaverage)
+    mri.create_fsaverage()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_kit2fiff_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_kit2fiff_gui.py
new file mode 100644
index 0000000..4e7d90a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_kit2fiff_gui.py
@@ -0,0 +1,106 @@
+# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import warnings
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+from nose.tools import assert_true, assert_false, assert_equal
+
+import mne
+from mne.io.kit.tests import data_dir as kit_data_dir
+from mne.io import Raw
+from mne.utils import _TempDir, requires_traits, run_tests_if_main
+
+mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
+mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
+sqd_path = os.path.join(kit_data_dir, 'test.sqd')
+hsp_path = os.path.join(kit_data_dir, 'test_hsp.txt')
+fid_path = os.path.join(kit_data_dir, 'test_elp.txt')
+fif_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
+
+warnings.simplefilter('always')
+
+
+@requires_traits
+def test_kit2fiff_model():
+    """Test CombineMarkersModel Traits Model"""
+    from mne.gui._kit2fiff_gui import Kit2FiffModel, Kit2FiffPanel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test-raw.fif')
+
+    model = Kit2FiffModel()
+    assert_false(model.can_save)
+    model.markers.mrk1.file = mrk_pre_path
+    model.markers.mrk2.file = mrk_post_path
+    model.sqd_file = sqd_path
+    model.hsp_file = hsp_path
+    assert_false(model.can_save)
+    model.fid_file = fid_path
+
+    # export raw
+    assert_true(model.can_save)
+    raw_out = model.get_raw()
+    raw_out.save(tgt_fname)
+    raw = Raw(tgt_fname)
+
+    # Compare exported raw with the original binary conversion
+    raw_bin = Raw(fif_path)
+    trans_bin = raw.info['dev_head_t']['trans']
+    want_keys = list(raw_bin.info.keys())
+    assert_equal(sorted(want_keys), sorted(list(raw.info.keys())))
+    trans_transform = raw_bin.info['dev_head_t']['trans']
+    assert_allclose(trans_transform, trans_bin, 0.1)
+
+    # Averaging markers
+    model.markers.mrk3.method = "Average"
+    trans_avg = model.dev_head_trans
+    assert_false(np.all(trans_avg == trans_transform))
+    assert_allclose(trans_avg, trans_bin, 0.1)
+
+    # Test exclusion of one marker
+    model.markers.mrk3.method = "Transform"
+    model.use_mrk = [1, 2, 3, 4]
+    assert_false(np.all(model.dev_head_trans == trans_transform))
+    assert_false(np.all(model.dev_head_trans == trans_avg))
+    assert_false(np.all(model.dev_head_trans == np.eye(4)))
+
+    # test setting stim channels
+    model.stim_slope = '+'
+    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')
+
+    model.stim_chs = '<'
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_bin)
+
+    events_rev = events_bin.copy()
+    events_rev[:, 2] = 1
+    model.stim_chs = '>'
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_rev)
+
+    model.stim_chs = 'man'
+    model.stim_chs_manual = list(range(167, 159, -1))
+    raw = model.get_raw()
+    events = mne.find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events, events_bin)
+
+    # test reset
+    model.clear_all()
+    assert_equal(model.use_mrk, [0, 1, 2, 3, 4])
+    assert_equal(model.sqd_file, "")
+
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits warnings
+            warnings.simplefilter('always')
+            Kit2FiffPanel()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_marker_gui.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_marker_gui.py
new file mode 100644
index 0000000..974d965
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/gui/tests/test_marker_gui.py
@@ -0,0 +1,83 @@
+# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_equal
+from nose.tools import assert_true, assert_false
+
+from mne.io.kit.tests import data_dir as kit_data_dir
+from mne.io.kit import read_mrk
+from mne.utils import _TempDir, requires_traits, run_tests_if_main
+
+mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
+mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
+mrk_avg_path = os.path.join(kit_data_dir, 'test_mrk.sqd')
+
+warnings.simplefilter('always')
+
+
+@requires_traits
+def test_combine_markers_model():
+    """Test CombineMarkersModel Traits Model"""
+    from mne.gui._marker_gui import CombineMarkersModel, CombineMarkersPanel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test.txt')
+
+    model = CombineMarkersModel()
+
+    # set one marker file
+    assert_false(model.mrk3.can_save)
+    model.mrk1.file = mrk_pre_path
+    assert_true(model.mrk3.can_save)
+    assert_array_equal(model.mrk3.points, model.mrk1.points)
+
+    # setting the second marker to the same file leaves mrk3 unchanged
+    model.mrk2.file = mrk_pre_path
+    assert_array_equal(model.mrk3.points, model.mrk1.points)
+
+    # set the second marker to the post-measurement file
+    model.mrk2.clear = True
+    model.mrk2.file = mrk_post_path
+    assert_true(np.any(model.mrk3.points))
+    points_interpolate_mrk1_mrk2 = model.mrk3.points
+
+    # change interpolation method
+    model.mrk3.method = 'Average'
+    mrk_avg = read_mrk(mrk_avg_path)
+    assert_array_equal(model.mrk3.points, mrk_avg)
+
+    # clear second marker
+    model.mrk2.clear = True
+    assert_array_equal(model.mrk1.points, model.mrk3.points)
+
+    # I/O
+    model.mrk2.file = mrk_post_path
+    model.mrk3.save(tgt_fname)
+    mrk_io = read_mrk(tgt_fname)
+    assert_array_equal(mrk_io, model.mrk3.points)
+
+    # exclude an individual marker
+    model.mrk1.use = [1, 2, 3, 4]
+    assert_array_equal(model.mrk3.points[0], model.mrk2.points[0])
+    assert_array_equal(model.mrk3.points[1:], mrk_avg[1:])
+
+    # reset model
+    model.clear = True
+    model.mrk1.file = mrk_pre_path
+    model.mrk2.file = mrk_post_path
+    assert_array_equal(model.mrk3.points, points_interpolate_mrk1_mrk2)
+
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits warnings
+            warnings.simplefilter('always')
+            CombineMarkersPanel()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.css b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.css
new file mode 100644
index 0000000..c547283
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.css
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.0.3 (http://getbootstrap.com)
+ * Copyright 2013 Twitter, Inc.
+ * Licensed under http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*! normalize.css v2.1.3 | MIT License | git.io/normalize */article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,video{display:inline-block}audio:not([controls]){display:none;height:0}[hidden],template{display:none}html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a{background:transparent}a:focus{outline:thin dotted}a:active,a:hover{outline:0}h1{margin:.67em 0;font-size:2em}abbr[ti [...]
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.js b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.js
new file mode 100644
index 0000000..f70ebe4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/bootstrap.min.js
@@ -0,0 +1,7 @@
+/*
+ * Bootstrap v3.1.1 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+if(typeof jQuery==="undefined"){throw new Error("Bootstrap's JavaScript requires jQuery")}+function(b){function a(){var e=document.createElement("bootstrap");var d={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in d){if(e.style[c]!==undefined){return{end:d[c]}}}return false}b.fn.emulateTransitionEnd=function(e){var d=false,c=this;b(this).one(b.support.transition.end,function(){d=true} [...]
+
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/d3.v3.min.js b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/d3.v3.min.js
new file mode 120000
index 0000000..b79ad7f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/d3.v3.min.js
@@ -0,0 +1 @@
+../../../../../share/javascript/d3/d3.min.js
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-1.10.2.min.js b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-1.10.2.min.js
new file mode 120000
index 0000000..d0f48cc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-1.10.2.min.js
@@ -0,0 +1 @@
+../../../../../share/javascript/jquery/jquery.min.js
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.css b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.css
new file mode 100644
index 0000000..47047cf
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.css
@@ -0,0 +1,6 @@
+/*! jQuery UI - v1.10.3 - 2013-05-03
+* http://jqueryui.com
+* Includes: jquery.ui.core.css, jquery.ui.accordion.css, jquery.ui.autocomplete.css, jquery.ui.button.css, jquery.ui.datepicker.css, jquery.ui.dialog.css, jquery.ui.menu.css, jquery.ui.progressbar.css, jquery.ui.resizable.css, jquery.ui.selectable.css, jquery.ui.slider.css, jquery.ui.spinner.css, jquery.ui.tabs.css, jquery.ui.tooltip.css
+* To view and modify this theme, visit http://jqueryui.com/themeroller/?ffDefault=Verdana%2CArial%2Csans-serif&fwDefault=normal&fsDefault=1.1em&cornerRadius=4px&bgColorHeader=cccccc&bgTextureHeader=highlight_soft&bgImgOpacityHeader=75&borderColorHeader=aaaaaa&fcHeader=222222&iconColorHeader=222222&bgColorContent=ffffff&bgTextureContent=flat&bgImgOpacityContent=75&borderColorContent=aaaaaa&fcContent=222222&iconColorContent=222222&bgColorDefault=e6e6e6&bgTextureDefault=glass&bgImgOpacityDe [...]
+* Copyright 2013 jQuery Foundation and other contributors Licensed MIT */
+.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:after,.ui-helper-clearfix:before{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-clearfix{min-height:0}.ui-helper-zfix{width:100%;heig [...]
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.js b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.js
new file mode 120000
index 0000000..7b690a2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/html/jquery-ui.min.js
@@ -0,0 +1 @@
+../../../../../share/javascript/jquery-ui/jquery-ui.min.js
\ No newline at end of file
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/__init__.py
new file mode 100644
index 0000000..e198907
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/__init__.py
@@ -0,0 +1,8 @@
+"""Non-Linear sparse inverse solvers"""
+
+# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+from .mxne_inverse import mixed_norm, tf_mixed_norm
+from ._gamma_map import gamma_map
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/_gamma_map.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/_gamma_map.py
new file mode 100644
index 0000000..d615b50
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/_gamma_map.py
@@ -0,0 +1,301 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+# License: Simplified BSD
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg
+
+from ..forward import is_fixed_orient, _to_fixed_ori
+
+from ..minimum_norm.inverse import _check_reference
+from ..utils import logger, verbose
+from ..externals.six.moves import xrange as range
+from .mxne_inverse import (_make_sparse_stc, _prepare_gain,
+                           _reapply_source_weighting, _compute_residual)
+
+
+@verbose
+def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
+                   group_size=1, gammas=None, verbose=None):
+    """Hierarchical Bayes (Gamma-MAP)
+
+    Parameters
+    ----------
+    M : array, shape=(n_sensors, n_times)
+        Observation.
+    G : array, shape=(n_sensors, n_sources)
+        Forward operator.
+    alpha : float
+        Regularization parameter (noise variance).
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter for convergence.
+    group_size : int
+        Number of consecutive sources which use the same gamma.
+    update_mode : int
+        Update mode, 1: MacKay update (default), 2: Modified MacKay update.
+    gammas : array, shape=(n_sources,)
+        Initial values for posterior variances (gammas). If None, a
+        variance of 1.0 is used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array, shape=(n_active, n_times)
+        Estimated source time courses.
+    active_set : array, shape=(n_active,)
+        Indices of active sources.
+
+    References
+    ----------
+    [1] Wipf et al. Analysis of Empirical Bayesian Methods for
+    Neuroelectromagnetic Source Localization, Advances in Neural Information
+    Processing Systems (2007).
+    """
+    G = G.copy()
+    M = M.copy()
+
+    if gammas is None:
+        gammas = np.ones(G.shape[1], dtype=np.float)
+
+    eps = np.finfo(float).eps
+
+    n_sources = G.shape[1]
+    n_sensors, n_times = M.shape
+
+    # apply normalization so the numerical values are sane
+    M_normalize_constant = linalg.norm(np.dot(M, M.T), ord='fro')
+    M /= np.sqrt(M_normalize_constant)
+    alpha /= M_normalize_constant
+    G_normalize_constant = linalg.norm(G, ord=np.inf)
+    G /= G_normalize_constant
+
+    if n_sources % group_size != 0:
+        raise ValueError('Number of sources has to be evenly divisible by '
+                         'the group size')
+
+    n_active = n_sources
+    active_set = np.arange(n_sources)
+
+    gammas_full_old = gammas.copy()
+
+    if update_mode == 2:
+        denom_fun = np.sqrt
+    else:
+        # do nothing
+        def denom_fun(x):
+            return x
+
+    for itno in range(maxit):
+        gammas[np.isnan(gammas)] = 0.0
+
+        gidx = (np.abs(gammas) > eps)
+        active_set = active_set[gidx]
+        gammas = gammas[gidx]
+
+        # update only active gammas (once set to zero it stays at zero)
+        if n_active > len(active_set):
+            n_active = active_set.size
+            G = G[:, gidx]
+
+        CM = alpha * np.eye(n_sensors) + np.dot(G * gammas[np.newaxis, :], G.T)
+        # Invert CM keeping symmetry
+        U, S, V = linalg.svd(CM, full_matrices=False)
+        S = S[np.newaxis, :]
+        CM = np.dot(U * S, U.T)
+        CMinv = np.dot(U / (S + eps), U.T)
+
+        CMinvG = np.dot(CMinv, G)
+        A = np.dot(CMinvG.T, M)  # mult. w. Diag(gamma) in gamma update
+
+        if update_mode == 1:
+            # MacKay fixed point update (10) in [1]
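+            # i.e. gamma_i <- mean_t |gamma_i A_it|^2 / (gamma_i g_i' CMinv g_i),
+            # where gamma_i * A_i is the posterior mean time course of source i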
+            numer = gammas ** 2 * np.mean((A * A.conj()).real, axis=1)
+            denom = gammas * np.sum(G * CMinvG, axis=0)
+        elif update_mode == 2:
+            # modified MacKay fixed point update (11) in [1]
+            numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1))
+            denom = np.sum(G * CMinvG, axis=0)  # sqrt is applied below
+        else:
+            raise ValueError('Invalid value for update_mode')
+
+        if group_size == 1:
+            if denom is None:
+                gammas = numer
+            else:
+                gammas = numer / denom_fun(denom)
+        else:
+            numer_comb = np.sum(numer.reshape(-1, group_size), axis=1)
+            if denom is None:
+                gammas_comb = numer_comb
+            else:
+                denom_comb = np.sum(denom.reshape(-1, group_size), axis=1)
+                gammas_comb = numer_comb / denom_fun(denom_comb)
+
+            gammas = np.repeat(gammas_comb / group_size, group_size)
+
+        # compute convergence criterion
+        gammas_full = np.zeros(n_sources, dtype=np.float)
+        gammas_full[active_set] = gammas
+
+        err = (np.sum(np.abs(gammas_full - gammas_full_old)) /
+               np.sum(np.abs(gammas_full_old)))
+
+        gammas_full_old = gammas_full
+
+        logger.info('Iteration: %d\t active set size: %d\t convergence: %0.3e'
+                    % (itno, len(gammas), err))
+
+        if err < tol:
+            break
+
+        if n_active == 0:
+            break
+
+    if itno < maxit - 1:
+        logger.info('\nConvergence reached!\n')
+    else:
+        logger.info('\nConvergence NOT reached!\n')
+
+    # undo normalization and compute final posterior mean
+    n_const = np.sqrt(M_normalize_constant) / G_normalize_constant
+    x_active = n_const * gammas[:, None] * A
+
+    return x_active, active_set
+
+
+@verbose
+def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
+              xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1,
+              gammas=None, pca=True, return_residual=False,
+              verbose=None):
+    """Hierarchical Bayes (Gamma-MAP) sparse source localization method
+
+    Models each source time course using a zero-mean Gaussian prior with an
+    unknown variance (gamma) parameter. During estimation, most gammas are
+    driven to zero, resulting in a sparse source estimate.
+
+    For fixed-orientation forward operators, a separate gamma is used for each
+    source time course, while for free-orientation forward operators, the same
+    gamma is used for the three source time courses at each source space point
+    (separate gammas can be used in this case by using xyz_same_gamma=False).
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha : float
+        Regularization parameter (noise variance).
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    xyz_same_gamma : bool
+        Use same gamma for xyz current components at each source space point.
+        Recommended for free-orientation forward solutions.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter for convergence.
+    update_mode : int
+        Update mode, 1: MacKay update (default), 2: Modified MacKay update.
+    gammas : array, shape=(n_sources,)
+        Initial values for posterior variances (gammas). If None, a
+        variance of 1.0 is used.
+    pca : bool
+        If True, the rank of the data is reduced to the true dimension.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : instance of SourceEstimate
+        Source time courses.
+    residual : instance of Evoked
+        The residual, i.e. the data not explained by the sources.
+        Only returned if return_residual is True.
+
+    References
+    ----------
+    Wipf et al. Analysis of Empirical Bayesian Methods for Neuroelectromagnetic
+    Source Localization, Advances in Neural Information Processing Systems
+    (2007).
+
+    Wipf et al. A unified Bayesian framework for MEG/EEG source imaging,
+    NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
+    """
+    _check_reference(evoked)
+
+    # make forward solution in fixed orientation if necessary
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    if is_fixed_orient(forward) or not xyz_same_gamma:
+        group_size = 1
+    else:
+        group_size = 3
+
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked.info, noise_cov, pca, depth, loose, None, None)
+
+    # get the data
+    sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']]
+    M = evoked.data[sel]
+
+    # whiten the data
+    logger.info('Whitening data matrix.')
+    M = np.dot(whitener, M)
+
+    # run the optimization
+    X, active_set = _gamma_map_opt(M, gain, alpha, maxit=maxit, tol=tol,
+                                   update_mode=update_mode, gammas=gammas,
+                                   group_size=group_size, verbose=verbose)
+
+    if len(active_set) == 0:
+        raise Exception("No active dipoles found. alpha is too big.")
+
+    # Reapply weights to have correct unit
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    X = _reapply_source_weighting(X, source_weighting,
+                                  active_set, n_dip_per_pos)
+
+    if return_residual:
+        residual = _compute_residual(forward, evoked, X, active_set,
+                                     gain_info)
+
+    if group_size == 1 and not is_fixed_orient(forward):
+        # make sure each source has 3 components
+        active_src = np.unique(active_set // 3)
+        in_pos = 0
+        if len(X) < 3 * len(active_src):
+            X_xyz = np.zeros((3 * len(active_src), X.shape[1]), dtype=X.dtype)
+            for ii in range(len(active_src)):
+                for jj in range(3):
+                    if in_pos >= len(active_set):
+                        break
+                    if (active_set[in_pos] + jj) % 3 == 0:
+                        X_xyz[3 * ii + jj] = X[in_pos]
+                        in_pos += 1
+            X = X_xyz
+
+    tmin = evoked.times[0]
+    tstep = 1.0 / evoked.info['sfreq']
+    stc = _make_sparse_stc(X, active_set, forward, tmin, tstep,
+                           active_is_idx=True, verbose=verbose)
+
+    if return_residual:
+        return stc, residual
+    else:
+        return stc
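+
+
+# Illustrative sketch (not part of the original module); the file names are
+# placeholders for your own data, and alpha needs tuning to the noise level.
+if __name__ == '__main__':
+    import mne
+
+    evoked = mne.read_evokeds('my-ave.fif', condition=0, baseline=(None, 0))
+    forward = mne.read_forward_solution('my-fwd.fif')
+    noise_cov = mne.read_cov('my-cov.fif')
+    stc, residual = gamma_map(evoked, forward, noise_cov, alpha=0.5,
+                              xyz_same_gamma=True, return_residual=True)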
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_debiasing.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_debiasing.py
new file mode 100644
index 0000000..e3c0b89
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_debiasing.py
@@ -0,0 +1,135 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
+#          Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+from ..utils import check_random_state, logger, verbose
+
+
+def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0):
+    """Find the squared largest singular value of the matrix kron(C.T, A)
+
+    It uses power iterations on the map B -> A.T A B C C.T, whose largest
+    eigenvalue is lambda_max(A.T A) * lambda_max(C C.T), i.e. the squared
+    largest singular value of kron(C.T, A).
+
+    Parameters
+    ----------
+    A : array, shape (n, p)
+        An array with as many columns as C has rows.
+    C : array, shape (p, q)
+        An array with as many rows as A has columns.
+    max_iter : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance on the absolute change of the estimate between iterations.
+    random_state : int | RandomState | None
+        Random state for random number generation.
+
+    Returns
+    -------
+    L : float
+        The squared largest singular value.
+
+    Notes
+    -----
+    http://en.wikipedia.org/wiki/Power_iteration
+    """
+    AS_size = C.shape[0]
+    rng = check_random_state(random_state)
+    B = rng.randn(AS_size, AS_size)
+    B /= linalg.norm(B, 'fro')
+    ATA = np.dot(A.T, A)
+    CCT = np.dot(C, C.T)
+    L0 = np.inf
+    for _ in range(max_iter):
+        Y = np.dot(np.dot(ATA, B), CCT)
+        L = linalg.norm(Y, 'fro')
+
+        if abs(L - L0) < tol:
+            break
+
+        B = Y / L
+        L0 = L
+    return L
+
+
+@verbose
+def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None):
+    """Compute scaling to correct amplitude bias
+
+    It solves the following optimization problem using FISTA:
+
+        min_D 1/2 * ||M - G D X||_fro^2
+        s.t. D >= 1 and D is a diagonal matrix
+
+    Reference for the FISTA algorithm:
+    Amir Beck and Marc Teboulle, A Fast Iterative Shrinkage-Thresholding
+    Algorithm for Linear Inverse Problems, SIAM J. Imaging Sci., 2(1),
+    183-202.
+    http://epubs.siam.org/doi/abs/10.1137/080716542
+
+    Parameters
+    ----------
+    M : array
+        The measurement data.
+    G : array
+        The leadfield matrix.
+    X : array
+        The reconstructed time courses with amplitude bias.
+    max_iter : int
+        Maximum number of iterations.
+    tol : float
+        The tolerance on convergence.
+    n_orient : int
+        The number of orientations (1 for fixed and 3 otherwise).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    D : array
+        Debiasing weights.
+    """
+    n_sources = X.shape[0]
+
+    lipschitz_constant = 1.1 * power_iteration_kron(G, X)
+
+    # initializations
+    D = np.ones(n_sources)
+    Y = np.ones(n_sources)
+    t = 1.0
+
+    for i in range(max_iter):
+        D0 = D
+
+        # gradient step
+        R = M - np.dot(G * Y, X)
+        D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant
+        # Equivalent but faster than:
+        # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant
+
+        # prox ie projection on constraint
+        if n_orient != 1:  # take care of orientations
+            # The scaling has to be the same for all orientations
+            D = np.mean(D.reshape(-1, n_orient), axis=1)
+            D = np.tile(D, [n_orient, 1]).T.ravel()
+        D = np.maximum(D, 1.0)
+
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        dt = (t0 - 1.0) / t
+        Y = D + dt * (D - D0)
+
+        Ddiff = linalg.norm(D - D0, np.inf)
+
+        if Ddiff < tol:
+            logger.info("Debiasing converged after %d iterations "
+                        "max(|D - D0|) = %e < %e" % (i, Ddiff, tol))
+            break
+    else:
+        Ddiff = linalg.norm(D - D0, np.inf)
+        logger.info("Debiasing did not converge after %d iterations! "
+                    "max(|D - D0|) = %e >= %e" % (max_iter, Ddiff, tol))
+    return D
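+
+
+# Illustrative synthetic check (not part of the original module); shapes and
+# values are made up. With noiseless data and fixed orientations the true
+# scaling should be recovered up to the convergence tolerance.
+if __name__ == '__main__':
+    rng = np.random.RandomState(0)
+    G = rng.randn(20, 5)           # leadfield (n_sensors, n_sources)
+    X = rng.randn(5, 30)           # source time courses with amplitude bias
+    D_true = 1.0 + rng.rand(5)     # per-source scaling, >= 1 by construction
+    M = np.dot(G * D_true, X)      # noiseless measurements
+
+    D = compute_bias(M, G, X, n_orient=1)
+    print(np.max(np.abs(D - D_true)))  # expected to be small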
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_inverse.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_inverse.py
new file mode 100644
index 0000000..72e5c75
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_inverse.py
@@ -0,0 +1,531 @@
+# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier@gmail.com>
+#
+# License: Simplified BSD
+
+from copy import deepcopy
+import numpy as np
+from scipy import linalg, signal
+
+from ..source_estimate import SourceEstimate
+from ..minimum_norm.inverse import combine_xyz, _prepare_forward
+from ..minimum_norm.inverse import _check_reference
+from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
+from ..io.pick import pick_channels_evoked
+from ..io.proj import deactivate_proj
+from ..utils import logger, verbose
+from ..externals.six.moves import xrange as range
+
+from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver,
+                         norm_l2inf, tf_mixed_norm_solver)
+
+
+@verbose
+def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
+    mask = None
+    if isinstance(weights, SourceEstimate):
+        # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
+        weights = np.max(np.abs(weights.data), axis=1)
+    weights_max = np.max(weights)
+    if weights_min is not None:
+        if weights_min > weights_max:
+            raise ValueError('weights_min > weights_max (%s > %s)' %
+                             (weights_min, weights_max))
+        weights_min = weights_min / weights_max
+    weights = weights / weights_max
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
+    if len(weights) != gain.shape[1]:
+        raise ValueError('weights do not have the correct dimension '
+                         ' (%d != %d)' % (len(weights), gain.shape[1]))
+    if len(source_weighting.shape) == 1:
+        source_weighting *= weights
+    else:
+        source_weighting *= weights[:, None]
+    gain *= weights[None, :]
+
+    if weights_min is not None:
+        mask = (weights > weights_min)
+        gain = gain[:, mask]
+        n_sources = np.sum(mask) // n_dip_per_pos
+        logger.info("Reducing source space to %d sources" % n_sources)
+
+    return gain, source_weighting, mask
+
+
+@verbose
+def _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,
+                         weights_min, verbose=None):
+    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
+                                                       noise_cov, pca)
+
+    logger.info('Whitening lead field matrix.')
+    gain = np.dot(whitener, gain)
+
+    if depth is not None:
+        depth_prior = np.sum(gain ** 2, axis=0) ** depth
+        source_weighting = np.sqrt(depth_prior ** -1.)
+    else:
+        source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)
+
+    if loose is not None and loose != 1.0:
+        source_weighting *= np.sqrt(compute_orient_prior(forward, loose))
+
+    gain *= source_weighting[None, :]
+
+    if weights is None:
+        mask = None
+    else:
+        gain, source_weighting, mask = _prepare_weights(forward, gain,
+                                                        source_weighting,
+                                                        weights, weights_min)
+
+    return gain, gain_info, whitener, source_weighting, mask
+
+
+def _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,
+                  weights_min, verbose=None):
+    if not isinstance(depth, float):
+        raise ValueError('Invalid depth parameter. '
+                         'A float is required (got %s).'
+                         % type(depth))
+    elif depth < 0.0:
+        raise ValueError('Depth parameter must be non-negative (got %s).'
+                         % depth)
+
+    gain, gain_info, whitener, source_weighting, mask = \
+        _prepare_gain_column(forward, info, noise_cov, pca, depth,
+                             loose, weights, weights_min)
+
+    return gain, gain_info, whitener, source_weighting, mask
+
+
+def _reapply_source_weighting(X, source_weighting, active_set,
+                              n_dip_per_pos):
+    X *= source_weighting[active_set][:, None]
+    return X
+
+
+def _compute_residual(forward, evoked, X, active_set, info):
+    # OK, picking based on row_names is safe
+    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
+    residual = evoked.copy()
+    residual = pick_channels_evoked(residual, include=info['ch_names'])
+    r_tmp = residual.copy()
+
+    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
+
+    # Take care of proj
+    active_projs = list()
+    non_active_projs = list()
+    for p in evoked.info['projs']:
+        if p['active']:
+            active_projs.append(p)
+        else:
+            non_active_projs.append(p)
+
+    if len(active_projs) > 0:
+        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
+        r_tmp.apply_proj()
+        r_tmp.add_proj(non_active_projs, remove_existing=False)
+
+    residual.data -= r_tmp.data
+
+    return residual
+
+
+@verbose
+def _make_sparse_stc(X, active_set, forward, tmin, tstep,
+                     active_is_idx=False, verbose=None):
+    if not is_fixed_orient(forward):
+        logger.info('combining the current components...')
+        X = combine_xyz(X)
+
+    if not active_is_idx:
+        active_idx = np.where(active_set)[0]
+    else:
+        active_idx = active_set
+
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    if n_dip_per_pos > 1:
+        active_idx = np.unique(active_idx // n_dip_per_pos)
+
+    src = forward['src']
+
+    n_lh_points = len(src[0]['vertno'])
+    lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]
+    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points] -
+                                 n_lh_points]
+    vertices = [lh_vertno, rh_vertno]
+    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
+    return stc
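+
+# Sketch of the hemisphere split above (illustrative, synthetic numbers):
+# active indices below the number of left-hemisphere points select lh
+# vertices, the remainder are shifted into the rh numbering:
+#
+#     active_idx = np.array([1, 4])
+#     n_lh_points = 3
+#     active_idx[active_idx < n_lh_points]                   # array([1])
+#     active_idx[active_idx >= n_lh_points] - n_lh_points    # array([1])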
+
+
+@verbose
+def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
+               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
+               debias=True, time_pca=True, weights=None, weights_min=None,
+               solver='auto', n_mxne_iter=1, return_residual=False,
+               verbose=None):
+    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE)
+
+    Compute L1/L2 mixed-norm solution or L0.5/L2 mixed-norm solution
+    on evoked data.
+
+    References:
+    Gramfort A., Kowalski M. and Hamalainen M.,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Strohmeier D., Haueisen J., and Gramfort A.,
+    Improved MEG/EEG source localization with reweighted mixed-norms,
+    4th International Workshop on Pattern Recognition in Neuroimaging,
+    Tuebingen, 2014
+
+    Parameters
+    ----------
+    evoked : instance of Evoked or list of instances of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha : float
+        Regularization parameter.
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter.
+    active_set_size : int | None
+        Size of active set increment. If None, no active set strategy is used.
+    pca : bool
+        If True, the rank of the data is reduced to its true dimension.
+    debias : bool
+        Remove coefficient amplitude bias due to L1 penalty.
+    time_pca : bool or int
+        If True, the rank of the concatenated epochs is reduced to
+        its true dimension. If an int, the rank is limited to that value.
+    weights : None | array | SourceEstimate
+        Weight for penalty in mixed_norm. Can be None, a
+        1d array of length n_sources, or a SourceEstimate obtained e.g.
+        with wMNE, dSPM, or fMRI.
+    weights_min : float
+        Sources for which the weight is smaller than weights_min are
+        not considered in the estimation.
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization. 'prox' stands for
+        proximal iterations using the FISTA algorithm, 'cd' uses
+        coordinate descent, and 'bcd' applies block coordinate descent.
+        'cd' is only available for fixed orientation.
+    n_mxne_iter : int
+        The number of MxNE iterations. If > 1, iterative reweighting
+        is applied.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | list of SourceEstimate
+        Source time courses for each evoked data passed as input.
+    residual : instance of Evoked
+        The residual a.k.a. data not explained by the sources.
+        Only returned if return_residual is True.
+
+    See Also
+    --------
+    tf_mixed_norm
+    """
+    if n_mxne_iter < 1:
+        raise ValueError('MxNE has to be computed at least 1 time. '
+                         'Requires n_mxne_iter > 0. '
+                         'Got n_mxne_iter = %d.' % n_mxne_iter)
+
+    if not isinstance(evoked, list):
+        evoked = [evoked]
+
+    _check_reference(evoked[0])
+
+    all_ch_names = evoked[0].ch_names
+    if not all(all_ch_names == evoked[i].ch_names
+               for i in range(1, len(evoked))):
+        raise Exception('All the datasets must have the same good channels.')
+
+    # put the forward solution in fixed orientation if it's not already
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked[0].info, noise_cov, pca, depth, loose, weights,
+        weights_min)
+
+    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
+    M = np.concatenate([e.data[sel] for e in evoked], axis=1)
+
+    # Whiten data
+    logger.info('Whitening data matrix.')
+    M = np.dot(whitener, M)
+
+    if time_pca:
+        U, s, Vh = linalg.svd(M, full_matrices=False)
+        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
+            U = U[:, :time_pca]
+            s = s[:time_pca]
+            Vh = Vh[:time_pca]
+        M = U * s
+
+    # Scaling to make setting of alpha easy
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
+    alpha_max *= 0.01
+    gain /= alpha_max
+    source_weighting /= alpha_max
+
+    if n_mxne_iter == 1:
+        X, active_set, E = mixed_norm_solver(
+            M, gain, alpha, maxit=maxit, tol=tol,
+            active_set_size=active_set_size, n_orient=n_dip_per_pos,
+            debias=debias, solver=solver, verbose=verbose)
+    else:
+        X, active_set, E = iterative_mixed_norm_solver(
+            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
+            n_orient=n_dip_per_pos, active_set_size=active_set_size,
+            debias=debias, solver=solver, verbose=verbose)
+
+    if mask is not None:
+        active_set_tmp = np.zeros(len(mask), dtype=np.bool)
+        active_set_tmp[mask] = active_set
+        active_set = active_set_tmp
+        del active_set_tmp
+
+    if time_pca:
+        X = np.dot(X, Vh)
+
+    if active_set.sum() == 0:
+        raise Exception("No active dipoles found. alpha is too big.")
+
+    # Reapply weights to have correct unit
+    X = _reapply_source_weighting(X, source_weighting,
+                                  active_set, n_dip_per_pos)
+
+    stcs = list()
+    residual = list()
+    cnt = 0
+    for e in evoked:
+        tmin = e.times[0]
+        tstep = 1.0 / e.info['sfreq']
+        Xe = X[:, cnt:(cnt + len(e.times))]
+        stc = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
+        stcs.append(stc)
+        cnt += len(e.times)
+
+        if return_residual:
+            residual.append(_compute_residual(forward, e, Xe, active_set,
+                            gain_info))
+
+    logger.info('[done]')
+
+    if len(stcs) == 1:
+        out = stcs[0]
+        if return_residual:
+            residual = residual[0]
+    else:
+        out = stcs
+
+    if return_residual:
+        out = out, residual
+
+    return out
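+
+# Usage sketch (illustrative only; the file names are assumptions): with an
+# evoked response, a forward solution and a noise covariance loaded from
+# disk, an (ir)MxNE estimate could be computed along these lines:
+#
+#     import mne
+#     evoked = mne.read_evokeds('sample-ave.fif', condition=0,
+#                               baseline=(None, 0))
+#     forward = mne.read_forward_solution('sample-fwd.fif')
+#     cov = mne.read_cov('sample-cov.fif')
+#     stc, residual = mixed_norm(evoked, forward, cov, alpha=40.,
+#                                n_mxne_iter=10, return_residual=True)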
+
+
+def _window_evoked(evoked, size):
+    """Window evoked (size in seconds)"""
+    if isinstance(size, (float, int)):
+        lsize = rsize = float(size)
+    else:
+        lsize, rsize = size
+    evoked = evoked.copy()
+    sfreq = float(evoked.info['sfreq'])
+    lsize = int(lsize * sfreq)
+    rsize = int(rsize * sfreq)
+    lhann = signal.hann(lsize * 2)
+    rhann = signal.hann(rsize * 2)
+    window = np.r_[lhann[:lsize],
+                   np.ones(len(evoked.times) - lsize - rsize),
+                   rhann[-rsize:]]
+    evoked.data *= window[None, :]
+    return evoked
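+
+# Sketch (illustrative): with size=0.02 and sfreq=1000., the first and the
+# last 20 samples are ramped with half-Hann tapers while the middle of the
+# signal is left untouched:
+#
+#     lsize = int(0.02 * 1000.)               # 20 samples
+#     taper = signal.hann(2 * lsize)[:lsize]  # rises from 0 towards 1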
+
+
+@verbose
+def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
+                  loose=0.2, depth=0.8, maxit=3000, tol=1e-4,
+                  weights=None, weights_min=None, pca=True, debias=True,
+                  wsize=64, tstep=4, window=0.02, return_residual=False,
+                  verbose=None):
+    """Time-Frequency Mixed-norm estimate (TF-MxNE)
+
+    Compute L1/L2 + L1 mixed-norm solution on time frequency
+    dictionary. Works with evoked data.
+
+    References:
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
+    non-stationary source activations
+    Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
+    DOI: 10.1016/j.neuroimage.2012.12.051.
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Functional Brain Imaging with M/EEG Using Structured Sparsity in
+    Time-Frequency Dictionaries
+    Proceedings Information Processing in Medical Imaging
+    Lecture Notes in Computer Science, 2011, Volume 6801/2011,
+    600-611, DOI: 10.1007/978-3-642-22092-0_49
+    http://dx.doi.org/10.1007/978-3-642-22092-0_49
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to invert.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        Noise covariance to compute whitener.
+    alpha_space : float in [0, 100]
+        Regularization parameter for spatial sparsity. If larger than 100,
+        then no source will be active.
+    alpha_time : float in [0, 100]
+        Regularization parameter for temporal sparsity. If set to 0,
+        no temporal regularization is applied. In this case, TF-MxNE is
+        equivalent to MxNE with an L21 norm.
+    loose : float in [0, 1]
+        Value that weights the source variances of the dipole components
+        that are parallel (tangential) to the cortical surface. If loose
+        is 0 or None then the solution is computed with fixed orientation.
+        If loose is 1, it corresponds to free orientations.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    maxit : int
+        Maximum number of iterations.
+    tol : float
+        Tolerance parameter.
+    weights : None | array | SourceEstimate
+        Weight for penalty in mixed_norm. Can be None, a
+        1d array of length n_sources, or a SourceEstimate obtained e.g.
+        with wMNE, dSPM, or fMRI.
+    weights_min : float
+        Sources for which the weight is smaller than weights_min are
+        not considered in the estimation.
+    pca : bool
+        If True, the rank of the data is reduced to its true dimension.
+    debias : bool
+        Remove coefficient amplitude bias due to L1 penalty.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2).
+    window : float or (float, float)
+        Length in seconds of the time window used to attenuate edge
+        artifacts. It can be a single float or a tuple of two floats if
+        the left and right window lengths differ.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : instance of SourceEstimate
+        Source time courses.
+    residual : instance of Evoked
+        The residual a.k.a. data not explained by the sources.
+        Only returned if return_residual is True.
+
+    See Also
+    --------
+    mixed_norm
+    """
+    _check_reference(evoked)
+
+    all_ch_names = evoked.ch_names
+    info = evoked.info
+
+    if (alpha_space < 0.) or (alpha_space > 100.):
+        raise Exception('alpha_space must be in range [0, 100].'
+                        ' Got alpha_space = %f' % alpha_space)
+
+    if (alpha_time < 0.) or (alpha_time > 100.):
+        raise Exception('alpha_time must be in range [0, 100].'
+                        ' Got alpha_time = %f' % alpha_time)
+
+    # put the forward solution in fixed orientation if it's not already
+    if loose is None and not is_fixed_orient(forward):
+        forward = deepcopy(forward)
+        _to_fixed_ori(forward)
+
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked.info, noise_cov, pca, depth, loose, weights,
+        weights_min)
+
+    if window is not None:
+        evoked = _window_evoked(evoked, window)
+
+    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
+    M = evoked.data[sel]
+
+    # Whiten data
+    logger.info('Whitening data matrix.')
+    M = np.dot(whitener, M)
+
+    # Scaling to make setting of alpha easy
+    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
+    alpha_max *= 0.01
+    gain /= alpha_max
+    source_weighting /= alpha_max
+
+    X, active_set, E = tf_mixed_norm_solver(
+        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
+        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
+        log_objective=False, debias=debias)
+
+    if active_set.sum() == 0:
+        raise Exception("No active dipoles found. "
+                        "alpha_space/alpha_time are too big.")
+
+    if mask is not None:
+        active_set_tmp = np.zeros(len(mask), dtype=np.bool)
+        active_set_tmp[mask] = active_set
+        active_set = active_set_tmp
+        del active_set_tmp
+
+    X = _reapply_source_weighting(
+        X, source_weighting, active_set, n_dip_per_pos)
+
+    if return_residual:
+        residual = _compute_residual(
+            forward, evoked, X, active_set, gain_info)
+
+    stc = _make_sparse_stc(
+        X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])
+
+    logger.info('[done]')
+
+    if return_residual:
+        out = stc, residual
+    else:
+        out = stc
+
+    return out
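+
+# Usage sketch (illustrative only; evoked, forward and cov are assumed to
+# be prepared as for mixed_norm above): a TF-MxNE estimate with both
+# spatial and temporal sparsity could be computed as:
+#
+#     stc = tf_mixed_norm(evoked, forward, cov, alpha_space=30.,
+#                         alpha_time=1., wsize=64, tstep=4)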
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_optim.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_optim.py
new file mode 100644
index 0000000..c3f929e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/mxne_optim.py
@@ -0,0 +1,1046 @@
+from __future__ import print_function
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at gmail.com>
+#
+# License: Simplified BSD
+
+from copy import deepcopy
+import warnings
+from math import sqrt, ceil
+import numpy as np
+from scipy import linalg
+
+from .mxne_debiasing import compute_bias
+from ..utils import logger, verbose, sum_squared
+from ..time_frequency.stft import stft_norm2, stft, istft
+from ..externals.six.moves import xrange as range
+
+
+def groups_norm2(A, n_orient):
+    """Compute squared L2 norms of groups in place"""
+    n_positions = A.shape[0] // n_orient
+    return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
+
+
+def norm_l2inf(A, n_orient, copy=True):
+    """L2-inf norm"""
+    if A.size == 0:
+        return 0.0
+    if copy:
+        A = A.copy()
+    return sqrt(np.max(groups_norm2(A, n_orient)))
+
+
+def norm_l21(A, n_orient, copy=True):
+    """L21 norm"""
+    if A.size == 0:
+        return 0.0
+    if copy:
+        A = A.copy()
+    return np.sum(np.sqrt(groups_norm2(A, n_orient)))
+
+
+def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
+    """Proximity operator for the L21 norm
+
+    L2 over columns and L1 over rows => groups contain n_orient rows.
+
+    It can optionally take into account the negative frequencies
+    when complex values are passed and is_stft=True.
+
+    Example
+    -------
+    >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
+    >>> Y = np.r_[Y, np.zeros_like(Y)]
+    >>> print(Y)
+    [[ 0.  4.  3.  0.  0.]
+     [ 0.  4.  3.  0.  0.]
+     [ 0.  0.  0.  0.  0.]
+     [ 0.  0.  0.  0.  0.]]
+    >>> Yp, active_set = prox_l21(Y, 2, 2)
+    >>> print(Yp)
+    [[ 0.          2.86862915  2.15147186  0.          0.        ]
+     [ 0.          2.86862915  2.15147186  0.          0.        ]]
+    >>> print(active_set)
+    [ True  True False False]
+    """
+    if len(Y) == 0:
+        return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
+    if shape is not None:
+        shape_init = Y.shape
+        Y = Y.reshape(*shape)
+    n_positions = Y.shape[0] // n_orient
+
+    if is_stft:
+        rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
+    else:
+        rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
+                                                        -1).sum(axis=1))
+    # Ensure shrink is >= 0 while avoiding any division by zero
+    shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
+    active_set = shrink > 0.0
+    if n_orient > 1:
+        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+        shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
+    Y = Y[active_set]
+    if shape is None:
+        Y *= shrink[active_set][:, np.newaxis]
+    else:
+        Y *= shrink[active_set][:, np.newaxis, np.newaxis]
+        Y = Y.reshape(-1, *shape_init[1:])
+    return Y, active_set
+
+
+def prox_l1(Y, alpha, n_orient):
+    """Proximity operator for the L1 norm with multiple orientation support
+
+    L2 over orientation and L1 over position (space + time)
+
+    Example
+    -------
+    >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
+    >>> Y = np.r_[Y, np.zeros_like(Y)]
+    >>> print(Y)
+    [[ 1.  2.  3.  2.  0.]
+     [ 1.  2.  3.  2.  0.]
+     [ 0.  0.  0.  0.  0.]
+     [ 0.  0.  0.  0.  0.]]
+    >>> Yp, active_set = prox_l1(Y, 2, 2)
+    >>> print(Yp)
+    [[ 0.          0.58578644  1.58578644  0.58578644  0.        ]
+     [ 0.          0.58578644  1.58578644  0.58578644  0.        ]]
+    >>> print(active_set)
+    [ True  True False False]
+    """
+    n_positions = Y.shape[0] // n_orient
+    norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
+    # Ensure shrink is >= 0 while avoiding any division by zero
+    shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
+    shrink = shrink.reshape(-1, n_positions).T
+    active_set = np.any(shrink > 0.0, axis=1)
+    shrink = shrink[active_set]
+    if n_orient > 1:
+        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+    Y = Y[active_set]
+    if len(Y) > 0:
+        for o in range(n_orient):
+            Y[o::n_orient] *= shrink
+    return Y, active_set
+
+
+def dgap_l21(M, G, X, active_set, alpha, n_orient):
+    """Duality gap for the mixed norm inverse problem
+
+    For details see:
+    Gramfort A., Kowalski M. and Hamalainen M.,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_active)
+        The gain matrix a.k.a. lead field.
+    X : array, shape (n_active, n_times)
+        Sources.
+    active_set : array of bool
+        Mask of active sources.
+    alpha : float
+        Regularization parameter.
+    n_orient : int
+        Number of dipoles per location (typically 1 or 3).
+
+    Returns
+    -------
+    gap : float
+        Dual gap.
+    pobj : float
+        Primal cost.
+    dobj : float
+        Dual cost. gap = pobj - dobj.
+    R : array, shape (n_sensors, n_times)
+        Current residual (M - G * X).
+    """
+    GX = np.dot(G[:, active_set], X)
+    R = M - GX
+    penalty = norm_l21(X, n_orient, copy=True)
+    nR2 = sum_squared(R)
+    pobj = 0.5 * nR2 + alpha * penalty
+    dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
+    scaling = alpha / dual_norm
+    scaling = min(scaling, 1.0)
+    dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
+    gap = pobj - dobj
+    return gap, pobj, dobj, R
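+
+# Note (illustrative): the dual point is obtained by rescaling the residual
+# until it is dual-feasible, so gap = pobj - dobj >= 0 by weak duality.
+# This is what makes `gap < tol` a sound stopping criterion in the solvers
+# below, e.g. (with M, G, X, active_set and alpha as in those solvers):
+#
+#     gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha, n_orient=1)
+#     assert gap >= 0.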
+
+
+@verbose
+def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
+                            tol=1e-8, verbose=None, init=None, n_orient=1):
+    """Solves L21 inverse problem with proximal iterations and FISTA"""
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+
+    if n_sources < n_sensors:
+        gram = np.dot(G.T, G)
+        GTM = np.dot(G.T, M)
+    else:
+        gram = None
+
+    if init is None:
+        X = 0.0
+        R = M.copy()
+        if gram is not None:
+            R = np.dot(G.T, R)
+    else:
+        X = init
+        if gram is None:
+            R = M - np.dot(G, X)
+        else:
+            R = GTM - np.dot(gram, X)
+
+    t = 1.0
+    Y = np.zeros((n_sources, n_times))  # FISTA aux variable
+    E = []  # track cost function
+
+    active_set = np.ones(n_sources, dtype=np.bool)  # start with full AS
+
+    for i in range(maxit):
+        X0, active_set_0 = X, active_set  # store previous values
+        if gram is None:
+            Y += np.dot(G.T, R) / lipschitz_constant  # ISTA step
+        else:
+            Y += R / lipschitz_constant  # ISTA step
+        X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
+
+        t0 = t
+        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
+        Y.fill(0.0)
+        dt = ((t0 - 1.0) / t)
+        Y[active_set] = (1.0 + dt) * X
+        Y[active_set_0] -= dt * X0
+        Y_as = active_set_0 | active_set
+
+        if gram is None:
+            R = M - np.dot(G[:, Y_as], Y[Y_as])
+        else:
+            R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
+
+        gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
+        E.append(pobj)
+        logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
+        if gap < tol:
+            logger.debug('Convergence reached! (gap: %s < %s)' % (gap, tol))
+            break
+    return X, active_set, E
+
+
+@verbose
+def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
+                          tol=1e-8, verbose=None, init=None, n_orient=1):
+    """Solves L21 inverse problem with coordinate descent"""
+    from sklearn.linear_model.coordinate_descent import MultiTaskLasso
+
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+
+    if init is not None:
+        init = init.T
+
+    clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
+                         fit_intercept=False, max_iter=maxit,
+                         warm_start=True)
+    clf.coef_ = init
+    clf.fit(G, M)
+
+    X = clf.coef_.T
+    active_set = np.any(X, axis=1)
+    X = X[active_set]
+    gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
+    return X, active_set, pobj
+
+
+@verbose
+def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
+                           tol=1e-8, verbose=None, init=None, n_orient=1):
+    """Solves L21 inverse problem with block coordinate descent"""
+    # First make G fortran for faster access to blocks of columns
+    G = np.asfortranarray(G)
+
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+    n_positions = n_sources // n_orient
+
+    if init is None:
+        X = np.zeros((n_sources, n_times))
+        R = M.copy()
+    else:
+        X = init
+        R = M - np.dot(G, X)
+
+    E = []  # track cost function
+
+    active_set = np.zeros(n_sources, dtype=np.bool)  # start with empty AS
+
+    alpha_lc = alpha / lipschitz_constant
+
+    for i in range(maxit):
+        for j in range(n_positions):
+            idx = slice(j * n_orient, (j + 1) * n_orient)
+
+            G_j = G[:, idx]
+            X_j = X[idx]
+
+            X_j_new = np.dot(G_j.T, R) / lipschitz_constant[j]
+
+            was_non_zero = np.any(X_j)
+            if was_non_zero:
+                R += np.dot(G_j, X_j)
+                X_j_new += X_j
+
+            block_norm = linalg.norm(X_j_new, 'fro')
+            if block_norm <= alpha_lc[j]:
+                X_j.fill(0.)
+                active_set[idx] = False
+            else:
+                shrink = np.maximum(1.0 - alpha_lc[j] / block_norm, 0.0)
+                X_j_new *= shrink
+                R -= np.dot(G_j, X_j_new)
+                X_j[:] = X_j_new
+                active_set[idx] = True
+
+        gap, pobj, dobj, _ = dgap_l21(M, G, X[active_set], active_set, alpha,
+                                      n_orient)
+        E.append(pobj)
+        logger.debug("Iteration %d :: pobj %f :: dgap %f :: n_active %d" % (
+                     i + 1, pobj, gap, np.sum(active_set) / n_orient))
+
+        if gap < tol:
+            logger.debug('Convergence reached! (gap: %s < %s)' % (gap, tol))
+            break
+
+    X = X[active_set]
+
+    return X, active_set, E
+
+
+@verbose
+def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
+                      active_set_size=50, debias=True, n_orient=1,
+                      solver='auto'):
+    """Solves L1/L2 mixed-norm inverse problem with active set strategy
+
+    Algorithm is detailed in:
+    Gramfort A., Kowalski M. and Hamalainen M.,
+    Mixed-norm estimates for the M/EEG inverse problem using accelerated
+    gradient methods, Physics in Medicine and Biology, 2012
+    http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha : float
+        The regularization parameter. It should be between 0 and 100.
+        A value of 100 will lead to an empty active set (no active source).
+    maxit : int
+        The number of iterations.
+    tol : float
+        Tolerance on dual gap for convergence checking.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    active_set_size : int
+        Size of active set increase at each iteration.
+    debias : bool
+        Debias source estimates.
+    n_orient : int
+        The number of orientations (1 for fixed, 3 for free or loose).
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization.
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function over the iterations.
+    """
+    n_dipoles = G.shape[1]
+    n_positions = n_dipoles // n_orient
+    n_sensors, n_times = M.shape
+    alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
+    logger.info("-- ALPHA MAX : %s" % alpha_max)
+    alpha = float(alpha)
+
+    has_sklearn = True
+    try:
+        from sklearn.linear_model.coordinate_descent import MultiTaskLasso  # noqa
+    except ImportError:
+        has_sklearn = False
+
+    if solver == 'auto':
+        if has_sklearn and (n_orient == 1):
+            solver = 'cd'
+        else:
+            solver = 'bcd'
+
+    if solver == 'cd':
+        if n_orient == 1 and not has_sklearn:
+            warnings.warn("Scikit-learn >= 0.12 cannot be found. "
+                          "Using block coordinate descent instead of "
+                          "coordinate descent.")
+            solver = 'bcd'
+        if n_orient > 1:
+            warnings.warn("Coordinate descent is only available for fixed "
+                          "orientation. Using block coordinate descent "
+                          "instead of coordinate descent")
+            solver = 'bcd'
+
+    if solver == 'cd':
+        logger.info("Using coordinate descent")
+        l21_solver = _mixed_norm_solver_cd
+        lc = None
+    elif solver == 'bcd':
+        logger.info("Using block coordinate descent")
+        l21_solver = _mixed_norm_solver_bcd
+        G = np.asfortranarray(G)
+        if n_orient == 1:
+            lc = np.sum(G * G, axis=0)
+        else:
+            lc = np.empty(n_positions)
+            for j in range(n_positions):
+                G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+                lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
+    else:
+        logger.info("Using proximal iterations")
+        l21_solver = _mixed_norm_solver_prox
+        lc = 1.01 * linalg.norm(G, ord=2) ** 2
+
+    if active_set_size is not None:
+        E = list()
+        X_init = None
+        active_set = np.zeros(n_dipoles, dtype=np.bool)
+        idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
+        new_active_idx = idx_large_corr[-active_set_size:]
+        if n_orient > 1:
+            new_active_idx = (n_orient * new_active_idx[:, None] +
+                              np.arange(n_orient)[None, :]).ravel()
+        active_set[new_active_idx] = True
+        as_size = np.sum(active_set)
+        for k in range(maxit):
+            if solver == 'bcd':
+                lc_tmp = lc[active_set[::n_orient]]
+            elif solver == 'cd':
+                lc_tmp = None
+            else:
+                lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
+            X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
+                                   maxit=maxit, tol=tol, init=X_init,
+                                   n_orient=n_orient)
+            active_set[active_set] = as_.copy()
+            idx_old_active_set = np.where(active_set)[0]
+
+            gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha,
+                                          n_orient)
+            E.append(pobj)
+            logger.info("Iteration %d :: pobj %f :: dgap %f :: "
+                        "n_active_start %d :: n_active_end %d" % (
+                            k + 1, pobj, gap, as_size // n_orient,
+                            np.sum(active_set) // n_orient))
+            if gap < tol:
+                logger.info('Convergence reached! (gap: %s < %s)'
+                            % (gap, tol))
+                break
+
+            # add sources if not last iteration
+            if k < (maxit - 1):
+                idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
+                                            n_orient))
+                new_active_idx = idx_large_corr[-active_set_size:]
+                if n_orient > 1:
+                    new_active_idx = (n_orient * new_active_idx[:, None] +
+                                      np.arange(n_orient)[None, :])
+                    new_active_idx = new_active_idx.ravel()
+                active_set[new_active_idx] = True
+                idx_active_set = np.where(active_set)[0]
+                as_size = np.sum(active_set)
+                X_init = np.zeros((as_size, n_times), dtype=X.dtype)
+                idx = np.searchsorted(idx_active_set, idx_old_active_set)
+                X_init[idx] = X
+        else:
+            logger.warning('Did NOT converge! (gap: %s > %s)' % (gap, tol))
+    else:
+        X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
+                                      tol=tol, n_orient=n_orient, init=None)
+
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
+
+    return X, active_set, E
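+
+# Self-contained sketch on synthetic data (illustrative, not from the
+# upstream source): three active sources out of sixty, fixed orientation:
+#
+#     rng = np.random.RandomState(0)
+#     G = rng.randn(20, 60)                   # 20 sensors, 60 dipoles
+#     M = np.dot(G[:, :3], rng.randn(3, 10))  # data generated by 3 sources
+#     X, active_set, E = mixed_norm_solver(M, G, alpha=50., n_orient=1)
+#     # X has one row per True entry of active_set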
+
+
+@verbose
+def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
+                                tol=1e-8, verbose=None, active_set_size=50,
+                                debias=True, n_orient=1, solver='auto'):
+    """Solves L0.5/L2 mixed-norm inverse problem with active set strategy
+
+    Algorithm is detailed in:
+
+    Strohmeier D., Haueisen J., and Gramfort A.:
+    Improved MEG/EEG source localization with reweighted mixed-norms,
+    4th International Workshop on Pattern Recognition in Neuroimaging,
+    Tuebingen, 2014
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha : float
+        The regularization parameter. It should be between 0 and 100.
+        A value of 100 will lead to an empty active set (no active source).
+    n_mxne_iter : int
+        The number of MxNE iterations. If > 1, iterative reweighting
+        is applied.
+    maxit : int
+        The number of iterations.
+    tol : float
+        Tolerance on dual gap for convergence checking.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    active_set_size : int
+        Size of active set increase at each iteration.
+    debias : bool
+        Debias source estimates.
+    n_orient : int
+        The number of orientations (1 for fixed, 3 for free or loose).
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization.
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function over the iterations.
+    """
+    def g(w):
+        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
+
+    def gprime(w):
+        return 2. * np.repeat(g(w), n_orient).ravel()
+
+    E = list()
+
+    active_set = np.ones(G.shape[1], dtype=np.bool)
+    weights = np.ones(G.shape[1])
+    X = np.zeros((G.shape[1], M.shape[1]))
+
+    for k in range(n_mxne_iter):
+        X0 = X.copy()
+        active_set_0 = active_set.copy()
+        G_tmp = G[:, active_set] * weights[np.newaxis, :]
+
+        if active_set_size is not None:
+            if np.sum(active_set) > (active_set_size * n_orient):
+                X, _active_set, _ = mixed_norm_solver(
+                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                    maxit=maxit, tol=tol, active_set_size=active_set_size,
+                    solver=solver, verbose=verbose)
+            else:
+                X, _active_set, _ = mixed_norm_solver(
+                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                    maxit=maxit, tol=tol, active_set_size=None, solver=solver,
+                    verbose=verbose)
+        else:
+            X, _active_set, _ = mixed_norm_solver(
+                M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                maxit=maxit, tol=tol, active_set_size=None, solver=solver,
+                verbose=verbose)
+
+        logger.info('active set size %d' % (_active_set.sum() / n_orient))
+
+        if _active_set.sum() > 0:
+            active_set[active_set] = _active_set
+
+            # Reapply weights to have correct unit
+            X *= weights[_active_set][:, np.newaxis]
+            weights = gprime(X)
+            p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set],  X),
+                                      'fro') ** 2. + alpha * np.sum(g(X))
+            E.append(p_obj)
+
+            # Check convergence
+            if ((k >= 1) and np.all(active_set == active_set_0) and
+                    np.all(np.abs(X - X0) < tol)):
+                logger.info('Convergence reached after %d reweightings!' % k)
+                break
+        else:
+            active_set = np.zeros_like(active_set)
+            p_obj = 0.5 * linalg.norm(M) ** 2.
+            E.append(p_obj)
+            break
+
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    return X, active_set, E
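+
+# Sketch (illustrative): each outer iteration re-solves a weighted L21
+# problem with weights gprime(X) derived from the previous estimate, a
+# majorization-minimization scheme for the L0.5/L2 penalty, e.g. with the
+# synthetic M and G from the sketch above:
+#
+#     X, active_set, E = iterative_mixed_norm_solver(M, G, alpha=50.,
+#                                                    n_mxne_iter=5)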
+
+
+###############################################################################
+# TF-MxNE
+
+@verbose
+def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
+    """Compute the Lipschitz constant for FISTA
+
+    It uses a power iteration method.
+    """
+    n_times = M.shape[1]
+    n_points = G.shape[1]
+    iv = np.ones((n_points, n_times), dtype=np.float)
+    v = phi(iv)
+    L = 1e100
+    for it in range(100):
+        L_old = L
+        logger.info('Lipschitz estimation: iteration = %d' % it)
+        iv = np.real(phiT(v))
+        Gv = np.dot(G, iv)
+        GtGv = np.dot(G.T, Gv)
+        w = phi(GtGv)
+        L = np.max(np.abs(w))  # l_inf norm
+        v = w / L
+        if abs((L - L_old) / L_old) < tol:
+            break
+    return L
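+
+# Note (illustrative): this is a power iteration on the composite operator
+# z -> phi(G.T.dot(G.dot(phiT(z)))); its largest absolute coefficient
+# estimates the operator norm, i.e. the Lipschitz constant of the gradient
+# of the data-fit term in the TF domain. The 100 iterations are a hard cap;
+# the loop normally exits earlier through the relative tolerance `tol`.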
+
+
+def safe_max_abs(A, ia):
+    """Compute np.max(np.abs(A[ia])), handling the case of an empty A[ia]"""
+    if np.sum(ia):  # ia is not empty
+        return np.max(np.abs(A[ia]))
+    else:
+        return 0.
+
+
+def safe_max_abs_diff(A, ia, B, ib):
+    """Compute np.max(np.abs(A[ia] - B[ib])), handling empty selections"""
+    A = A[ia] if np.sum(ia) else 0.0
+    B = B[ib] if np.sum(ib) else 0.0
+    return np.max(np.abs(A - B))
+
+
+class _Phi(object):
+    """Utility class that makes the STFT operator phi callable without
+    using a lambda, which does not pickle"""
+    def __init__(self, wsize, tstep, n_coefs):
+        self.wsize = wsize
+        self.tstep = tstep
+        self.n_coefs = n_coefs
+
+    def __call__(self, x):
+        return stft(x, self.wsize, self.tstep,
+                    verbose=False).reshape(-1, self.n_coefs)
+
+
+class _PhiT(object):
+    """Utility class that makes the inverse STFT operator phi.T (istft)
+    callable without using a lambda, which does not pickle"""
+    def __init__(self, tstep, n_freq, n_step, n_times):
+        self.tstep = tstep
+        self.n_freq = n_freq
+        self.n_step = n_step
+        self.n_times = n_times
+
+    def __call__(self, z):
+        return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
+                     self.n_times)
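+
+# Sketch (illustrative): phi and phiT form an STFT/ISTFT pair, so for
+# compatible sizes phiT(phi(x)) should reconstruct x up to numerical
+# precision:
+#
+#     n_times, wsize, tstep = 64, 16, 4
+#     n_step = int(ceil(n_times / float(tstep)))
+#     n_freq = wsize // 2 + 1
+#     phi = _Phi(wsize, tstep, n_freq * n_step)
+#     phiT = _PhiT(tstep, n_freq, n_step, n_times)
+#     x = np.random.randn(2, n_times)
+#     np.allclose(phiT(phi(x)), x)   # expected: True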
+
+
+def norm_l21_tf(Z, shape, n_orient):
+    if Z.shape[0]:
+        Z2 = Z.reshape(*shape)
+        l21_norm = np.sqrt(stft_norm2(Z2).reshape(-1, n_orient).sum(axis=1))
+        l21_norm = l21_norm.sum()
+    else:
+        l21_norm = 0.
+    return l21_norm
+
+
+def norm_l1_tf(Z, shape, n_orient):
+    if Z.shape[0]:
+        n_positions = Z.shape[0] // n_orient
+        Z_ = np.sqrt(np.sum((np.abs(Z) ** 2.).reshape((n_orient, -1),
+                     order='F'), axis=0))
+        Z_ = Z_.reshape((n_positions, -1), order='F').reshape(*shape)
+        l1_norm = (2. * Z_.sum(axis=2).sum(axis=1) - np.sum(Z_[:, 0, :],
+                   axis=1) - np.sum(Z_[:, -1, :], axis=1))
+        l1_norm = l1_norm.sum()
+    else:
+        l1_norm = 0.
+    return l1_norm
+
+
+@verbose
+def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, alpha_space, alpha_time,
+                               lipschitz_constant, phi, phiT,
+                               wsize=64, tstep=4, n_orient=1,
+                               maxit=200, tol=1e-8, log_objective=True,
+                               perc=None, verbose=None):
+    # First make G fortran for faster access to blocks of columns
+    G = np.asfortranarray(G)
+
+    n_sensors, n_times = M.shape
+    n_sources = G.shape[1]
+    n_positions = n_sources // n_orient
+
+    n_step = int(ceil(n_times / float(tstep)))
+    n_freq = wsize // 2 + 1
+    shape = (-1, n_freq, n_step)
+
+    G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
+    R = M.copy()  # residual
+    active = np.where(active_set)[0][::n_orient] // n_orient
+    for idx in active:
+        R -= np.dot(G[idx], phiT(Z[idx]))
+
+    E = []  # track cost function
+
+    alpha_time_lc = alpha_time / lipschitz_constant
+    alpha_space_lc = alpha_space / lipschitz_constant
+
+    converged = False
+
+    for i in range(maxit):
+        val_norm_l21_tf = 0.0
+        val_norm_l1_tf = 0.0
+        max_diff = 0.0
+        active_set_0 = active_set.copy()
+        for j in range(n_positions):
+            ids = j * n_orient
+            ide = ids + n_orient
+
+            G_j = G[j]
+            Z_j = Z[j]
+            active_set_j = active_set[ids:ide]
+
+            Z0 = deepcopy(Z_j)
+
+            was_active = np.any(active_set_j)
+
+            # gradient step
+            GTR = np.dot(G_j.T, R) / lipschitz_constant[j]
+            X_j_new = GTR.copy()
+
+            if was_active:
+                X_j = phiT(Z_j)
+                R += np.dot(G_j, X_j)
+                X_j_new += X_j
+
+            rows_norm = linalg.norm(X_j_new, 'fro')
+            if rows_norm <= alpha_space_lc[j]:
+                if was_active:
+                    Z[j] = 0.0
+                    active_set_j[:] = False
+            else:
+                if was_active:
+                    Z_j_new = Z_j + phi(GTR)
+                else:
+                    Z_j_new = phi(GTR)
+
+                col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
+
+                if np.all(col_norm <= alpha_time_lc[j]):
+                    Z[j] = 0.0
+                    active_set_j[:] = False
+                else:
+                    # l1
+                    shrink = np.maximum(1.0 - alpha_time_lc[j] / np.maximum(
+                                        col_norm, alpha_time_lc[j]), 0.0)
+                    Z_j_new *= shrink[np.newaxis, :]
+
+                    # l21
+                    shape_init = Z_j_new.shape
+                    Z_j_new = Z_j_new.reshape(*shape)
+                    row_norm = np.sqrt(stft_norm2(Z_j_new).sum())
+                    if row_norm <= alpha_space_lc[j]:
+                        Z[j] = 0.0
+                        active_set_j[:] = False
+                    else:
+                        shrink = np.maximum(1.0 - alpha_space_lc[j] /
+                                            np.maximum(row_norm,
+                                            alpha_space_lc[j]), 0.0)
+                        Z_j_new *= shrink
+                        Z[j] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
+                        active_set_j[:] = True
+                        R -= np.dot(G_j, phiT(Z[j]))
+
+                        if log_objective:
+                            val_norm_l21_tf += norm_l21_tf(
+                                Z[j], shape, n_orient)
+                            val_norm_l1_tf += norm_l1_tf(
+                                Z[j], shape, n_orient)
+
+            max_diff = np.maximum(max_diff, np.max(np.abs(Z[j] - Z0)))
+
+        if log_objective:  # log cost function value
+            pobj = (0.5 * (R ** 2.).sum() + alpha_space * val_norm_l21_tf +
+                    alpha_time * val_norm_l1_tf)
+            E.append(pobj)
+            logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
+                        pobj, np.sum(active_set) / n_orient))
+        else:
+            logger.info("Iteration %d" % (i + 1))
+
+        if perc is not None:
+            if np.sum(active_set) / float(n_orient) <= perc * n_positions:
+                break
+
+        if np.array_equal(active_set, active_set_0):
+            if max_diff < tol:
+                logger.info("Convergence reached!")
+                converged = True
+                break
+
+    return Z, active_set, E, converged
+
+
+@verbose
+def _tf_mixed_norm_solver_bcd_active_set(
+        M, G, alpha_space, alpha_time, lipschitz_constant, phi, phiT,
+        Z_init=None, wsize=64, tstep=4, n_orient=1, maxit=200, tol=1e-8,
+        log_objective=True, perc=None, verbose=None):
+    """Solves TF L21+L1 inverse problem with BCD and active set approach
+
+    Algorithm is detailed in:
+
+    Strohmeier D., Gramfort A., and Haueisen J.:
+    MEG/EEG source imaging with a non-convex penalty in the time-
+    frequency domain,
+    5th International Workshop on Pattern Recognition in Neuroimaging,
+    Stanford University, 2015
+
+    Parameters
+    ----------
+    M : array
+        The data.
+    G : array
+        The forward operator.
+    alpha_space : float in [0, 100]
+        Regularization parameter for spatial sparsity. If larger than 100,
+        then no source will be active.
+    alpha_time : float in [0, 100]
+        Regularization parameter for temporal sparsity. If set to 0,
+        no temporal regularization is applied. In this case, TF-MxNE is
+        equivalent to MxNE with an L21 norm.
+    lipschitz_constant : float
+        The Lipschitz constant of the spatio-temporal linear operator.
+    phi : instance of _Phi
+        The TF operator.
+    phiT : instance of _PhiT
+        The transpose of the TF operator.
+    Z_init : None | array
+        The initialization of the TF coefficient matrix. If None, zeros
+        will be used for all coefficients.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2).
+    n_orient : int
+        The number of orientations (1 for fixed, 3 for free or loose).
+    maxit : int
+        The number of iterations.
+    tol : float
+        If the absolute difference between estimates at two successive
+        iterations is lower than tol, convergence is reached.
+    log_objective : bool
+        If True, the value of the minimized objective function is computed
+        and stored at every iteration.
+    perc : None | float in [0, 1]
+        The early stopping parameter used for BCD with active set approach.
+        If the active set size is smaller than perc * n_sources, the
+        subproblem limited to the active set is stopped. If None, full
+        convergence will be achieved.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function at each iteration. If log_objective
+        is False, it will be empty.
+    """
+    n_sources = G.shape[1]
+    n_positions = n_sources // n_orient
+
+    if Z_init is None:
+        Z = dict.fromkeys(range(n_positions), 0.0)
+        active_set = np.zeros(n_sources, dtype=np.bool)
+    else:
+        active_set = np.zeros(n_sources, dtype=np.bool)
+        active = list()
+        for i in range(n_positions):
+            if np.any(Z_init[i * n_orient:(i + 1) * n_orient]):
+                active_set[i * n_orient:(i + 1) * n_orient] = True
+                active.append(i)
+        Z = dict.fromkeys(range(n_positions), 0.0)
+        if len(active):
+            Z.update(dict(zip(active, np.vsplit(Z_init[active_set],
+                     len(active)))))
+
+    Z, active_set, E, _ = _tf_mixed_norm_solver_bcd_(
+        M, G, Z, active_set, alpha_space, alpha_time, lipschitz_constant,
+        phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=1,
+        tol=tol, log_objective=log_objective, perc=None, verbose=verbose)
+
+    while active_set.sum():
+        active = np.where(active_set)[0][::n_orient] // n_orient
+        Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
+        Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
+            M, G[:, active_set], Z_init,
+            np.ones(len(active) * n_orient, dtype=np.bool),
+            alpha_space, alpha_time,
+            lipschitz_constant[active_set[::n_orient]],
+            phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient,
+            maxit=maxit, tol=tol, log_objective=log_objective,
+            perc=0.5, verbose=verbose)
+        E += E_tmp
+        active = np.where(active_set)[0][::n_orient] // n_orient
+        Z_init = dict.fromkeys(range(n_positions), 0.0)
+        Z_init.update(dict(zip(active, Z.values())))
+        active_set[active_set] = as_
+        active_set_0 = active_set.copy()
+        Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
+            M, G, Z_init, active_set, alpha_space, alpha_time,
+            lipschitz_constant, phi, phiT, wsize=wsize, tstep=tstep,
+            n_orient=n_orient, maxit=1, tol=tol, log_objective=log_objective,
+            perc=None, verbose=verbose)
+        E += E_tmp
+        if converged:
+            if np.array_equal(active_set_0, active_set):
+                break
+
+    if active_set.sum():
+        Z = np.vstack([Z_ for Z_ in list(Z.values()) if np.any(Z_)])
+        X = phiT(Z)
+    else:
+        n_sensors, n_times = M.shape
+        n_step = int(ceil(n_times / float(tstep)))
+        n_freq = wsize // 2 + 1
+        Z = np.zeros((0, n_step * n_freq), dtype=np.complex)
+        X = np.zeros((0, n_times))
+
+    return X, Z, active_set, E
+
+
+@verbose
+def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
+                         n_orient=1, maxit=200, tol=1e-8, log_objective=True,
+                         debias=True, verbose=None):
+    """Solves TF L21+L1 inverse problem with BCD and active set approach
+
+    Algorithm is detailed in:
+
+    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
+    Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
+    non-stationary source activations
+    Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
+    DOI: 10.1016/j.neuroimage.2012.12.051.
+
+    Functional Brain Imaging with M/EEG Using Structured Sparsity in
+    Time-Frequency Dictionaries
+    Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
+    INFORMATION PROCESSING IN MEDICAL IMAGING
+    Lecture Notes in Computer Science, 2011, Volume 6801/2011,
+    600-611, DOI: 10.1007/978-3-642-22092-0_49
+    http://dx.doi.org/10.1007/978-3-642-22092-0_49
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha_space : float
+        The spatial regularization parameter. It should be between 0 and 100.
+    alpha_time : float
+        The temporal regularization parameter. The higher it is, the
+        smoother the estimated time series will be.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize and smaller than wsize/2) (default: wsize/2).
+    n_orient : int
+        The number of orientations (1 for fixed, 3 for free or loose).
+    maxit : int
+        The number of iterations.
+    tol : float
+        If the absolute difference between estimates at two successive
+        iterations is lower than tol, convergence is reached.
+    log_objective : bool
+        If True, the value of the minimized objective function is computed
+        and stored at every iteration.
+    debias : bool
+        Debias source estimates.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function at each iteration. If log_objective
+        is False, it will be empty.
+    """
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+    n_positions = n_sources // n_orient
+
+    n_step = int(ceil(n_times / float(tstep)))
+    n_freq = wsize // 2 + 1
+    n_coefs = n_step * n_freq
+    phi = _Phi(wsize, tstep, n_coefs)
+    phiT = _PhiT(tstep, n_freq, n_step, n_times)
+
+    if n_orient == 1:
+        lc = np.sum(G * G, axis=0)
+    else:
+        lc = np.empty(n_positions)
+        for j in range(n_positions):
+            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+            lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
+
+    logger.info("Using block coordinate descent and active set approach")
+    X, Z, active_set, E = _tf_mixed_norm_solver_bcd_active_set(
+        M, G, alpha_space, alpha_time, lc, phi, phiT, Z_init=None,
+        wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=maxit, tol=tol,
+        log_objective=log_objective, verbose=None)
+
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    return X, active_set, E
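+
+# A minimal, hedged usage sketch of the solver above (the shapes and
+# regularization values here are illustrative assumptions, not fixtures
+# from the package):
+#
+#     >>> import numpy as np
+#     >>> from mne.inverse_sparse.mxne_optim import tf_mixed_norm_solver
+#     >>> rng = np.random.RandomState(0)
+#     >>> G = rng.randn(20, 30)                    # n_sensors x n_dipoles
+#     >>> M = np.dot(G[:, :2], rng.randn(2, 64))   # data from 2 sources
+#     >>> X, active_set, E = tf_mixed_norm_solver(
+#     ...     M, G, alpha_space=30., alpha_time=1., wsize=32, tstep=4)
+#
+# With n_times=64 and tstep=4 the STFT grid has n_step = ceil(64 / 4) = 16
+# windows; wsize=32 gives n_freq = 32 // 2 + 1 = 17 frequency bins, so
+# n_coefs = 16 * 17 = 272 TF coefficients per source.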
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_gamma_map.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_gamma_map.py
new file mode 100644
index 0000000..2a36d87
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_gamma_map.py
@@ -0,0 +1,64 @@
+# Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+import os.path as op
+import numpy as np
+from nose.tools import assert_true
+from numpy.testing import assert_array_almost_equal
+
+from mne.datasets import testing
+from mne import read_cov, read_forward_solution, read_evokeds
+from mne.cov import regularize
+from mne.inverse_sparse import gamma_map
+from mne import pick_types_forward
+from mne.utils import run_tests_if_main, slow_test
+
+data_path = testing.data_path(download=False)
+fname_evoked = op.join(data_path, 'MEG', 'sample',
+                       'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+subjects_dir = op.join(data_path, 'subjects')
+
+
+@slow_test
+@testing.requires_testing_data
+def test_gamma_map():
+    """Test Gamma MAP inverse"""
+
+    forward = read_forward_solution(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
+    forward = pick_types_forward(forward, meg=False, eeg=True)
+    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
+    evoked.resample(50)
+    evoked.crop(tmin=0, tmax=0.3)
+
+    cov = read_cov(fname_cov)
+    cov = regularize(cov, evoked.info)
+
+    alpha = 0.2
+    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                    xyz_same_gamma=True, update_mode=1, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    assert_true(np.concatenate(stc.vertices)[idx] == 96397)
+
+    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                    xyz_same_gamma=False, update_mode=1, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    assert_true(np.concatenate(stc.vertices)[idx] == 82010)
+
+    # force fixed orientation
+    stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
+                         xyz_same_gamma=False, update_mode=2,
+                         loose=None, return_residual=True, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
+    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
+    # assert_true(np.concatenate(stc.vertices)[idx] == 83398)  # XXX FIX
+    assert_array_almost_equal(evoked.times, res.times)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_debiasing.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_debiasing.py
new file mode 100644
index 0000000..fb11586
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_debiasing.py
@@ -0,0 +1,22 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+
+from mne.inverse_sparse.mxne_debiasing import compute_bias
+
+
+def test_compute_debiasing():
+    """Test source amplitude debiasing"""
+    rng = np.random.RandomState(42)
+    G = rng.randn(10, 4)
+    X = rng.randn(4, 20)
+    debias_true = np.arange(1, 5, dtype=np.float)
+    M = np.dot(G, X * debias_true[:, np.newaxis])
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7)
+    assert_almost_equal(debias, debias_true, decimal=5)
+    debias = compute_bias(M, G, X, max_iter=10000, n_orient=2, tol=1e-5)
+    assert_almost_equal(debias, [1.8, 1.8, 3.72, 3.72], decimal=2)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_inverse.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_inverse.py
new file mode 100644
index 0000000..9b0c134
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_inverse.py
@@ -0,0 +1,114 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: Simplified BSD
+
+import os.path as op
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_allclose
+from nose.tools import assert_true, assert_equal
+
+from mne.datasets import testing
+from mne.label import read_label
+from mne import read_cov, read_forward_solution, read_evokeds
+from mne.inverse_sparse import mixed_norm, tf_mixed_norm
+from mne.minimum_norm import apply_inverse, make_inverse_operator
+from mne.utils import run_tests_if_main, slow_test
+
+
+data_path = testing.data_path(download=False)
+# NOTE: These use the ave and cov from sample dataset (no _trunc)
+fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+label = 'Aud-rh'
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_mxne_inverse():
+    """Test (TF-)MxNE inverse computation"""
+    # Read noise covariance matrix
+    cov = read_cov(fname_cov)
+
+    # Handling average file
+    loose = None
+    depth = 0.9
+
+    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
+    evoked.crop(tmin=-0.05, tmax=0.2)
+
+    evoked_l21 = evoked.copy()
+    evoked_l21.crop(tmin=0.08, tmax=0.1)
+    label = read_label(fname_label)
+
+    forward = read_forward_solution(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
+
+    # Reduce source space to make test computation faster
+    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
+                                             loose=loose, depth=depth,
+                                             fixed=True)
+    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
+                             method='dSPM')
+    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
+    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
+    weights_min = 0.5
+
+    # MxNE tests
+    alpha = 70  # spatial regularization parameter
+
+    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                          depth=depth, maxit=500, tol=1e-8,
+                          active_set_size=10, weights=stc_dspm,
+                          weights_min=weights_min, solver='prox')
+    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                        depth=depth, maxit=500, tol=1e-8, active_set_size=10,
+                        weights=stc_dspm, weights_min=weights_min,
+                        solver='cd')
+    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                         depth=depth, maxit=500, tol=1e-8, active_set_size=10,
+                         weights=stc_dspm, weights_min=weights_min,
+                         solver='bcd')
+    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
+    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
+
+    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
+    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
+    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
+    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
+    assert_true(stc_prox.vertices[1][0] in label.vertices)
+    assert_true(stc_cd.vertices[1][0] in label.vertices)
+    assert_true(stc_bcd.vertices[1][0] in label.vertices)
+
+    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                        depth=depth, maxit=500, tol=1e-8,
+                        active_set_size=10, return_residual=True,
+                        solver='cd')
+    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
+    assert_true(stc.vertices[1][0] in label.vertices)
+
+    # irMxNE tests
+    stc = mixed_norm(evoked_l21, forward, cov, alpha,
+                     n_mxne_iter=5, loose=loose, depth=depth,
+                     maxit=500, tol=1e-8, active_set_size=10,
+                     solver='cd')
+    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
+    assert_true(stc.vertices[1][0] in label.vertices)
+    assert_equal(stc.vertices, [[63152], [79017]])
+
+    # Do with TF-MxNE for test memory savings
+    alpha_space = 60.  # spatial regularization parameter
+    alpha_time = 1.  # temporal regularization parameter
+
+    stc, _ = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
+                           loose=loose, depth=depth, maxit=100, tol=1e-4,
+                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
+                           weights_min=weights_min, return_residual=True)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
+    assert_true(stc.vertices[1][0] in label.vertices)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_optim.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_optim.py
new file mode 100644
index 0000000..ba49be7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/inverse_sparse/tests/test_mxne_optim.py
@@ -0,0 +1,196 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+import warnings
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+from numpy.testing import assert_allclose
+
+from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
+                                           tf_mixed_norm_solver,
+                                           iterative_mixed_norm_solver)
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _generate_tf_data():
+    n, p, t = 30, 40, 64
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    active_set = [0, 4]
+    times = np.linspace(0, 2 * np.pi, t)
+    X[0] = np.sin(times)
+    X[4] = -2 * np.sin(4 * times)
+    X[4, times <= np.pi / 2] = 0
+    X[4, times >= np.pi] = 0
+    M = np.dot(G, X)
+    M += 1 * rng.randn(*M.shape)
+    return M, G, active_set
+
+
+def test_l21_mxne():
+    """Test convergence of MxNE solver"""
+    n, p, t, alpha = 30, 40, 20, 1.
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    args = (M, G, alpha, 1000, 1e-8)
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=None,
+        debias=True, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=None,
+        debias=True, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
+    assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
+
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=2, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+
+    # suppress a coordinate-descent warning here
+    with warnings.catch_warnings(record=True):
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=2, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=5, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    with warnings.catch_warnings(record=True):  # coordinate-ascent warning
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=5, solver='cd')
+
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    assert_array_equal(X_hat_bcd, X_hat_cd)
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
+
+
+def test_tf_mxne():
+    """Test convergence of TF-MxNE solver"""
+    alpha_space = 10.
+    alpha_time = 5.
+
+    M, G, active_set = _generate_tf_data()
+
+    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
+        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
+        n_orient=1, tstep=4, wsize=32)
+    assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
+
+
+def test_tf_mxne_vs_mxne():
+    """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE"""
+    alpha_space = 60.
+    alpha_time = 0.
+
+    M, G, active_set = _generate_tf_data()
+
+    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
+        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
+        debias=False, n_orient=1, tstep=4, wsize=32)
+
+    # Also run L21 and check that we get the same
+    X_hat_l21, _, _ = mixed_norm_solver(
+        M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
+        active_set_size=None, debias=False)
+
+    assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
+
+
+def test_iterative_reweighted_mxne():
+    """Test convergence of irMxNE solver"""
+    n, p, t, alpha = 30, 40, 20, 1
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    X_hat_l21, _, _ = mixed_norm_solver(
+        M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
+        active_set_size=None, debias=False, solver='bcd')
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=False, solver='bcd')
+    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=False, solver='prox')
+    assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
+    assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
+
+    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+        debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
+
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+        debias=True, n_orient=2, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    # suppress a coordinate-descent warning here
+    with warnings.catch_warnings(record=True):
+        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+            debias=True, n_orient=2, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    assert_array_equal(X_hat_bcd, X_hat_cd, 5)
+
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True,
+        n_orient=5)
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    with warnings.catch_warnings(record=True):  # coordinate-ascent warning
+        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+            debias=True, n_orient=5, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    assert_array_equal(X_hat_bcd, X_hat_cd, 5)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/__init__.py
new file mode 100644
index 0000000..38b60f3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/__init__.py
@@ -0,0 +1,85 @@
+"""FIF module for IO with .fif files"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .open import fiff_open, show_fiff, _fiff_get_fid
+from .meas_info import (read_fiducials, write_fiducials, read_info, write_info,
+                        _empty_info)
+
+from .proj import make_eeg_average_ref_proj
+from .tag import _loc_to_coil_trans, _coil_trans_to_loc, _loc_to_eeg_loc
+from .base import _BaseRaw
+
+from . import array
+from . import base
+from . import brainvision
+from . import bti
+from . import constants
+from . import edf
+from . import egi
+from . import fiff
+from . import kit
+from . import pick
+
+from .array import RawArray
+from .brainvision import read_raw_brainvision
+from .bti import read_raw_bti
+from .edf import read_raw_edf
+from .egi import read_raw_egi
+from .kit import read_raw_kit, read_epochs_kit
+from .fiff import read_raw_fif
+
+# for backward compatibility
+from .fiff import RawFIF
+from .fiff import RawFIF as Raw
+from .base import concatenate_raws
+from .reference import (set_eeg_reference, set_bipolar_reference,
+                        add_reference_channels)
+from ..utils import deprecated
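+
+# A hedged example of the reader entry points collected above (the file
+# name below is a placeholder, not a shipped file):
+#
+#     >>> from mne.io import read_raw_fif
+#     >>> raw = read_raw_fif('my_raw.fif', preload=True)
+#     >>> raw.info['sfreq']  # sampling frequency read from the file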
+
+
+@deprecated('mne.io.get_chpi_positions is deprecated and will be removed in '
+            'v0.11, please use mne.get_chpi_positions')
+def get_chpi_positions(raw, t_step=None, verbose=None):
+    """Extract head positions
+
+    Note that the raw instance must have CHPI channels recorded.
+
+    Parameters
+    ----------
+    raw : instance of Raw | str
+        Raw instance to extract the head positions from. Can also be a
+        path to a Maxfilter log file (str).
+    t_step : float | None
+        Sampling interval to use when converting data. If None, it will
+        be automatically determined. By default, a sampling interval of
+        1 second is used if processing raw data. If processing a
+        Maxfilter log file, this must be None because the log file
+        itself will determine the sampling interval.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The digitized HPI head frame Y is related to the frame position X as:
+
+        Y = np.dot(rotation, X) + translation
+
+    Note that if a Maxfilter log file is being processed, the start time
+    may not use the same reference point as the rest of mne-python (i.e.,
+    it could be referenced relative to raw.first_samp or something else).
+    """
+    from ..chpi import get_chpi_positions
+    return get_chpi_positions(raw, t_step, verbose)
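+
+# A short, hedged sketch of the (deprecated) helper above; the file name
+# is a placeholder and assumes a recording with cHPI channels:
+#
+#     >>> import numpy as np
+#     >>> from mne.io import Raw, get_chpi_positions
+#     >>> raw = Raw('raw_with_chpi.fif')
+#     >>> translation, rotation, t = get_chpi_positions(raw)
+#     >>> # a head-frame point X mapped into position at time t[i]:
+#     >>> # Y = np.dot(rotation[i], X) + translation[i]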
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/__init__.py
new file mode 100644
index 0000000..112d5d8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/__init__.py
@@ -0,0 +1,5 @@
+"""Module to convert user data to FIF"""
+
+# Author: Eric Larson <larson.eric.d at gmail.com>
+
+from .array import RawArray
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/array.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/array.py
new file mode 100644
index 0000000..8231c61
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/array.py
@@ -0,0 +1,50 @@
+"""Tools for creating Raw objects from numpy arrays"""
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..base import _BaseRaw
+from ...utils import verbose, logger
+
+
+class RawArray(_BaseRaw):
+    """Raw object from numpy array
+
+    Parameters
+    ----------
+    data : array, shape (n_channels, n_times)
+        The channels' time series.
+    info : instance of Info
+        Info dictionary. Consider using `create_info` to populate
+        this structure.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    EpochsArray, EvokedArray, create_info
+    """
+    @verbose
+    def __init__(self, data, info, verbose=None):
+        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
+        data = np.asanyarray(data, dtype=dtype)
+
+        if data.ndim != 2:
+            raise ValueError('Data must be a 2D array of shape (n_channels, '
+                             'n_samples)')
+
+        logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
+                    % (dtype.__name__, data.shape[0], data.shape[1]))
+
+        if len(data) != len(info['ch_names']):
+            raise ValueError('len(data) does not match len(info["ch_names"])')
+        assert len(info['ch_names']) == info['nchan']
+        super(RawArray, self).__init__(info, data, verbose=verbose)
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    self.first_samp, self.last_samp,
+                    float(self.first_samp) / info['sfreq'],
+                    float(self.last_samp) / info['sfreq']))
+        logger.info('Ready.')
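+
+# A minimal sketch of building a Raw object from scratch (channel names,
+# rate, and data are arbitrary illustrations):
+#
+#     >>> import numpy as np
+#     >>> from mne import create_info
+#     >>> from mne.io import RawArray
+#     >>> data = np.random.randn(2, 1000)   # 2 channels, 1000 samples
+#     >>> info = create_info(['EEG 001', 'EEG 002'], 250., ['eeg', 'eeg'])
+#     >>> raw = RawArray(data, info)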
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/tests/test_array.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/tests/test_array.py
new file mode 100644
index 0000000..3e58b1b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/array/tests/test_array.py
@@ -0,0 +1,114 @@
+from __future__ import print_function
+
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+import matplotlib
+
+from numpy.testing import assert_array_almost_equal, assert_allclose
+from nose.tools import assert_equal, assert_raises, assert_true
+from mne import find_events, Epochs, pick_types, concatenate_raws
+from mne.io import Raw
+from mne.io.array import RawArray
+from mne.io.meas_info import create_info, _kind_dict
+from mne.utils import _TempDir, slow_test, requires_version
+
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests might throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
+fif_fname = op.join(base_dir, 'test_raw.fif')
+
+
+@slow_test
+@requires_version('scipy', '0.12')
+def test_array_raw():
+    """Test creating raw from array
+    """
+    import matplotlib.pyplot as plt
+    tempdir = _TempDir()
+    # creating
+    raw = Raw(fif_fname).crop(2, 5, copy=False)
+    data, times = raw[:, :]
+    sfreq = raw.info['sfreq']
+    ch_names = [(ch[4:] if 'STI' not in ch else ch)
+                for ch in raw.info['ch_names']]  # change them, why not
+    # del raw
+    types = list()
+    for ci in range(102):
+        types.extend(('grad', 'grad', 'mag'))
+    types.extend(['stim'] * 9)
+    types.extend(['eeg'] * 60)
+    # wrong length
+    assert_raises(ValueError, create_info, ch_names, sfreq, types)
+    # bad entry
+    types.append('foo')
+    assert_raises(KeyError, create_info, ch_names, sfreq, types)
+    types[-1] = 'eog'
+    # default type
+    info = create_info(ch_names, sfreq)
+    assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
+    # use real types
+    info = create_info(ch_names, sfreq, types)
+    raw2 = RawArray(data, info)
+    data2, times2 = raw2[:, :]
+    assert_allclose(data, data2)
+    assert_allclose(times, times2)
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw2.copy(), raw2])
+    assert_equal(raw_concat.n_times, 2 * raw2.n_times)
+    assert_true('RawArray' in repr(raw2))
+
+    # saving
+    temp_fname = op.join(tempdir, 'raw.fif')
+    raw2.save(temp_fname)
+    raw3 = Raw(temp_fname)
+    data3, times3 = raw3[:, :]
+    assert_allclose(data, data3)
+    assert_allclose(times, times3)
+
+    # filtering
+    picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
+    assert_equal(len(picks), 4)
+    raw_lp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
+    raw_hp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
+    raw_bp = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
+    raw_bs = raw2.copy()
+    with warnings.catch_warnings(record=True):
+        raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
+    data, _ = raw2[picks, :]
+    lp_data, _ = raw_lp[picks, :]
+    hp_data, _ = raw_hp[picks, :]
+    bp_data, _ = raw_bp[picks, :]
+    bs_data, _ = raw_bs[picks, :]
+    sig_dec = 11
+    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
+    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
+
+    # plotting
+    raw2.plot()
+    raw2.plot_psd()
+    plt.close('all')
+
+    # epoching
+    events = find_events(raw2, stim_channel='STI 014')
+    events[:, 2] = 1
+    assert_true(len(events) > 2)
+    epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
+    epochs.plot_drop_log()
+    with warnings.catch_warnings(record=True):  # deprecation
+        warnings.simplefilter('always')
+        epochs.plot()
+    evoked = epochs.average()
+    evoked.plot()
+    plt.close('all')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/base.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/base.py
new file mode 100644
index 0000000..ab5e16e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/base.py
@@ -0,0 +1,2218 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy
+from copy import deepcopy
+import warnings
+import os
+import os.path as op
+
+import numpy as np
+from scipy import linalg
+
+from .constants import FIFF
+from .pick import pick_types, channel_type, pick_channels, pick_info
+from .meas_info import write_meas_info
+from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
+from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                 SetChannelsMixin, InterpolationMixin)
+from ..channels.montage import read_montage, _set_montage, Montage
+from .compensator import set_current_comp
+from .write import (start_file, end_file, start_block, end_block,
+                    write_dau_pack16, write_float, write_double,
+                    write_complex64, write_complex128, write_int,
+                    write_id, write_string, _get_split_size)
+
+from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
+                      notch_filter, band_stop_filter, resample,
+                      _resample_stim_channels)
+from ..fixes import in1d
+from ..parallel import parallel_func
+from ..utils import (_check_fname, _check_pandas_installed,
+                     _check_pandas_index_arguments,
+                     check_fname, _get_stim_channel, object_hash,
+                     logger, verbose, _time_mask, deprecated)
+from ..viz import plot_raw, plot_raw_psd
+from ..defaults import _handle_default
+from ..externals.six import string_types
+from ..event import find_events, concatenate_events
+
+
+class ToDataFrameMixin(object):
+    """Class to add to_data_frame capabilities to certain classes."""
+    def _get_check_picks(self, picks, picks_check):
+        if picks is None:
+            picks = list(range(self.info['nchan']))
+        else:
+            if not in1d(picks, np.arange(len(picks_check))).all():
+                raise ValueError('At least one picked channel is not present '
+                                 'in this object instance.')
+        return picks
+
+    def to_data_frame(self, picks=None, index=None, scale_time=1e3,
+                      scalings=None, copy=True, start=None, stop=None):
+        """Export data in tabular structure as a pandas DataFrame.
+
+        Columns and indices will depend on the object being converted.
+        Generally this will include as much relevant information as
+        possible for the data type being converted. This makes it easy
+        to convert data for use in packages that utilize dataframes,
+        such as statsmodels or seaborn.
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            If None, only MEG and EEG channels are kept;
+            otherwise the channel indices in picks are kept.
+        index : tuple of str | None
+            Column to be used as index for the data. Valid string options
+            are 'epoch', 'time' and 'condition'. If None, all three info
+            columns will be included in the table as categorical data.
+        scale_time : float
+            Scaling to be applied to time units.
+        scalings : dict | None
+            Scaling to be applied to the channels picked. If None, defaults to
+            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
+        copy : bool
+            If true, data will be copied. Else data may be modified in place.
+        start : int | None
+            If it is a Raw object, this defines a starting index for creating
+            the dataframe from a slice. The times will be interpolated from the
+            index and the sampling rate of the signal.
+        stop : int | None
+            If it is a Raw object, this defines a stop index for creating
+            the dataframe from a slice. The times will be interpolated from the
+            index and the sampling rate of the signal.
+
+        Returns
+        -------
+        df : instance of pandas.core.DataFrame
+            A dataframe suitable for usage with other
+            statistical/plotting/analysis packages. Column/Index values will
+            depend on the object type being converted, but should be
+            human-readable.
+        """
+        from ..epochs import _BaseEpochs
+        from ..evoked import Evoked
+        from ..source_estimate import _BaseSourceEstimate
+
+        pd = _check_pandas_installed()
+        mindex = list()
+        # Treat SourceEstimates special because they don't have the same info
+        if isinstance(self, _BaseSourceEstimate):
+            if self.subject is None:
+                default_index = ['time']
+            else:
+                default_index = ['subject', 'time']
+            data = self.data.T
+            times = self.times
+            shape = data.shape
+            mindex.append(('subject', np.repeat(self.subject, shape[0])))
+
+            if isinstance(self.vertices, list):
+                # surface source estimates
+                col_names = [i for e in [
+                    ['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)
+                     for vert in vertno]
+                    for ii, vertno in enumerate(self.vertices)]
+                    for i in e]
+            else:
+                # volume source estimates
+                col_names = ['VOL {0}'.format(vert) for vert in self.vertices]
+        elif isinstance(self, (_BaseEpochs, _BaseRaw, Evoked)):
+            picks = self._get_check_picks(picks, self.ch_names)
+            if isinstance(self, _BaseEpochs):
+                default_index = ['condition', 'epoch', 'time']
+                data = self.get_data()[:, picks, :]
+                times = self.times
+                n_epochs, n_picks, n_times = data.shape
+                data = np.hstack(data).T  # (time*epochs) x signals
+
+                # Multi-index creation
+                times = np.tile(times, n_epochs)
+                id_swapped = dict((v, k) for k, v in self.event_id.items())
+                names = [id_swapped[k] for k in self.events[:, 2]]
+                mindex.append(('condition', np.repeat(names, n_times)))
+                mindex.append(('epoch',
+                              np.repeat(np.arange(n_epochs), n_times)))
+                col_names = [self.ch_names[k] for k in picks]
+
+            elif isinstance(self, (_BaseRaw, Evoked)):
+                default_index = ['time']
+                if isinstance(self, _BaseRaw):
+                    data, times = self[picks, start:stop]
+                elif isinstance(self, Evoked):
+                    data = self.data[picks, :]
+                    times = self.times
+                    n_picks, n_times = data.shape
+                data = data.T
+                col_names = [self.ch_names[k] for k in picks]
+
+            types = [channel_type(self.info, idx) for idx in picks]
+            n_channel_types = 0
+            ch_types_used = []
+
+            scalings = _handle_default('scalings', scalings)
+            for t in scalings.keys():
+                if t in types:
+                    n_channel_types += 1
+                    ch_types_used.append(t)
+
+            for t in ch_types_used:
+                scaling = scalings[t]
+                idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+                if len(idx) > 0:
+                    data[:, idx] *= scaling
+        else:
+            # In case some other object gets this mixin w/o an explicit check
+            raise NameError('Object must be one of Raw, Epochs, Evoked, or '
+                            'SourceEstimate. This is {0}'.format(type(self)))
+
+        # Make sure that the time index is scaled correctly
+        times = np.round(times * scale_time)
+        mindex.append(('time', times))
+
+        if index is not None:
+            _check_pandas_index_arguments(index, default_index)
+        else:
+            index = default_index
+
+        if copy is True:
+            data = data.copy()
+
+        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
+
+        df = pd.DataFrame(data, columns=col_names)
+        for i, (k, v) in enumerate(mindex):
+            df.insert(i, k, v)
+        if index is not None:
+            if 'time' in index:
+                logger.info('Converting time column to int64...')
+                df['time'] = df['time'].astype(np.int64)
+            df.set_index(index, inplace=True)
+        if all(i in default_index for i in index):
+            df.columns.name = 'signal'
+        return df
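+
+    # A hedged sketch of the export (assuming ``raw`` is an existing Raw
+    # instance; the picks are arbitrary):
+    #
+    #     >>> df = raw.to_data_frame(picks=[0, 1])
+    #     >>> df.head()  # rows indexed by time in milliseconds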
+
+
+def _check_fun(fun, d, *args, **kwargs):
+    want_shape = d.shape
+    d = fun(d, *args, **kwargs)
+    if not isinstance(d, np.ndarray):
+        raise TypeError('Return value must be an ndarray')
+    if d.shape != want_shape:
+        raise ValueError('Return data must have shape %s not %s'
+                         % (want_shape, d.shape))
+    return d
+
+
+class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+               SetChannelsMixin, InterpolationMixin, ToDataFrameMixin):
+    """Base class for Raw data
+
+    Subclasses must provide the following methods:
+
+        * _read_segment_file(self, data, idx, offset, fi, start, stop,
+                             cals, mult)
+          (only needed for types that support on-demand disk reads)
+
+    The `_BaseRaw._raw_extras` list can contain whatever data is necessary for
+    such on-demand reads. For `RawFIF` this means a list of variables formerly
+    known as ``_rawdirs``.
+    """
+    @verbose
+    def __init__(self, info, preload=False,
+                 first_samps=(0,), last_samps=None,
+                 filenames=(None,), raw_extras=(None,),
+                 comp=None, orig_comp_grade=None,
+                 orig_format='double', dtype=np.float64,
+                 verbose=None):
+        # wait until the end to preload data, but triage here
+        if isinstance(preload, np.ndarray):
+            # some functions (e.g., filtering) only work w/64-bit data
+            if preload.dtype not in (np.float64, np.complex128):
+                raise RuntimeError('datatype must be float64 or complex128, '
+                                   'not %s' % preload.dtype)
+            if preload.dtype != dtype:
+                raise ValueError('preload and dtype must match')
+            self._data = preload
+            self.preload = True
+            last_samps = [self._data.shape[1] - 1]
+            load_from_disk = False
+        else:
+            if last_samps is None:
+                raise ValueError('last_samps must be given unless preload is '
+                                 'an ndarray')
+            if preload is False:
+                self.preload = False
+                load_from_disk = False
+            elif preload is not True and not isinstance(preload, string_types):
+                raise ValueError('bad preload: %s' % preload)
+            else:
+                load_from_disk = True
+        self._last_samps = np.array(last_samps)
+        self._first_samps = np.array(first_samps)
+        info._check_consistency()  # make sure subclass did a good job
+        self.info = info
+        cals = np.empty(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+        self.verbose = verbose
+        self._cals = cals
+        self._raw_extras = list(raw_extras)
+        self.comp = comp
+        self._orig_comp_grade = orig_comp_grade
+        self._filenames = list(filenames)
+        self.orig_format = orig_format
+        self._projectors = list()
+        self._projector = None
+        self._dtype_ = dtype
+        # If we have True or a string, actually do the preloading
+        if load_from_disk:
+            self._preload_data(preload)
+        self._update_times()
+
+    @property
+    def _dtype(self):
+        """dtype for loading data (property so subclasses can override)"""
+        # most classes only store real data, they won't need anything special
+        return self._dtype_
+
+    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+                      projector=None, verbose=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, optional
+            First sample to include (first is 0). If omitted, defaults to the
+            first sample in data.
+        stop : int, optional
+            First sample not to include.
+            If omitted, data is included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        data_buffer : array or str, optional
+            numpy array to fill with data read, must have the correct shape.
+            If str, a np.memmap with the correct data type will be used
+            to store the data.
+        projector : array
+            SSP operator to apply to the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        data : array, [channels x samples]
+            The data matrix (channels x samples).
+        times : array, [samples]
+            The time values corresponding to the samples.
+        """
+        #  Initial checks
+        start = int(start)
+        stop = self.n_times if stop is None else min([int(stop), self.n_times])
+
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(self.info['sfreq']),
+                     (stop - 1) / float(self.info['sfreq'])))
+
+        #  Initialize the data and calibration vector
+        n_sel_channels = self.info['nchan'] if sel is None else len(sel)
+        # convert sel to a slice if possible for efficiency
+        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
+            sel = slice(sel[0], sel[-1] + 1)
+        idx = slice(None, None, None) if sel is None else sel
+        data_shape = (n_sel_channels, stop - start)
+        dtype = self._dtype
+        if isinstance(data_buffer, np.ndarray):
+            if data_buffer.shape != data_shape:
+                raise ValueError('data_buffer has incorrect shape')
+            data = data_buffer
+        elif isinstance(data_buffer, string_types):
+            # use a memmap
+            data = np.memmap(data_buffer, mode='w+',
+                             dtype=dtype, shape=data_shape)
+        else:
+            data = np.zeros(data_shape, dtype=dtype)
+
+        # deal with having multiple files accessed by the raw object
+        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
+                                                   dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
+                                    np.greater_equal(stop - 1,
+                                                     cumul_lens[:-1]))
+
+        # set up cals and mult (cals, compensation, and projector)
+        cals = self._cals.ravel()[np.newaxis, :]
+        if self.comp is None and projector is None:
+            mult = None
+        else:
+            mult = list()
+            for ri in range(len(self._first_samps)):
+                if self.comp is not None:
+                    if projector is not None:
+                        mul = self.comp * cals
+                        mul = np.dot(projector[idx], mul)
+                    else:
+                        mul = self.comp[idx] * cals
+                elif projector is not None:
+                    mul = projector[idx] * cals
+                else:
+                    mul = np.diag(self._cals.ravel())[idx]
+                mult.append(mul)
+        cals = cals.T[idx]
+
+        # read from necessary files
+        offset = 0
+        for fi in np.nonzero(files_used)[0]:
+            start_file = self._first_samps[fi]
+            # first iteration (only) could start in the middle somewhere
+            if offset == 0:
+                start_file += start - cumul_lens[fi]
+            stop_file = np.min([stop - 1 - cumul_lens[fi] +
+                                self._first_samps[fi], self._last_samps[fi]])
+            if start_file < self._first_samps[fi] or \
+                    stop_file > self._last_samps[fi] or \
+                    stop_file < start_file or start_file > stop_file:
+                raise ValueError('Bad array indexing, could be a bug')
+
+            self._read_segment_file(data, idx, offset, fi,
+                                    start_file, stop_file, cals, mult)
+            offset += stop_file - start_file + 1
+
+        logger.info('[done]')
+        times = np.arange(start, stop) / self.info['sfreq']
+        return data, times
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a segment of data from a file
+
+        Only needs to be implemented for readers that support
+        ``preload=False``.
+
+        Parameters
+        ----------
+        data : ndarray, shape (len(idx), n_samp)
+            The data array. Should be modified inplace.
+        idx : ndarray | slice
+            The requested channel indices.
+        offset : int
+            Offset. Data should be stored in something like::
+
+                data[:, offset:offset + (stop - start + 1)] = r[idx]
+
+        fi : int
+            The file index that must be read from.
+        start : int
+            The start sample in the given file.
+        stop : int
+            The stop sample in the given file (inclusive).
+        cals : ndarray, shape (len(idx), 1)
+            Channel calibrations (already sub-indexed).
+        mult : ndarray, shape (len(idx), len(info['chs'])) | None
+            The compensation + projection + cals matrix, if applicable.
+        """
+        raise NotImplementedError
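+
+    # A schematic (hypothetical) subclass showing the contract above;
+    # ``_read_my_format`` is an assumed helper, not a real MNE function:
+    #
+    #     class RawMyFormat(_BaseRaw):
+    #         def _read_segment_file(self, data, idx, offset, fi,
+    #                                start, stop, cals, mult):
+    #             block = self._read_my_format(fi, start, stop)  # (n_chan, n_samp)
+    #             data[:, offset:offset + (stop - start + 1)] = block[idx] * cals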
+
+    @deprecated("This method has been renamed 'load_data' and will be removed "
+                "in v0.11.")
+    def preload_data(self, verbose=None):
+        """Preload raw data
+
+        Parameters
+        ----------
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object with data.
+
+        Notes
+        -----
+        This function will load raw data if it was not already preloaded.
+        If data were already preloaded, it will do nothing.
+        """
+        return self.load_data(verbose=verbose)
+
+    @verbose
+    def load_data(self, verbose=None):
+        """Load raw data
+
+        Parameters
+        ----------
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object with data.
+
+        Notes
+        -----
+        This function will load raw data if it was not already preloaded.
+        If data were already preloaded, it will do nothing.
+
+        .. versionadded:: 0.10.0
+        """
+        if not self.preload:
+            self._preload_data(True)
+        return self
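+
+    # Typical use (the file name is a placeholder):
+    #
+    #     >>> raw = Raw('my_raw.fif', preload=False)
+    #     >>> raw.load_data()  # read everything into memory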
+
+    def _preload_data(self, preload):
+        """This function actually preloads the data"""
+        data_buffer = preload if isinstance(preload, string_types) else None
+        self._data = self._read_segment(data_buffer=data_buffer)[0]
+        assert len(self._data) == self.info['nchan']
+        self.preload = True
+        self.close()
+
+    def _update_times(self):
+        """Helper to update times"""
+        self._times = np.arange(self.n_times) / float(self.info['sfreq'])
+        # make it immutable
+        self._times.flags.writeable = False
+
+    @property
+    def first_samp(self):
+        return self._first_samps[0]
+
+    @property
+    def last_samp(self):
+        return self.first_samp + sum(self._raw_lengths) - 1
+
+    @property
+    def _raw_lengths(self):
+        return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]
+
+    def __del__(self):
+        # remove file for memmap
+        if hasattr(self, '_data') and hasattr(self._data, 'filename'):
+            # First, close the file out; happens automatically on del
+            filename = self._data.filename
+            del self._data
+            # Now file can be removed
+            try:
+                os.remove(filename)
+            except OSError:
+                pass  # ignore file that no longer exists
+
+    def __enter__(self):
+        """ Entering with block """
+        return self
+
+    def __exit__(self, exception_type, exception_val, trace):
+        """ Exiting with block """
+        try:
+            self.close()
+        except Exception:
+            return exception_type, exception_val, trace
+
+    def __hash__(self):
+        if not self.preload:
+            raise RuntimeError('Cannot hash raw unless preloaded')
+        return object_hash(dict(info=self.info, data=self._data))
+
+    def _parse_get_set_params(self, item):
+        # make sure item is a tuple
+        if not isinstance(item, tuple):  # only channel selection passed
+            item = (item, slice(None, None, None))
+
+        if len(item) != 2:  # should be channels and time instants
+            raise RuntimeError("Unable to access raw data (need both channels "
+                               "and time)")
+
+        if isinstance(item[0], slice):
+            start = item[0].start if item[0].start is not None else 0
+            nchan = self.info['nchan']
+            stop = item[0].stop if item[0].stop is not None else nchan
+            step = item[0].step if item[0].step is not None else 1
+            sel = list(range(start, stop, step))
+        else:
+            sel = item[0]
+
+        if isinstance(item[1], slice):
+            time_slice = item[1]
+            start, stop, step = (time_slice.start, time_slice.stop,
+                                 time_slice.step)
+        else:
+            item1 = item[1]
+            # Let's do automated type conversion to integer here
+            if np.array(item[1]).dtype.kind == 'i':
+                item1 = int(item1)
+            if isinstance(item1, (int, np.integer)):
+                start, stop, step = item1, item1 + 1, 1
+            else:
+                raise ValueError('Must pass int or slice to __getitem__')
+
+        if start is None:
+            start = 0
+        if (step is not None) and (step != 1):
+            raise ValueError('step needs to be 1 : %d given' % step)
+
+        if isinstance(sel, (int, np.integer)):
+            sel = np.array([sel])
+
+        if sel is not None and len(sel) == 0:
+            raise ValueError("Empty channel list")
+
+        return sel, start, stop
+
+    def __getitem__(self, item):
+        """getting raw data content with python slicing"""
+        sel, start, stop = self._parse_get_set_params(item)
+        if self.preload:
+            data, times = self._data[sel, start:stop], self.times[start:stop]
+        else:
+            data, times = self._read_segment(start=start, stop=stop, sel=sel,
+                                             projector=self._projector,
+                                             verbose=self.verbose)
+        return data, times
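+
+    # Slicing sketch (channel and sample bounds are arbitrary):
+    #
+    #     >>> data, times = raw[:10, :1000]   # first 10 channels, 1000 samples
+    #     >>> data.shape                      # (10, 1000)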
+
+    def __setitem__(self, item, value):
+        """setting raw data content with python slicing"""
+        if not self.preload:
+            raise RuntimeError('Modifying data of Raw is only supported '
+                               'when preloading is used. Use preload=True '
+                               '(or string) in the constructor.')
+        sel, start, stop = self._parse_get_set_params(item)
+        # set the data
+        self._data[sel, start:stop] = value
+
+    def anonymize(self):
+        """Anonymize data
+
+        This function will remove info['subject_info'] if it exists.
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object. Operates in place.
+        """
+        self.info._anonymize()
+        return self
+
+    @verbose
+    def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):
+        """ Apply a function to a subset of channels.
+
+        The function "fun" is applied to the channels defined in "picks". The
+        data of the Raw object is modified inplace. If the function returns
+        a different data type (e.g. numpy.complex) it must be specified using
+        the dtype parameter, which causes the data type used for representing
+        the raw data to change.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
+              additional time points need to be temporarily stored in memory.
+
+        Note: If the data type changes (dtype != None), more memory is
+              required since the original and the converted data need to be
+              stored in memory.
+
+        Parameters
+        ----------
+        fun : function
+            A function to be applied to the channels. The first argument of
+            fun has to be a timeseries (numpy.ndarray). The function must
+            return a numpy.ndarray with the same size as the input.
+        picks : array-like of int | None
+            Indices of channels to apply the function to. If None, all
+            M-EEG channels are used.
+        dtype : numpy.dtype
+            Data type to use for raw data after applying the function. If None
+            the data type is not modified.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        *args :
+            Additional positional arguments to pass to fun (the first
+            positional argument of fun is the timeseries of a channel).
+        **kwargs :
+            Keyword arguments to pass to fun. Note that if "verbose" is passed
+            as a member of ``kwargs``, it will be consumed and will override
+            the default mne-python verbose level (see mne.verbose).
+        """
+        if not self.preload:
+            raise RuntimeError('Raw data needs to be preloaded. Use '
+                               'preload=True (or string) in the constructor.')
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, exclude=[])
+
+        if not callable(fun):
+            raise ValueError('fun needs to be a function')
+
+        data_in = self._data
+        if dtype is not None and dtype != self._data.dtype:
+            self._data = self._data.astype(dtype)
+
+        if n_jobs == 1:
+            # modify data inplace to save memory
+            for idx in picks:
+                self._data[idx, :] = _check_fun(fun, data_in[idx, :],
+                                                *args, **kwargs)
+        else:
+            # use parallel function
+            parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
+            data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)
+                                      for p in picks)
+            for pp, p in enumerate(picks):
+                self._data[p, :] = data_picks_new[pp]
+
+    @verbose
+    def apply_hilbert(self, picks, envelope=False, n_jobs=1, n_fft=None,
+                      verbose=None):
+        """ Compute analytic signal or envelope for a subset of channels.
+
+        If envelope=False, the analytic signal for the channels defined in
+        "picks" is computed and the data of the Raw object is converted to
+        a complex representation (the analytic signal is complex valued).
+
+        If envelope=True, the absolute value of the analytic signal for the
+        channels defined in "picks" is computed, resulting in the envelope
+        signal.
+
+        Note: DO NOT use envelope=True if you intend to compute an inverse
+              solution from the raw data. If you want to compute the
+              envelope in source space, use envelope=False and compute the
+              envelope after the inverse solution has been obtained.
+
+        Note: If envelope=False, more memory is required since the original
+              raw data as well as the analytic signal have temporarily to
+              be stored in memory.
+
+        Note: If n_jobs > 1 and envelope=True, more memory is required as
+              "len(picks) * n_times" additional time points need to be
+              temporarily stored in memory.
+
+        Parameters
+        ----------
+        picks : array-like of int
+            Indices of channels to apply the function to.
+        envelope : bool (default: False)
+            Compute the envelope signal of each channel.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        n_fft : int > self.n_times | None
+            Points to use in the FFT for Hilbert transformation. The signal
+            will be padded with zeros before computing Hilbert, then cut back
+            to original length. If None, n == self.n_times.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        The analytic signal "x_a(t)" of "x(t)" is::
+
+            x_a = F^{-1}(F(x) 2U) = x + i y
+
+        where "F" is the Fourier transform, "U" the unit step function,
+        and "y" the Hilbert transform of "x". One usage of the analytic
+        signal is the computation of the envelope signal, which is given by
+        "e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
+        MNE inverse solution, the enevlope in source space can be obtained
+        by computing the analytic signal in sensor space, applying the MNE
+        inverse, and computing the envelope in source space.
+
+        Also note that the n_fft parameter will allow you to pad the signal
+        with zeros before performing the Hilbert transform. This padding
+        is cut off afterwards, but it may produce slightly different results
+        (particularly around the edges). Use at your own risk.
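+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a preloaded Raw instance
+        and ``mne`` has been imported::
+
+            picks = mne.pick_types(raw.info, meg=True, eeg=True)
+            # replace each channel with its amplitude envelope
+            raw.apply_hilbert(picks, envelope=True)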
+        """
+        n_fft = self.n_times if n_fft is None else n_fft
+        if n_fft < self.n_times:
+            raise ValueError("n_fft must be greater than n_times")
+        if envelope is True:
+            self.apply_function(_my_hilbert, picks, None, n_jobs, n_fft,
+                                envelope=envelope)
+        else:
+            self.apply_function(_my_hilbert, picks, np.complex64, n_jobs,
+                                n_fft, envelope=envelope)
+
+    @verbose
+    def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
+               l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
+               method='fft', iir_params=None, verbose=None):
+        """Filter a subset of channels.
+
+        Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
+        filter to the channels selected by "picks". The data of the Raw
+        object is modified in place.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        l_freq and h_freq are the frequencies below which and above which,
+        respectively, to filter out of the data. Thus the uses are:
+
+            * ``l_freq < h_freq``: band-pass filter
+            * ``l_freq > h_freq``: band-stop filter
+            * ``l_freq is not None and h_freq is None``: high-pass filter
+            * ``l_freq is None and h_freq is not None``: low-pass filter
+
+        If n_jobs > 1, more memory is required as "len(picks) * n_times"
+        additional time points need to be temporarily stored in memory.
+
+        self.info['lowpass'] and self.info['highpass'] are only updated
+        with picks=None.
+
+        Parameters
+        ----------
+        l_freq : float | None
+            Low cut-off frequency in Hz. If None the data are only low-passed.
+        h_freq : float | None
+            High cut-off frequency in Hz. If None the data are only
+            high-passed.
+        picks : array-like of int | None
+            Indices of channels to filter. If None only the data (MEG/EEG)
+            channels will be filtered.
+        filter_length : str (Default: '10s') | int | None
+            Length of the filter to use. If None or "len(x) < filter_length",
+            the filter length used is len(x). Otherwise, if int, overlap-add
+            filtering with a filter of the specified length (in samples) is
+            used (faster for long signals). If str, a human-readable time in
+            units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+            to the shortest power-of-two length at least that duration.
+            Not used for 'iir' filters.
+        l_trans_bandwidth : float
+            Width of the transition band at the low cut-off frequency in Hz
+            (high pass or cutoff 1 in bandpass). Not used if 'order' is
+            specified in iir_params.
+        h_trans_bandwidth : float
+            Width of the transition band at the high cut-off frequency in Hz
+            (low pass or cutoff 2 in bandpass). Not used if 'order' is
+            specified in iir_params.
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly, CUDA is initialized, and method='fft'.
+        method : str
+            'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+            forward-backward filtering (via filtfilt).
+        iir_params : dict | None
+            Dictionary of parameters to use for IIR filtering.
+            See mne.filter.construct_iir_filter for details. If iir_params
+            is None and method="iir", 4th order Butterworth will be used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        See Also
+        --------
+        mne.Epochs.savgol_filter
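+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a preloaded Raw instance
+        (the band edges are illustrative)::
+
+            # zero-phase band-pass between 1 and 40 Hz on the data channels
+            raw.filter(1., 40.)
+            # high-pass only; h_freq=None leaves the upper edge open
+            raw.filter(0.1, None)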
+        """
+        if verbose is None:
+            verbose = self.verbose
+        fs = float(self.info['sfreq'])
+        if l_freq == 0:
+            l_freq = None
+        if h_freq is not None and h_freq > (fs / 2.):
+            h_freq = None
+        if l_freq is not None and not isinstance(l_freq, float):
+            l_freq = float(l_freq)
+        if h_freq is not None and not isinstance(h_freq, float):
+            h_freq = float(h_freq)
+
+        if not self.preload:
+            raise RuntimeError('Raw data needs to be preloaded to filter. Use '
+                               'preload=True (or string) in the constructor.')
+        if picks is None:
+            if 'ICA ' in ','.join(self.ch_names):
+                pick_parameters = dict(misc=True, ref_meg=False)
+            else:
+                pick_parameters = dict(meg=True, eeg=True, ref_meg=False)
+            picks = pick_types(self.info, exclude=[], **pick_parameters)
+            # let's be safe.
+            if len(picks) < 1:
+                raise RuntimeError('Could not find any valid channels for '
+                                   'your Raw object. Please contact the '
+                                   'MNE-Python developers.')
+
+            # update info if filter is applied to all data channels,
+            # and it's not a band-stop filter
+            if h_freq is not None:
+                if (l_freq is None or l_freq < h_freq) and \
+                   (self.info["lowpass"] is None or
+                   h_freq < self.info['lowpass']):
+                        self.info['lowpass'] = h_freq
+            if l_freq is not None:
+                if (h_freq is None or l_freq < h_freq) and \
+                   (self.info["highpass"] is None or
+                   l_freq > self.info['highpass']):
+                        self.info['highpass'] = l_freq
+        if l_freq is None and h_freq is not None:
+            logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
+            low_pass_filter(self._data, fs, h_freq,
+                            filter_length=filter_length,
+                            trans_bandwidth=h_trans_bandwidth, method=method,
+                            iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                            copy=False)
+        if l_freq is not None and h_freq is None:
+            logger.info('High-pass filtering at %0.2g Hz' % l_freq)
+            high_pass_filter(self._data, fs, l_freq,
+                             filter_length=filter_length,
+                             trans_bandwidth=l_trans_bandwidth, method=method,
+                             iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                             copy=False)
+        if l_freq is not None and h_freq is not None:
+            if l_freq < h_freq:
+                logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
+                            % (l_freq, h_freq))
+                self._data = band_pass_filter(
+                    self._data, fs, l_freq, h_freq,
+                    filter_length=filter_length,
+                    l_trans_bandwidth=l_trans_bandwidth,
+                    h_trans_bandwidth=h_trans_bandwidth,
+                    method=method, iir_params=iir_params, picks=picks,
+                    n_jobs=n_jobs, copy=False)
+            else:
+                logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
+                            % (h_freq, l_freq))
+                self._data = band_stop_filter(
+                    self._data, fs, h_freq, l_freq,
+                    filter_length=filter_length,
+                    l_trans_bandwidth=h_trans_bandwidth,
+                    h_trans_bandwidth=l_trans_bandwidth, method=method,
+                    iir_params=iir_params, picks=picks, n_jobs=n_jobs,
+                    copy=False)
+
+    @verbose
+    def notch_filter(self, freqs, picks=None, filter_length='10s',
+                     notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
+                     method='fft', iir_params=None,
+                     mt_bandwidth=None, p_value=0.05, verbose=None):
+        """Notch filter a subset of channels.
+
+        Applies a zero-phase notch filter to the channels selected by
+        "picks". The data of the Raw object is modified inplace.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
+              additional time points need to be temporarily stored in memory.
+
+        Parameters
+        ----------
+        freqs : float | array of float | None
+            Specific frequencies to filter out from data, e.g.,
+            np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
+            Europe. None can only be used with the mode 'spectrum_fit',
+            where an F test is used to find sinusoidal components.
+        picks : array-like of int | None
+            Indices of channels to filter. If None only the data (MEG/EEG)
+            channels will be filtered.
+        filter_length : str (Default: '10s') | int | None
+            Length of the filter to use. If None or "len(x) < filter_length",
+            the filter length used is len(x). Otherwise, if int, overlap-add
+            filtering with a filter of the specified length (in samples) is
+            used (faster for long signals). If str, a human-readable time in
+            units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
+            to the shortest power-of-two length at least that duration.
+            Not used for 'iir' filters.
+        notch_widths : float | array of float | None
+            Width of each stop band (centred at each freq in freqs) in Hz.
+            If None, freqs / 200 is used.
+        trans_bandwidth : float
+            Width of the transition band in Hz.
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly, CUDA is initialized, and method='fft'.
+        method : str
+            'fft' will use overlap-add FIR filtering, 'iir' will use IIR
+            forward-backward filtering (via filtfilt). 'spectrum_fit' will
+            use multi-taper estimation of sinusoidal components.
+        iir_params : dict | None
+            Dictionary of parameters to use for IIR filtering.
+            See mne.filter.construct_iir_filter for details. If iir_params
+            is None and method="iir", 4th order Butterworth will be used.
+        mt_bandwidth : float | None
+            The bandwidth of the multitaper windowing function in Hz.
+            Only used in 'spectrum_fit' mode.
+        p_value : float
+            p-value to use in F-test thresholding to determine significant
+            sinusoidal components to remove when method='spectrum_fit' and
+            freqs=None. Note that this will be Bonferroni corrected for the
+            number of frequencies, so large p-values may be justified.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For details, see mne.filter.notch_filter.
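+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a preloaded Raw instance
+        recorded with 50 Hz line noise (frequencies are illustrative)::
+
+            import numpy as np
+            # remove power-line noise at 50, 100, ..., 250 Hz
+            raw.notch_filter(np.arange(50, 251, 50))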
+        """
+        if verbose is None:
+            verbose = self.verbose
+        fs = float(self.info['sfreq'])
+        if picks is None:
+            if 'ICA ' in ','.join(self.ch_names):
+                pick_parameters = dict(misc=True)
+            else:
+                pick_parameters = dict(meg=True, eeg=True)
+            picks = pick_types(self.info, exclude=[], **pick_parameters)
+            # let's be safe.
+            if len(picks) < 1:
+                raise RuntimeError('Could not find any valid channels for '
+                                   'your Raw object. Please contact the '
+                                   'MNE-Python developers.')
+        if not self.preload:
+            raise RuntimeError('Raw data needs to be preloaded to filter. Use '
+                               'preload=True (or string) in the constructor.')
+
+        self._data = notch_filter(self._data, fs, freqs,
+                                  filter_length=filter_length,
+                                  notch_widths=notch_widths,
+                                  trans_bandwidth=trans_bandwidth,
+                                  method=method, iir_params=iir_params,
+                                  mt_bandwidth=mt_bandwidth, p_value=p_value,
+                                  picks=picks, n_jobs=n_jobs, copy=False)
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar', stim_picks=None,
+                 n_jobs=1, events=None, copy=False, verbose=None):
+        """Resample data channels.
+
+        Resamples all channels.
+
+        The Raw object has to be constructed using preload=True (or string).
+
+        .. warning:: The intended purpose of this function is primarily to
+                     speed up computations (e.g., projection calculation) when
+                     precise timing of events is not required, as downsampling
+                     raw data effectively jitters trigger timings. It is
+                     generally recommended not to epoch downsampled data,
+                     but instead to epoch the original data first and then
+                     downsample.
+                     See here for an example:
+
+                         https://gist.github.com/Eric89GXL/01642cb3789992fbca59
+
+                     If resampling the continuous data is desired, it is
+                     recommended to construct events using the original data.
+                     The event onsets can be jointly resampled with the raw
+                     data using the 'events' parameter.
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use.
+        npad : int
+            Number of samples to pad at the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        stim_picks : array of int | None
+            Stim channels. These channels are simply subsampled or
+            supersampled (without applying any filtering). This reduces
+            resampling artifacts in stim channels, but may lead to missing
+            triggers. If None, stim channels are automatically chosen using
+            mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).
+        n_jobs : int | str
+            Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
+            is installed properly and CUDA is initialized.
+        events : 2D array, shape (n_events, 3) | None
+            An optional event matrix. When specified, the onsets of the events
+            are resampled jointly with the data.
+        copy : bool
+            Whether to operate on a copy of the data (True) or modify data
+            in-place (False). Defaults to False.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        raw : instance of Raw
+            The resampled version of the raw object.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
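+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a preloaded Raw instance
+        with a stim channel and ``mne`` has been imported::
+
+            # resample data and event onsets jointly to 250 Hz
+            events = mne.find_events(raw)
+            raw, events = raw.resample(250., events=events)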
+        """
+        if not self.preload:
+            raise RuntimeError('Can only resample preloaded data')
+
+        inst = self.copy() if copy else self
+
+        # When no event object is supplied, some basic detection of dropped
+        # events is performed to generate a warning. Finding events can fail
+        # for a variety of reasons, e.g. if no stim channel is present or it is
+        # corrupted. This should not stop the resampling from working. The
+        # warning should simply not be generated in this case.
+        if events is None:
+            try:
+                original_events = find_events(inst)
+            except Exception:
+                pass
+
+        sfreq = float(sfreq)
+        o_sfreq = float(inst.info['sfreq'])
+
+        offsets = np.concatenate(([0], np.cumsum(inst._raw_lengths)))
+        new_data = list()
+
+        ratio = sfreq / o_sfreq
+
+        # set up stim channel processing
+        if stim_picks is None:
+            stim_picks = pick_types(inst.info, meg=False, ref_meg=False,
+                                    stim=True, exclude=[])
+        stim_picks = np.asanyarray(stim_picks)
+
+        for ri in range(len(inst._raw_lengths)):
+            data_chunk = inst._data[:, offsets[ri]:offsets[ri + 1]]
+            new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
+                                     n_jobs=n_jobs))
+            new_ntimes = new_data[ri].shape[1]
+
+            # In empirical testing, it was faster to resample all channels
+            # (above) and then replace the stim channels than it was to only
+            # resample the proper subset of channels and then use np.insert()
+            # to restore the stims.
+            if len(stim_picks) > 0:
+                stim_resampled = _resample_stim_channels(
+                    data_chunk[stim_picks], new_data[ri].shape[1],
+                    data_chunk.shape[1])
+                new_data[ri][stim_picks] = stim_resampled
+
+            inst._first_samps[ri] = int(inst._first_samps[ri] * ratio)
+            inst._last_samps[ri] = inst._first_samps[ri] + new_ntimes - 1
+            inst._raw_lengths[ri] = new_ntimes
+
+        inst._data = np.concatenate(new_data, axis=1)
+        inst.info['sfreq'] = sfreq
+        inst._update_times()
+
+        # See the comment above why we ignore all errors here.
+        if events is None:
+            try:
+                # Did we lose events?
+                resampled_events = find_events(inst)
+                if len(resampled_events) != len(original_events):
+                    warnings.warn(
+                        'Resampling of the stim channels caused event '
+                        'information to become unreliable. Consider finding '
+                        'events on the original data and passing the event '
+                        'matrix as a parameter.'
+                    )
+            except Exception:
+                pass
+
+            return inst
+        else:
+            if copy:
+                events = events.copy()
+
+            events[:, 0] = np.minimum(
+                np.round(events[:, 0] * ratio).astype(int),
+                inst._data.shape[1]
+            )
+            return inst, events
+
+    def crop(self, tmin=0.0, tmax=None, copy=True):
+        """Crop raw data file.
+
+        Limit the data from the raw file to go between specific times. Note
+        that the new tmin is assumed to be t=0 for all subsequently called
+        functions (e.g., time_as_index, or Epochs). New first_samp and
+        last_samp are set accordingly, and data are modified in-place when
+        called with copy=False.
+
+        Parameters
+        ----------
+        tmin : float
+            New start time in seconds (must be >= 0).
+        tmax : float | None
+            New end time in seconds of the data (cannot exceed data duration).
+        copy : bool
+            If False, Raw is cropped in place.
+
+        Returns
+        -------
+        raw : instance of Raw
+            The cropped raw object.
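+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance (times are
+        illustrative)::
+
+            # keep only the segment between 10 s and 60 s
+            cropped = raw.crop(tmin=10., tmax=60.)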
+        """
+        raw = self.copy() if copy is True else self
+        max_time = (raw.n_times - 1) / raw.info['sfreq']
+        if tmax is None:
+            tmax = max_time
+
+        if tmin > tmax:
+            raise ValueError('tmin must be less than tmax')
+        if tmin < 0.0:
+            raise ValueError('tmin must be >= 0')
+        elif tmax > max_time:
+            raise ValueError('tmax must be less than or equal to the max raw '
+                             'time (%0.4f sec)' % max_time)
+
+        smin, smax = np.where(_time_mask(self.times, tmin, tmax))[0][[0, -1]]
+        cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
+                                                   dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
+                                 np.greater_equal(smax, cumul_lens[:-1]))
+        keepers = np.where(keepers)[0]
+        raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
+        # Adjust first_samp of first used file!
+        raw._first_samps[0] += smin - cumul_lens[keepers[0]]
+        raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
+        raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
+        raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)
+                           if ri in keepers]
+        raw._filenames = [r for ri, r in enumerate(raw._filenames)
+                          if ri in keepers]
+        if raw.preload:
+            # slice and copy to avoid the reference to large array
+            raw._data = raw._data[:, smin:smax + 1].copy()
+        raw._update_times()
+        return raw
+
+    @verbose
+    def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
+             drop_small_buffer=False, proj=False, fmt='single',
+             overwrite=False, split_size='2GB', verbose=None):
+        """Save raw data to file
+
+        Parameters
+        ----------
+        fname : string
+            File name of the new dataset. This has to be a new filename
+            unless data have been preloaded. Filenames should end with
+            raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
+            or raw_tsss.fif.gz.
+        picks : array-like of int | None
+            Indices of channels to include. If None all channels are kept.
+        tmin : float | None
+            Time in seconds of first sample to save. If None first sample
+            is used.
+        tmax : float | None
+            Time in seconds of last sample to save. If None last sample
+            is used.
+        buffer_size_sec : float | None
+            Size of data chunks in seconds. If None, the buffer size of
+            the original file is used.
+        drop_small_buffer : bool
+            Whether to drop the last buffer. This is required by maxfilter
+            (SSS), which only accepts raw files with buffers of the same size.
+        proj : bool
+            If True the data is saved with the projections applied (active).
+            Note: If apply_proj() was used to apply the projections,
+            the projections will be active even if proj is False.
+        fmt : str
+            Format to use to save raw data. Valid options are 'double',
+            'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
+            16-bit integers, respectively. It is **strongly** recommended to
+            use 'single', as this is backward-compatible, and is standard for
+            maintaining precision. Note that using 'short' or 'int' may result
+            in loss of precision, complex data cannot be saved as 'short',
+            and neither complex data types nor real data stored as 'double'
+            can be loaded with the MNE command-line tools. See raw.orig_format
+            to determine the format the original data were stored in.
+        overwrite : bool
+            If True, the destination file (if it exists) will be overwritten.
+            If False (default), an error will be raised if the file exists.
+        split_size : string | int
+            Large raw files are automatically split into multiple pieces. This
+            parameter specifies the maximum size of each piece. If the
+            parameter is an integer, it specifies the size in bytes. It is
+            also possible to pass a human-readable string, e.g., 100MB.
+            Note: Due to FIFF file limitations, the maximum split size is 2GB.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        If Raw is a concatenation of several raw files, **be warned** that
+        only the measurement information from the first raw file is stored.
+        This likely means that certain operations with external tools may not
+        work properly on a saved concatenated file (e.g., probably some
+        or all forms of SSS). It is recommended not to concatenate and
+        then save raw files for this reason.
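+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance (the file
+        name is hypothetical but uses one of the required suffixes)::
+
+            raw.save('sample_audvis_raw.fif', overwrite=True)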
+        """
+        check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
+                                   'raw.fif.gz', 'raw_sss.fif.gz',
+                                   'raw_tsss.fif.gz'))
+
+        split_size = _get_split_size(split_size)
+
+        fname = op.realpath(fname)
+        if not self.preload and fname in self._filenames:
+            raise ValueError('You cannot save data to the same file.'
+                             ' Please use a different filename.')
+
+        if self.preload:
+            if np.iscomplexobj(self._data):
+                warnings.warn('Saving raw file with complex data. Loading '
+                              'with command-line MNE tools will not work.')
+
+        type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
+                         int=FIFF.FIFFT_INT,
+                         single=FIFF.FIFFT_FLOAT,
+                         double=FIFF.FIFFT_DOUBLE)
+        if fmt not in type_dict.keys():
+            raise ValueError('fmt must be "short", "int", "single", '
+                             'or "double"')
+        reset_dict = dict(short=False, int=False, single=True, double=True)
+        reset_range = reset_dict[fmt]
+        data_type = type_dict[fmt]
+
+        data_test = self[0, 0][0]
+        if fmt == 'short' and np.iscomplexobj(data_test):
+            raise ValueError('Complex data must be saved as "single" or '
+                             '"double", not "short"')
+
+        # check for file existence
+        _check_fname(fname, overwrite)
+
+        if proj:
+            info = copy.deepcopy(self.info)
+            projector, info = setup_proj(info)
+            activate_proj(info['projs'], copy=False)
+        else:
+            info = self.info
+            projector = None
+
+        # set the correct compensation grade and make inverse compensator
+        inv_comp = None
+        if self.comp is not None:
+            inv_comp = linalg.inv(self.comp)
+            set_current_comp(info, self._orig_comp_grade)
+
+        #
+        #   Set up the reading parameters
+        #
+
+        #   Convert to samples
+        start = int(np.floor(tmin * self.info['sfreq']))
+
+        if tmax is None:
+            stop = self.last_samp + 1 - self.first_samp
+        else:
+            stop = int(np.floor(tmax * self.info['sfreq']))
+        buffer_size = self._get_buffer_size(buffer_size_sec)
+
+        # write the raw file
+        _write_raw(fname, self, info, picks, fmt, data_type, reset_range,
+                   start, stop, buffer_size, projector, inv_comp,
+                   drop_small_buffer, split_size, 0, None)
+
+    def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
+             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
+             event_color='cyan', scalings=None, remove_dc=True, order='type',
+             show_options=False, title=None, show=True, block=False,
+             highpass=None, lowpass=None, filtorder=4, clipping=None):
+        """Plot raw data
+
+        Parameters
+        ----------
+        events : array | None
+            Events to show with vertical bars.
+        duration : float
+            Time window (in seconds) to plot at a time.
+        start : float
+            Initial time to show (can be changed dynamically once plotted).
+        n_channels : int
+            Number of channels to plot at once.
+        bgcolor : color object
+            Color of the background.
+        color : dict | color object | None
+            Color for the data traces. If None, defaults to::
+
+                dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
+                     emg='k', ref_meg='steelblue', misc='k', stim='k',
+                     resp='k', chpi='k')
+
+        bad_color : color object
+            Color to use for bad channels.
+        event_color : color object
+            Color to use for events.
+        scalings : dict | None
+            Scale factors for the traces. If None, defaults to::
+
+                dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                     emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
+                     resp=1, chpi=1e-4)
+
+        remove_dc : bool
+            If True remove DC component when plotting data.
+        order : 'type' | 'original' | array
+            Order in which to plot data. 'type' groups by channel type,
+            'original' plots in the order of ch_names, array gives the
+            indices to use in plotting.
+        show_options : bool
+            If True, a dialog for options related to projection is shown.
+        title : str | None
+            The title of the window. If None, the filename of the raw object
+            (or '<unknown>') will be displayed as the title.
+        show : bool
+            Show figures if True
+        block : bool
+            Whether to halt program execution until the figure is closed.
+            Useful for setting bad channels on the fly (click on line).
+            May not work on all systems / platforms.
+        highpass : float | None
+            Highpass to apply when displaying data.
+        lowpass : float | None
+            Lowpass to apply when displaying data.
+        filtorder : int
+            Filtering order. Note that for efficiency and simplicity,
+            filtering during plotting uses forward-backward IIR filtering,
+            so the effective filter order will be twice ``filtorder``.
+            Filtering the lines for display may also produce some edge
+            artifacts (at the left and right edges) of the signals
+            during display. Filtering requires scipy >= 0.10.
+        clipping : str | None
+            If None, channels are allowed to exceed their designated bounds in
+            the plot. If "clamp", then values are clamped to the appropriate
+            range for display, creating step-like artifacts. If "transparent",
+            then excessive values are not shown, creating gaps in the traces.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            Raw traces.
+
+        Notes
+        -----
+        The arrow keys (up/down/left/right) can typically be used to navigate
+        between channels and time ranges, but this depends on the backend
+        matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
+        The scaling can be adjusted with - and + (or =) keys. The viewport
+        dimensions can be adjusted with page up/page down and home/end keys.
+        Full-screen mode can be toggled with the F11 key. To mark or un-mark
+        a channel as bad, click on a relatively flat segment of the channel's
+        time series. The changes will be reflected immediately in the raw
+        object's
+        ``raw.info['bads']`` entry.
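+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance::
+
+            # browse 30 s at a time, blocking so bads can be marked
+            fig = raw.plot(duration=30.0, n_channels=30, block=True)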
+        """
+        return plot_raw(self, events, duration, start, n_channels, bgcolor,
+                        color, bad_color, event_color, scalings, remove_dc,
+                        order, show_options, title, show, block, highpass,
+                        lowpass, filtorder, clipping)
+
+    @verbose
+    def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
+                 proj=False, n_fft=2048, picks=None, ax=None,
+                 color='black', area_mode='std', area_alpha=0.33,
+                 n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
+        """Plot the power spectral density across channels
+
+        Parameters
+        ----------
+        tmin : float
+            Start time for calculations.
+        tmax : float
+            End time for calculations.
+        fmin : float
+            Start frequency to consider.
+        fmax : float
+            End frequency to consider.
+        proj : bool
+            Apply projection.
+        n_fft : int
+            Number of points to use in Welch FFT calculations.
+        picks : array-like of int | None
+            List of channels to use. Cannot be None if `ax` is supplied. If
+            both `picks` and `ax` are None, separate subplots will be created
+            for each standard channel type (`mag`, `grad`, and `eeg`).
+        ax : instance of matplotlib Axes | None
+            Axes to plot into. If None, axes will be created.
+        color : str | tuple
+            A matplotlib-compatible color to use.
+        area_mode : str | None
+            How to plot area. If 'std', the mean +/- 1 STD (across channels)
+            will be plotted. If 'range', the min and max (across channels)
+            will be plotted. Bad channels will be excluded from these
+            calculations. If None, no area will be plotted.
+        area_alpha : float
+            Alpha for the area.
+        n_overlap : int
+            The number of points of overlap between blocks. The default value
+            is 0 (no overlap).
+        dB : bool
+            If True, transform data to decibels.
+        show : bool
+            Call pyplot.show() at the end.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure with the power spectral density of the data channels.
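+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance::
+
+            # PSD of the first 60 s, up to the Nyquist frequency
+            fig = raw.plot_psd(tmax=60., fmax=raw.info['sfreq'] / 2.)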
+        """
+        return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
+                            proj=proj, n_fft=n_fft, picks=picks, ax=ax,
+                            color=color, area_mode=area_mode,
+                            area_alpha=area_alpha, n_overlap=n_overlap,
+                            dB=dB, show=show, n_jobs=n_jobs)
+
+    def time_as_index(self, times, use_first_samp=False, use_rounding=False):
+        """Convert time to indices
+
+        Parameters
+        ----------
+        times : list-like | float | int
+            List of numbers or a number representing points in time.
+        use_first_samp : boolean
+            If True, time is treated as relative to the session onset, else
+            as relative to the recording onset.
+        use_rounding : boolean
+            If True, use rounding (instead of truncation) when converting
+            times to indices. This can help avoid non-unique indices.
+
+        Returns
+        -------
+        index : ndarray
+            Indices corresponding to the times supplied.
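+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance::
+
+            # sample indices of 1 s and 2 s after recording onset
+            idx = raw.time_as_index([1., 2.], use_rounding=True)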
+        """
+        return _time_as_index(times, self.info['sfreq'], self.first_samp,
+                              use_first_samp, use_rounding=use_rounding)
+
+    def index_as_time(self, index, use_first_samp=False):
+        """Convert indices to time
+
+        Parameters
+        ----------
+        index : list-like | int
+            List of ints or int representing points in time.
+        use_first_samp : boolean
+            If True, the time returned is relative to the session onset, else
+            relative to the recording onset.
+
+        Returns
+        -------
+        times : ndarray
+            Times corresponding to the index supplied.
+        """
+        return _index_as_time(index, self.info['sfreq'], self.first_samp,
+                              use_first_samp)
+
+    def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
+                      return_singular=False, picks=None, scalings='norm'):
+        """Estimate rank of the raw data
+
+        This function is meant to provide a reasonable estimate of the rank.
+        The true rank of the data depends on many factors, so use at your
+        own risk.
+
+        Parameters
+        ----------
+        tstart : float
+            Start time to use for rank estimation. Default is 0.0.
+        tstop : float | None
+            End time to use for rank estimation. Default is 30.0.
+            If None, the end time of the raw file is used.
+        tol : float
+            Tolerance for singular values to consider non-zero in
+            calculating the rank. The singular values are calculated
+            in this method such that independent data are expected to
+            have singular value around one.
+        return_singular : bool
+            If True, also return the singular values that were used
+            to determine the rank.
+        picks : array_like of int, shape (n_selected_channels,)
+            The channels to be considered for rank estimation.
+            If None (default), MEG and EEG channels are included.
+        scalings : dict | 'norm'
+            To achieve reliable rank estimation on multiple sensors,
+            sensors have to be rescaled. This parameter controls the
+            rescaling. If dict, it will update the
+            following dict of defaults:
+
+                dict(mag=1e11, grad=1e9, eeg=1e5)
+
+            If 'norm' data will be scaled by internally computed
+            channel-wise norms.
+            Defaults to 'norm'.
+
+        Returns
+        -------
+        rank : int
+            Estimated rank of the data.
+        s : array
+            If return_singular is True, the singular values that were
+            thresholded to determine the rank are also returned.
+
+        Notes
+        -----
+        If data are not pre-loaded, the appropriate data will be loaded
+        by this function (can be memory intensive).
+
+        Projectors are not taken into account unless they have been applied
+        to the data using apply_proj(), since it is not always possible
+        to tell whether or not projectors have been applied previously.
+
+        Bad channels will be excluded from calculations.
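+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance::
+
+            # rank of the first 30 s, together with the singular values
+            rank, sing = raw.estimate_rank(tstop=30.0, return_singular=True)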
+        """
+        from ..cov import _estimate_rank_meeg_signals
+
+        start = max(0, self.time_as_index(tstart)[0])
+        if tstop is None:
+            stop = self.n_times - 1
+        else:
+            stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
+        tslice = slice(start, stop + 1)
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
+                               exclude='bads')
+        if len(picks) == 1:
+            return 1.0, 1.0
+        # Indexing with a picks array returns a copy (not a view), so the
+        # data can safely be overwritten below.
+        data = self[picks, tslice][0]
+        out = _estimate_rank_meeg_signals(
+            data, pick_info(self.info, picks),
+            scalings=scalings, tol=tol, return_singular=return_singular,
+            copy=False)
+
+        return out
+
+    @property
+    def ch_names(self):
+        """Channel names"""
+        return self.info['ch_names']
+
+    @property
+    def times(self):
+        """Time points"""
+        return self._times
+
+    @property
+    def n_times(self):
+        """Number of time points"""
+        return self.last_samp - self.first_samp + 1
+
+    def __len__(self):
+        return self.n_times
+
+    def load_bad_channels(self, bad_file=None, force=False):
+        """
+        Mark channels as bad from a text file, in the style (mostly) of the C
+        function mne_mark_bad_channels.
+
+        Parameters
+        ----------
+        bad_file : string
+            File name of the text file containing bad channels.
+            If bad_file is None, bad channels are cleared, but this
+            is more easily done directly with raw.info['bads'] = [].
+
+        force : boolean
+            Whether or not to force bad channel marking (of those
+            that exist) if channels are not found, instead of
+            raising an error.
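+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a Raw instance and
+        'bads.txt' a hypothetical file with one channel name per line::
+
+            raw.load_bad_channels('bads.txt', force=True)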
+        """
+
+        if bad_file is not None:
+            # Check to make sure bad channels are there
+            names = frozenset(self.info['ch_names'])
+            with open(bad_file) as fid:
+                bad_names = [line for line in fid.read().splitlines() if line]
+            names_there = [ci for ci in bad_names if ci in names]
+            count_diff = len(bad_names) - len(names_there)
+
+            if count_diff > 0:
+                if not force:
+                    raise ValueError('Bad channels from:\n%s\n not found '
+                                     'in:\n%s' % (bad_file,
+                                                  self._filenames[0]))
+                else:
+                    warnings.warn('%d bad channels from:\n%s\nnot found '
+                                  'in:\n%s' % (count_diff, bad_file,
+                                               self._filenames[0]))
+            self.info['bads'] = names_there
+        else:
+            self.info['bads'] = []
+
+    def append(self, raws, preload=None):
+        """Concatenate raw instances as if they were continuous
+
+        Parameters
+        ----------
+        raws : list, or Raw instance
+            list of Raw instances to concatenate to the current instance
+            (in order), or a single raw instance to concatenate.
+        preload : bool, str, or None (default None)
+            Preload data into memory for data manipulation and faster indexing.
+            If True, the data will be preloaded into memory (fast, requires
+            large amount of memory). If preload is a string, preload is the
+            file name of a memory-mapped file which is used to store the data
+            on the hard drive (slower, requires less memory). If preload is
+            None, preload=True or False is inferred using the preload status
+            of the raw files passed in.
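+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` and ``raw2`` are compatible
+        preloaded Raw instances::
+
+            raw.append(raw2)  # raw now spans both recordings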
+        """
+        from .fiff.raw import RawFIF
+        from .kit.kit import RawKIT
+        from .edf.edf import RawEDF
+
+        if not isinstance(raws, list):
+            raws = [raws]
+
+        # make sure the raws are compatible
+        all_raws = [self]
+        all_raws += raws
+        _check_raw_compatibility(all_raws)
+
+        # deal with preloading data first (while files are separate)
+        all_preloaded = self.preload and all(r.preload for r in raws)
+        if preload is None:
+            if all_preloaded:
+                preload = True
+            else:
+                preload = False
+
+        if not preload and not isinstance(self, (RawFIF, RawKIT, RawEDF)):
+            raise RuntimeError('preload must be True to concatenate '
+                               'files unless they are FIF, KIT, or EDF')
+        if preload is False:
+            if self.preload:
+                self._data = None
+            self.preload = False
+        else:
+            # do the concatenation ourselves since preload might be a string
+            nchan = self.info['nchan']
+            c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
+            nsamp = c_ns[-1]
+
+            if not self.preload:
+                this_data = self._read_segment()[0]
+            else:
+                this_data = self._data
+
+            # allocate the buffer
+            if isinstance(preload, string_types):
+                _data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
+                                  shape=(nchan, nsamp))
+            else:
+                _data = np.empty((nchan, nsamp), dtype=this_data.dtype)
+
+            _data[:, 0:c_ns[0]] = this_data
+
+            for ri in range(len(raws)):
+                if not raws[ri].preload:
+                    # read the data directly into the buffer
+                    data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
+                    raws[ri]._read_segment(data_buffer=data_buffer)
+                else:
+                    _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
+            self._data = _data
+            self.preload = True
+
+        # now combine information from each raw file to construct new self
+        for r in raws:
+            self._first_samps = np.r_[self._first_samps, r._first_samps]
+            self._last_samps = np.r_[self._last_samps, r._last_samps]
+            self._raw_extras += r._raw_extras
+            self._filenames += r._filenames
+        self._update_times()
+
+        if not (len(self._first_samps) == len(self._last_samps) ==
+                len(self._raw_extras) == len(self._filenames)):
+            raise RuntimeError('Append error')  # should never happen
+
+    def close(self):
+        """Clean up the object.
+
+        Does nothing for objects that close their file descriptors.
+        Things like RawFIF will override this method.
+        """
+        pass
+
+    def copy(self):
+        """ Return copy of Raw instance
+        """
+        return deepcopy(self)
+
+    def __repr__(self):
+        name = self._filenames[0]
+        name = 'None' if name is None else op.basename(name)
+        s = ', '.join(('%r' % name, "n_channels x n_times : %s x %s"
+                       % (len(self.ch_names), self.n_times)))
+        return "<%s  |  %s>" % (self.__class__.__name__, s)
+
+    def add_events(self, events, stim_channel=None):
+        """Add events to stim channel
+
+        Parameters
+        ----------
+        events : ndarray, shape (n_events, 3)
+            Events to add. The first column specifies the sample number of
+            each event, the second column is ignored, and the third column
+            provides the event value. If events already exist in the Raw
+            instance at the given sample numbers, the event values will be
+            added together.
+        stim_channel : str | None
+            Name of the stim channel to add to. If None, the config variable
+            'MNE_STIM_CHANNEL' is used. If this is not found, it will default
+            to 'STI 014'.
+
+        Notes
+        -----
+        Data must be preloaded in order to add events.
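+
+        Examples
+        --------
+        A minimal sketch, assuming ``raw`` is a preloaded Raw instance
+        (sample offset and event value are illustrative)::
+
+            import numpy as np
+            events = np.array([[raw.first_samp + 100, 0, 1]])
+            raw.add_events(events, stim_channel='STI 014')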
+        """
+        if not self.preload:
+            raise RuntimeError('cannot add events unless data are preloaded')
+        events = np.asarray(events)
+        if events.ndim != 2 or events.shape[1] != 3:
+            raise ValueError('events must be shape (n_events, 3)')
+        stim_channel = _get_stim_channel(stim_channel, self.info)
+        pick = pick_channels(self.ch_names, stim_channel)
+        if len(pick) == 0:
+            raise ValueError('Channel %s not found' % stim_channel)
+        pick = pick[0]
+        idx = events[:, 0].astype(int)
+        if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
+            raise ValueError('event sample numbers must be between %s and %s'
+                             % (self.first_samp, self.last_samp))
+        if not all(idx == events[:, 0]):
+            raise ValueError('event sample numbers must be integers')
+        self._data[pick, idx - self.first_samp] += events[:, 2]
+
+    def _get_buffer_size(self, buffer_size_sec=None):
+        """Helper to get the buffer size"""
+        if buffer_size_sec is None:
+            if 'buffer_size_sec' in self.info:
+                buffer_size_sec = self.info['buffer_size_sec']
+            else:
+                buffer_size_sec = 10.0
+        return int(np.ceil(buffer_size_sec * self.info['sfreq']))
+
+
+def _allocate_data(data, data_buffer, data_shape, dtype):
+    if data is None:
+        # if not already done, allocate array with right type
+        if isinstance(data_buffer, string_types):
+            # use a memmap
+            data = np.memmap(data_buffer, mode='w+',
+                             dtype=dtype, shape=data_shape)
+        else:
+            data = np.zeros(data_shape, dtype=dtype)
+    return data
+
+
+def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False,
+                   use_rounding=False):
+    """Convert time to indices
+
+    Parameters
+    ----------
+    times : list-like | float | int
+        List of numbers or a number representing points in time.
+    sfreq : float | int
+        Sample frequency.
+    first_samp : int
+       Index to use as first time point.
+    use_first_samp : boolean
+        If True, time is treated as relative to the session onset, else
+        as relative to the recording onset.
+    use_rounding : boolean
+        If True, use rounding (instead of truncation) when converting times to
+        indices. This can help avoid non-unique indices.
+
+    Returns
+    -------
+    index : ndarray
+        Indices corresponding to the times supplied.
+
+    Notes
+    -----
+    np.round will return the nearest even number for values exactly between
+    two integers.
+    """
+    index = np.atleast_1d(times) * sfreq
+    index -= (first_samp if use_first_samp else 0)
+
+    # Round or truncate time indices
+    if use_rounding:
+        return np.round(index).astype(int)
+    else:
+        return index.astype(int)
+
+
+def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
+    """Convert indices to time
+
+    Parameters
+    ----------
+    index : list-like | int
+        List of ints or int representing points in time.
+    sfreq : float | int
+        Sample frequency.
+    first_samp : int
+        Index to use as first time point.
+    use_first_samp : boolean
+        If True, the time returned is relative to the session onset, else
+        relative to the recording onset.
+
+    Returns
+    -------
+    times : ndarray
+        Times corresponding to the index supplied.
+    """
+    times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
+    return times / sfreq
+
+
+class _RawShell():
+    """Used for creating a temporary raw object"""
+
+    def __init__(self):
+        self.first_samp = None
+        self.last_samp = None
+        self._cals = None
+        self._rawdir = None
+        self._projector = None
+
+    @property
+    def n_times(self):
+        return self.last_samp - self.first_samp + 1
+
+
+###############################################################################
+# Writing
+def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
+               stop, buffer_size, projector, inv_comp, drop_small_buffer,
+               split_size, part_idx, prev_fname):
+    """Write raw file with splitting
+    """
+
+    if part_idx > 0:
+        # insert index in filename
+        path, base = op.split(fname)
+        idx = base.find('.')
+        use_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
+                                                base[idx + 1:]))
+    else:
+        use_fname = fname
+    logger.info('Writing %s' % use_fname)
+
+    meas_id = info['meas_id']
+
+    fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
+                                   reset_range)
+
+    first_samp = raw.first_samp + start
+    if first_samp != 0:
+        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
+
+    # previous file name and id
+    if part_idx > 0 and prev_fname is not None:
+        start_block(fid, FIFF.FIFFB_REF)
+        write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
+        write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
+        if meas_id is not None:
+            write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+        write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
+        end_block(fid, FIFF.FIFFB_REF)
+
+    pos_prev = None
+    for first in range(start, stop, buffer_size):
+        last = first + buffer_size
+        if last >= stop:
+            last = stop + 1
+
+        if picks is None:
+            data, times = raw[:, first:last]
+        else:
+            data, times = raw[picks, first:last]
+
+        if projector is not None:
+            data = np.dot(projector, data)
+
+        if (drop_small_buffer and (first > start) and
+                (len(times) < buffer_size)):
+            logger.info('Skipping data chunk due to small buffer ... '
+                        '[done]')
+            break
+        logger.info('Writing ...')
+
+        if pos_prev is None:
+            pos_prev = fid.tell()
+
+        _write_raw_buffer(fid, data, cals, fmt, inv_comp)
+
+        pos = fid.tell()
+        this_buff_size_bytes = pos - pos_prev
+        if this_buff_size_bytes > split_size / 2:
+            raise ValueError('buffer size is too large for the given split '
+                             'size: decrease "buffer_size_sec" or increase '
+                             '"split_size".')
+        if pos > split_size:
+            logger.warning('file is larger than "split_size"')
+
+        # Split files if necessary, leave some space for next file info
+        if pos >= split_size - this_buff_size_bytes - 2 ** 20:
+            next_fname, next_idx = _write_raw(
+                fname, raw, info, picks, fmt,
+                data_type, reset_range, first + buffer_size, stop, buffer_size,
+                projector, inv_comp, drop_small_buffer, split_size,
+                part_idx + 1, use_fname)
+
+            start_block(fid, FIFF.FIFFB_REF)
+            write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
+            write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
+            if meas_id is not None:
+                write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+            write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
+            end_block(fid, FIFF.FIFFB_REF)
+            break
+
+        pos_prev = pos
+
+    logger.info('Closing %s [done]' % use_fname)
+    if info.get('maxshield', False):
+        end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
+    else:
+        end_block(fid, FIFF.FIFFB_RAW_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
+    return use_fname, part_idx
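+
+# A minimal sketch of the split naming scheme used above: for part_idx > 0
+# the index is spliced in before the first dot of the base name, so a
+# hypothetical 'sub1_raw.fif' continues as 'sub1_raw-1.fif', 'sub1_raw-2.fif':
+#
+#     >>> import os.path as op
+#     >>> path, base = op.split('sub1_raw.fif')
+#     >>> idx = base.find('.')
+#     >>> op.join(path, '%s-%d.%s' % (base[:idx], 1, base[idx + 1:]))
+#     'sub1_raw-1.fif'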
+
+
+def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
+                       reset_range=True):
+    """Start write raw data in file
+
+    Data will be written in float
+
+    Parameters
+    ----------
+    name : string
+        Name of the file to create.
+    info : dict
+        Measurement info.
+    sel : array of int, optional
+        Indices of channels to include. By default all channels are included.
+    data_type : int
+        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
+    reset_range : bool
+        If True, the info['chs'][k]['range'] parameter will be set to unity.
+
+    Returns
+    -------
+    fid : file
+        The file descriptor.
+    cals : list
+        Calibration factors.
+    """
+    #
+    #    Measurement info
+    #
+    info = pick_info(info, sel, copy=True)
+
+    #
+    #  Create the file and save the essentials
+    #
+    fid = start_file(name)
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+
+    cals = []
+    for k in range(info['nchan']):
+        #
+        #   Scan numbers may have been messed up
+        #
+        info['chs'][k]['scanno'] = k + 1  # scanno starts at 1 in FIF format
+        if reset_range is True:
+            info['chs'][k]['range'] = 1.0
+        cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
+
+    write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
+
+    #
+    # Start the raw data
+    #
+    if info.get('maxshield', False):
+        start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
+    else:
+        start_block(fid, FIFF.FIFFB_RAW_DATA)
+
+    return fid, cals
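+
+# Hedged usage sketch of the writer helpers (assumes a valid measurement
+# ``info`` plus ``data`` and ``cals`` arrays; mirrors the call sequence in
+# _write_raw above):
+#
+#     >>> fid, cals = _start_writing_raw('test_raw.fif', info)  # doctest: +SKIP
+#     >>> _write_raw_buffer(fid, data, cals, 'single', None)    # doctest: +SKIP
+#     >>> end_block(fid, FIFF.FIFFB_RAW_DATA)                   # doctest: +SKIP
+#     >>> end_block(fid, FIFF.FIFFB_MEAS)                       # doctest: +SKIP
+#     >>> end_file(fid)                                         # doctest: +SKIP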
+
+
+def _write_raw_buffer(fid, buf, cals, fmt, inv_comp):
+    """Write raw buffer
+
+    Parameters
+    ----------
+    fid : file descriptor
+        an open raw data file.
+    buf : array
+        The buffer to write.
+    cals : array
+        Calibration factors.
+    fmt : str
+        'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
+        float for each item. This will be doubled for complex datatypes. Note
+        that short and int formats cannot be used for complex data.
+    inv_comp : array | None
+        The CTF compensation matrix used to revert compensation
+        change when reading.
+    """
+    if buf.shape[0] != len(cals):
+        raise ValueError('buffer and calibration sizes do not match')
+
+    if fmt not in ['short', 'int', 'single', 'double']:
+        raise ValueError('fmt must be "short", "int", "single", or "double"')
+
+    if np.isrealobj(buf):
+        if fmt == 'short':
+            write_function = write_dau_pack16
+        elif fmt == 'int':
+            write_function = write_int
+        elif fmt == 'single':
+            write_function = write_float
+        else:
+            write_function = write_double
+    else:
+        if fmt == 'single':
+            write_function = write_complex64
+        elif fmt == 'double':
+            write_function = write_complex128
+        else:
+            raise ValueError('only "single" and "double" supported for '
+                             'writing complex data')
+
+    if inv_comp is not None:
+        buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
+    else:
+        buf = buf / np.ravel(cals)[:, None]
+
+    write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
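+
+# The calibration step above divides each channel (row) of the buffer by its
+# calibration factor before writing. Self-contained illustration:
+#
+#     >>> import numpy as np
+#     >>> buf = np.array([[2., 4.], [3., 9.]])
+#     >>> cals = np.array([2., 3.])
+#     >>> buf / np.ravel(cals)[:, None]
+#     array([[ 1.,  2.],
+#            [ 1.,  3.]])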
+
+
+def _my_hilbert(x, n_fft=None, envelope=False):
+    """ Compute Hilbert transform of signals w/ zero padding.
+
+    Parameters
+    ----------
+    x : array, shape (n_times)
+        The signal to convert
+    n_fft : int, length > x.shape[-1] | None
+        Length of the FFT used for the Hilbert transform; the signal is
+        zero-padded to this length and cut back to its original length
+        afterwards.
+    envelope : bool
+        Whether to compute amplitude of the hilbert transform in order
+        to return the signal envelope.
+
+    Returns
+    -------
+    out : array, shape (n_times)
+        The Hilbert transform of the signal, or the envelope.
+    """
+    from scipy.signal import hilbert
+    n_fft = x.shape[-1] if n_fft is None else n_fft
+    n_x = x.shape[-1]
+    out = hilbert(x, N=n_fft)[:n_x]
+    if envelope is True:
+        out = np.abs(out)
+    return out
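+
+# Quick sanity sketch of the envelope path: the analytic amplitude of a unit
+# sine is ~1 away from the edges (edge effects from the transform remain):
+#
+#     >>> import numpy as np
+#     >>> t = np.linspace(0., 1., 1000, endpoint=False)
+#     >>> x = np.sin(2. * np.pi * 10. * t)
+#     >>> env = _my_hilbert(x, envelope=True)
+#     >>> bool(np.allclose(env[100:-100], 1., atol=0.05))
+#     True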
+
+
+def _check_raw_compatibility(raw):
+    """Check to make sure all instances of Raw
+    in the input list raw have compatible parameters"""
+    for ri in range(1, len(raw)):
+        if not isinstance(raw[ri], type(raw[0])):
+            raise ValueError('raw[%d] type must match' % ri)
+        if not raw[ri].info['nchan'] == raw[0].info['nchan']:
+            raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
+        if not raw[ri].info['bads'] == raw[0].info['bads']:
+            raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
+        if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
+            raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
+        if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
+            raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
+        if not all(raw[ri]._cals == raw[0]._cals):
+            raise ValueError('raw[%d]._cals must match' % ri)
+        if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
+            raise ValueError('SSP projectors in raw files must be the same')
+        if not all(_proj_equal(p1, p2) for p1, p2 in
+                   zip(raw[0].info['projs'], raw[ri].info['projs'])):
+            raise ValueError('SSP projectors in raw files must be the same')
+    if not all(r.orig_format == raw[0].orig_format for r in raw):
+        warnings.warn('raw files do not all have the same data format, '
+                      'could result in precision mismatch. Setting '
+                      'raw.orig_format="unknown"')
+        raw[0].orig_format = 'unknown'
+
+
+def concatenate_raws(raws, preload=None, events_list=None):
+    """Concatenate raw instances as if they were continuous. Note that raws[0]
+    is modified in-place to achieve the concatenation.
+
+    Parameters
+    ----------
+    raws : list
+        list of Raw instances to concatenate (in order).
+    preload : bool, or None
+        If None, preload status is inferred using the preload status of the
+        raw files passed in. True or False sets the resulting raw file to
+        have or not have data preloaded.
+    events_list : None | list
+        The events to concatenate. Defaults to None.
+
+    Returns
+    -------
+    raw : instance of Raw
+        The result of the concatenation (first Raw instance passed in).
+    events : ndarray of int, shape (n_events, 3)
+        The events. Only returned if `events_list` is not None.
+    """
+    if events_list is not None:
+        if len(events_list) != len(raws):
+            raise ValueError('`raws` and `events_list` are required '
+                             'to be of the same length')
+        first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
+        events = concatenate_events(events_list, first, last)
+    raws[0].append(raws[1:], preload)
+
+    if events_list is None:
+        return raws[0]
+    else:
+        return raws[0], events
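+
+# Typical use (hypothetical file names; note that the first instance is
+# modified in place and returned):
+#
+#     >>> from mne.io import Raw
+#     >>> raws = [Raw(f) for f in ('run1_raw.fif', 'run2_raw.fif')]  # doctest: +SKIP
+#     >>> raw = concatenate_raws(raws)                               # doctest: +SKIP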
+
+
+def _check_update_montage(info, montage):
+    """ Helper function for eeg readers to add montage"""
+    if montage is not None:
+        if not isinstance(montage, (str, Montage)):
+            err = ("Montage must be str, None, or instance of Montage. "
+                   "%s was provided" % type(montage))
+            raise TypeError(err)
+        if montage is not None:
+            if isinstance(montage, str):
+                montage = read_montage(montage)
+            _set_montage(info, montage)
+
+            missing_positions = []
+            exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
+                       FIFF.FIFFV_STIM_CH)
+            for ch in info['chs']:
+                if not ch['kind'] in exclude:
+                    if np.unique(ch['loc']).size == 1:
+                        missing_positions.append(ch['ch_name'])
+
+            # raise error if positions are missing
+            if missing_positions:
+                err = ("The following positions are missing from the montage "
+                       "definitions: %s. If those channels lack positions "
+                       "because they are EOG channels use the eog parameter."
+                       % str(missing_positions))
+                raise KeyError(err)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/__init__.py
new file mode 100644
index 0000000..17a7db2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/__init__.py
@@ -0,0 +1,7 @@
+"""Brainvision module for conversion to FIF"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from .brainvision import read_raw_brainvision
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/brainvision.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/brainvision.py
new file mode 100644
index 0000000..f030d9c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/brainvision.py
@@ -0,0 +1,512 @@
+# -*- coding: utf-8 -*-
+"""Conversion tool from Brain Vision EEG to FIF"""
+
+# Authors: Teon Brooks <teon.brooks at gmail.com>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import time
+import re
+import warnings
+
+import numpy as np
+
+from ...utils import verbose, logger
+from ..constants import FIFF
+from ..meas_info import _empty_info
+from ..base import _BaseRaw, _check_update_montage
+from ..reference import add_reference_channels
+
+from ...externals.six import StringIO, u
+from ...externals.six.moves import configparser
+
+
+class RawBrainVision(_BaseRaw):
+    """Raw object from Brain Vision EEG file
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Path to the EEG header file.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the vhdr file.
+        Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes
+        in the vhdr file. Default is ``()``.
+    reference : None | str
+        **Deprecated**, use `add_reference_channels` instead.
+        Name of the electrode which served as the reference in the recording.
+        If a name is provided, a corresponding channel is added and its data
+        is set to 0. This is useful for later re-referencing. The name should
+        correspond to a name in elp_names. Data must be preloaded.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    response_trig_shift : int | None
+        An integer that will be added to all response triggers when reading
+        events (stimulus triggers will be unaffected). If None, response
+        triggers will be ignored. Default is 0 for backwards compatibility, but
+        typically another value or None will be necessary.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
+    @verbose
+    def __init__(self, vhdr_fname, montage=None,
+                 eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(), reference=None,
+                 scale=1., preload=False, response_trig_shift=0, verbose=None):
+        # Channel info and events
+        logger.info('Extracting parameters from %s...' % vhdr_fname)
+        vhdr_fname = os.path.abspath(vhdr_fname)
+        info, fmt, self._order, events = _get_vhdr_info(
+            vhdr_fname, eog, misc, response_trig_shift, scale)
+        _check_update_montage(info, montage)
+        with open(info['filename'], 'rb') as f:
+            f.seek(0, os.SEEK_END)
+            n_samples = f.tell()
+        dtype_bytes = _fmt_byte_dict[fmt]
+        self.preload = False  # so the event-setting works
+        self.set_brainvision_events(events)
+        last_samps = [(n_samples // (dtype_bytes * (info['nchan'] - 1))) - 1]
+        super(RawBrainVision, self).__init__(
+            info, last_samps=last_samps, filenames=[info['filename']],
+            orig_format=fmt, preload=preload, verbose=verbose)
+
+        # add reference
+        if reference is not None:
+            warnings.warn('reference is deprecated and will be removed in '
+                          'v0.11. Use add_reference_channels instead.')
+            if preload is False:
+                raise ValueError("Preload must be set to True if reference is "
+                                 "specified.")
+            add_reference_channels(self, reference, copy=False)
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
+        # read data
+        n_data_ch = len(self.ch_names) - 1
+        n_times = stop - start + 1
+        pointer = start * n_data_ch * _fmt_byte_dict[self.orig_format]
+        with open(self._filenames[fi], 'rb') as f:
+            f.seek(pointer)
+            # extract data
+            data_buffer = np.fromfile(
+                f, dtype=_fmt_dtype_dict[self.orig_format],
+                count=n_times * n_data_ch)
+        data_buffer = data_buffer.reshape((n_data_ch, n_times),
+                                          order=self._order)
+
+        data_ = np.empty((n_data_ch + 1, n_times), dtype=np.float64)
+        data_[:-1] = data_buffer  # cast to float64
+        del data_buffer
+        data_[-1] = _synthesize_stim_channel(self._events, start, stop + 1)
+        data_ *= self._cals[:, np.newaxis]
+        data[:, offset:offset + stop - start + 1] = \
+            np.dot(mult, data_) if mult is not None else data_[idx]
+
+    def get_brainvision_events(self):
+        """Retrieve the events associated with the Brain Vision Raw object
+
+        Returns
+        -------
+        events : array, shape (n_events, 3)
+            Events, each row consisting of an (onset, duration, trigger)
+            sequence.
+        """
+        return self._events.copy()
+
+    def set_brainvision_events(self, events):
+        """Set the events and update the synthesized stim channel
+
+        Parameters
+        ----------
+        events : array, shape (n_events, 3)
+            Events, each row consisting of an (onset, duration, trigger)
+            sequence.
+        """
+        events = np.array(events, int)
+        if events.ndim != 2 or events.shape[1] != 3:
+            raise ValueError("[n_events x 3] shaped array required")
+        # update events
+        self._events = events
+        if self.preload:
+            start = self.first_samp
+            stop = self.last_samp + 1
+            self._data[-1] = _synthesize_stim_channel(events, start, stop)
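+
+# Hedged sketch of the events API above (hypothetical header file; the
+# companion .eeg and .vmrk files must sit next to it):
+#
+#     >>> raw = RawBrainVision('test.vhdr', preload=True)  # doctest: +SKIP
+#     >>> events = raw.get_brainvision_events()            # doctest: +SKIP
+#     >>> events = events[events[:, 2] == 255]             # keep one trigger
+#     >>> raw.set_brainvision_events(events)               # doctest: +SKIP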
+
+
+def _read_vmrk_events(fname, response_trig_shift=0):
+    """Read events from a vmrk file
+
+    Parameters
+    ----------
+    fname : str
+        vmrk file to be read.
+    response_trig_shift : int | None
+        Integer to shift response triggers by. None ignores response triggers.
+
+    Returns
+    -------
+    events : array, shape (n_events, 3)
+        An array containing the whole recording's events, each row
+        representing an event as an (onset, duration, trigger) sequence.
+    """
+    # read vmrk file
+    with open(fname) as fid:
+        txt = fid.read()
+
+    header = txt.split('\n')[0].strip()
+    start_tag = 'Brain Vision Data Exchange Marker File'
+    if not header.startswith(start_tag):
+        raise ValueError("vmrk file should start with %r" % start_tag)
+    end_tag = 'Version 1.0'
+    if not header.endswith(end_tag):
+        raise ValueError("vmrk file should be %r" % end_tag)
+    if (response_trig_shift is not None and
+            not isinstance(response_trig_shift, int)):
+        raise TypeError("response_trig_shift must be an integer or None")
+
+    # extract Marker Infos block
+    m = re.search("\[Marker Infos\]", txt)
+    if not m:
+        return np.zeros((0, 3))  # no Marker Infos block -> no events
+    mk_txt = txt[m.end():]
+    m = re.search("\[.*\]", mk_txt)
+    if m:
+        mk_txt = mk_txt[:m.start()]
+
+    # extract event information
+    items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
+    events = []
+    for info in items:
+        mtype, mdesc, onset, duration = info.split(',')[:4]
+        try:
+            trigger = int(re.findall('[A-Za-z]*\s*?(\d+)', mdesc)[0])
+            if mdesc[0].lower() == 's' or response_trig_shift is not None:
+                if mdesc[0].lower() == 'r':
+                    trigger += response_trig_shift
+                onset = int(onset)
+                duration = int(duration)
+                events.append((onset, duration, trigger))
+        except IndexError:
+            pass
+
+    events = np.array(events).reshape(-1, 3)
+    return events
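+
+# For reference, a stimulus marker line in a vmrk file looks like
+#
+#     Mk1=Stimulus,S253,487,1,0
+#
+# (type, description, onset, duration, channel), which the parser above
+# turns into the event row (487, 1, 253).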
+
+
+def _synthesize_stim_channel(events, start, stop):
+    """Synthesize a stim channel from events read from a vmrk file
+
+    Parameters
+    ----------
+    events : array, shape (n_events, 3)
+        Each row represents an event as an (onset, duration, trigger)
+        sequence (the format returned by _read_vmrk_events).
+    start : int
+        First sample to return.
+    stop : int
+        Last sample to return.
+
+    Returns
+    -------
+    stim_channel : array, shape (n_samples,)
+        An array containing the whole recording's event marking
+    """
+    # select events overlapping buffer
+    onset = events[:, 0]
+    offset = onset + events[:, 1]
+    idx = np.logical_and(onset < stop, offset > start)
+    if idx.sum() > 0:  # fix for old numpy
+        events = events[idx]
+
+    # make onset relative to buffer
+    events[:, 0] -= start
+
+    # fix onsets before buffer start
+    idx = events[:, 0] < 0
+    events[idx, 0] = 0
+
+    # create output buffer
+    stim_channel = np.zeros(stop - start)
+    for onset, duration, trigger in events:
+        stim_channel[onset:onset + duration] = trigger
+
+    return stim_channel
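+
+# Worked micro-example of the synthesis above: one event with onset 2,
+# duration 3 and trigger 5 inside an 8-sample buffer starting at 0:
+#
+#     >>> import numpy as np
+#     >>> _synthesize_stim_channel(np.array([[2, 3, 5]]), 0, 8)
+#     array([ 0.,  0.,  5.,  5.,  5.,  0.,  0.,  0.])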
+
+
+_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C')
+_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single')
+_fmt_byte_dict = dict(short=2, int=4, single=4)
+_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4')
+_unit_dict = {'V': 1., u'µV': 1e-6}
+
+
+def _get_vhdr_info(vhdr_fname, eog, misc, response_trig_shift, scale):
+    """Extracts all the information from the header file.
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Raw EEG header to be read.
+    eog : list of str
+        Names of channels that should be designated EOG channels. Names should
+        correspond to the vhdr file.
+    misc : list of str
+        Names of channels that should be designated MISC channels. Names
+        should correspond to the electrodes in the vhdr file.
+    response_trig_shift : int | None
+        Integer to shift response triggers by. None ignores response triggers.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
+
+    Returns
+    -------
+    info : Info
+        The measurement info.
+    fmt : str
+        The data format in the file.
+    order : str
+        The order of the raw data in the binary file ('F' for multiplexed,
+        'C' for vectorized).
+    events : array, shape (n_events, 3)
+        Events from the corresponding vmrk file.
+    """
+    scale = float(scale)
+    info = _empty_info()
+
+    ext = os.path.splitext(vhdr_fname)[-1]
+    if ext != '.vhdr':
+        raise IOError("The header file must be given to read the data, "
+                      "not the '%s' file." % ext)
+    with open(vhdr_fname, 'r') as f:
+        # extract the first section to resemble a cfg
+        header = f.readline().strip()
+        assert header == 'Brain Vision Data Exchange Header File Version 1.0'
+        settings = f.read()
+
+    if settings.find('[Comment]') != -1:
+        params, settings = settings.split('[Comment]')
+    else:
+        params, settings = settings, ''
+    cfg = configparser.ConfigParser()
+    if hasattr(cfg, 'read_file'):  # newer API
+        cfg.read_file(StringIO(params))
+    else:
+        cfg.readfp(StringIO(params))
+
+    # get sampling info
+    # Sampling interval is given in microsec
+    info['sfreq'] = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
+
+    # check binary format
+    assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
+    order = cfg.get('Common Infos', 'DataOrientation')
+    if order not in _orientation_dict:
+        raise NotImplementedError('Data Orientation %s is not supported'
+                                  % order)
+    order = _orientation_dict[order]
+
+    fmt = cfg.get('Binary Infos', 'BinaryFormat')
+    if fmt not in _fmt_dict:
+        raise NotImplementedError('Datatype %s is not supported' % fmt)
+    fmt = _fmt_dict[fmt]
+
+    # load channel labels
+    info['nchan'] = cfg.getint('Common Infos', 'NumberOfChannels') + 1
+    ch_names = [''] * info['nchan']
+    cals = np.empty(info['nchan'])
+    ranges = np.empty(info['nchan'])
+    cals.fill(np.nan)
+    for chan, props in cfg.items('Channel Infos'):
+        n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
+        props = props.split(',')
+        if len(props) < 4:
+            props += ('V',)
+        name, _, resolution, unit = props[:4]
+        ch_names[n] = name
+        if resolution == "":  # For truncated vhdrs (e.g. EEGLAB export)
+            resolution = 0.000001
+        unit = unit.replace('\xc2', '')  # Remove unwanted control characters
+        cals[n] = float(resolution)
+        ranges[n] = _unit_dict.get(u(unit), unit) * scale
+    ch_names[-1] = 'STI 014'
+    cals[-1] = 1.
+    ranges[-1] = 1.
+    if np.isnan(cals).any():
+        raise RuntimeError('Missing channel units')
+
+    # Attempts to extract filtering info from header. If not found, both are
+    # set to zero.
+    settings = settings.splitlines()
+    idx = None
+    if 'Channels' in settings:
+        idx = settings.index('Channels')
+        settings = settings[idx + 1:]
+        for idx, setting in enumerate(settings):
+            if re.match('#\s+Name', setting):
+                break
+            else:
+                idx = None
+
+    if idx:
+        lowpass = []
+        highpass = []
+        for i, ch in enumerate(ch_names[:-1], 1):
+            line = settings[idx + i].split()
+            assert ch in line
+            highpass.append(line[5])
+            lowpass.append(line[6])
+        if len(highpass) == 0:
+            info['highpass'] = None
+        elif all(highpass):
+            if highpass[0] == 'NaN':
+                info['highpass'] = None
+            elif highpass[0] == 'DC':
+                info['highpass'] = 0.
+            else:
+                info['highpass'] = float(highpass[0])
+        else:
+            info['highpass'] = np.min(np.array(highpass, dtype=np.float))
+            warnings.warn('%s' % ('Channels contain different highpass '
+                                  'filters. Highest filter setting will '
+                                  'be stored.'))
+        if len(lowpass) == 0:
+            info['lowpass'] = None
+        elif all(lowpass):
+            if lowpass[0] == 'NaN':
+                info['lowpass'] = None
+            else:
+                info['lowpass'] = float(lowpass[0])
+        else:
+            info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
+            warnings.warn('%s' % ('Channels contain different lowpass filters.'
+                                  ' Lowest filter setting will be stored.'))
+
+        # Post process highpass and lowpass to take into account units
+        header = settings[idx].split('  ')
+        header = [h for h in header if len(h)]
+        if '[s]' in header[4] and info['highpass'] is not None \
+                and (info['highpass'] > 0):
+            info['highpass'] = 1. / info['highpass']
+        if '[s]' in header[5] and info['lowpass'] is not None:
+            info['lowpass'] = 1. / info['lowpass']
+    else:
+        info['highpass'] = None
+        info['lowpass'] = None
+
+    # locate EEG and marker files
+    path = os.path.dirname(vhdr_fname)
+    info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
+    info['meas_date'] = int(time.time())
+
+    # Creates a list of dicts of eeg channels for raw.info
+    logger.info('Setting channel info structure...')
+    info['chs'] = []
+    info['ch_names'] = ch_names
+    for idx, ch_name in enumerate(ch_names):
+        if ch_name in eog or idx in eog or idx - info['nchan'] in eog:
+            kind = FIFF.FIFFV_EOG_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_V
+        elif ch_name in misc or idx in misc or idx - info['nchan'] in misc:
+            kind = FIFF.FIFFV_MISC_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_V
+        elif ch_name == 'STI 014':
+            kind = FIFF.FIFFV_STIM_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_NONE
+        else:
+            kind = FIFF.FIFFV_EEG_CH
+            coil_type = FIFF.FIFFV_COIL_EEG
+            unit = FIFF.FIFF_UNIT_V
+        info['chs'].append(dict(
+            ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,
+            scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),
+            unit=unit, unit_mul=0.,  # always zero; see MNE manual p. 273
+            coord_frame=FIFF.FIFFV_COORD_HEAD))
+
+    # for stim channel
+    marker_id = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))
+    events = _read_vmrk_events(marker_id, response_trig_shift)
+    info._check_consistency()
+    return info, fmt, order, events
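+
+# Note on the sampling rate parsed above: the vhdr stores SamplingInterval
+# in microseconds, so e.g. an interval of 2000 corresponds to
+#
+#     >>> 1e6 / 2000.
+#     500.0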
+
+
+def read_raw_brainvision(vhdr_fname, montage=None,
+                         eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
+                         reference=None, scale=1., preload=False,
+                         response_trig_shift=0, verbose=None):
+    """Reader for Brain Vision EEG file
+
+    Parameters
+    ----------
+    vhdr_fname : str
+        Path to the EEG header file.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple of str
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the vhdr file.
+        Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
+    misc : list or tuple of str
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes
+        in the vhdr file. Default is ``()``.
+    reference : None | str
+        **Deprecated**, use `add_reference_channels` instead.
+        Name of the electrode which served as the reference in the recording.
+        If a name is provided, a corresponding channel is added and its data
+        is set to 0. This is useful for later re-referencing. The name should
+        correspond to a name in elp_names. Data must be preloaded.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    response_trig_shift : int | None
+        An integer that will be added to all response triggers when reading
+        events (stimulus triggers will be unaffected). If None, response
+        triggers will be ignored. Default is 0 for backwards compatibility, but
+        typically another value or None will be necessary.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawBrainVision
+        A Raw object containing BrainVision data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
+    raw = RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,
+                         misc=misc, reference=reference, scale=scale,
+                         preload=preload, verbose=verbose,
+                         response_trig_shift=response_trig_shift)
+    return raw
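+
+# A minimal usage sketch (hypothetical file names; the .eeg and .vmrk
+# companions of the header must exist next to it):
+#
+#     >>> raw = read_raw_brainvision('test.vhdr', preload=True)  # doctest: +SKIP
+#     >>> raw.save('test_raw.fif')                               # doctest: +SKIP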
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/test_brainvision.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/test_brainvision.py
new file mode 100644
index 0000000..ca338f4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/brainvision/tests/test_brainvision.py
@@ -0,0 +1,207 @@
+"""Data Equivalence Tests"""
+from __future__ import print_function
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+
+from nose.tools import assert_equal, assert_raises, assert_true
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
+
+from mne.utils import _TempDir, run_tests_if_main
+from mne import pick_types, concatenate_raws, find_events
+from mne.io.constants import FIFF
+from mne.io import Raw, read_raw_brainvision
+
+FILE = inspect.getfile(inspect.currentframe())
+data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
+vhdr_path = op.join(data_dir, 'test.vhdr')
+vmrk_path = op.join(data_dir, 'test.vmrk')
+vhdr_highpass_path = op.join(data_dir, 'test_highpass.vhdr')
+montage = op.join(data_dir, 'test.hpts')
+eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
+eog = ['HL', 'HR', 'Vb']
+
+
+def test_brainvision_data_filters():
+    """Test reading raw Brain Vision files
+    """
+    raw = read_raw_brainvision(vhdr_highpass_path, montage, eog=eog,
+                               preload=True)
+    assert_equal(raw.info['highpass'], 0.1)
+    assert_equal(raw.info['lowpass'], 250.)
+    raw.info["lowpass"] = None
+    raw.filter(1, 30)
+
+
+def test_brainvision_data():
+    """Test reading raw Brain Vision files
+    """
+    assert_raises(IOError, read_raw_brainvision, vmrk_path)
+    assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,
+                  preload=True, scale="foo")
+    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
+    raw_py.load_data()  # currently does nothing
+    assert_true('RawBrainVision' in repr(raw_py))
+
+    assert_equal(raw_py.info['highpass'], 0.)
+    assert_equal(raw_py.info['lowpass'], 250.)
+
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_py, times_py = raw_py[picks]
+
+    print(raw_py)  # to test repr
+    print(raw_py.info)  # to test Info repr
+
+    # compare with a file that was generated using MNE-C
+    raw_bin = Raw(eeg_bin, preload=True)
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_bin, times_bin = raw_bin[picks]
+
+    assert_array_almost_equal(data_py, data_bin)
+    assert_array_almost_equal(times_py, times_bin)
+
+    # Make sure EOG channels are marked correctly
+    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog,
+                                  preload=True)
+    for ch in raw_py.info['chs']:
+        if ch['ch_name'] in eog:
+            assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
+        elif ch['ch_name'] == 'STI 014':
+            assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
+        elif ch['ch_name'] in raw_py.info['ch_names']:
+            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
+        else:
+            raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+
+def test_events():
+    """Test reading and modifying events"""
+    tempdir = _TempDir()
+
+    # check that events are read and stim channel is synthesized correctly
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6000, 1, 255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    # check that events are read and stim channel is synthesized correctly,
+    # and response triggers are shifted as requested.
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
+                               response_trig_shift=1000)
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6000, 1, 1255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    # check that events are read and stim channel is synthesized correctly,
+    # and response triggers are ignored.
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
+                               response_trig_shift=None)
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
+                  preload=True, response_trig_shift=0.1)
+    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
+                  preload=True, response_trig_shift=np.nan)
+
+    mne_events = find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
+
+    # modify events and check that stim channel is updated
+    index = events[:, 2] == 255
+    events = events[index]
+    raw.set_brainvision_events(events)
+    mne_events = find_events(raw, stim_channel='STI 014')
+    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
+
+    # remove events
+    nchan = raw.info['nchan']
+    ch_name = raw.info['chs'][-2]['ch_name']
+    events = np.empty((0, 3))
+    raw.set_brainvision_events(events)
+    assert_equal(raw.info['nchan'], nchan)
+    assert_equal(len(raw._data), nchan)
+    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
+    assert_equal(len(find_events(raw, 'STI 014')), 0)
+    assert_allclose(raw[-1][0], 0.)
+    fname = op.join(tempdir, 'evt_raw.fif')
+    raw.save(fname)
+
+    # add events back in
+    events = [[10, 1, 2]]
+    raw.set_brainvision_events(events)
+    assert_equal(raw.info['nchan'], nchan)
+    assert_equal(len(raw._data), nchan)
+    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
+
+
+def test_read_segment():
+    """Test writing raw eeg files when preload is False
+    """
+    tempdir = _TempDir()
+    raw1 = read_raw_brainvision(vhdr_path, eog=eog, preload=False)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:, :]
+    data11, times11 = raw11[:, :]
+    assert_array_almost_equal(data1, data11, 8)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+
+    raw2 = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
+    raw2_file = op.join(tempdir, 'test2-raw.fif')
+    raw2.save(raw2_file, overwrite=True)
+    data2, times2 = raw2[:, :]
+    assert_array_equal(data1, data2)
+    assert_array_equal(times1, times2)
+
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+
+    # save with buffer size smaller than file
+    raw3_file = op.join(tempdir, 'test3-raw.fif')
+    raw3 = read_raw_brainvision(vhdr_path, eog=eog)
+    raw3.save(raw3_file, buffer_size_sec=2)
+    raw3 = Raw(raw3_file, preload=True)
+    assert_array_equal(raw3._data, raw1._data)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/__init__.py
new file mode 100644
index 0000000..1272b62
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/__init__.py
@@ -0,0 +1,5 @@
+"""Bti module for conversion to FIF"""
+
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+
+from .bti import read_raw_bti
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/bti.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/bti.py
new file mode 100644
index 0000000..caa1be4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/bti.py
@@ -0,0 +1,1365 @@
+
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Yuval Harpaz <yuvharpaz at gmail.com>
+#
+#          simplified BSD-3 license
+
+import os.path as op
+from itertools import count
+
+import numpy as np
+
+from ...utils import logger, verbose, sum_squared
+from ...transforms import (combine_transforms, invert_transform, apply_trans,
+                           Transform)
+from ..constants import FIFF
+from .. import _BaseRaw, _coil_trans_to_loc, _loc_to_coil_trans, _empty_info
+from .constants import BTI
+from .read import (read_int32, read_int16, read_str, read_float, read_double,
+                   read_transform, read_char, read_int64, read_uint16,
+                   read_uint32, read_double_matrix, read_float_matrix,
+                   read_int16_matrix)
+from ...externals import six
+
+FIFF_INFO_CHS_FIELDS = ('loc',
+                        'ch_name', 'unit_mul', 'coord_frame', 'coil_type',
+                        'range', 'unit', 'cal',
+                        'scanno', 'kind', 'logno')
+
+FIFF_INFO_CHS_DEFAULTS = (np.array([0, 0, 0, 1] * 3, dtype='f4'),
+                          None, 0, 0, 0,
+                          1.0, FIFF.FIFF_UNIT_V, 1.0,
+                          None, FIFF.FIFFV_ECG_CH, None)
+
+FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame')
+FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
+
+BTI_WH2500_REF_MAG = ('MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA')
+BTI_WH2500_REF_GRAD = ('GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA')
+
+dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8'))
+DTYPES = dict((i, np.dtype(t)) for i, t in dtypes)
+
+
+class _bytes_io_mock_context():
+
+    def __init__(self, target):
+        self.target = target
+
+    def __enter__(self):
+        return self.target
+
+    def __exit__(self, type, value, tb):
+        pass
+
+
+def _bti_open(fname, *args, **kwargs):
+    """Handle bytes io"""
+    if isinstance(fname, six.string_types):
+        return open(fname, *args, **kwargs)
+    elif isinstance(fname, six.BytesIO):
+        return _bytes_io_mock_context(fname)
+    else:
+        raise RuntimeError('Cannot mock this.')
+
+
+def _get_bti_dev_t(adjust=0., translation=(0.0, 0.02, 0.11)):
+    """Get the general Magnes3600WH to Neuromag coordinate transform
+
+    Parameters
+    ----------
+    adjust : float | None
+        Degrees to tilt x-axis for sensor frame misalignment.
+        If None, no adjustment will be applied.
+    translation : array-like
+        The translation to place the origin of coordinate system
+        to the center of the head.
+
+    Returns
+    -------
+    m_nm_t : ndarray
+        4 x 4 rotation, translation, scaling matrix.
+    """
+    flip_t = np.array([[0., -1., 0.],
+                       [1., 0., 0.],
+                       [0., 0., 1.]])
+    rad = np.deg2rad(adjust)
+    adjust_t = np.array([[1., 0., 0.],
+                         [0., np.cos(rad), -np.sin(rad)],
+                         [0., np.sin(rad), np.cos(rad)]])
+    m_nm_t = np.eye(4)
+    m_nm_t[:3, :3] = np.dot(flip_t, adjust_t)
+    m_nm_t[:3, 3] = translation
+    return m_nm_t
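+
+# With the defaults (adjust=0, i.e. no tilt) the rotation block reduces to
+# flip_t, a 90 degree rotation about z, so a point on the BTi x-axis maps to
+# the Neuromag y-axis plus the translation:
+#
+#     >>> import numpy as np
+#     >>> t = _get_bti_dev_t()
+#     >>> np.dot(t, [1., 0., 0., 1.])[:3]
+#     array([ 0.  ,  1.02,  0.11])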
+
+
+def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
+    """Renames appropriately ordered list of channel names
+
+    Parameters
+    ----------
+    names : list of str
+        List of 4D channel names in ascending order.
+
+    Returns
+    -------
+    new : list
+        List of names, channel names in Neuromag style
+    """
+    new = list()
+    ref_mag, ref_grad, eog, eeg, ext = [count(1) for _ in range(5)]
+    for i, name in enumerate(names, 1):
+        if name.startswith('A'):
+            name = 'MEG %3.3d' % i
+        elif name == 'RESPONSE':
+            name = 'STI 013'
+        elif name == 'TRIGGER':
+            name = 'STI 014'
+        elif any(name == k for k in eog_ch):
+            name = 'EOG %3.3d' % six.advance_iterator(eog)
+        elif name == ecg_ch:
+            name = 'ECG 001'
+        elif name.startswith('E'):
+            name = 'EEG %3.3d' % six.advance_iterator(eeg)
+        elif name == 'UACurrent':
+            name = 'UTL 001'
+        elif name.startswith('M'):
+            name = 'RFM %3.3d' % six.advance_iterator(ref_mag)
+        elif name.startswith('G'):
+            name = 'RFG %3.3d' % six.advance_iterator(ref_grad)
+        elif name.startswith('X'):
+            name = 'EXT %3.3d' % six.advance_iterator(ext)
+
+        new += [name]
+
+    return new
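+
+# Self-contained illustration of the renaming scheme above (using the
+# defaults for ecg_ch and eog_ch):
+#
+#     >>> _rename_channels(['A1', 'TRIGGER', 'E31', 'E1'])
+#     ['MEG 001', 'STI 014', 'ECG 001', 'EEG 001']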
+
+
+def _read_head_shape(fname):
+    """ Helper Function """
+
+    with _bti_open(fname, 'rb') as fid:
+        fid.seek(BTI.FILE_HS_N_DIGPOINTS)
+        _n_dig_points = read_int32(fid)
+        idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3)
+        dig_points = read_double_matrix(fid, _n_dig_points, 3)
+
+    return idx_points, dig_points
+
+
+def _get_ctf_head_to_head_t(idx_points):
+    """ Helper function """
+
+    fp = idx_points.astype('>f8')
+    dp = np.sum(fp[2] * (fp[0] - fp[1]))
+    tmp1, tmp2 = sum_squared(fp[2]), sum_squared(fp[0] - fp[1])
+    dcos = -dp / np.sqrt(tmp1 * tmp2)
+    dsin = np.sqrt(1. - dcos * dcos)
+    dt = dp / np.sqrt(tmp2)
+
+    # do the transformation
+    t = np.array([[dcos, -dsin, 0., dt],
+                  [dsin, dcos, 0., 0.],
+                  [0., 0., 1., 0.],
+                  [0., 0., 0., 1.]])
+    return Transform('ctf_head', 'head', t)
+
+
+def _flip_fiducials(idx_points_nm):
+    # adjust order of fiducials to Neuromag
+    # XXX presumably swap LPA and RPA
+    idx_points_nm[[1, 2]] = idx_points_nm[[2, 1]]
+    return idx_points_nm
+
+
+def _process_bti_headshape(fname, convert=True, use_hpi=True):
+    """Read index points and dig points from BTi head shape file
+
+    Parameters
+    ----------
+    fname : str
+        The absolute path to the head shape file.
+    convert : bool
+        Whether to convert the points to Neuromag head coordinates.
+    use_hpi : bool
+        Whether to treat additional hpi coils as digitization points or not.
+        If False, hpi coils will be discarded.
+
+    Returns
+    -------
+    dig : list of dicts
+        The list of dig point info structures needed for the fiff info
+        structure.
+    t : Transform
+        The transformation that was used.
+    """
+    idx_points, dig_points = _read_head_shape(fname)
+    if convert:
+        ctf_head_t = _get_ctf_head_to_head_t(idx_points)
+    else:
+        ctf_head_t = Transform('ctf_head', 'ctf_head', np.eye(4))
+
+    if dig_points is not None:
+        # dig_points = apply_trans(ctf_head_t['trans'], dig_points)
+        all_points = np.r_[idx_points, dig_points]
+    else:
+        all_points = idx_points
+
+    if convert:
+        all_points = _convert_hs_points(all_points, ctf_head_t)
+
+    dig = _points_to_dig(all_points, len(idx_points), use_hpi)
+    return dig, ctf_head_t
+
+
+def _convert_hs_points(points, t):
+    """convert to Neuromag"""
+    points = apply_trans(t['trans'], points)
+    points = _flip_fiducials(points).astype(np.float32)
+    return points
+
+
+def _points_to_dig(points, n_idx_points, use_hpi):
+    """Put points in info dig structure"""
+    idx_idents = list(range(1, 4)) + list(range(1, (n_idx_points + 1) - 3))
+    dig = []
+    for idx in range(points.shape[0]):
+        point_info = dict(zip(FIFF_INFO_DIG_FIELDS, FIFF_INFO_DIG_DEFAULTS))
+        point_info['r'] = points[idx]
+        if idx < 3:
+            point_info['kind'] = FIFF.FIFFV_POINT_CARDINAL
+            point_info['ident'] = idx_idents[idx]
+        if 2 < idx < n_idx_points and use_hpi:
+            point_info['kind'] = FIFF.FIFFV_POINT_HPI
+            point_info['ident'] = idx_idents[idx]
+        elif idx > 4:
+            point_info['kind'] = FIFF.FIFFV_POINT_EXTRA
+            point_info['ident'] = (idx + 1) - len(idx_idents)
+
+        if 2 < idx < n_idx_points and not use_hpi:
+            pass
+        else:
+            dig += [point_info]
+
+    return dig
+
+
+def _convert_coil_trans(coil_trans, dev_ctf_t, bti_dev_t):
+    """ Helper Function """
+    t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t,
+                           'ctf_head', 'meg')
+    t = np.dot(t['trans'], coil_trans)
+    return t
+
+
+def _correct_offset(fid):
+    """ Align fid pointer """
+    current = fid.tell()
+    if ((current % BTI.FILE_CURPOS) != 0):
+        offset = current % BTI.FILE_CURPOS
+        fid.seek(BTI.FILE_CURPOS - (offset), 1)
+
+
+def _read_config(fname):
+    """Read BTi system config file
+
+    Parameters
+    ----------
+    fname : str
+        The absolute path to the config file
+
+    Returns
+    -------
+    cfg : dict
+        The config blocks found.
+
+    """
+
+    with _bti_open(fname, 'rb') as fid:
+        cfg = dict()
+        cfg['hdr'] = {'version': read_int16(fid),
+                      'site_name': read_str(fid, 32),
+                      'dap_hostname': read_str(fid, 16),
+                      'sys_type': read_int16(fid),
+                      'sys_options': read_int32(fid),
+                      'supply_freq': read_int16(fid),
+                      'total_chans': read_int16(fid),
+                      'system_fixed_gain': read_float(fid),
+                      'volts_per_bit': read_float(fid),
+                      'total_sensors': read_int16(fid),
+                      'total_user_blocks': read_int16(fid),
+                      'next_der_chan_no': read_int16(fid)}
+
+        fid.seek(2, 1)
+
+        cfg['checksum'] = read_uint32(fid)
+        cfg['reserved'] = read_char(fid, 32)
+        cfg['transforms'] = [read_transform(fid) for t in
+                             range(cfg['hdr']['total_sensors'])]
+
+        cfg['user_blocks'] = dict()
+        for block in range(cfg['hdr']['total_user_blocks']):
+            ub = dict()
+
+            ub['hdr'] = {'nbytes': read_int32(fid),
+                         'kind': read_str(fid, 20),
+                         'checksum': read_int32(fid),
+                         'username': read_str(fid, 32),
+                         'timestamp': read_int32(fid),
+                         'user_space_size': read_int32(fid),
+                         'reserved': read_char(fid, 32)}
+
+            _correct_offset(fid)
+            kind = ub['hdr'].pop('kind')
+            if not kind:  # make sure reading goes right. Should never be empty
+                raise RuntimeError('Could not read user block. Probably you '
+                                   'acquired data using a BTi version '
+                                   'currently not supported. Please contact '
+                                   'the mne-python developers.')
+            dta, cfg['user_blocks'][kind] = dict(), ub
+            if kind in [v for k, v in BTI.items() if k[:5] == 'UB_B_']:
+                if kind == BTI.UB_B_MAG_INFO:
+                    dta['version'] = read_int32(fid)
+                    fid.seek(20, 1)
+                    dta['headers'] = list()
+                    for hdr in range(6):
+                        d = {'name': read_str(fid, 16),
+                             'transform': read_transform(fid),
+                             'units_per_bit': read_float(fid)}
+                        dta['headers'] += [d]
+                        fid.seek(20, 1)
+
+                elif kind == BTI.UB_B_COH_POINTS:
+                    dta['n_points'] = read_int32(fid)
+                    dta['status'] = read_int32(fid)
+                    dta['points'] = []
+                    for pnt in range(16):
+                        d = {'pos': read_double_matrix(fid, 1, 3),
+                             'direction': read_double_matrix(fid, 1, 3),
+                             'error': read_double(fid)}
+                        dta['points'] += [d]
+
+                elif kind == BTI.UB_B_CCP_XFM_BLOCK:
+                    dta['method'] = read_int32(fid)
+                    # handle difference btw/ linux (0) and solaris (4)
+                    size = 0 if ub['hdr']['user_space_size'] == 132 else 4
+                    fid.seek(size, 1)
+                    dta['transform'] = read_transform(fid)
+
+                elif kind == BTI.UB_B_EEG_LOCS:
+                    dta['electrodes'] = []
+                    while True:
+                        d = {'label': read_str(fid, 16),
+                             'location': read_double_matrix(fid, 1, 3)}
+                        if not d['label']:
+                            break
+                        dta['electrodes'] += [d]
+
+                elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER,
+                              BTI.UB_B_WHS_SUBSYS_VER]:
+                    dta['version'] = read_int16(fid)
+                    dta['struct_size'] = read_int16(fid)
+                    dta['entries'] = read_int16(fid)
+
+                    fid.seek(8, 1)
+
+                elif kind == BTI.UB_B_WHC_CHAN_MAP:
+                    num_channels = None
+                    for name, data in cfg['user_blocks'].items():
+                        if name == BTI.UB_B_WHC_CHAN_MAP_VER:
+                            num_channels = data['entries']
+                            break
+
+                    if num_channels is None:
+                        raise ValueError('Cannot find block %s to determine '
+                                         'number of channels'
+                                         % BTI.UB_B_WHC_CHAN_MAP_VER)
+
+                    dta['channels'] = list()
+                    for i in range(num_channels):
+                        d = {'subsys_type': read_int16(fid),
+                             'subsys_num': read_int16(fid),
+                             'card_num': read_int16(fid),
+                             'chan_num': read_int16(fid),
+                             'recdspnum': read_int16(fid)}
+                        dta['channels'] += [d]
+                        fid.seek(8, 1)
+
+                elif kind == BTI.UB_B_WHS_SUBSYS:
+                    num_subsys = None
+                    for name, data in cfg['user_blocks'].items():
+                        if name == BTI.UB_B_WHS_SUBSYS_VER:
+                            num_subsys = data['entries']
+                            break
+
+                    if num_subsys is None:
+                        raise ValueError('Cannot find block %s to determine'
+                                         ' number of subsystems'
+                                         % BTI.UB_B_WHS_SUBSYS_VER)
+
+                    dta['subsys'] = list()
+                    for sub_key in range(num_subsys):
+                        d = {'subsys_type': read_int16(fid),
+                             'subsys_num': read_int16(fid),
+                             'cards_per_sys': read_int16(fid),
+                             'channels_per_card': read_int16(fid),
+                             'card_version': read_int16(fid)}
+
+                        fid.seek(2, 1)
+
+                        d.update({'offsetdacgain': read_float(fid),
+                                  'squid_type': read_int32(fid),
+                                  'timesliceoffset': read_int16(fid),
+                                  'padding': read_int16(fid),
+                                  'volts_per_bit': read_float(fid)})
+
+                        dta['subsys'] += [d]
+
+                elif kind == BTI.UB_B_CH_LABELS:
+                    dta['version'] = read_int32(fid)
+                    dta['entries'] = read_int32(fid)
+                    fid.seek(16, 1)
+
+                    dta['labels'] = list()
+                    for label in range(dta['entries']):
+                        dta['labels'] += [read_str(fid, 16)]
+
+                elif kind == BTI.UB_B_CALIBRATION:
+                    dta['sensor_no'] = read_int16(fid)
+                    fid.seek(2, 1)
+                    dta['timestamp'] = read_int32(fid)
+                    dta['logdir'] = read_str(fid, 256)
+
+                elif kind == BTI.UB_B_SYS_CONFIG_TIME:
+                    # handle difference btw/ linux (256) and solaris (512)
+                    size = 256 if ub['hdr']['user_space_size'] == 260 else 512
+                    dta['sysconfig_name'] = read_str(fid, size)
+                    dta['timestamp'] = read_int32(fid)
+
+                elif kind == BTI.UB_B_DELTA_ENABLED:
+                    dta['delta_enabled'] = read_int16(fid)
+
+                elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]:
+                    dta['hdr'] = {'version': read_int32(fid),
+                                  'entry_size': read_int32(fid),
+                                  'n_entries': read_int32(fid),
+                                  'filtername': read_str(fid, 16),
+                                  'n_e_values': read_int32(fid),
+                                  'reserved': read_str(fid, 28)}
+
+                    if dta['hdr']['version'] == 2:
+                        size = 16
+                        dta['ch_names'] = [read_str(fid, size) for ch in
+                                           range(dta['hdr']['n_entries'])]
+                        dta['e_ch_names'] = [read_str(fid, size) for ch in
+                                             range(dta['hdr']['n_e_values'])]
+
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_e_values']
+                        dta['etable'] = read_float_matrix(fid, rows, cols)
+                    else:  # handle MAGNES2500 naming scheme
+                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_e_values']
+                        dta['hdr']['n_e_values'] = 6
+                        dta['e_ch_names'] = BTI_WH2500_REF_MAG
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_e_values']
+                        dta['etable'] = read_float_matrix(fid, rows, cols)
+
+                        _correct_offset(fid)
+
+                elif any([kind == BTI.UB_B_WEIGHTS_USED,
+                          kind[:4] == BTI.UB_B_WEIGHT_TABLE]):
+                    dta['hdr'] = {'version': read_int32(fid),
+                                  'entry_size': read_int32(fid),
+                                  'n_entries': read_int32(fid),
+                                  'name': read_str(fid, 32),
+                                  'description': read_str(fid, 80),
+                                  'n_anlg': read_int32(fid),
+                                  'n_dsp': read_int32(fid),
+                                  'reserved': read_str(fid, 72)}
+
+                    if dta['hdr']['version'] == 2:
+                        dta['ch_names'] = [read_str(fid, 16) for ch in
+                                           range(dta['hdr']['n_entries'])]
+                        dta['anlg_ch_names'] = [read_str(fid, 16) for ch in
+                                                range(dta['hdr']['n_anlg'])]
+
+                        dta['dsp_ch_names'] = [read_str(fid, 16) for ch in
+                                               range(dta['hdr']['n_dsp'])]
+
+                        rows = dta['hdr']['n_entries']
+                        cols = dta['hdr']['n_dsp']
+                        dta['dsp_wts'] = read_float_matrix(fid, rows, cols)
+                        cols = dta['hdr']['n_anlg']
+                        dta['anlg_wts'] = read_int16_matrix(fid, rows, cols)
+
+                    else:  # handle MAGNES2500 naming scheme
+                        dta['ch_names'] = ['WH2500'] * dta['hdr']['n_entries']
+                        dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3]
+                        dta['hdr']['n_anlg'] = len(dta['anlg_ch_names'])
+                        dta['dsp_ch_names'] = BTI_WH2500_REF_GRAD
+                        dta['hdr']['n_dsp'] = len(dta['dsp_ch_names'])
+                        dta['anlg_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                                    dta['hdr']['n_anlg']),
+                                                   dtype='i2')
+                        dta['dsp_wts'] = np.zeros((dta['hdr']['n_entries'],
+                                                   dta['hdr']['n_dsp']),
+                                                  dtype='f4')
+                        for n in range(dta['hdr']['n_entries']):
+                            dta['anlg_wts'][n] = read_int16_matrix(
+                                fid, 1, dta['hdr']['n_anlg'])
+                            read_int16(fid)
+                            dta['dsp_wts'][n] = read_float_matrix(
+                                fid, 1, dta['hdr']['n_dsp'])
+
+                        _correct_offset(fid)
+
+                elif kind == BTI.UB_B_TRIG_MASK:
+                    dta['version'] = read_int32(fid)
+                    dta['entries'] = read_int32(fid)
+                    fid.seek(16, 1)
+
+                    dta['masks'] = []
+                    for entry in range(dta['entries']):
+                        d = {'name': read_str(fid, 20),
+                             'nbits': read_uint16(fid),
+                             'shift': read_uint16(fid),
+                             'mask': read_uint32(fid)}
+                        dta['masks'] += [d]
+                        fid.seek(8, 1)
+
+            else:
+                dta['unknown'] = {'hdr': read_char(fid,
+                                  ub['hdr']['user_space_size'])}
+
+            ub.update(dta)  # finally update the userblock data
+            _correct_offset(fid)  # after reading.
+
+        cfg['chs'] = list()
+
+        # prepare reading channels
+        def dev_header(x):
+            return dict(size=read_int32(x), checksum=read_int32(x),
+                        reserved=read_str(x, 32))
+
+        for channel in range(cfg['hdr']['total_chans']):
+            ch = {'name': read_str(fid, 16),
+                  'chan_no': read_int16(fid),
+                  'ch_type': read_uint16(fid),
+                  'sensor_no': read_int16(fid),
+                  'data': dict()}
+
+            fid.seek(2, 1)
+            ch.update({'gain': read_float(fid),
+                       'units_per_bit': read_float(fid),
+                       'yaxis_label': read_str(fid, 16),
+                       'aar_val': read_double(fid),
+                       'checksum': read_int32(fid),
+                       'reserved': read_str(fid, 32)})
+
+            cfg['chs'] += [ch]
+            _correct_offset(fid)  # before and after
+            dta = dict()
+            if ch['ch_type'] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
+                dev = {'device_info': dev_header(fid),
+                       'inductance': read_float(fid),
+                       'padding': read_str(fid, 4),
+                       'transform': _correct_trans(read_transform(fid)),
+                       'xform_flag': read_int16(fid),
+                       'total_loops': read_int16(fid)}
+
+                fid.seek(4, 1)
+                dev['reserved'] = read_str(fid, 32)
+                dta.update({'dev': dev, 'loops': []})
+                for loop in range(dev['total_loops']):
+                    d = {'position': read_double_matrix(fid, 1, 3),
+                         'orientation': read_double_matrix(fid, 1, 3),
+                         'radius': read_double(fid),
+                         'wire_radius': read_double(fid),
+                         'turns': read_int16(fid)}
+                    fid.seek(2, 1)
+                    d['checksum'] = read_int32(fid)
+                    d['reserved'] = read_str(fid, 32)
+                    dta['loops'] += [d]
+
+            elif ch['ch_type'] == BTI.CHTYPE_EEG:
+                dta = {'device_info': dev_header(fid),
+                       'impedance': read_float(fid),
+                       'padding': read_str(fid, 4),
+                       'transform': read_transform(fid),
+                       'reserved': read_char(fid, 32)}
+
+            elif ch['ch_type'] == BTI.CHTYPE_EXTERNAL:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid),
+                       'reserved': read_str(fid, 32)}
+
+            elif ch['ch_type'] == BTI.CHTYPE_TRIGGER:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid)}
+                fid.seek(2, 1)
+                dta['reserved'] = read_str(fid, 32)
+
+            elif ch['ch_type'] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
+                dta = {'device_info': dev_header(fid),
+                       'user_space_size': read_int32(fid),
+                       'reserved': read_str(fid, 32)}
+
+            elif ch['ch_type'] == BTI.CHTYPE_SHORTED:
+                dta = {'device_info': dev_header(fid),
+                       'reserved': read_str(fid, 32)}
+
+            ch.update(dta)  # add data collected
+            _correct_offset(fid)  # after each reading
+
+    return cfg
+
+
+def _read_epoch(fid):
+    """Read BTi PDF epoch"""
+    out = {'pts_in_epoch': read_int32(fid),
+           'epoch_duration': read_float(fid),
+           'expected_iti': read_float(fid),
+           'actual_iti': read_float(fid),
+           'total_var_events': read_int32(fid),
+           'checksum': read_int32(fid),
+           'epoch_timestamp': read_int32(fid)}
+
+    fid.seek(28, 1)
+
+    return out
+
+
+def _read_channel(fid):
+    """Read BTi PDF channel"""
+    out = {'chan_label': read_str(fid, 16),
+           'chan_no': read_int16(fid),
+           'attributes': read_int16(fid),
+           'scale': read_float(fid),
+           'yaxis_label': read_str(fid, 16),
+           'valid_min_max': read_int16(fid)}
+
+    fid.seek(6, 1)
+    out.update({'ymin': read_double(fid),
+                'ymax': read_double(fid),
+                'index': read_int32(fid),
+                'checksum': read_int32(fid),
+                'off_flag': read_str(fid, 16),
+                'offset': read_float(fid)})
+
+    fid.seek(12, 1)
+
+    return out
+
+
+def _read_event(fid):
+    """Read BTi PDF event"""
+    out = {'event_name': read_str(fid, 16),
+           'start_lat': read_float(fid),
+           'end_lat': read_float(fid),
+           'step_size': read_float(fid),
+           'fixed_event': read_int16(fid),
+           'checksum': read_int32(fid)}
+
+    fid.seek(32, 1)
+    _correct_offset(fid)
+
+    return out
+
+
+def _read_process(fid):
+    """Read BTi PDF process"""
+
+    out = {'nbytes': read_int32(fid),
+           'process_type': read_str(fid, 20),
+           'checksum': read_int32(fid),
+           'user': read_str(fid, 32),
+           'timestamp': read_int32(fid),
+           'filename': read_str(fid, 256),
+           'total_steps': read_int32(fid)}
+
+    fid.seek(32, 1)
+    _correct_offset(fid)
+    out['processing_steps'] = list()
+    for step in range(out['total_steps']):
+        this_step = {'nbytes': read_int32(fid),
+                     'process_type': read_str(fid, 20),
+                     'checksum': read_int32(fid)}
+        ptype = this_step['process_type']
+        if ptype == BTI.PROC_DEFAULTS:
+            this_step['scale_option'] = read_int32(fid)
+
+            fid.seek(4, 1)
+            this_step['scale'] = read_double(fid)
+            this_step['dtype'] = read_int32(fid)
+            this_step['selected'] = read_int16(fid)
+            this_step['color_display'] = read_int16(fid)
+
+            fid.seek(32, 1)
+        elif ptype in BTI.PROC_FILTER:
+            this_step['freq'] = read_float(fid)
+            fid.seek(32, 1)
+        elif ptype in BTI.PROC_BPFILTER:
+            this_step['high_freq'] = read_float(fid)
+            this_step['low_freq'] = read_float(fid)
+        else:
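+            # unknown process type: read the size of its user space and
+            # skip over it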
+            jump = this_step['user_space_size'] = read_int32(fid)
+            fid.seek(32, 1)
+            fid.seek(jump, 1)
+
+        out['processing_steps'] += [this_step]
+        _correct_offset(fid)
+
+    return out
+
+
+def _read_assoc_file(fid):
+    """Read BTi PDF assocfile"""
+
+    out = {'file_id': read_int16(fid),
+           'length': read_int16(fid)}
+
+    fid.seek(32, 1)
+    out['checksum'] = read_int32(fid)
+
+    return out
+
+
+def _read_pfid_ed(fid):
+    """Read PDF ed file"""
+
+    out = {'comment_size': read_int32(fid),
+           'name': read_str(fid, 17)}
+
+    fid.seek(9, 1)
+    out.update({'pdf_number': read_int16(fid),
+                'total_events': read_int32(fid),
+                'timestamp': read_int32(fid),
+                'flags': read_int32(fid),
+                'de_process': read_int32(fid),
+                'checksum': read_int32(fid),
+                'ed_id': read_int32(fid),
+                'win_width': read_float(fid),
+                'win_offset': read_float(fid)})
+
+    fid.seek(8, 1)
+
+    return out
+
+
+def _read_coil_def(fid):
+    """ Read coil definition """
+    coildef = {'position': read_double_matrix(fid, 1, 3),
+               'orientation': read_double_matrix(fid, 1, 3),
+               'radius': read_double(fid),
+               'wire_radius': read_double(fid),
+               'turns': read_int16(fid)}
+
+    fid.seek(2, 1)
+    coildef['checksum'] = read_int32(fid)
+    coildef['reserved'] = read_str(fid, 32)
+
+    return coildef
+
+
+def _read_ch_config(fid):
+    """Read BTi channel config"""
+
+    cfg = {'name': read_str(fid, BTI.FILE_CONF_CH_NAME),
+           'chan_no': read_int16(fid),
+           'ch_type': read_uint16(fid),
+           'sensor_no': read_int16(fid)}
+
+    fid.seek(BTI.FILE_CONF_CH_NEXT, 1)
+
+    cfg.update({'gain': read_float(fid),
+                'units_per_bit': read_float(fid),
+                'yaxis_label': read_str(fid, BTI.FILE_CONF_CH_YLABEL),
+                'aar_val': read_double(fid),
+                'checksum': read_int32(fid),
+                'reserved': read_str(fid, BTI.FILE_CONF_CH_RESERVED)})
+
+    _correct_offset(fid)
+
+    # Then the channel info
+    ch_type, chan = cfg['ch_type'], dict()
+    chan['dev'] = {'size': read_int32(fid),
+                   'checksum': read_int32(fid),
+                   'reserved': read_str(fid, 32)}
+    if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]:
+        chan['loops'] = [_read_coil_def(fid) for d in
+                         range(chan['dev']['total_loops'])]
+
+    elif ch_type == BTI.CHTYPE_EEG:
+        chan['impedance'] = read_float(fid)
+        chan['padding'] = read_str(fid, BTI.FILE_CONF_CH_PADDING)
+        chan['transform'] = read_transform(fid)
+        chan['reserved'] = read_char(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    elif ch_type in [BTI.CHTYPE_TRIGGER, BTI.CHTYPE_EXTERNAL,
+                     BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]:
+        chan['user_space_size'] = read_int32(fid)
+        if ch_type == BTI.CHTYPE_TRIGGER:
+            fid.seek(2, 1)
+        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    elif ch_type == BTI.CHTYPE_SHORTED:
+        chan['reserved'] = read_str(fid, BTI.FILE_CONF_CH_RESERVED)
+
+    cfg['chan'] = chan
+
+    _correct_offset(fid)
+
+    return cfg
+
+
+def _read_bti_header_pdf(pdf_fname):
+    """Read header from pdf file"""
+    with _bti_open(pdf_fname, 'rb') as fid:
+        fid.seek(-8, 2)
+        start = fid.tell()
+        header_position = read_int64(fid)
+        check_value = header_position & BTI.FILE_MASK
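+        # BTI.FILE_MASK is 2 ** 31 - 1; the masking keeps the lower 31
+        # bits of the stored position (presumably to cope with offsets
+        # whose high bits are set in very large files)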
+
+        if (start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK:
+            header_position = check_value
+
+        # Check header position for alignment issues
+        if header_position % 8 != 0:
+            header_position += 8 - header_position % 8
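+            # e.g. a stored position of 11 is moved up to 16, the next
+            # multiple of 8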
+
+        fid.seek(header_position, 0)
+
+        # actual header starts here
+        info = {'version': read_int16(fid),
+                'file_type': read_str(fid, 5),
+                'hdr_size': start - header_position,  # add for convenience
+                'start': start}
+
+        fid.seek(1, 1)
+
+        info.update({'data_format': read_int16(fid),
+                     'acq_mode': read_int16(fid),
+                     'total_epochs': read_int32(fid),
+                     'input_epochs': read_int32(fid),
+                     'total_events': read_int32(fid),
+                     'total_fixed_events': read_int32(fid),
+                     'sample_period': read_float(fid),
+                     'xaxis_label': read_str(fid, 16),
+                     'total_processes': read_int32(fid),
+                     'total_chans': read_int16(fid)})
+
+        fid.seek(2, 1)
+        info.update({'checksum': read_int32(fid),
+                     'total_ed_classes': read_int32(fid),
+                     'total_associated_files': read_int16(fid),
+                     'last_file_index': read_int16(fid),
+                     'timestamp': read_int32(fid)})
+
+        fid.seek(20, 1)
+        _correct_offset(fid)
+
+        # actual header ends here; so far everything seems ok
+
+        info['epochs'] = [_read_epoch(fid) for epoch in
+                          range(info['total_epochs'])]
+
+        info['chs'] = [_read_channel(fid) for ch in
+                       range(info['total_chans'])]
+
+        info['events'] = [_read_event(fid) for event in
+                          range(info['total_events'])]
+
+        info['processes'] = [_read_process(fid) for process in
+                             range(info['total_processes'])]
+
+        info['assocfiles'] = [_read_assoc_file(fid) for af in
+                              range(info['total_associated_files'])]
+
+        info['edclasses'] = [_read_pfid_ed(fid) for ed_class in
+                             range(info['total_ed_classes'])]
+
+        info['extra_data'] = fid.read(start - fid.tell())
+        info['pdf_fname'] = pdf_fname
+
+    info['total_slices'] = sum(e['pts_in_epoch'] for e in
+                               info['epochs'])
+
+    info['dtype'] = DTYPES[info['data_format']]
+    bps = info['dtype'].itemsize * info['total_chans']
+    info['bytes_per_slice'] = bps
+    return info
+
+
+def _read_bti_header(pdf_fname, config_fname, sort_by_ch_name=True):
+    """ Read bti PDF header
+    """
+    info = _read_bti_header_pdf(pdf_fname) if pdf_fname else dict()
+
+    cfg = _read_config(config_fname)
+    info['bti_transform'] = cfg['transforms']
+
+    # augment the channel list with the matching info from the config:
+    # get the channels from the config that are present in the PDF
+    chans = info.get('chs', None)
+    if chans is not None:
+        chans_cfg = [c for c in cfg['chs'] if c['chan_no']
+                     in [c_['chan_no'] for c_ in chans]]
+
+        # check that all PDF channels are present in the config
+        match = [c['chan_no'] for c in chans_cfg] == \
+                [c['chan_no'] for c in chans]
+
+        if not match:
+            raise RuntimeError('Could not match raw data channels with'
+                               ' config channels. Some of the channels'
+                               ' found are not described in config.')
+    else:
+        chans_cfg = cfg['chs']
+        chans = [dict() for d in chans_cfg]
+
+    # transfer channel info from config to channel info
+    for ch, ch_cfg in zip(chans, chans_cfg):
+        ch['upb'] = ch_cfg['units_per_bit']
+        ch['gain'] = ch_cfg['gain']
+        ch['name'] = ch_cfg['name']
+        if ch_cfg.get('dev', dict()).get('transform', None) is not None:
+            ch['loc'] = _coil_trans_to_loc(ch_cfg['dev']['transform'])
+        else:
+            ch['loc'] = None
+        if pdf_fname:
+            if info['data_format'] <= 2:  # see DTYPES, implies integer
+                ch['cal'] = ch['scale'] * ch['upb'] / float(ch['gain'])
+            else:  # float
+                ch['cal'] = ch['scale'] * ch['gain']
+        else:
+            ch['scale'] = 1.0
+
+    if sort_by_ch_name:
+        by_index = [(i, d['index']) for i, d in enumerate(chans)]
+        by_index.sort(key=lambda c: c[1])
+        by_index = [idx[0] for idx in by_index]
+        chs = [chans[pos] for pos in by_index]
+
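+        # sort 4D channels like 'A22' numerically by their suffix
+        # ('A2' < 'A22' < 'A104'), not lexicographically; the remaining
+        # channels keep their order and are appended after them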
+        sort_by_name_idx = [(i, d['name']) for i, d in enumerate(chs)]
+        a_chs = [c for c in sort_by_name_idx if c[1].startswith('A')]
+        other_chs = [c for c in sort_by_name_idx if not c[1].startswith('A')]
+        sort_by_name_idx = sorted(
+            a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
+
+        sort_by_name_idx = [idx[0] for idx in sort_by_name_idx]
+
+        info['chs'] = [chans[pos] for pos in sort_by_name_idx]
+        info['order'] = sort_by_name_idx
+    else:
+        info['chs'] = chans
+        info['order'] = Ellipsis
+
+    # finally add some important fields from the config
+    info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED]
+    info['weights'] = cfg['user_blocks'][BTI.UB_B_WEIGHTS_USED]
+
+    return info
+
+
+def _read_data(info, start=None, stop=None):
+    """ Helper function: read Bti processed data file (PDF)
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    start : int | None
+        The number of the first time slice to read. If None, all data will
+        be read from the beginning.
+    stop : int | None
+        The number of the last time slice to read. If None, all data will
+        be read to the end.
+
+    Returns
+    -------
+    data : ndarray
+        The measurement data, a channels x time slices array.
+        The data will be cast to np.float64 for compatibility.
+    """
+
+    total_slices = info['total_slices']
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = total_slices
+
+    if any([start < 0, stop > total_slices, start >= stop]):
+        raise RuntimeError('Invalid data range supplied:'
+                           ' %d, %d' % (start, stop))
+    fname = info['pdf_fname']
+    with _bti_open(fname, 'rb') as fid:
+        fid.seek(info['bytes_per_slice'] * start, 0)
+        cnt = (stop - start) * info['total_chans']
+        shape = [stop - start, info['total_chans']]
+
+        if isinstance(fid, six.BytesIO):
+            # honor the seek offset above; getvalue() always starts at 0
+            data = np.fromstring(fid.getvalue()[fid.tell():],
+                                 dtype=info['dtype'], count=cnt)
+        else:
+            data = np.fromfile(fid, dtype=info['dtype'], count=cnt)
+        data = data.astype('f4').reshape(shape)
+
+    for ch in info['chs']:
+        data[:, ch['index']] *= ch['cal']
+
+    return data[:, info['order']].T.astype(np.float64)
+
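+# Usage sketch (private helper, not part of the public API): read only the
+# first second of data, given ``info`` as returned by ``_read_bti_header``:
+#
+#     >>> n_slices = int(1. / info['sample_period'])  # doctest: +SKIP
+#     >>> data = _read_data(info, start=0, stop=n_slices)  # doctest: +SKIP
+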
+
+def _correct_trans(t):
+    """Helper to convert to a transformation matrix"""
+    t = np.array(t, np.float64)
+    t[:3, :3] *= t[3, :3][:, np.newaxis]  # apply scalings
+    t[3, :3] = 0.  # remove them
+    assert t[3, 3] == 1.
+    return t
+
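+# Illustration of ``_correct_trans`` (made-up values): per-axis scalings
+# stored in the last row are folded into the 3 x 3 part and then cleared:
+#
+#     >>> t = np.eye(4)
+#     >>> t[3, :3] = 2.
+#     >>> _correct_trans(t)[:3, :3]  # equals 2. * np.eye(3)
+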
+
+class RawBTi(_BaseRaw):
+    """ Raw object from 4D Neuroimaging MagnesWH3600 data
+
+    Parameters
+    ----------
+    pdf_fname : str
+        Path to the processed data file (PDF).
+    config_fname : str
+        Path to system config file.
+    head_shape_fname : str | None
+        Path to the head shape file.
+    rotation_x : float
+        Degrees to tilt x-axis for sensor frame misalignment. Ignored
+        if convert is True.
+    translation : array-like, shape (3,)
+        The translation to place the origin of coordinate system
+        to the center of the head. Ignored if convert is True.
+    convert : bool
+        Convert to Neuromag coordinates or not.
+    rename_channels : bool
+        Whether to rename the original 4D channel labels to Neuromag-style
+        names. Defaults to True.
+    sort_by_ch_name : bool
+        Reorder channels according to channel label. 4D channels don't have
+        monotonically increasing numbers in their labels. Defaults to True.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as a regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be treated
+        as regular EEG channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    @verbose
+    def __init__(self, pdf_fname, config_fname='config',
+                 head_shape_fname='hs_file', rotation_x=0.,
+                 translation=(0.0, 0.02, 0.11), convert=True,
+                 rename_channels=True, sort_by_ch_name=True,
+                 ecg_ch='E31', eog_ch=('E63', 'E64'),
+                 verbose=None):
+
+        info, bti_info = _get_bti_info(
+            pdf_fname=pdf_fname, config_fname=config_fname,
+            head_shape_fname=head_shape_fname, rotation_x=rotation_x,
+            translation=translation, convert=convert, ecg_ch=ecg_ch,
+            rename_channels=rename_channels,
+            sort_by_ch_name=sort_by_ch_name, eog_ch=eog_ch)
+        logger.info('Reading raw data from %s...' % pdf_fname)
+        data = _read_data(bti_info)
+        assert len(data) == len(info['ch_names'])
+        self._projector_hashes = [None]
+        self.bti_ch_labels = [c['chan_label'] for c in bti_info['chs']]
+
+        # make Raw repr work if we have a BytesIO as input
+        if isinstance(pdf_fname, six.BytesIO):
+            pdf_fname = repr(pdf_fname)
+
+        super(RawBTi, self).__init__(
+            info, data, filenames=[pdf_fname], verbose=verbose)
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    self.first_samp, self.last_samp,
+                    float(self.first_samp) / info['sfreq'],
+                    float(self.last_samp) / info['sfreq']))
+        logger.info('Ready.')
+
+
+def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x,
+                  translation, convert, ecg_ch, eog_ch, rename_channels=True,
+                  sort_by_ch_name=True):
+
+    if pdf_fname is not None and not isinstance(pdf_fname, six.BytesIO):
+        if not op.isabs(pdf_fname):
+            pdf_fname = op.abspath(pdf_fname)
+
+    if not isinstance(config_fname, six.BytesIO):
+        if not op.isabs(config_fname):
+            config_fname = op.abspath(config_fname)
+
+        if not op.exists(config_fname):
+            raise ValueError('Could not find the config file %s. Please check'
+                             ' whether you are in the right directory '
+                             'or pass the full name' % config_fname)
+
+    if head_shape_fname is not None and not isinstance(
+            head_shape_fname, six.BytesIO):
+        orig_name = head_shape_fname
+        if not op.isfile(head_shape_fname):
+            head_shape_fname = op.join(op.dirname(pdf_fname),
+                                       head_shape_fname)
+
+        if not op.isfile(head_shape_fname):
+            raise ValueError('Could not find the head_shape file "%s". '
+                             'You should check whether you are in the '
+                             'right directory or pass the full file name.'
+                             % orig_name)
+
+    logger.info('Reading 4D PDF file %s...' % pdf_fname)
+    bti_info = _read_bti_header(
+        pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name)
+
+    dev_ctf_t = Transform('ctf_meg', 'ctf_head',
+                          _correct_trans(bti_info['bti_transform'][0]))
+
+    # kept for backward compatibility and external processing
+    rotation_x = 0. if rotation_x is None else rotation_x
+    if convert:
+        bti_dev_t = _get_bti_dev_t(rotation_x, translation)
+    else:
+        bti_dev_t = np.eye(4)
+    bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t)
+
+    use_hpi = False  # hard-coded for now; may become an option later
+    logger.info('Creating Neuromag info structure ...')
+    info = _empty_info()
+    if pdf_fname is not None:
+        date = bti_info['processes'][0]['timestamp']
+        info['meas_date'] = [date, 0]
+        info['sfreq'] = 1. / bti_info['sample_period']
+    else:  # some use cases only need partial info with channel geometry
+        info['meas_date'] = None
+        info['sfreq'] = None
+        bti_info['processes'] = list()
+    info['nchan'] = len(bti_info['chs'])
+
+    # browse processing info for filter specs.
+    # find better default
+    hp, lp = (0.0, info['sfreq'] * 0.4) if pdf_fname else (None, None)
+    for proc in bti_info['processes']:
+        if 'filt' in proc['process_type']:
+            for step in proc['processing_steps']:
+                if 'high_freq' in step:
+                    hp, lp = step['high_freq'], step['low_freq']
+                elif 'hp' in step['process_type']:
+                    hp = step['freq']
+                elif 'lp' in step['process_type']:
+                    lp = step['freq']
+
+    info['highpass'] = hp
+    info['lowpass'] = lp
+    info['acq_pars'] = info['acq_stim'] = info['hpi_subsystem'] = None
+    info['events'], info['hpi_results'], info['hpi_meas'] = [], [], []
+    chs = []
+
+    bti_ch_names = [ch['name'] for ch in bti_info['chs']]
+    neuromag_ch_names = _rename_channels(
+        bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch)
+    ch_mapping = zip(bti_ch_names, neuromag_ch_names)
+
+    logger.info('... Setting channel info structure.')
+    for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping):
+        chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
+        chan_info['ch_name'] = chan_neuromag if rename_channels else chan_4d
+        chan_info['logno'] = idx + BTI.FIFF_LOGNO
+        chan_info['scanno'] = idx + 1
+        chan_info['cal'] = bti_info['chs'][idx]['scale']
+
+        if any(chan_4d.startswith(k) for k in ('A', 'M', 'G')):
+            loc = bti_info['chs'][idx]['loc']
+            if loc is not None:
+                if convert:
+                    if idx == 0:
+                        logger.info('... putting coil transforms in Neuromag '
+                                    'coordinates')
+                    t = _loc_to_coil_trans(bti_info['chs'][idx]['loc'])
+                    t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
+                    loc = _coil_trans_to_loc(t)
+            chan_info['loc'] = loc
+
+        # BTi sensors are natively stored in 4D head coordinates, we believe
+        meg_frame = (FIFF.FIFFV_COORD_DEVICE if convert else
+                     FIFF.FIFFV_MNE_COORD_4D_HEAD)
+        eeg_frame = (FIFF.FIFFV_COORD_HEAD if convert else
+                     FIFF.FIFFV_MNE_COORD_4D_HEAD)
+        if chan_4d.startswith('A'):
+            chan_info['kind'] = FIFF.FIFFV_MEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+        elif chan_4d.startswith('M'):
+            chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+        elif chan_4d.startswith('G'):
+            chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T_M
+            if chan_4d in ('GxxA', 'GyyA'):
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
+            elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
+
+        elif chan_4d.startswith('EEG'):
+            chan_info['kind'] = FIFF.FIFFV_EEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
+            chan_info['coord_frame'] = eeg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_V
+
+        elif chan_4d == 'RESPONSE':
+            chan_info['kind'] = FIFF.FIFFV_RESP_CH
+        elif chan_4d == 'TRIGGER':
+            chan_info['kind'] = FIFF.FIFFV_STIM_CH
+        elif chan_4d.startswith('EOG'):
+            chan_info['kind'] = FIFF.FIFFV_EOG_CH
+        elif chan_4d == ecg_ch:
+            chan_info['kind'] = FIFF.FIFFV_ECG_CH
+        elif chan_4d.startswith('X'):
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+        elif chan_4d == 'UACurrent':
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+
+        chs.append(chan_info)
+
+    info['chs'] = chs
+    info['ch_names'] = neuromag_ch_names if rename_channels else bti_ch_names
+
+    if head_shape_fname:
+        logger.info('... Reading digitization points from %s' %
+                    head_shape_fname)
+        if convert:
+            logger.info('... putting digitization points in Neuromag '
+                        'coordinates')
+        info['dig'], ctf_head_t = _process_bti_headshape(
+            head_shape_fname, convert=convert, use_hpi=use_hpi)
+
+        logger.info('... Computing new device to head transform.')
+        # DEV->CTF_DEV->CTF_HEAD->HEAD
+        if convert:
+            t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
+                                   'meg', 'ctf_head')
+            dev_head_t = combine_transforms(t, ctf_head_t, 'meg', 'head')
+        else:
+            dev_head_t = Transform('meg', 'head', np.eye(4))
+        logger.info('Done.')
+    else:
+        logger.info('... no headshape file supplied, doing nothing.')
+        dev_head_t = Transform('meg', 'head', np.eye(4))
+        ctf_head_t = Transform('ctf_head', 'head', np.eye(4))
+    info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t,
+                ctf_head_t=ctf_head_t)
+
+    if False:  # XXX : reminds us to support this as we go
+        # include digital weights from reference channel
+        comps = info['comps'] = list()
+        weights = bti_info['weights']
+
+        def by_name(x):
+            return x[1]
+        chn = dict(ch_mapping)
+        columns = [chn[k] for k in weights['dsp_ch_names']]
+        rows = [chn[k] for k in weights['ch_names']]
+        col_order, col_names = zip(*sorted(enumerate(columns),
+                                           key=by_name))
+        row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
+        # for some reason the C code would invert the signs, so we follow.
+        mat = -weights['dsp_wts'][row_order, :][:, col_order]
+        comp_data = dict(data=mat,
+                         col_names=col_names,
+                         row_names=row_names,
+                         nrow=mat.shape[0], ncol=mat.shape[1])
+        comps += [dict(data=comp_data, ctfkind=101,
+                       #  no idea how to calibrate, just ones.
+                       rowcals=np.ones(mat.shape[0], dtype='>f4'),
+                       colcals=np.ones(mat.shape[1], dtype='>f4'),
+                       save_calibrated=0)]
+    else:
+        logger.warning('Warning. Currently direct inclusion of 4D weight '
+                       'tables is not supported. For critical use cases '
+                       'please consider the MNE command '
+                       '\'mne_create_comp_data\' to include weights as '
+                       'printed out by the 4D \'print_table\' routine.')
+
+    # check that the info is complete
+    info._check_consistency()
+    return info, bti_info
+
+
+ at verbose
+def read_raw_bti(pdf_fname, config_fname='config',
+                 head_shape_fname='hs_file', rotation_x=0.,
+                 translation=(0.0, 0.02, 0.11), convert=True,
+                 rename_channels=True, sort_by_ch_name=True,
+                 ecg_ch='E31', eog_ch=('E63', 'E64'), verbose=None):
+    """ Raw object from 4D Neuroimaging MagnesWH3600 data
+
+    .. note::
+        1. Currently direct inclusion of reference channel weights
+           is not supported. Please use ``mne_create_comp_data`` to include
+           the weights or use the low level functions from this module to
+           include them by yourself.
+        2. The informed guess for the 4D name is E31 for the ECG channel and
+           E63, E64 for the EOG channels. Please check and adjust if those
+           channels are present in your dataset but 'ECG 01' and 'EOG 01',
+           'EOG 02' don't appear in the channel names of the raw object.
+
+    Parameters
+    ----------
+    pdf_fname : str
+        Path to the processed data file (PDF).
+    config_fname : str
+        Path to system config file.
+    head_shape_fname : str | None
+        Path to the head shape file.
+    rotation_x : float
+        Degrees to tilt x-axis for sensor frame misalignment. Ignored
+        if convert is True.
+    translation : array-like, shape (3,)
+        The translation to place the origin of coordinate system
+        to the center of the head. Ignored if convert is True.
+    convert : bool
+        Convert to Neuromag coordinates or not.
+    rename_channels : bool
+        Whether to rename the original 4D channel labels to Neuromag-style
+        names. Defaults to True.
+    sort_by_ch_name : bool
+        Reorder channels according to channel label. 4D channels don't have
+        monotonically increasing numbers in their labels. Defaults to True.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as a regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be treated
+        as regular EEG channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawBTi
+        A Raw object containing BTI data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
+    return RawBTi(pdf_fname, config_fname=config_fname,
+                  head_shape_fname=head_shape_fname,
+                  rotation_x=rotation_x, translation=translation,
+                  convert=convert, rename_channels=rename_channels,
+                  sort_by_ch_name=sort_by_ch_name, ecg_ch=ecg_ch,
+                  eog_ch=eog_ch, verbose=verbose)
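+
+
+# Example usage -- a sketch only; 'c,rfDC', 'config' and 'hs_file' are the
+# conventional 4D file names and must be adapted to the actual dataset:
+#
+#     >>> from mne.io import read_raw_bti
+#     >>> raw = read_raw_bti('c,rfDC', config_fname='config',
+#     ...                    head_shape_fname='hs_file')  # doctest: +SKIP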
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/constants.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/constants.py
new file mode 100644
index 0000000..459f252
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/constants.py
@@ -0,0 +1,99 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+from ..constants import BunchConst
+
+BTI = BunchConst()
+
+BTI.ELEC_STATE_NOT_COLLECTED           = 0
+BTI.ELEC_STATE_COLLECTED               = 1
+BTI.ELEC_STATE_SKIPPED                 = 2
+BTI.ELEC_STATE_NOT_APPLICABLE          = 3
+#
+## Byte offsets and data sizes for different files
+#
+BTI.FILE_MASK                          = 2147483647
+BTI.FILE_CURPOS                        = 8
+BTI.FILE_END                           = -8
+
+BTI.FILE_HS_VERSION                    = 0
+BTI.FILE_HS_TIMESTAMP                  = 4
+BTI.FILE_HS_CHECKSUM                   = 8
+BTI.FILE_HS_N_DIGPOINTS                = 12
+BTI.FILE_HS_N_INDEXPOINTS              = 16
+
+BTI.FILE_PDF_H_ENTER                   = 1
+BTI.FILE_PDF_H_FTYPE                   = 5
+BTI.FILE_PDF_H_XLABEL                  = 16
+BTI.FILE_PDF_H_NEXT                    = 2
+BTI.FILE_PDF_H_EXIT                    = 20
+
+BTI.FILE_PDF_EPOCH_EXIT                = 28
+
+BTI.FILE_PDF_CH_NEXT                   = 6
+BTI.FILE_PDF_CH_LABELSIZE              = 16
+BTI.FILE_PDF_CH_YLABEL                 = 16
+BTI.FILE_PDF_CH_OFF_FLAG               = 16
+BTI.FILE_PDF_CH_EXIT                   = 12
+
+BTI.FILE_PDF_EVENT_NAME                = 16
+BTI.FILE_PDF_EVENT_EXIT                = 32
+
+BTI.FILE_PDF_PROCESS_BLOCKTYPE         = 20
+BTI.FILE_PDF_PROCESS_USER              = 32
+BTI.FILE_PDF_PROCESS_FNAME             = 256
+BTI.FILE_PDF_PROCESS_EXIT              = 32
+
+BTI.FILE_PDF_ASSOC_NEXT                = 32
+
+BTI.FILE_PDFED_NAME                    = 17
+BTI.FILE_PDFED_NEXT                    = 9
+BTI.FILE_PDFED_EXIT                    = 8
+
+#
+## General data constants
+#
+BTI.DATA_N_IDX_POINTS                   = 5
+BTI.DATA_ROT_N_ROW                      = 3
+BTI.DATA_ROT_N_COL                      = 3
+BTI.DATA_XFM_N_COL                      = 4
+BTI.DATA_XFM_N_ROW                      = 4
+BTI.FIFF_LOGNO                          = 111
+#
+## Channel Types
+#
+BTI.CHTYPE_MEG                          = 1
+BTI.CHTYPE_EEG                          = 2
+BTI.CHTYPE_REFERENCE                    = 3
+BTI.CHTYPE_EXTERNAL                     = 4
+BTI.CHTYPE_TRIGGER                      = 5
+BTI.CHTYPE_UTILITY                      = 6
+BTI.CHTYPE_DERIVED                      = 7
+BTI.CHTYPE_SHORTED                      = 8
+#
+## Processes
+#
+BTI.PROC_DEFAULTS                      = 'BTi_defaults'
+BTI.PROC_FILTER                        = 'b_filt_hp,b_filt_lp,b_filt_notch'
+BTI.PROC_BPFILTER                      = 'b_filt_b_pass,b_filt_b_reject'
+#
+## User blocks
+#
+BTI.UB_B_MAG_INFO                      = 'B_Mag_Info'
+BTI.UB_B_COH_POINTS                    = 'B_COH_Points'
+BTI.UB_B_CCP_XFM_BLOCK                 = 'b_ccp_xfm_block'
+BTI.UB_B_EEG_LOCS                      = 'b_eeg_elec_locs'
+BTI.UB_B_WHC_CHAN_MAP_VER              = 'B_WHChanMapVer'
+BTI.UB_B_WHC_CHAN_MAP                  = 'B_WHChanMap'
+BTI.UB_B_WHS_SUBSYS_VER                = 'B_WHSubsysVer'
+BTI.UB_B_WHS_SUBSYS                    = 'B_WHSubsys'
+BTI.UB_B_CH_LABELS                     = 'B_ch_labels'
+BTI.UB_B_CALIBRATION                   = 'B_Calibration'
+BTI.UB_B_SYS_CONFIG_TIME               = 'B_SysConfigTime'
+BTI.UB_B_DELTA_ENABLED                 = 'B_DELTA_ENABLED'
+BTI.UB_B_E_TABLE_USED                  = 'B_E_table_used'
+BTI.UB_B_E_TABLE                       = 'B_E_TABLE'
+BTI.UB_B_WEIGHTS_USED                  = 'B_weights_used'
+BTI.UB_B_TRIG_MASK                     = 'B_trig_mask'
+BTI.UB_B_WEIGHT_TABLE                  = 'BWT_'
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/read.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/read.py
new file mode 100644
index 0000000..ebc78ce
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/read.py
@@ -0,0 +1,120 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: simplified BSD-3
+
+import numpy as np
+from ...externals.six import b
+
+
+def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
+    """ Aux Function """
+    dtype = np.dtype(dtype)
+
+    string = fid.read(int(dtype.itemsize * rows * cols))
+    out = np.fromstring(string, dtype=dtype).reshape(
+        rows, cols).astype(out_dtype)
+    return out
+
+
+def _unpack_simple(fid, dtype, out_dtype):
+    """ Aux Function """
+    dtype = np.dtype(dtype)
+    string = fid.read(dtype.itemsize)
+    out = np.fromstring(string, dtype=dtype).astype(out_dtype)
+
+    if len(out) > 0:
+        out = out[0]
+    return out
+
+
+def read_str(fid, count=1):
+    """ Read string """
+    dtype = np.dtype('>S%i' % count)
+    string = fid.read(dtype.itemsize)
+    data = np.fromstring(string, dtype=dtype)[0]
+    bytestr = b('').join([data[0:data.index(b('\x00')) if
+                          b('\x00') in data else count]])
+
+    return str(bytestr.decode('ascii'))  # Return native str type for Py2/3
+
+
+def read_char(fid, count=1):
+    " Read character from bti file """
+    return _unpack_simple(fid, '>S%s' % count, 'S')
+
+
+def read_bool(fid):
+    """ Read bool value from bti file """
+    return _unpack_simple(fid, '>?', np.bool)
+
+
+def read_uint8(fid):
+    """ Read unsigned 8bit integer from bti file """
+    return _unpack_simple(fid, '>u1', np.uint8)
+
+
+def read_int8(fid):
+    """ Read 8bit integer from bti file """
+    return _unpack_simple(fid, '>i1', np.int8)
+
+
+def read_uint16(fid):
+    """ Read unsigned 16bit integer from bti file """
+    return _unpack_simple(fid, '>u2', np.uint16)
+
+
+def read_int16(fid):
+    """ Read 16bit integer from bti file """
+    return _unpack_simple(fid, '>i2', np.int16)
+
+
+def read_uint32(fid):
+    """ Read unsigned 32bit integer from bti file """
+    return _unpack_simple(fid, '>u4', np.uint32)
+
+
+def read_int32(fid):
+    """ Read 32bit integer from bti file """
+    return _unpack_simple(fid, '>i4', np.int32)
+
+
+def read_uint64(fid):
+    """ Read unsigned 64bit integer from bti file """
+    return _unpack_simple(fid, '>u8', np.uint64)
+
+
+def read_int64(fid):
+    """ Read 64bit integer from bti file """
+    return _unpack_simple(fid, '>i8', np.int64)
+
+
+def read_float(fid):
+    """ Read 32bit float from bti file """
+    return _unpack_simple(fid, '>f4', np.float32)
+
+
+def read_double(fid):
+    """ Read 64bit float from bti file """
+    return _unpack_simple(fid, '>f8', np.float64)
+
+
+def read_int16_matrix(fid, rows, cols):
+    """ Read 16bit integer matrix from bti file """
+    return _unpack_matrix(fid, rows, cols, dtype='>i2',
+                          out_dtype=np.int16)
+
+
+def read_float_matrix(fid, rows, cols):
+    """ Read 32bit float matrix from bti file """
+    return _unpack_matrix(fid, rows, cols, dtype='>f4',
+                          out_dtype=np.float32)
+
+
+def read_double_matrix(fid, rows, cols):
+    """ Read 64bit float matrix from bti file """
+    return _unpack_matrix(fid, rows, cols, dtype='>f8',
+                          out_dtype=np.float64)
+
+
+def read_transform(fid):
+    """ Read 64bit float matrix transform from bti file """
+    return read_double_matrix(fid, rows=4, cols=4)
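+
+
+# A quick self-contained sanity check of the helpers above; ``BytesIO``
+# stands in for a real BTi file handle (illustrative only):
+#
+#     >>> from io import BytesIO
+#     >>> buf = (np.array([1.5], '>f4').tostring() +
+#     ...        np.array([7], '>i2').tostring())
+#     >>> fid = BytesIO(buf)
+#     >>> float(read_float(fid)), int(read_int16(fid))
+#     (1.5, 7)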
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/tests/test_bti.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/tests/test_bti.py
new file mode 100644
index 0000000..5419d6c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/bti/tests/test_bti.py
@@ -0,0 +1,258 @@
+from __future__ import print_function
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+from functools import reduce
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
+from nose.tools import assert_true, assert_raises, assert_equal
+
+from mne.io import Raw, read_raw_bti
+from mne.io.bti.bti import (_read_config, _process_bti_headshape,
+                            _read_data, _read_bti_header, _get_bti_dev_t,
+                            _correct_trans, _get_bti_info)
+from mne.io.pick import pick_info
+from mne.io.constants import FIFF
+from mne import concatenate_raws, pick_types
+from mne.utils import run_tests_if_main
+from mne.transforms import Transform, combine_transforms, invert_transform
+from mne.externals import six
+from mne.fixes import partial
+
+
+base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
+
+archs = 'linux', 'solaris'
+pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
+config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
+hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
+exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
+                   for a in archs]
+tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
+
+# the 4D exporter doesn't export all channels, so we confine our comparison
+NCH = 248
+
+
+def test_read_config():
+    """ Test read bti config file """
+    for config in config_fnames:
+        cfg = _read_config(config)
+        assert_true(all('unknown' not in block.lower() and block != ''
+                        for block in cfg['user_blocks']))
+
+
+def test_read_pdf():
+    """ Test read bti PDF file """
+    for pdf, config in zip(pdf_fnames, config_fnames):
+        info = _read_bti_header(pdf, config)
+        data = _read_data(info)
+        shape = (info['total_chans'], info['total_slices'])
+        assert_true(data.shape == shape)
+
+
+def test_crop_append():
+    """ Test crop and append raw """
+    raw = read_raw_bti(pdf_fnames[0], config_fnames[0], hs_fnames[0])
+    raw.load_data()  # currently does nothing
+    y, t = raw[:]
+    t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
+    mask = (t0 <= t) * (t <= t1)
+    raw_ = raw.crop(t0, t1)
+    y_, _ = raw_[:]
+    assert_true(y_.shape[1] == mask.sum())
+    assert_true(y_.shape[0] == y.shape[0])
+
+    raw2 = raw.copy()
+    assert_raises(RuntimeError, raw.append, raw2, preload=False)
+    raw.append(raw2)
+    assert_allclose(np.tile(raw2[:, :][0], (1, 2)), raw[:, :][0])
+
+
+def test_transforms():
+    """ Test transformations """
+    bti_trans = (0.0, 0.02, 0.11)
+    bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
+    for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw = read_raw_bti(pdf, config, hs)
+        dev_ctf_t = raw.info['dev_ctf_t']
+        dev_head_t_old = raw.info['dev_head_t']
+        ctf_head_t = raw.info['ctf_head_t']
+
+        # 1) get BTI->Neuromag
+        bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
+
+        # 2) get Neuromag->BTI head
+        t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
+                               'meg', 'ctf_head')
+        # 3) get Neuromag->head
+        dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
+
+        assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
+
+
+def test_raw():
+    """ Test bti conversion to Raw object """
+    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
+                                         exported_fnames):
+        # rx = 2 if 'linux' in pdf else 0
+        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
+        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
+        if op.exists(tmp_raw_fname):
+            os.remove(tmp_raw_fname)
+        ex = Raw(exported, preload=True)
+        ra = read_raw_bti(pdf, config, hs)
+        assert_true('RawBTi' in repr(ra))
+        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
+        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
+                                  ra.info['dev_head_t']['trans'], 7)
+        dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
+                      for r_ in (ra, ex)]
+        assert_array_almost_equal(dig1, dig2, 18)
+        coil1, coil2 = [np.concatenate([d['loc'].flatten()
+                        for d in r_.info['chs'][:NCH]])
+                        for r_ in (ra, ex)]
+        assert_array_almost_equal(coil1, coil2, 7)
+
+        loc1, loc2 = [np.concatenate([d['loc'].flatten()
+                      for d in r_.info['chs'][:NCH]])
+                      for r_ in (ra, ex)]
+        assert_allclose(loc1, loc2)
+
+        assert_array_equal(ra._data[:NCH], ex._data[:NCH])
+        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
+
+        # check our transforms
+        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
+            if ex.info[key] is None:
+                pass
+            else:
+                assert_true(ra.info[key] is not None)
+                for ent in ('to', 'from', 'trans'):
+                    assert_allclose(ex.info[key][ent],
+                                    ra.info[key][ent])
+
+        # Make sure concatenation works
+        raw_concat = concatenate_raws([ra.copy(), ra])
+        assert_equal(raw_concat.n_times, 2 * ra.n_times)
+
+        ra.save(tmp_raw_fname)
+        re = Raw(tmp_raw_fname)
+        print(re)
+        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
+            assert_true(isinstance(re.info[key], dict))
+            this_t = re.info[key]['trans']
+            assert_equal(this_t.shape, (4, 4))
+            # check that the matrix is not the identity
+            assert_true(not np.allclose(this_t, np.eye(4)))
+        os.remove(tmp_raw_fname)
+
+
+def test_info_no_rename_no_reorder():
+    """ Test private renaming and reordering option """
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        info, bti_info = _get_bti_info(
+            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
+            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
+            ecg_ch='E31', eog_ch=('E63', 'E64'),
+            rename_channels=False, sort_by_ch_name=False)
+        assert_equal(info['ch_names'],
+                     [ch['ch_name'] for ch in info['chs']])
+        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
+                     ['A22', 'A2', 'A104', 'A241', 'A138'])
+        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
+                     ['A133', 'A158', 'A44', 'A134', 'A216'])
+
+
+def test_no_conversion():
+    """ Test bti no-conversion option """
+
+    get_info = partial(
+        _get_bti_info,
+        pdf_fname=None,  # test skipping no pdf
+        rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
+        ecg_ch='E31', eog_ch=('E63', 'E64'),
+        rename_channels=False, sort_by_ch_name=False)
+
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw_info, _ = get_info(
+            config_fname=config, head_shape_fname=hs, convert=False)
+        raw_info_con = read_raw_bti(
+            pdf_fname=pdf,
+            config_fname=config, head_shape_fname=hs, convert=True).info
+
+        pick_info(raw_info_con,
+                  pick_types(raw_info_con, meg=True, ref_meg=True),
+                  copy=False)
+        pick_info(raw_info,
+                  pick_types(raw_info, meg=True, ref_meg=True), copy=False)
+        bti_info = _read_bti_header(pdf, config)
+        dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
+        assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
+        assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
+        assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
+        dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)
+        assert_array_equal(t['trans'], np.eye(4))
+
+        for ii, (old, new, con) in enumerate(zip(
+                dig, raw_info['dig'], raw_info_con['dig'])):
+            assert_equal(old['ident'], new['ident'])
+            assert_array_equal(old['r'], new['r'])
+            assert_true(not np.allclose(old['r'], con['r']))
+
+            if ii > 10:
+                break
+
+        ch_map = dict((ch['chan_label'],
+                       ch['loc']) for ch in bti_info['chs'])
+
+        for ii, ch_label in enumerate(raw_info['ch_names']):
+            if not ch_label.startswith('A'):
+                continue
+            t1 = ch_map[ch_label]  # correction already performed in bti_info
+            t2 = raw_info['chs'][ii]['loc']
+            t3 = raw_info_con['chs'][ii]['loc']
+            assert_allclose(t1, t2, atol=1e-15)
+            assert_true(not np.allclose(t1, t3))
+            idx_a = raw_info_con['ch_names'].index('MEG 001')
+            idx_b = raw_info['ch_names'].index('A22')
+            assert_equal(
+                raw_info_con['chs'][idx_a]['coord_frame'],
+                FIFF.FIFFV_COORD_DEVICE)
+            assert_equal(
+                raw_info['chs'][idx_b]['coord_frame'],
+                FIFF.FIFFV_MNE_COORD_4D_HEAD)
+
+
+def test_bytes_io():
+    """ Test bti bytes-io API """
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw = read_raw_bti(pdf, config, hs, convert=True)
+
+        with open(pdf, 'rb') as fid:
+            pdf = six.BytesIO(fid.read())
+        with open(config, 'rb') as fid:
+            config = six.BytesIO(fid.read())
+        with open(hs, 'rb') as fid:
+            hs = six.BytesIO(fid.read())
+        raw2 = read_raw_bti(pdf, config, hs, convert=True)
+        repr(raw2)
+        assert_array_equal(raw._data, raw2._data)
+
+
+def test_setup_headshape():
+    """ Test reading bti headshape """
+    for hs in hs_fnames:
+        dig, t = _process_bti_headshape(hs)
+        expected = set(['kind', 'ident', 'r'])
+        found = set(reduce(lambda x, y: list(x) + list(y),
+                           [d.keys() for d in dig]))
+        assert_true(not expected - found)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/compensator.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/compensator.py
new file mode 100644
index 0000000..91b38cc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/compensator.py
@@ -0,0 +1,160 @@
+import numpy as np
+
+from .constants import FIFF
+
+
+def get_current_comp(info):
+    """Get the current compensation in effect in the data
+    """
+    comp = None
+    first_comp = -1
+    for k, chan in enumerate(info['chs']):
+        if chan['kind'] == FIFF.FIFFV_MEG_CH:
+            comp = int(chan['coil_type']) >> 16
+            if first_comp < 0:
+                first_comp = comp
+            elif comp != first_comp:
+                raise ValueError('Compensation is not set equally on '
+                                 'all MEG channels')
+    return comp
+
+
+def set_current_comp(info, comp):
+    """Set the current compensation in effect in the data
+    """
+    comp_now = get_current_comp(info)
+    for k, chan in enumerate(info['chs']):
+        if chan['kind'] == FIFF.FIFFV_MEG_CH:
+            rem = chan['coil_type'] - (comp_now << 16)
+            chan['coil_type'] = int(rem + (comp << 16))
+
+
+def _make_compensator(info, kind):
+    """Auxiliary function for make_compensator
+    """
+    for k in range(len(info['comps'])):
+        if info['comps'][k]['kind'] == kind:
+            this_data = info['comps'][k]['data']
+
+            #   Create the preselector
+            presel = np.zeros((this_data['ncol'], info['nchan']))
+            for col, col_name in enumerate(this_data['col_names']):
+                ind = [k for k, ch in enumerate(info['ch_names'])
+                       if ch == col_name]
+                if len(ind) == 0:
+                    raise ValueError('Channel %s is not available in '
+                                     'data' % col_name)
+                elif len(ind) > 1:
+                    raise ValueError('Ambiguous channel %s' % col_name)
+                presel[col, ind[0]] = 1.0
+
+            #   Create the postselector
+            postsel = np.zeros((info['nchan'], this_data['nrow']))
+            for c, ch_name in enumerate(info['ch_names']):
+                ind = [k for k, ch in enumerate(this_data['row_names'])
+                       if ch == ch_name]
+                if len(ind) > 1:
+                    raise ValueError('Ambiguous channel %s' % ch_name)
+                elif len(ind) == 1:
+                    postsel[c, ind[0]] = 1.0
+            this_comp = np.dot(postsel, np.dot(this_data['data'], presel))
+            return this_comp
+
+    raise ValueError('Desired compensation matrix (kind = %d) not'
+                     ' found' % kind)
+
+
+def make_compensator(info, from_, to, exclude_comp_chs=False):
+    """Returns compensation matrix eg. for CTF system.
+
+    Create a compensation matrix to bring the data from one compensation
+    state to another.
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    from_ : int
+        Compensation in the input data.
+    to : int
+        Desired compensation in the output.
+    exclude_comp_chs : bool
+        Exclude compensation channels from the output.
+
+    Returns
+    -------
+    comp : array | None
+        The compensation matrix. None is returned if no compensation
+        is needed (from_ == to).
+    """
+    if from_ == to:
+        return None
+
+    if from_ == 0:
+        C1 = np.zeros((info['nchan'], info['nchan']))
+    else:
+        C1 = _make_compensator(info, from_)
+
+    if to == 0:
+        C2 = np.zeros((info['nchan'], info['nchan']))
+    else:
+        C2 = _make_compensator(info, to)
+
+    #   s_orig = s_from + C1*s_from = (I + C1)*s_from
+    #   s_to   = s_orig - C2*s_orig = (I - C2)*s_orig
+    #   s_to   = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from
+    comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1)
+
+    if exclude_comp_chs:
+        pick = [k for k, c in enumerate(info['chs'])
+                if c['kind'] != FIFF.FIFFV_REF_MEG_CH]
+
+        if len(pick) == 0:
+            raise ValueError('Nothing remains after excluding the '
+                             'compensation channels')
+
+        comp = comp[pick, :]
+
+    return comp
+
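+# A hedged usage sketch (raw_fname is a placeholder path; applying the matrix
+# to a (n_channels, n_times) array brings the data from grade 0 to grade 3):
+#
+#     >>> raw = mne.io.Raw(raw_fname)                  # doctest: +SKIP
+#     >>> comp = make_compensator(raw.info, 0, 3)      # doctest: +SKIP
+#     >>> data_comp = np.dot(comp, raw[:][0])          # doctest: +SKIP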
+
+# @verbose
+# def compensate_to(data, to, verbose=None):
+#     """
+#     %
+#     % [newdata] = mne_compensate_to(data,to)
+#     %
+#     % Apply compensation to the data as desired
+#     %
+#     """
+#
+#     newdata = data.copy()
+#     now = get_current_comp(newdata['info'])
+#
+#     #   Are we there already?
+#     if now == to:
+#         logger.info('Data are already compensated as desired')
+#
+#     #   Make the compensator and apply it to all data sets
+#     comp = make_compensator(newdata['info'], now, to)
+#     for k in range(len(newdata['evoked'])):
+#         newdata['evoked'][k]['epochs'] = np.dot(comp,
+#                                               newdata['evoked'][k]['epochs'])
+#
+#     #  Update the compensation info in the channel descriptors
+#     newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to)
+#     return newdata
+
+
+# def set_current_comp(chs, value):
+#     """Set the current compensation value in the channel info structures
+#     """
+#     new_chs = chs
+#
+#     lower_half = int('FFFF', 16) # hex2dec('FFFF')
+#     for k in range(len(chs)):
+#         if chs[k]['kind'] == FIFF.FIFFV_MEG_CH:
+#             coil_type = float(chs[k]['coil_type']) & lower_half
+#             new_chs[k]['coil_type'] = int(coil_type | (value << 16))
+#
+#     return new_chs
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/constants.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/constants.py
new file mode 100644
index 0000000..9db2ae8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/constants.py
@@ -0,0 +1,797 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+
+class Bunch(dict):
+    """ Container object for datasets: dictionnary-like object that
+        exposes its keys as attributes.
+    """
+
+    def __init__(self, **kwargs):
+        dict.__init__(self, kwargs)
+        self.__dict__ = self
+
+
+class BunchConst(Bunch):
+    """Class to prevent us from re-defining constants (DRY)"""
+    def __setattr__(self, attr, val):
+        if attr != '__dict__' and hasattr(self, attr):
+            raise AttributeError('Attribute "%s" already set' % attr)
+        super(BunchConst, self).__setattr__(attr, val)
+
+FIFF = BunchConst()
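+# BunchConst gives attribute-style access with write-once semantics; a tiny
+# illustrative sketch (names and values are made up):
+#
+#     >>> const = BunchConst()
+#     >>> const.FOO = 1
+#     >>> const.FOO
+#     1
+#     >>> const.FOO = 2      # raises AttributeError: Attribute "FOO" already set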
+#
+# Blocks
+#
+FIFF.FIFFB_ROOT               = 999
+FIFF.FIFFB_MEAS               = 100
+FIFF.FIFFB_MEAS_INFO          = 101
+FIFF.FIFFB_RAW_DATA           = 102
+FIFF.FIFFB_PROCESSED_DATA     = 103
+FIFF.FIFFB_EVOKED             = 104
+FIFF.FIFFB_ASPECT             = 105
+FIFF.FIFFB_SUBJECT            = 106
+FIFF.FIFFB_ISOTRAK            = 107
+FIFF.FIFFB_HPI_MEAS           = 108
+FIFF.FIFFB_HPI_RESULT         = 109
+FIFF.FIFFB_HPI_COIL           = 110
+FIFF.FIFFB_PROJECT            = 111
+FIFF.FIFFB_CONTINUOUS_DATA    = 112
+FIFF.FIFFB_VOID               = 114
+FIFF.FIFFB_EVENTS             = 115
+FIFF.FIFFB_INDEX              = 116
+FIFF.FIFFB_DACQ_PARS          = 117
+FIFF.FIFFB_REF                = 118
+FIFF.FIFFB_SMSH_RAW_DATA      = 119
+FIFF.FIFFB_SMSH_ASPECT        = 120
+FIFF.FIFFB_HPI_SUBSYSTEM      = 121
+FIFF.FIFFB_EPOCHS             = 122
+FIFF.FIFFB_ICA                = 123
+
+FIFF.FIFFB_SPHERE             = 300   # Concentric sphere model related
+FIFF.FIFFB_BEM                = 310   # Boundary-element method
+FIFF.FIFFB_BEM_SURF           = 311   # Boundary-element method surfaces
+FIFF.FIFFB_CONDUCTOR_MODEL    = 312   # One conductor model definition
+FIFF.FIFFB_PROJ               = 313
+FIFF.FIFFB_PROJ_ITEM          = 314
+FIFF.FIFFB_MRI                = 200
+FIFF.FIFFB_MRI_SET            = 201
+FIFF.FIFFB_MRI_SLICE          = 202
+FIFF.FIFFB_MRI_SCENERY        = 203     # These are for writing unrelated 'slices'
+FIFF.FIFFB_MRI_SCENE          = 204     # Which are actually 3D scenes...
+FIFF.FIFFB_MRI_SEG            = 205     # MRI segmentation data
+FIFF.FIFFB_MRI_SEG_REGION     = 206     # One MRI segmentation region
+FIFF.FIFFB_PROCESSING_HISTORY = 900
+FIFF.FIFFB_PROCESSING_RECORD  = 901
+
+FIFF.FIFFB_CHANNEL_DECOUPLER  = 501
+FIFF.FIFFB_SSS_INFO           = 502
+FIFF.FIFFB_SSS_CAL            = 503
+FIFF.FIFFB_SSS_ST_INFO        = 504
+FIFF.FIFFB_SSS_BASES          = 505
+FIFF.FIFFB_SMARTSHIELD        = 510
+#
+# Of general interest
+#
+FIFF.FIFF_FILE_ID         = 100
+FIFF.FIFF_DIR_POINTER     = 101
+FIFF.FIFF_BLOCK_ID        = 103
+FIFF.FIFF_BLOCK_START     = 104
+FIFF.FIFF_BLOCK_END       = 105
+FIFF.FIFF_FREE_LIST       = 106
+FIFF.FIFF_FREE_BLOCK      = 107
+FIFF.FIFF_NOP             = 108
+FIFF.FIFF_PARENT_FILE_ID  = 109
+FIFF.FIFF_PARENT_BLOCK_ID = 110
+FIFF.FIFF_BLOCK_NAME      = 111
+FIFF.FIFF_BLOCK_VERSION   = 112
+FIFF.FIFF_CREATOR         = 113  # Program that created the file (string)
+FIFF.FIFF_MODIFIER        = 114  # Program that modified the file (string)
+FIFF.FIFF_REF_ROLE        = 115
+FIFF.FIFF_REF_FILE_ID     = 116
+FIFF.FIFF_REF_FILE_NUM    = 117
+FIFF.FIFF_REF_FILE_NAME   = 118
+#
+#  Megacq saves the parameters in these tags
+#
+FIFF.FIFF_DACQ_PARS      = 150
+FIFF.FIFF_DACQ_STIM      = 151
+
+FIFF.FIFF_NCHAN       = 200
+FIFF.FIFF_SFREQ       = 201
+FIFF.FIFF_DATA_PACK   = 202
+FIFF.FIFF_CH_INFO     = 203
+FIFF.FIFF_MEAS_DATE   = 204
+FIFF.FIFF_SUBJECT     = 205
+FIFF.FIFF_COMMENT     = 206
+FIFF.FIFF_NAVE        = 207
+FIFF.FIFF_FIRST_SAMPLE = 208          # The first sample of an epoch
+FIFF.FIFF_LAST_SAMPLE  = 209          # The last sample of an epoch
+FIFF.FIFF_ASPECT_KIND  = 210
+FIFF.FIFF_REF_EVENT    = 211
+FIFF.FIFF_EXPERIMENTER = 212
+FIFF.FIFF_DIG_POINT   = 213
+FIFF.FIFF_CH_POS      = 214
+FIFF.FIFF_HPI_SLOPES  = 215
+FIFF.FIFF_HPI_NCOIL   = 216
+FIFF.FIFF_REQ_EVENT   = 217
+FIFF.FIFF_REQ_LIMIT   = 218
+FIFF.FIFF_LOWPASS     = 219
+FIFF.FIFF_BAD_CHS       = 220
+FIFF.FIFF_ARTEF_REMOVAL = 221
+FIFF.FIFF_COORD_TRANS = 222
+FIFF.FIFF_HIGHPASS    = 223
+FIFF.FIFF_CH_CALS        = 22     # This will not occur in new files
+FIFF.FIFF_HPI_BAD_CHS    = 225    # List of channels considered to be bad in hpi
+FIFF.FIFF_HPI_CORR_COEFF = 226    # Hpi curve fit correlations
+FIFF.FIFF_EVENT_COMMENT  = 227    # Comment about the events used in averaging
+FIFF.FIFF_NO_SAMPLES     = 228    # Number of samples in an epoch
+FIFF.FIFF_FIRST_TIME     = 229    # Time scale minimum
+
+FIFF.FIFF_SUBAVE_SIZE    = 230    # Size of a subaverage
+FIFF.FIFF_SUBAVE_FIRST   = 231    # The first epoch number contained in the subaverage
+FIFF.FIFF_NAME           = 233          # Intended to be a short name.
+FIFF.FIFF_DESCRIPTION    = FIFF.FIFF_COMMENT # (Textual) Description of an object
+FIFF.FIFF_DIG_STRING     = 234          # String of digitized points
+FIFF.FIFF_LINE_FREQ      = 235    # Line frequency
+FIFF.FIFF_CUSTOM_REF     = 236    # Whether a custom reference was applied to the data (NB: overlaps with HPI const #)
+#
+# HPI fitting program tags
+#
+FIFF.FIFF_HPI_COIL_FREQ          = 236   # HPI coil excitation frequency
+FIFF.FIFF_HPI_COIL_MOMENTS       = 240   # Estimated moment vectors for the HPI coil magnetic dipoles
+FIFF.FIFF_HPI_FIT_GOODNESS       = 241   # Three floats indicating the goodness of fit
+FIFF.FIFF_HPI_FIT_ACCEPT         = 242   # Bitmask indicating acceptance (see below)
+FIFF.FIFF_HPI_FIT_GOOD_LIMIT     = 243   # Limit for the goodness-of-fit
+FIFF.FIFF_HPI_FIT_DIST_LIMIT     = 244   # Limit for the coil distance difference
+FIFF.FIFF_HPI_COIL_NO            = 245   # Coil number listed by HPI measurement
+FIFF.FIFF_HPI_COILS_USED         = 246   # List of coils finally used when the transformation was computed
+FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247   # Which Isotrak digitization point corresponds to each of the coils energized
+#
+# Pointers
+#
+FIFF.FIFFV_NEXT_SEQ    = 0
+FIFF.FIFFV_NEXT_NONE   = -1
+#
+# Channel types
+#
+FIFF.FIFFV_MEG_CH     =   1
+FIFF.FIFFV_REF_MEG_CH = 301
+FIFF.FIFFV_EEG_CH     =   2
+FIFF.FIFFV_MCG_CH     = 201
+FIFF.FIFFV_STIM_CH    =   3
+FIFF.FIFFV_EOG_CH     = 202
+FIFF.FIFFV_EMG_CH     = 302
+FIFF.FIFFV_ECG_CH     = 402
+FIFF.FIFFV_MISC_CH    = 502
+FIFF.FIFFV_RESP_CH    = 602  # Respiration monitoring
+FIFF.FIFFV_SEEG_CH    = 702  # stereotactic EEG
+FIFF.FIFFV_SYST_CH    = 900  # some system status information (on Triux systems only)
+FIFF.FIFFV_IAS_CH     = 910  # Internal Active Shielding data (maybe on Triux only)
+FIFF.FIFFV_EXCI_CH    = 920  # flux excitation channel used to be a stimulus channel
+
+#
+# Quaternion channels for head position monitoring
+#
+FIFF.FIFFV_QUAT_0   = 700   # Quaternion param q0 obsolete for unit quaternion
+FIFF.FIFFV_QUAT_1   = 701   # Quaternion param q1 rotation
+FIFF.FIFFV_QUAT_2   = 702   # Quaternion param q2 rotation
+FIFF.FIFFV_QUAT_3   = 703   # Quaternion param q3 rotation
+FIFF.FIFFV_QUAT_4   = 704   # Quaternion param q4 translation
+FIFF.FIFFV_QUAT_5   = 705   # Quaternion param q5 translation
+FIFF.FIFFV_QUAT_6   = 706   # Quaternion param q6 translation
+FIFF.FIFFV_HPI_G    = 707   # Goodness-of-fit in continuous hpi
+FIFF.FIFFV_HPI_ERR  = 708   # Estimation error in continuous hpi
+FIFF.FIFFV_HPI_MOV  = 709   # Estimated head movement speed in continuous hpi
+#
+# Coordinate frames
+#
+FIFF.FIFFV_COORD_UNKNOWN        = 0
+FIFF.FIFFV_COORD_DEVICE         = 1
+FIFF.FIFFV_COORD_ISOTRAK        = 2
+FIFF.FIFFV_COORD_HPI            = 3
+FIFF.FIFFV_COORD_HEAD           = 4
+FIFF.FIFFV_COORD_MRI            = 5
+FIFF.FIFFV_COORD_MRI_SLICE      = 6
+FIFF.FIFFV_COORD_MRI_DISPLAY    = 7
+FIFF.FIFFV_COORD_DICOM_DEVICE   = 8
+FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
+#
+# Needed for raw and evoked-response data
+#
+FIFF.FIFF_DATA_BUFFER    = 300    # Buffer containing measurement data
+FIFF.FIFF_DATA_SKIP      = 301    # Data skip in buffers
+FIFF.FIFF_EPOCH          = 302    # Buffer containing one epoch and channel
+FIFF.FIFF_DATA_SKIP_SAMP = 303    # Data skip in samples
+FIFF.FIFF_MNE_BASELINE_MIN   = 304    # Time of baseline beginning
+FIFF.FIFF_MNE_BASELINE_MAX   = 305    # Time of baseline end
+#
+# Info on subject
+#
+FIFF.FIFF_SUBJ_ID           = 400  # Subject ID
+FIFF.FIFF_SUBJ_FIRST_NAME   = 401  # First name of the subject
+FIFF.FIFF_SUBJ_MIDDLE_NAME  = 402  # Middle name of the subject
+FIFF.FIFF_SUBJ_LAST_NAME    = 403  # Last name of the subject
+FIFF.FIFF_SUBJ_BIRTH_DAY    = 404  # Birthday of the subject
+FIFF.FIFF_SUBJ_SEX          = 405  # Sex of the subject
+FIFF.FIFF_SUBJ_HAND         = 406  # Handedness of the subject
+FIFF.FIFF_SUBJ_WEIGHT       = 407  # Weight of the subject
+FIFF.FIFF_SUBJ_HEIGHT       = 408  # Height of the subject
+FIFF.FIFF_SUBJ_COMMENT      = 409  # Comment about the subject
+FIFF.FIFF_SUBJ_HIS_ID       = 410  # ID used in the Hospital Information System
+
+FIFF.FIFF_PROJ_ID           = 500
+FIFF.FIFF_PROJ_NAME         = 501
+FIFF.FIFF_PROJ_AIM          = 502
+FIFF.FIFF_PROJ_PERSONS      = 503
+FIFF.FIFF_PROJ_COMMENT      = 504
+
+FIFF.FIFF_EVENT_CHANNELS    = 600  # Event channel numbers
+FIFF.FIFF_EVENT_LIST        = 601  # List of events (integers: <sample before after>)
+FIFF.FIFF_EVENT_CHANNEL     = 602  # Event channel
+FIFF.FIFF_EVENT_BITS        = 603  # Event bits array
+
+#
+# Tags used in saving SQUID characteristics etc.
+#
+FIFF.FIFF_SQUID_BIAS        = 701
+FIFF.FIFF_SQUID_OFFSET      = 702
+FIFF.FIFF_SQUID_GATE        = 703
+#
+# Aspect values used to save characteristic curves of SQUIDs. (mjk)
+#
+FIFF.FIFFV_ASPECT_IFII_LOW  = 1100
+FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
+FIFF.FIFFV_ASPECT_GATE      = 1102
+
+#
+# Values for file references
+#
+FIFF.FIFFV_ROLE_PREV_FILE = 1
+FIFF.FIFFV_ROLE_NEXT_FILE = 2
+
+#
+# References
+#
+FIFF.FIFF_REF_PATH           = 1101
+
+#
+# Different aspects of data
+#
+FIFF.FIFFV_ASPECT_AVERAGE       = 100  # Normal average of epochs
+FIFF.FIFFV_ASPECT_STD_ERR       = 101  # Std. error of mean
+FIFF.FIFFV_ASPECT_SINGLE        = 102  # Single epoch cut out from the continuous data
+FIFF.FIFFV_ASPECT_SUBAVERAGE    = 103
+FIFF.FIFFV_ASPECT_ALTAVERAGE    = 104  # Alternating subaverage
+FIFF.FIFFV_ASPECT_SAMPLE        = 105  # A sample cut out by graph
+FIFF.FIFFV_ASPECT_POWER_DENSITY = 106  # Power density spectrum
+FIFF.FIFFV_ASPECT_DIPOLE_WAVE   = 200  # Dipole amplitude curve
+#
+# BEM surface IDs
+#
+FIFF.FIFFV_BEM_SURF_ID_UNKNOWN    = -1
+FIFF.FIFFV_BEM_SURF_ID_BRAIN      = 1
+FIFF.FIFFV_BEM_SURF_ID_SKULL      = 3
+FIFF.FIFFV_BEM_SURF_ID_HEAD       = 4
+
+FIFF.FIFF_BEM_SURF_ID           = 3101  # int    surface number
+FIFF.FIFF_BEM_SURF_NAME         = 3102  # string surface name
+FIFF.FIFF_BEM_SURF_NNODE        = 3103  # int    number of nodes on a surface
+FIFF.FIFF_BEM_SURF_NTRI         = 3104  # int     number of triangles on a surface
+FIFF.FIFF_BEM_SURF_NODES        = 3105  # float  surface nodes (nnode,3)
+FIFF.FIFF_BEM_SURF_TRIANGLES    = 3106  # int    surface triangles (ntri,3)
+FIFF.FIFF_BEM_SURF_NORMALS      = 3107  # float  surface node normal unit vectors
+
+FIFF.FIFF_BEM_POT_SOLUTION      = 3110  # float ** The solution matrix
+FIFF.FIFF_BEM_APPROX            = 3111  # int    approximation method, see below
+FIFF.FIFF_BEM_COORD_FRAME       = 3112  # The coordinate frame of the model
+FIFF.FIFF_BEM_SIGMA             = 3113  # Conductivity of a compartment
+FIFF.FIFFV_BEM_APPROX_CONST     = 1     # The constant potential approach
+FIFF.FIFFV_BEM_APPROX_LINEAR    = 2     # The linear potential approach
+
+#
+# More of those defined in MNE
+#
+FIFF.FIFFV_MNE_SURF_UNKNOWN       = -1
+FIFF.FIFFV_MNE_SURF_LEFT_HEMI     = 101
+FIFF.FIFFV_MNE_SURF_RIGHT_HEMI    = 102
+FIFF.FIFFV_MNE_SURF_MEG_HELMET    = 201               # Use this irrespective of the system
+#
+#   These relate to the Isotrak data
+#
+FIFF.FIFFV_POINT_CARDINAL = 1
+FIFF.FIFFV_POINT_HPI      = 2
+FIFF.FIFFV_POINT_EEG      = 3
+FIFF.FIFFV_POINT_ECG      = FIFF.FIFFV_POINT_EEG
+FIFF.FIFFV_POINT_EXTRA    = 4
+
+FIFF.FIFFV_POINT_LPA = 1
+FIFF.FIFFV_POINT_NASION = 2
+FIFF.FIFFV_POINT_RPA = 3
+#
+#   SSP
+#
+FIFF.FIFF_PROJ_ITEM_KIND         = 3411
+FIFF.FIFF_PROJ_ITEM_TIME         = 3412
+FIFF.FIFF_PROJ_ITEM_NVEC         = 3414
+FIFF.FIFF_PROJ_ITEM_VECTORS      = 3415
+FIFF.FIFF_PROJ_ITEM_DEFINITION   = 3416
+FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417
+#
+#   MRIs
+#
+FIFF.FIFF_MRI_SOURCE_PATH       = FIFF.FIFF_REF_PATH
+FIFF.FIFF_MRI_SOURCE_FORMAT     = 2002
+FIFF.FIFF_MRI_PIXEL_ENCODING    = 2003
+FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004
+FIFF.FIFF_MRI_PIXEL_SCALE       = 2005
+FIFF.FIFF_MRI_PIXEL_DATA        = 2006
+FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007
+FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA     = 2008
+FIFF.FIFF_MRI_BOUNDING_BOX           = 2009
+FIFF.FIFF_MRI_WIDTH             = 2010
+FIFF.FIFF_MRI_WIDTH_M           = 2011
+FIFF.FIFF_MRI_HEIGHT            = 2012
+FIFF.FIFF_MRI_HEIGHT_M          = 2013
+FIFF.FIFF_MRI_DEPTH             = 2014
+FIFF.FIFF_MRI_DEPTH_M           = 2015
+FIFF.FIFF_MRI_THICKNESS         = 2016
+FIFF.FIFF_MRI_SCENE_AIM         = 2017
+FIFF.FIFF_MRI_ORIG_SOURCE_PATH       = 2020
+FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT     = 2021
+FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING    = 2022
+FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023
+FIFF.FIFF_MRI_VOXEL_DATA             = 2030
+FIFF.FIFF_MRI_VOXEL_ENCODING         = 2031
+FIFF.FIFF_MRI_MRILAB_SETUP           = 2100
+FIFF.FIFF_MRI_SEG_REGION_ID          = 2200
+#
+FIFF.FIFFV_MRI_PIXEL_UNKNOWN    = 0
+FIFF.FIFFV_MRI_PIXEL_BYTE       = 1
+FIFF.FIFFV_MRI_PIXEL_WORD       = 2
+FIFF.FIFFV_MRI_PIXEL_SWAP_WORD  = 3
+FIFF.FIFFV_MRI_PIXEL_FLOAT      = 4
+FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5
+FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR     = 6
+FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7
+FIFF.FIFFV_MRI_PIXEL_BIT_RLE            = 8
+#
+#   These are the MNE fiff definitions
+#
+FIFF.FIFFB_MNE                    = 350
+FIFF.FIFFB_MNE_SOURCE_SPACE       = 351
+FIFF.FIFFB_MNE_FORWARD_SOLUTION   = 352
+FIFF.FIFFB_MNE_PARENT_MRI_FILE    = 353
+FIFF.FIFFB_MNE_PARENT_MEAS_FILE   = 354
+FIFF.FIFFB_MNE_COV                = 355
+FIFF.FIFFB_MNE_INVERSE_SOLUTION   = 356
+FIFF.FIFFB_MNE_NAMED_MATRIX       = 357
+FIFF.FIFFB_MNE_ENV                = 358
+FIFF.FIFFB_MNE_BAD_CHANNELS       = 359
+FIFF.FIFFB_MNE_VERTEX_MAP         = 360
+FIFF.FIFFB_MNE_EVENTS             = 361
+FIFF.FIFFB_MNE_MORPH_MAP          = 362
+FIFF.FIFFB_MNE_SURFACE_MAP        = 363
+FIFF.FIFFB_MNE_SURFACE_MAP_GROUP  = 364
+
+#
+# CTF compensation data
+#
+FIFF.FIFFB_MNE_CTF_COMP           = 370
+FIFF.FIFFB_MNE_CTF_COMP_DATA      = 371
+FIFF.FIFFB_MNE_DERIVATIONS        = 372
+#
+# Fiff tags associated with MNE computations (3500...)
+#
+#
+# 3500... Bookkeeping
+#
+FIFF.FIFF_MNE_ROW_NAMES              = 3502
+FIFF.FIFF_MNE_COL_NAMES              = 3503
+FIFF.FIFF_MNE_NROW                   = 3504
+FIFF.FIFF_MNE_NCOL                   = 3505
+FIFF.FIFF_MNE_COORD_FRAME            = 3506  # Coordinate frame employed. Defaults:
+                                             #  FIFFB_MNE_SOURCE_SPACE       FIFFV_COORD_MRI
+                                             #  FIFFB_MNE_FORWARD_SOLUTION   FIFFV_COORD_HEAD
+                                             #  FIFFB_MNE_INVERSE_SOLUTION   FIFFV_COORD_HEAD
+FIFF.FIFF_MNE_CH_NAME_LIST           = 3507
+FIFF.FIFF_MNE_FILE_NAME              = 3508  # This removes the collision with fiff_file.h (used to be 3501)
+#
+# 3510... 3590... Source space or surface
+#
+FIFF.FIFF_MNE_SOURCE_SPACE_POINTS        = 3510  # The vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS       = 3511  # The vertex normals
+FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS       = 3512  # How many vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION     = 3513  # Which are selected to the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE          = 3514  # How many are in use
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST       = 3515  # Nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST  = 3516  # Distance to the nearest source space vertex for all vertices
+FIFF.FIFF_MNE_SOURCE_SPACE_ID            = 3517  # Identifier
+FIFF.FIFF_MNE_SOURCE_SPACE_TYPE          = 3518  # Surface or volume
+FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES      = 3519  # List of vertices (zero based)
+
+FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS    = 3596  # Voxel space dimensions in a volume source space
+FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR  = 3597  # Matrix to interpolate a volume source space into a mri volume
+FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE      = 3598  # MRI file used in the interpolation
+
+FIFF.FIFF_MNE_SOURCE_SPACE_NTRI          = 3590  # Number of triangles
+FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES     = 3591  # The triangulation
+FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI      = 3592  # Number of triangles corresponding to the number of vertices in use
+FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593  # The triangulation of the used vertices in the source space
+FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS    = 3594  # Number of neighbors for each source space point (used for volume source spaces)
+FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS     = 3595  # Neighbors for each source space point (used for volume source spaces)
+
+FIFF.FIFF_MNE_SOURCE_SPACE_DIST          = 3599  # Distances between vertices in use (along the surface)
+FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT    = 3600  # If distance is above this limit (in the volume) it has not been calculated
+
+FIFF.FIFF_MNE_SURFACE_MAP_DATA           = 3610  # Surface map data
+FIFF.FIFF_MNE_SURFACE_MAP_KIND           = 3611  # Type of map
+
+#
+# 3520... Forward solution
+#
+FIFF.FIFF_MNE_FORWARD_SOLUTION       = 3520
+FIFF.FIFF_MNE_SOURCE_ORIENTATION     = 3521  # Fixed or free
+FIFF.FIFF_MNE_INCLUDED_METHODS       = 3522
+FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD  = 3523
+#
+# 3530... Covariance matrix
+#
+FIFF.FIFF_MNE_COV_KIND               = 3530  # What kind of a covariance matrix
+FIFF.FIFF_MNE_COV_DIM                = 3531  # Matrix dimension
+FIFF.FIFF_MNE_COV                    = 3532  # Full matrix in packed representation (lower triangle)
+FIFF.FIFF_MNE_COV_DIAG               = 3533  # Diagonal matrix
+FIFF.FIFF_MNE_COV_EIGENVALUES        = 3534  # Eigenvalues and eigenvectors of the above
+FIFF.FIFF_MNE_COV_EIGENVECTORS       = 3535
+FIFF.FIFF_MNE_COV_NFREE              = 3536  # Number of degrees of freedom
+FIFF.FIFF_MNE_COV_METHOD             = 3537  # The estimator used
+FIFF.FIFF_MNE_COV_SCORE              = 3538  # Negative log-likelihood
+
+#
+# 3540... Inverse operator
+#
+# We store the inverse operator as the eigenleads, eigenfields,
+# and weights
+#
+FIFF.FIFF_MNE_INVERSE_LEADS              = 3540   # The eigenleads
+FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED     = 3546   # The eigenleads (already weighted with R^0.5)
+FIFF.FIFF_MNE_INVERSE_FIELDS             = 3541   # The eigenfields
+FIFF.FIFF_MNE_INVERSE_SING               = 3542   # The singular values
+FIFF.FIFF_MNE_PRIORS_USED                = 3543   # Which kind of priors have been used for the source covariance matrix
+FIFF.FIFF_MNE_INVERSE_FULL               = 3544   # Inverse operator as one matrix
+                                                  # This matrix includes the whitening operator as well;
+                                                  # the regularization is applied
+FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545  # Contains the orientation of one source per row;
+                                                  # the source orientations must be expressed in the
+                                                  # coordinate system given by FIFF_MNE_COORD_FRAME
+FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT         = 3547  # Are the sources given in Am or Am/m^2 ?
+#
+# 3550... Saved environment info
+#
+FIFF.FIFF_MNE_ENV_WORKING_DIR        = 3550     # Working directory where the file was created
+FIFF.FIFF_MNE_ENV_COMMAND_LINE       = 3551     # The command used to create the file
+FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN    = 3552     # Reference to an external binary file (big-endian)
+FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553     # Reference to an external binary file (little-endian)
+#
+# 3560... Miscellaneous
+#
+FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE       = 3560     # Is this projection item active?
+FIFF.FIFF_MNE_EVENT_LIST             = 3561     # An event list (for STI 014)
+FIFF.FIFF_MNE_HEMI                   = 3562     # Hemisphere association for general purposes
+FIFF.FIFF_MNE_DATA_SKIP_NOP          = 3563     # A data skip turned off in the raw data
+FIFF.FIFF_MNE_ORIG_CH_INFO           = 3564     # Channel information before any changes
+FIFF.FIFF_MNE_EVENT_TRIGGER_MASK     = 3565     # Mask applied to the trigger channel values
+FIFF.FIFF_MNE_EVENT_COMMENTS         = 3566     # Event comments merged into one long string
+#
+# 3570... Morphing maps
+#
+FIFF.FIFF_MNE_MORPH_MAP              = 3570     # Mapping of closest vertices on the sphere
+FIFF.FIFF_MNE_MORPH_MAP_FROM         = 3571     # Which subject is this map from
+FIFF.FIFF_MNE_MORPH_MAP_TO           = 3572     # Which subject is this map to
+#
+# 3580... CTF compensation data
+#
+FIFF.FIFF_MNE_CTF_COMP_KIND         = 3580     # What kind of compensation
+FIFF.FIFF_MNE_CTF_COMP_DATA         = 3581     # The compensation data itself
+FIFF.FIFF_MNE_CTF_COMP_CALIBRATED   = 3582     # Are the coefficients calibrated?
+
+FIFF.FIFF_MNE_DERIVATION_DATA       = 3585     # Used to store information about EEG and other derivations
+#
+# 3601... values associated with ICA decomposition
+#
+FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS  = 3601     # ICA interface parameters
+FIFF.FIFF_MNE_ICA_CHANNEL_NAMES     = 3602     # ICA channel names
+FIFF.FIFF_MNE_ICA_WHITENER          = 3603     # ICA whitener
+FIFF.FIFF_MNE_ICA_PCA_COMPONENTS    = 3604     # PCA components
+FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605     # PCA explained variance
+FIFF.FIFF_MNE_ICA_PCA_MEAN          = 3606     # PCA mean
+FIFF.FIFF_MNE_ICA_MATRIX            = 3607     # ICA unmixing matrix
+FIFF.FIFF_MNE_ICA_BADS              = 3608     # ICA bad sources
+FIFF.FIFF_MNE_ICA_MISC_PARAMS       = 3609     # ICA misc params
+#
+# Maxfilter tags
+#
+FIFF.FIFF_SSS_FRAME                 = 263
+FIFF.FIFF_SSS_JOB                   = 264
+FIFF.FIFF_SSS_ORIGIN                = 265
+FIFF.FIFF_SSS_ORD_IN                = 266
+FIFF.FIFF_SSS_ORD_OUT               = 267
+FIFF.FIFF_SSS_NMAG                  = 268
+FIFF.FIFF_SSS_COMPONENTS            = 269
+FIFF.FIFF_SSS_CAL_CHANS             = 270
+FIFF.FIFF_SSS_CAL_CORRS             = 271
+FIFF.FIFF_SSS_ST_CORR               = 272
+FIFF.FIFF_SSS_NFREE                 = 278
+FIFF.FIFF_SSS_ST_LENGTH             = 279
+FIFF.FIFF_DECOUPLER_MATRIX          = 800
+#
+# Fiff values associated with MNE computations
+#
+FIFF.FIFFV_MNE_UNKNOWN_ORI          = 0
+FIFF.FIFFV_MNE_FIXED_ORI            = 1
+FIFF.FIFFV_MNE_FREE_ORI             = 2
+
+FIFF.FIFFV_MNE_MEG                  = 1
+FIFF.FIFFV_MNE_EEG                  = 2
+FIFF.FIFFV_MNE_MEG_EEG              = 3
+
+FIFF.FIFFV_MNE_PRIORS_NONE          = 0
+FIFF.FIFFV_MNE_PRIORS_DEPTH         = 1
+FIFF.FIFFV_MNE_PRIORS_LORETA        = 2
+FIFF.FIFFV_MNE_PRIORS_SULCI         = 3
+
+FIFF.FIFFV_MNE_UNKNOWN_COV          = 0
+FIFF.FIFFV_MNE_SENSOR_COV           = 1
+FIFF.FIFFV_MNE_NOISE_COV            = 1         # This is what it should have been called
+FIFF.FIFFV_MNE_SOURCE_COV           = 2
+FIFF.FIFFV_MNE_FMRI_PRIOR_COV       = 3
+FIFF.FIFFV_MNE_SIGNAL_COV           = 4         # This will be potentially employed in beamformers
+FIFF.FIFFV_MNE_DEPTH_PRIOR_COV      = 5         # The depth weighting prior
+FIFF.FIFFV_MNE_ORIENT_PRIOR_COV     = 6         # The orientation prior
+
+FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF  = 10        # Linear projection related to EEG average reference
+#
+# Output map types
+#
+FIFF.FIFFV_MNE_MAP_UNKNOWN                   = -1     # Unspecified
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT            =  1     # Scalar current value
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE       =  2     # Absolute value of the above
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT            =  3     # Current vector components
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE       =  4     # Vector current size
+FIFF.FIFFV_MNE_MAP_T_STAT                    =  5     # Student's t statistic
+FIFF.FIFFV_MNE_MAP_F_STAT                    =  6     # F statistic
+FIFF.FIFFV_MNE_MAP_F_STAT_SQRT               =  7     # Square root of the F statistic
+FIFF.FIFFV_MNE_MAP_CHI2_STAT                 =  8     # (Approximate) chi^2 statistic
+FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT            =  9     # Square root of the (approximate) chi^2 statistic
+FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE      = 10     # Current noise approximation (scalar)
+FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE      = 11     # Current noise approximation (vector)
+#
+# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE)
+#
+FIFF.FIFFV_MNE_SPACE_UNKNOWN  = -1
+FIFF.FIFFV_MNE_SPACE_SURFACE  = 1
+FIFF.FIFFV_MNE_SPACE_VOLUME   = 2
+FIFF.FIFFV_MNE_SPACE_DISCRETE = 3
+#
+# Covariance matrix channel classification
+#
+FIFF.FIFFV_MNE_COV_CH_UNKNOWN  = -1  # No idea
+FIFF.FIFFV_MNE_COV_CH_MEG_MAG  =  0  # Axial gradiometer or magnetometer [T]
+FIFF.FIFFV_MNE_COV_CH_MEG_GRAD =  1  # Planar gradiometer [T/m]
+FIFF.FIFFV_MNE_COV_CH_EEG      =  2  # EEG [V]
+#
+# Projection item kinds
+#
+FIFF.FIFFV_PROJ_ITEM_NONE           = 0
+FIFF.FIFFV_PROJ_ITEM_FIELD          = 1
+FIFF.FIFFV_PROJ_ITEM_DIP_FIX        = 2
+FIFF.FIFFV_PROJ_ITEM_DIP_ROT        = 3
+FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD     = 4
+FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD    = 5
+#
+# Additional coordinate frames
+#
+FIFF.FIFFV_MNE_COORD_TUFTS_EEG   =  300         # For Tufts EEG data
+FIFF.FIFFV_MNE_COORD_CTF_DEVICE  = 1001         # CTF device coordinates
+FIFF.FIFFV_MNE_COORD_CTF_HEAD    = 1004         # CTF head coordinates
+FIFF.FIFFV_MNE_COORD_DIGITIZER   = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates
+FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI     # The surface RAS coordinates
+FIFF.FIFFV_MNE_COORD_MRI_VOXEL   = 2001         # The MRI voxel coordinates
+FIFF.FIFFV_MNE_COORD_RAS         = 2002         # Surface RAS coordinates with non-zero origin
+FIFF.FIFFV_MNE_COORD_MNI_TAL     = 2003         # MNI Talairach coordinates
+FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ  = 2004         # FreeSurfer Talairach coordinates (MNI z > 0)
+FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ  = 2005         # FreeSurfer Talairach coordinates (MNI z < 0)
+FIFF.FIFFV_MNE_COORD_FS_TAL      = 2006         # FreeSurfer Talairach coordinates
+#
+# 4D and KIT use the same head coordinate system definition as CTF
+#
+FIFF.FIFFV_MNE_COORD_4D_HEAD     = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+FIFF.FIFFV_MNE_COORD_KIT_HEAD    = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+#
+# KIT system coil types
+#
+FIFF.FIFFV_COIL_KIT_GRAD         = 6001
+FIFF.FIFFV_COIL_KIT_REF_MAG      = 6002
+#
+# CTF coil and channel types
+#
+FIFF.FIFFV_COIL_CTF_GRAD             = 5001
+FIFF.FIFFV_COIL_CTF_REF_MAG          = 5002
+FIFF.FIFFV_COIL_CTF_REF_GRAD         = 5003
+FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004
+#
+# Magnes reference sensors
+#
+FIFF.FIFFV_COIL_MAGNES_REF_MAG          = 4003
+FIFF.FIFFV_COIL_MAGNES_REF_GRAD         = 4004
+FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
+#
+# BabySQUID sensors
+#
+FIFF.FIFFV_COIL_BABY_GRAD               = 7001
+FIFF.FIFFV_COIL_BABY_MAG                = 7002
+FIFF.FIFFV_COIL_BABY_REF_MAG            = 7003
+#
+#   FWD Types
+#
+FIFF.FWD_COIL_UNKNOWN                   = 0
+FIFF.FWD_COILC_UNKNOWN                  = 0
+FIFF.FWD_COILC_EEG                      = 1000
+FIFF.FWD_COILC_MAG                      = 1
+FIFF.FWD_COILC_AXIAL_GRAD               = 2
+FIFF.FWD_COILC_PLANAR_GRAD              = 3
+FIFF.FWD_COILC_AXIAL_GRAD2              = 4
+
+FIFF.FWD_COIL_ACCURACY_POINT            = 0
+FIFF.FWD_COIL_ACCURACY_NORMAL           = 1
+FIFF.FWD_COIL_ACCURACY_ACCURATE         = 2
+
+FIFF.FWD_BEM_UNKNOWN                    = -1
+FIFF.FWD_BEM_CONSTANT_COLL              = 1
+FIFF.FWD_BEM_LINEAR_COLL                = 2
+
+FIFF.FWD_BEM_IP_APPROACH_LIMIT          = 0.1
+
+FIFF.FWD_BEM_LIN_FIELD_SIMPLE           = 1
+FIFF.FWD_BEM_LIN_FIELD_FERGUSON         = 2
+FIFF.FWD_BEM_LIN_FIELD_URANKAR          = 3
+
+#
+#   Data types
+#
+FIFF.FIFFT_VOID                  = 0
+FIFF.FIFFT_BYTE                  = 1
+FIFF.FIFFT_SHORT                 = 2
+FIFF.FIFFT_INT                   = 3
+FIFF.FIFFT_FLOAT                 = 4
+FIFF.FIFFT_DOUBLE                = 5
+FIFF.FIFFT_JULIAN                = 6
+FIFF.FIFFT_USHORT                = 7
+FIFF.FIFFT_UINT                  = 8
+FIFF.FIFFT_ULONG                 = 9
+FIFF.FIFFT_STRING                = 10
+FIFF.FIFFT_LONG                  = 11
+FIFF.FIFFT_DAU_PACK13            = 13
+FIFF.FIFFT_DAU_PACK14            = 14
+FIFF.FIFFT_DAU_PACK16            = 16
+FIFF.FIFFT_COMPLEX_FLOAT         = 20
+FIFF.FIFFT_COMPLEX_DOUBLE        = 21
+FIFF.FIFFT_OLD_PACK              = 23
+FIFF.FIFFT_CH_INFO_STRUCT        = 30
+FIFF.FIFFT_ID_STRUCT             = 31
+FIFF.FIFFT_DIR_ENTRY_STRUCT      = 32
+FIFF.FIFFT_DIG_POINT_STRUCT      = 33
+FIFF.FIFFT_CH_POS_STRUCT         = 34
+FIFF.FIFFT_COORD_TRANS_STRUCT    = 35
+FIFF.FIFFT_DIG_STRING_STRUCT     = 36
+FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37
+#
+# Units of measurement
+#
+FIFF.FIFF_UNIT_NONE = -1
+#
+# SI base units
+#
+FIFF.FIFF_UNIT_M   = 1
+FIFF.FIFF_UNIT_KG  = 2
+FIFF.FIFF_UNIT_SEC = 3
+FIFF.FIFF_UNIT_A   = 4
+FIFF.FIFF_UNIT_K   = 5
+FIFF.FIFF_UNIT_MOL = 6
+#
+# SI Supplementary units
+#
+FIFF.FIFF_UNIT_RAD = 7
+FIFF.FIFF_UNIT_SR  = 8
+#
+# SI base candela
+#
+FIFF.FIFF_UNIT_CD  = 9
+#
+# SI derived units
+#
+FIFF.FIFF_UNIT_HZ  = 101
+FIFF.FIFF_UNIT_N   = 102
+FIFF.FIFF_UNIT_PA  = 103
+FIFF.FIFF_UNIT_J   = 104
+FIFF.FIFF_UNIT_W   = 105
+FIFF.FIFF_UNIT_C   = 106
+FIFF.FIFF_UNIT_V   = 107
+FIFF.FIFF_UNIT_F   = 108
+FIFF.FIFF_UNIT_OHM = 109
+FIFF.FIFF_UNIT_MHO = 110
+FIFF.FIFF_UNIT_WB  = 111
+FIFF.FIFF_UNIT_T   = 112
+FIFF.FIFF_UNIT_H   = 113
+FIFF.FIFF_UNIT_CEL = 114
+FIFF.FIFF_UNIT_LM  = 115
+FIFF.FIFF_UNIT_LX  = 116
+#
+# Others we need
+#
+FIFF.FIFF_UNIT_T_M   = 201  # T/m
+FIFF.FIFF_UNIT_AM    = 202  # Am
+FIFF.FIFF_UNIT_AM_M2 = 203  # Am/m^2
+FIFF.FIFF_UNIT_AM_M3 = 204  # Am/m^3
+#
+# Multipliers
+#
+FIFF.FIFF_UNITM_E    = 18
+FIFF.FIFF_UNITM_PET  = 15
+FIFF.FIFF_UNITM_T    = 12
+FIFF.FIFF_UNITM_MEG  = 6
+FIFF.FIFF_UNITM_K    = 3
+FIFF.FIFF_UNITM_H    = 2
+FIFF.FIFF_UNITM_DA   = 1
+FIFF.FIFF_UNITM_NONE = 0
+FIFF.FIFF_UNITM_D    = -1
+FIFF.FIFF_UNITM_C    = -2
+FIFF.FIFF_UNITM_M    = -3
+FIFF.FIFF_UNITM_MU   = -6
+FIFF.FIFF_UNITM_N    = -9
+FIFF.FIFF_UNITM_P    = -12
+FIFF.FIFF_UNITM_F    = -15
+FIFF.FIFF_UNITM_A    = -18
+
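+# A channel's physical unit combines a base unit with one of the decimal
+# multipliers above, i.e. the stored value is in units of 10**unit_mul * unit;
+# for example unit=FIFF_UNIT_T with unit_mul=FIFF_UNITM_F reads as 10**-15 T
+# (femtotesla).
+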
+#
+# Coil types
+#
+FIFF.FIFFV_COIL_NONE                  = 0  # The location info contains no data
+FIFF.FIFFV_COIL_EEG                   = 1  # EEG electrode position in r0
+FIFF.FIFFV_COIL_NM_122                = 2  # Neuromag 122 coils
+FIFF.FIFFV_COIL_NM_24                 = 3  # Old 24 channel system in HUT
+FIFF.FIFFV_COIL_NM_MCG_AXIAL          = 4  # The axial devices in the HUCS MCG system
+FIFF.FIFFV_COIL_EEG_BIPOLAR           = 5  # Bipolar EEG lead
+
+FIFF.FIFFV_COIL_DIPOLE             = 200  # Time-varying dipole definition
+# The coil info contains dipole location (r0) and
+# direction (ex)
+FIFF.FIFFV_COIL_MCG_42             = 1000  # For testing the MCG software
+
+FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000  # Simple point magnetometer
+FIFF.FIFFV_COIL_AXIAL_GRAD_5CM     = 2001  # Generic axial gradiometer
+
+FIFF.FIFFV_COIL_VV_PLANAR_W        = 3011  # VV prototype wirewound planar sensor
+FIFF.FIFFV_COIL_VV_PLANAR_T1       = 3012  # Vectorview SQ20483N planar gradiometer
+FIFF.FIFFV_COIL_VV_PLANAR_T2       = 3013  # Vectorview SQ20483N-A planar gradiometer
+FIFF.FIFFV_COIL_VV_PLANAR_T3       = 3014  # Vectorview SQ20950N planar gradiometer
+FIFF.FIFFV_COIL_VV_MAG_W           = 3021  # VV prototype wirewound magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T1          = 3022  # Vectorview SQ20483N magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T2          = 3023  # Vectorview SQ20483-A magnetometer
+FIFF.FIFFV_COIL_VV_MAG_T3          = 3024  # Vectorview SQ20950N magnetometer
+
+FIFF.FIFFV_COIL_MAGNES_MAG         = 4001  # Magnes WH magnetometer
+FIFF.FIFFV_COIL_MAGNES_GRAD        = 4002  # Magnes WH gradiometer
+FIFF.FIFFV_COIL_MAGNES_R_MAG       = 4003  # Magnes WH reference magnetometer
+FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA  = 4004  # Magnes WH reference diagonal gradiometer
+FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF  = 4005  # Magnes WH reference off-diagonal gradiometer
+
+# MNE RealTime
+FIFF.FIFF_MNE_RT_COMMAND           = 3700  # realtime command
+FIFF.FIFF_MNE_RT_CLIENT_ID         = 3701  # realtime client
+
+# MNE epochs bookkeeping
+FIFF.FIFFB_MNE_EPOCHS_SELECTION    = 3800  # the epochs selection
+FIFF.FIFFB_MNE_EPOCHS_DROP_LOG     = 3801  # the drop log
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/ctf.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/ctf.py
new file mode 100644
index 0000000..3bdb8e8
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/ctf.py
@@ -0,0 +1,256 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+
+import numpy as np
+
+from .constants import FIFF
+from .tag import find_tag, has_tag, read_tag
+from .tree import dir_tree_find
+from .write import start_block, end_block, write_int
+from .matrix import write_named_matrix
+
+from ..utils import logger, verbose
+
+
+def hex2dec(s):
+    return int(s, 16)
+
+
+def _read_named_matrix(fid, node, matkind):
+    """read_named_matrix(fid,node)
+
+    Read named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor
+    node : dict
+        Node
+    matkind : mat kind
+        XXX
+    Returns
+    -------
+    mat : dict
+        The matrix with row and col names.
+    """
+
+    #   Descend one level if necessary
+    if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
+        for k in range(node['nchild']):
+            if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
+                if has_tag(node['children'][k], matkind):
+                    node = node['children'][k]
+                    break
+        else:
+            raise ValueError('Desired named matrix (kind = %d) not'
+                             ' available' % matkind)
+
+    else:
+        if not has_tag(node, matkind):
+            raise ValueError('Desired named matrix (kind = %d) not available'
+                             % matkind)
+
+    #   Read everything we need
+    tag = find_tag(fid, node, matkind)
+    if tag is None:
+        raise ValueError('Matrix data missing')
+    else:
+        data = tag.data
+
+    nrow, ncol = data.shape
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
+    if tag is not None:
+        if tag.data != nrow:
+            raise ValueError('Number of rows in matrix data and '
+                             'FIFF_MNE_NROW tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
+    if tag is not None:
+        if tag.data != ncol:
+            raise ValueError('Number of columns in matrix data and '
+                             'FIFF_MNE_NCOL tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
+    if tag is not None:
+        row_names = tag.data
+    else:
+        row_names = None
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
+    if tag is not None:
+        col_names = tag.data
+    else:
+        col_names = None
+
+    #   Put it together
+    mat = dict(nrow=nrow, ncol=ncol)
+    if row_names is not None:
+        mat['row_names'] = row_names.split(':')
+    else:
+        mat['row_names'] = None
+
+    if col_names is not None:
+        mat['col_names'] = col_names.split(':')
+    else:
+        mat['col_names'] = None
+
+    mat['data'] = data.astype(np.float)
+    return mat
+
+
+@verbose
+def read_ctf_comp(fid, node, chs, verbose=None):
+    """Read the CTF software compensation data from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    node : dict
+        The node in the FIF tree.
+    chs : list
+        The list of channel info dicts; their 'range' and 'cal' entries
+        are used to calibrate the compensation matrices.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    compdata : list
+        The compensation data
+    """
+    compdata = []
+    comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+
+    for node in comps:
+        #   Read the data we need
+        mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA)
+        for p in range(node['nent']):
+            kind = node['directory'][p].kind
+            pos = node['directory'][p].pos
+            if kind == FIFF.FIFF_MNE_CTF_COMP_KIND:
+                tag = read_tag(fid, pos)
+                break
+        else:
+            raise Exception('Compensation type not found')
+
+        #   Get the compensation kind and map it to a simple number
+        one = dict(ctfkind=tag.data)
+        del tag
+
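+        # The hex literals below are ASCII four-character codes: 0x47314252,
+        # 0x47324252 and 0x47334252 spell 'G1BR', 'G2BR' and 'G3BR', i.e. the
+        # CTF names for gradient compensation grades 1-3.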
+        if one['ctfkind'] == int('47314252', 16):  # hex2dec('47314252'):
+            one['kind'] = 1
+        elif one['ctfkind'] == int('47324252', 16):  # hex2dec('47324252'):
+            one['kind'] = 2
+        elif one['ctfkind'] == int('47334252', 16):  # hex2dec('47334252'):
+            one['kind'] = 3
+        else:
+            one['kind'] = int(one['ctfkind'])
+
+        for p in range(node['nent']):
+            kind = node['directory'][p].kind
+            pos = node['directory'][p].pos
+            if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED:
+                tag = read_tag(fid, pos)
+                calibrated = tag.data
+                break
+        else:
+            calibrated = False
+
+        one['save_calibrated'] = calibrated
+        one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float)
+        one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float)
+
+        row_cals, col_cals = None, None  # initialize cals
+
+        if not calibrated:
+            #
+            #   Calibrate...
+            #
+            #   Do the columns first
+            #
+            ch_names = [c['ch_name'] for c in chs]
+
+            col_cals = np.zeros(mat['data'].shape[1], dtype=np.float)
+            for col in range(mat['data'].shape[1]):
+                p = ch_names.count(mat['col_names'][col])
+                if p == 0:
+                    raise Exception('Channel %s is not available in data'
+                                    % mat['col_names'][col])
+                elif p > 1:
+                    raise Exception('Ambiguous channel %s' %
+                                    mat['col_names'][col])
+                idx = ch_names.index(mat['col_names'][col])
+                col_cals[col] = 1.0 / (chs[idx]['range'] * chs[idx]['cal'])
+
+            #    Then the rows
+            row_cals = np.zeros(mat['data'].shape[0])
+            for row in range(mat['data'].shape[0]):
+                p = ch_names.count(mat['row_names'][row])
+                if p == 0:
+                    raise Exception('Channel %s is not available in data'
+                                    % mat['row_names'][row])
+                elif p > 1:
+                    raise Exception('Ambiguous channel %s' %
+                                    mat['row_names'][row])
+                idx = ch_names.index(mat['row_names'][row])
+                row_cals[row] = chs[idx]['range'] * chs[idx]['cal']
+
+            mat['data'] = row_cals[:, None] * mat['data'] * col_cals[None, :]
+            one['rowcals'] = row_cals
+            one['colcals'] = col_cals
+
+        one['data'] = mat
+        compdata.append(one)
+        if row_cals is not None:
+            del row_cals
+        if col_cals is not None:
+            del col_cals
+
+    if len(compdata) > 0:
+        logger.info('    Read %d compensation matrices' % len(compdata))
+
+    return compdata
+
+
+###############################################################################
+# Writing
+
+def write_ctf_comp(fid, comps):
+    """Write the CTF compensation data into a fif file
+
+    Parameters
+    ----------
+    fid : file
+        The open FIF file descriptor
+
+    comps : list
+        The compensation data to write
+    """
+    if len(comps) <= 0:
+        return
+
+    #  This is very simple in fact
+    start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
+    for comp in comps:
+        start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+        #    Write the compensation kind
+        write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind'])
+        write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED,
+                  comp['save_calibrated'])
+
+        if not comp['save_calibrated']:
+            # Undo calibration
+            comp = deepcopy(comp)
+            data = ((1. / comp['rowcals'][:, None]) * comp['data']['data'] *
+                    (1. / comp['colcals'][None, :]))
+            comp['data']['data'] = data
+        write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data'])
+        end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
+
+    end_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/diff.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/diff.py
new file mode 100644
index 0000000..9e1fd1c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/diff.py
@@ -0,0 +1,39 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD Style.
+
+import numpy as np
+
+from ..utils import logger, verbose
+
+
+@verbose
+def is_equal(first, second, verbose=None):
+    """ Says if 2 python structures are the same. Designed to
+    handle dict, list, np.ndarray etc.
+    """
+    all_equal = True
+    # Check all keys in first dict
+    if type(first) != type(second):
+        all_equal = False
+    if isinstance(first, dict):
+        for key in first.keys():
+            if (key not in second):
+                logger.info("Missing key %s in %s" % (key, second))
+                all_equal = False
+            else:
+                if not is_equal(first[key], second[key]):
+                    all_equal = False
+    elif isinstance(first, np.ndarray):
+        if not np.allclose(first, second):
+            all_equal = False
+    elif isinstance(first, list):
+        for a, b in zip(first, second):
+            if not is_equal(a, b):
+                logger.info('%s and\n%s are different' % (a, b))
+                all_equal = False
+    else:
+        if first != second:
+            logger.info('%s and\n%s are different' % (first, second))
+            all_equal = False
+    return all_equal
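+
+# Illustrative behaviour (the values are made up):
+#
+#     >>> is_equal({'a': np.ones(3)}, {'a': np.ones(3)})
+#     True
+#     >>> is_equal({'a': 1}, {'a': 2})
+#     False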
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/__init__.py
new file mode 100644
index 0000000..f712d3d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/__init__.py
@@ -0,0 +1,7 @@
+"""EDF+,BDF module for conversion to FIF"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from .edf import read_raw_edf
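+
+# A hedged usage sketch ('recording.edf' is a placeholder file name):
+#
+#     >>> raw = read_raw_edf('recording.edf', montage=None,
+#     ...                    preload=True)  # doctest: +SKIP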
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/edf.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/edf.py
new file mode 100644
index 0000000..01509c4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/edf.py
@@ -0,0 +1,628 @@
+"""Conversion tool from EDF, EDF+, BDF to FIF
+
+"""
+
+# Authors: Teon Brooks <teon.brooks at gmail.com>
+#          Martin Billinger <martin.billinger at tugraz.at>
+#
+# License: BSD (3-clause)
+
+import os
+import calendar
+import datetime
+import re
+import warnings
+from math import ceil, floor
+
+import numpy as np
+
+from ...utils import verbose, logger
+from ..base import _BaseRaw, _check_update_montage
+from ..meas_info import _empty_info
+from ..pick import pick_types
+from ..constants import FIFF
+from ...filter import resample
+from ...externals.six.moves import zip
+
+
+class RawEDF(_BaseRaw):
+    """Raw object from EDF, EDF+, BDF file
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the EDF+/BDF file.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the electrodes in the
+        EDF file. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes in the
+        EDF file. Default is None.
+    stim_channel : str | int | None
+        The channel name or channel index (starting at 0).
+        -1 corresponds to the last channel (default).
+        If None, there will be no stim channel added.
+    annot : str | None
+        Path to annotation file.
+        If None, no derived stim channel will be added (for files requiring
+        annotation file to interpret stim channel).
+    annotmap : str | None
+        Path to annotation map file containing mapping from label to trigger.
+        Must be specified if annot is not None.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+    """
+    @verbose
+    def __init__(self, input_fname, montage, eog=None, misc=None,
+                 stim_channel=-1, annot=None, annotmap=None,
+                 preload=False, verbose=None):
+        logger.info('Extracting EDF parameters from %s...' % input_fname)
+        input_fname = os.path.abspath(input_fname)
+        info, edf_info = _get_edf_info(input_fname, stim_channel,
+                                       annot, annotmap,
+                                       eog, misc, preload)
+        logger.info('Creating Raw.info structure...')
+        _check_update_montage(info, montage)
+
+        if bool(annot) != bool(annotmap):
+            warnings.warn(("Stimulus Channel will not be annotated. "
+                           "Both 'annot' and 'annotmap' must be specified."))
+
+        # Raw attributes
+        last_samps = [edf_info['nsamples'] - 1]
+        super(RawEDF, self).__init__(
+            info, preload, filenames=[input_fname], raw_extras=[edf_info],
+            last_samps=last_samps, orig_format='int',
+            verbose=verbose)
+
+        logger.info('Ready.')
+
+    @verbose
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
+        from scipy.interpolate import interp1d
+        if mult is not None:
+            # XXX "cals" here does not function the same way as in RawFIF,
+            # and for efficiency we want to be able to combine mult and cals
+            # so proj support will have to wait until this is resolved
+            raise NotImplementedError('mult is not supported yet')
+        # RawFIF and RawEDF think of "stop" differently, easiest to increment
+        # here and refactor later
+        stop += 1
+        sel = np.arange(self.info['nchan'])[idx]
+
+        n_samps = self._raw_extras[fi]['n_samps']
+        buf_len = self._raw_extras[fi]['max_samp']
+        sfreq = self.info['sfreq']
+        n_chan = self.info['nchan']
+        data_size = self._raw_extras[fi]['data_size']
+        data_offset = self._raw_extras[fi]['data_offset']
+        stim_channel = self._raw_extras[fi]['stim_channel']
+        tal_channel = self._raw_extras[fi]['tal_channel']
+        annot = self._raw_extras[fi]['annot']
+        annotmap = self._raw_extras[fi]['annotmap']
+        subtype = self._raw_extras[fi]['subtype']
+
+        # this is used to deal with indexing in the middle of a sampling period
+        blockstart = int(floor(float(start) / buf_len) * buf_len)
+        blockstop = int(ceil(float(stop) / buf_len) * buf_len)
+
+        # gain constructor
+        physical_range = np.array([ch['range'] for ch in self.info['chs']])
+        cal = np.array([ch['cal'] for ch in self.info['chs']])
+        gains = np.atleast_2d(self._raw_extras[fi]['units'] *
+                              (physical_range / cal))
+
+        # physical dimension in uV
+        physical_min = np.atleast_2d(self._raw_extras[fi]['units'] *
+                                     self._raw_extras[fi]['physical_min'])
+        digital_min = self._raw_extras[fi]['digital_min']
+
+        offsets = np.atleast_2d(physical_min - (digital_min * gains)).T
+        if tal_channel is not None:
+            offsets[tal_channel] = 0
+
+        read_size = blockstop - blockstart
+        this_data = np.empty((len(sel), buf_len))
+        data = data[:, offset:offset + (stop - start)]
+        """
+        Consider this example:
+
+        tmin, tmax = (2, 27)
+        read_size = 30
+        buf_len = 10
+        sfreq = 1.
+
+                        +---------+---------+---------+
+        File structure: |  buf0   |   buf1  |   buf2  |
+                        +---------+---------+---------+
+        File time:      0        10        20        30
+                        +---------+---------+---------+
+        Requested time:   2                       27
+
+                        |                             |
+                    blockstart                    blockstop
+                          |                        |
+                        start                    stop
+
+        We need 27 - 2 = 25 samples (per channel) to store our data, and
+        we need to read from 3 buffers (30 samples) to get all of our data.
+
+        On all reads but the first, the data we read starts at
+        the first sample of the buffer. On all reads but the last,
+        the data we read ends on the last sample of the buffer.
+
+        We call this_data the variable that stores the current buffer's data,
+        and data the variable that stores the total output.
+
+        On the first read, we need to do this::
+
+            >>> data[0:buf_len-2] = this_data[2:buf_len]
+
+        On the second read, we need to do::
+
+            >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len]
+
+        On the final read, we need to do::
+
+            >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3]
+
+        """
+        with open(self._filenames[fi], 'rb', buffering=0) as fid:
+            # extract data
+            fid.seek(data_offset + blockstart * n_chan * data_size)
+            n_blk = int(ceil(float(read_size) / buf_len))
+            start_offset = start - blockstart
+            end_offset = blockstop - stop
+            for bi in range(n_blk):
+                # Triage start (sidx) and end (eidx) indices for
+                # data (d) and read (r)
+                if bi == 0:
+                    d_sidx = 0
+                    r_sidx = start_offset
+                else:
+                    d_sidx = bi * buf_len - start_offset
+                    r_sidx = 0
+                if bi == n_blk - 1:
+                    d_eidx = data.shape[1]
+                    r_eidx = buf_len - end_offset
+                else:
+                    d_eidx = (bi + 1) * buf_len - start_offset
+                    r_eidx = buf_len
+                n_buf_samp = r_eidx - r_sidx
+                count = 0
+                for j, samp in enumerate(n_samps):
+                    # bdf data: 24bit data
+                    if j not in sel:
+                        fid.seek(samp * data_size, 1)
+                        continue
+                    if samp == buf_len:
+                        # use faster version with skips built in
+                        if r_sidx > 0:
+                            fid.seek(r_sidx * data_size, 1)
+                        ch_data = _read_ch(fid, subtype, n_buf_samp, data_size)
+                        if r_eidx < buf_len:
+                            fid.seek((buf_len - r_eidx) * data_size, 1)
+                    else:
+                        # read in all the data and triage appropriately
+                        ch_data = _read_ch(fid, subtype, samp, data_size)
+                        if j == tal_channel:
+                            # don't resample tal_channel,
+                            # pad with zeros instead.
+                            n_missing = int(buf_len - samp)
+                            ch_data = np.hstack([ch_data, [0] * n_missing])
+                            ch_data = ch_data[r_sidx:r_eidx]
+                        elif j == stim_channel:
+                            if ((annot and annotmap) or
+                                    tal_channel is not None):
+                                # don't bother with resampling the stim ch
+                                # because it gets overwritten later on.
+                                ch_data = np.zeros(n_buf_samp)
+                            else:
+                                warnings.warn('Interpolating stim channel.'
+                                              ' Events may jitter.')
+                                oldrange = np.linspace(0, 1, samp + 1, True)
+                                newrange = np.linspace(0, 1, buf_len, False)
+                                newrange = newrange[r_sidx:r_eidx]
+                                ch_data = interp1d(
+                                    oldrange, np.append(ch_data, 0),
+                                    kind='zero')(newrange)
+                        else:
+                            ch_data = resample(ch_data, buf_len, samp,
+                                               npad=0)[r_sidx:r_eidx]
+                    this_data[count, :n_buf_samp] = ch_data
+                    count += 1
+                data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
+        data *= gains.T[sel]
+        data += offsets[sel]
+
+        # only try to read the stim channel if it's not None and it's
+        # actually one of the requested channels
+        if stim_channel is not None and (sel == stim_channel).sum() > 0:
+            stim_channel_idx = np.where(sel == stim_channel)[0]
+            if annot and annotmap:
+                evts = _read_annot(annot, annotmap, sfreq,
+                                   self._last_samps[fi])
+                data[stim_channel_idx, :] = evts[start:stop]
+            elif tal_channel is not None:
+                tal_channel_idx = np.where(sel == tal_channel)[0][0]
+                evts = _parse_tal_channel(data[tal_channel_idx])
+                self._raw_extras[fi]['events'] = evts
+
+                unique_annots = sorted(set([e[2] for e in evts]))
+                mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
+
+                stim = np.zeros(read_size)
+                for t_start, t_duration, annotation in evts:
+                    evid = mapping[annotation]
+                    n_start = int(t_start * sfreq)
+                    n_stop = int(t_duration * sfreq) + n_start - 1
+                    # make sure events without duration get one sample
+                    n_stop = n_stop if n_stop > n_start else n_start + 1
+                    if any(stim[n_start:n_stop]):
+                        raise NotImplementedError('EDF+ with overlapping '
+                                                  'events not supported.')
+                    stim[n_start:n_stop] = evid
+                data[stim_channel_idx, :] = stim[start:stop]
+            else:
+                # Allows support for up to 16-bit trigger values (2 ** 16 - 1)
+                stim = np.bitwise_and(data[stim_channel_idx].astype(int),
+                                      65535)
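+                # e.g. a (hypothetical) raw value of 0x1FFFF keeps only
+                # 0xFFFF == 65535; higher bits are dropped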
+                data[stim_channel_idx, :] = stim
+
+
+def _read_ch(fid, subtype, samp, data_size):
+    """Helper to read a number of samples for a single channel"""
+    if subtype in ('24BIT', 'bdf'):
+        ch_data = np.fromfile(fid, dtype=np.uint8,
+                              count=samp * data_size)
+        ch_data = ch_data.reshape(-1, 3).astype(np.int32)
+        ch_data = ((ch_data[:, 0]) +
+                   (ch_data[:, 1] << 8) +
+                   (ch_data[:, 2] << 16))
+        # 24th bit determines the sign
+        ch_data[ch_data >= (1 << 23)] -= (1 << 24)
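+        # e.g. the three bytes 0xff 0xff 0xff decode to 16777215, which the
+        # correction above maps to -1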
+    # edf data: 16bit data
+    else:
+        ch_data = np.fromfile(fid, dtype='<i2', count=samp)
+    return ch_data
+
+
+def _parse_tal_channel(tal_channel_data):
+    """Parse time-stamped annotation lists (TALs) in stim_channel
+    and return list of events.
+
+    Parameters
+    ----------
+    tal_channel_data : ndarray, shape = [n_samples]
+        channel data in EDF+ TAL format
+
+    Returns
+    -------
+    events : list
+        List of events. Each event contains [start, duration, annotation].
+
+    References
+    ----------
+    http://www.edfplus.info/specs/edfplus.html#tal
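+
+    Examples
+    --------
+    An illustrative sketch (hand-made byte values, not from a real file;
+    each sample packs two bytes as low + 256 * high)::
+
+        annot = b'+180\x14Lights off\x14\x00\x00'
+        samples = [ord(annot[i]) + 256 * ord(annot[i + 1])
+                   for i in range(0, len(annot), 2)]  # Python 2 bytes
+        _parse_tal_channel(samples)  # -> [[180.0, 0, 'Lights off']]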
+    """
+
+    # convert tal_channel to an ascii string
+    tals = bytearray()
+    for s in tal_channel_data:
+        i = int(s)
+        tals.extend([i % 256, i // 256])
+
+    regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
+    tal_list = re.findall(regex_tal, tals.decode('ascii'))
+    events = []
+    for ev in tal_list:
+        onset = float(ev[0])
+        duration = float(ev[2]) if ev[2] else 0
+        for annotation in ev[3].split('\x14')[1:]:
+            if annotation:
+                events.append([onset, duration, annotation])
+
+    return events
+
+
+def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
+    """Extracts all the information from the EDF+,BDF file"""
+
+    if eog is None:
+        eog = []
+    if misc is None:
+        misc = []
+    info = _empty_info()
+    info['filename'] = fname
+
+    edf_info = dict()
+    edf_info['annot'] = annot
+    edf_info['annotmap'] = annotmap
+    edf_info['events'] = []
+
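+    # For reference, the fixed-width ASCII header layout read below: 8-byte
+    # version, 80-byte subject id, 80-byte recording id, 8-byte date, 8-byte
+    # time, 8-byte header size, 44-byte reserved/subtype, 8-byte record
+    # count, 8-byte record duration, 4-byte channel count, then per-channel
+    # blocks (label, transducer, unit, physical/digital ranges, ...)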
+    with open(fname, 'rb') as fid:
+        assert fid.tell() == 0
+        fid.seek(8)
+
+        fid.read(80).strip().decode()  # subject id
+        fid.read(80).strip().decode()  # recording id
+        day, month, year = [int(x) for x in re.findall('(\d+)',
+                                                       fid.read(8).decode())]
+        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
+                                                        fid.read(8).decode())]
+        date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
+        info['meas_date'] = calendar.timegm(date.utctimetuple())
+
+        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
+        subtype = fid.read(44).strip().decode()[:5]
+        if len(subtype) > 0:
+            edf_info['subtype'] = subtype
+        else:
+            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()
+
+        edf_info['n_records'] = n_records = int(fid.read(8).decode())
+        # record length in seconds
+        record_length = float(fid.read(8).decode())
+        if record_length == 0:
+            edf_info['record_length'] = record_length = 1.
+            warnings.warn('Header information is incorrect for record length. '
+                          'Default record length set to 1.')
+        else:
+            edf_info['record_length'] = record_length
+        info['nchan'] = nchan = int(fid.read(4).decode())
+        channels = list(range(info['nchan']))
+        ch_names = [fid.read(16).strip().decode() for ch in channels]
+        for ch in channels:
+            fid.read(80)  # transducer
+        units = [fid.read(8).strip().decode() for ch in channels]
+        for i, unit in enumerate(units):
+            if unit == 'uV':
+                units[i] = 1e-6
+            else:
+                units[i] = 1
+        edf_info['units'] = units
+        physical_min = np.array([float(fid.read(8).decode())
+                                 for ch in channels])
+        edf_info['physical_min'] = physical_min
+        physical_max = np.array([float(fid.read(8).decode())
+                                 for ch in channels])
+        digital_min = np.array([float(fid.read(8).decode())
+                                for ch in channels])
+        edf_info['digital_min'] = digital_min
+        digital_max = np.array([float(fid.read(8).decode())
+                                for ch in channels])
+        prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1]
+        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
+                             for filt in prefiltering])
+        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
+                            for filt in prefiltering])
+
+        high_pass_default = 0.
+        if highpass.size == 0:
+            info['highpass'] = high_pass_default
+        elif all(highpass):
+            if highpass[0] == 'NaN':
+                info['highpass'] = high_pass_default
+            elif highpass[0] == 'DC':
+                info['highpass'] = 0.
+            else:
+                info['highpass'] = float(highpass[0])
+        else:
+            info['highpass'] = float(np.max(highpass))
+            warnings.warn('Channels contain different highpass filters. '
+                          'Highest filter setting will be stored.')
+
+        if lowpass.size == 0:
+            info['lowpass'] = None
+        elif all(lowpass):
+            if lowpass[0] == 'NaN':
+                info['lowpass'] = None
+            else:
+                info['lowpass'] = float(lowpass[0])
+        else:
+            info['lowpass'] = float(np.min(lowpass))
+            warnings.warn('Channels contain different lowpass filters.'
+                          ' Lowest filter setting will be stored.')
+        # number of samples per record
+        n_samps = np.array([int(fid.read(8).decode()) for ch in channels])
+        edf_info['n_samps'] = n_samps
+
+        fid.read(32 * info['nchan']).decode()  # reserved
+        assert fid.tell() == header_nbytes
+
+    physical_ranges = physical_max - physical_min
+    cals = digital_max - digital_min
+
+    # Some keys to be consistent with FIF measurement info
+    info['description'] = None
+    info['buffer_size_sec'] = 10.
+
+    if edf_info['subtype'] in ('24BIT', 'bdf'):
+        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
+    else:
+        edf_info['data_size'] = 2  # 16-bit (2 byte) integers
+
+    # Creates a list of dicts of eeg channels for raw.info
+    logger.info('Setting channel info structure...')
+    info['chs'] = []
+    info['ch_names'] = ch_names
+    tal_ch_name = 'EDF Annotations'
+    if tal_ch_name in ch_names:
+        tal_channel = ch_names.index(tal_ch_name)
+    else:
+        tal_channel = None
+    edf_info['tal_channel'] = tal_channel
+    if tal_channel is not None and stim_channel is not None and not preload:
+        raise RuntimeError('EDF+ Annotations (TAL) channel needs to be '
+                           'parsed completely on loading. You must set '
+                           'the preload parameter to True.')
+    if stim_channel == -1:
+        stim_channel = info['nchan'] - 1
+    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
+        ch_name, physical_range, cal = ch_info
+        chan_info = {}
+        chan_info['cal'] = cal
+        chan_info['logno'] = idx + 1
+        chan_info['scanno'] = idx + 1
+        chan_info['range'] = physical_range
+        chan_info['unit_mul'] = 0.
+        chan_info['ch_name'] = ch_name
+        chan_info['unit'] = FIFF.FIFF_UNIT_V
+        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
+        chan_info['kind'] = FIFF.FIFFV_EEG_CH
+        chan_info['loc'] = np.zeros(12)
+        if ch_name in eog or idx in eog or idx - nchan in eog:
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['kind'] = FIFF.FIFFV_EOG_CH
+        if ch_name in misc or idx in misc or idx - nchan in misc:
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+        check1 = stim_channel == ch_name
+        check2 = stim_channel == idx
+        check3 = info['nchan'] > 1
+        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
+        if stim_check:
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+            chan_info['kind'] = FIFF.FIFFV_STIM_CH
+            chan_info['ch_name'] = 'STI 014'
+            info['ch_names'][idx] = chan_info['ch_name']
+            units[idx] = 1
+            if isinstance(stim_channel, str):
+                stim_channel = idx
+        if tal_channel == idx:
+            chan_info['range'] = 1
+            chan_info['cal'] = 1
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+        info['chs'].append(chan_info)
+    edf_info['stim_channel'] = stim_channel
+
+    # sfreq defined as the max sampling rate of eeg
+    picks = pick_types(info, meg=False, eeg=True)
+    if len(picks) == 0:
+        edf_info['max_samp'] = max_samp = n_samps.max()
+    else:
+        edf_info['max_samp'] = max_samp = n_samps[picks].max()
+    info['sfreq'] = max_samp / record_length
+    edf_info['nsamples'] = int(n_records * max_samp)
+
+    if info['lowpass'] is None:
+        info['lowpass'] = info['sfreq'] / 2.
+
+    return info, edf_info
+
+
+def _read_annot(annot, annotmap, sfreq, data_length):
+    """Annotation File Reader
+
+    Parameters
+    ----------
+    annot : str
+        Path to annotation file.
+    annotmap : str
+        Path to annotation map file containing mapping from label to trigger.
+    sfreq : float
+        Sampling frequency.
+    data_length : int
+        Length of the data file.
+
+    Returns
+    -------
+    stim_channel : ndarray
+        An array containing stimulus trigger events.
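+
+    Notes
+    -----
+    A sketch of the expected file contents, inferred from the parsing
+    patterns below (label and trigger values are hypothetical)::
+
+        annot file:     +44.25,Stimulus
+        annotmap file:  Stimulus:1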
+    """
+    pat = r'([+/-]\d+\.\d+),(\w+)'
+    annot = open(annot).read()
+    triggers = re.findall(pat, annot)
+    times, values = zip(*triggers)
+    times = [float(time) * sfreq for time in times]
+
+    pat = r'(\w+):(\d+)'
+    annotmap = open(annotmap).read()
+    mappings = re.findall(pat, annotmap)
+    maps = {}
+    for mapping in mappings:
+        maps[mapping[0]] = mapping[1]
+    triggers = [int(maps[value]) for value in values]
+
+    stim_channel = np.zeros(data_length)
+    for time, trigger in zip(times, triggers):
+        stim_channel[int(time)] = trigger
+
+    return stim_channel
+
+
+def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
+                 stim_channel=-1, annot=None, annotmap=None,
+                 preload=False, verbose=None):
+    """Reader function for EDF+, BDF conversion to FIF
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the EDF+/BDF file.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the electrodes in the
+        edf file. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes in the
+        edf file. Default is None.
+    stim_channel : str | int | None
+        The channel name or channel index (starting at 0).
+        -1 corresponds to the last channel (default).
+        If None, there will be no stim channel added.
+    annot : str | None
+        Path to annotation file.
+        If None, no derived stim channel will be added (for files requiring
+        annotation file to interpret stim channel).
+    annotmap : str | None
+        Path to annotation map file containing mapping from label to trigger.
+        Must be specified if annot is not None.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Instance of RawEDF
+        A Raw object containing EDF data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
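+
+    Examples
+    --------
+    A minimal usage sketch (``sample.edf`` is a hypothetical path)::
+
+        >>> raw = read_raw_edf('sample.edf', preload=True)  # doctest: +SKIP
+        >>> events = mne.find_events(raw)  # doctest: +SKIP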
+    """
+    return RawEDF(input_fname=input_fname, montage=montage, eog=eog, misc=misc,
+                  stim_channel=stim_channel, annot=annot, annotmap=annotmap,
+                  preload=preload, verbose=verbose)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/tests/test_edf.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/tests/test_edf.py
new file mode 100644
index 0000000..7d68102
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/edf/tests/test_edf.py
@@ -0,0 +1,275 @@
+"""Data Equivalence Tests"""
+from __future__ import print_function
+
+# Authors: Teon Brooks <teon.brooks at gmail.com>
+#          Martin Billinger <martin.billinger at tugraz.at>
+#          Alan Leggitt <alan.leggitt at ucsf.edu>
+#          Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+import warnings
+
+from nose.tools import assert_equal, assert_true
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_raises, assert_allclose)
+from scipy import io
+import numpy as np
+
+from mne import pick_types, concatenate_raws
+from mne.externals.six import iterbytes
+from mne.utils import _TempDir, run_tests_if_main, requires_pandas
+from mne.io import Raw, read_raw_edf, RawArray
+from mne.io.tests.test_raw import _test_concat
+import mne.io.edf.edf as edfmodule
+from mne.event import find_events
+
+warnings.simplefilter('always')
+
+FILE = inspect.getfile(inspect.currentframe())
+data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
+montage_path = op.join(data_dir, 'biosemi.hpts')
+bdf_path = op.join(data_dir, 'test.bdf')
+edf_path = op.join(data_dir, 'test.edf')
+edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
+bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
+edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
+edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
+edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
+edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
+
+
+eog = ['REOG', 'LEOG', 'IEOG']
+misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
+
+
+def test_concat():
+    """Test EDF concatenation"""
+    _test_concat(read_raw_edf, bdf_path)
+
+
+def test_bdf_data():
+    """Test reading raw bdf files"""
+    raw_py = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
+                          misc=misc, preload=True)
+    assert_true('RawEDF' in repr(raw_py))
+    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
+    data_py, _ = raw_py[picks]
+
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(bdf_eeglab_path)
+    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
+    data_eeglab = raw_eeglab[picks]
+    # the BDF was saved in single precision (~7 decimal digits in MATLAB)
+    assert_array_almost_equal(data_py, data_eeglab, 8)
+
+    # Manually checking that float coordinates are imported
+    assert_true((raw_py.info['chs'][0]['loc']).any())
+    assert_true((raw_py.info['chs'][25]['loc']).any())
+    assert_true((raw_py.info['chs'][63]['loc']).any())
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+
+def test_edf_data():
+    """Test reading raw edf files"""
+    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
+                          preload=True)
+
+    picks = pick_types(raw_py.info, meg=False, eeg=True,
+                       exclude=['EDF Annotations'])
+    data_py, _ = raw_py[picks]
+
+    print(raw_py)  # to test repr
+    print(raw_py.info)  # to test Info repr
+
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(edf_eeglab_path)
+    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
+    data_eeglab = raw_eeglab[picks]
+
+    assert_array_almost_equal(data_py, data_eeglab, 10)
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+    # Test uneven sampling
+    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
+    data_py, _ = raw_py[0]
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
+    raw_eeglab = raw_eeglab['data']
+    data_eeglab = raw_eeglab[0]
+
+    # match upsampling
+    upsample = len(data_eeglab) / len(raw_py)
+    data_py = np.repeat(data_py, repeats=upsample)
+    assert_array_equal(data_py, data_eeglab)
+
+
+def test_read_segment():
+    """Test writing raw edf files when preload is False"""
+    tempdir = _TempDir()
+    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:139, :]
+    data11, times11 = raw11[:139, :]
+    assert_allclose(data1, data11, rtol=1e-6)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+    data2, times2 = raw1[0, 0:1]
+    assert_array_equal(data2[0], data1[0, 0:1])
+    assert_array_equal(times2, times1[0:1])
+
+    buffer_fname = op.join(tempdir, 'buffer')
+    for preload in (buffer_fname, True, False):  # false here means "delayed"
+        raw2 = read_raw_edf(edf_path, stim_channel=None, preload=preload)
+        if preload is False:
+            raw2.load_data()
+        raw2_file = op.join(tempdir, 'test2-raw.fif')
+        raw2.save(raw2_file, overwrite=True)
+        data2, times2 = raw2[:139, :]
+        assert_allclose(data1, data2, rtol=1e-6)
+        assert_array_equal(times1, times2)
+
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+
+    # test the _read_segment function by only loading some of the data
+    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
+    raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)
+
+    # select some random range of data to compare
+    data1, times1 = raw1[:, 345:417]
+    data2, times2 = raw2[:, 345:417]
+    assert_array_equal(data1, data2)
+    assert_array_equal(times1, times2)
+
+
+def test_append():
+    """Test appending raw edf objects using Raw.append"""
+    for preload in (True, False):
+        raw = read_raw_edf(bdf_path, preload=False)
+        raw0 = raw.copy()
+        raw1 = raw.copy()
+        raw0.append(raw1)
+        assert_true(2 * len(raw) == len(raw0))
+        assert_allclose(np.tile(raw[:, :][0], (1, 2)), raw0[:, :][0])
+
+    # different types can't combine
+    raw = read_raw_edf(bdf_path, preload=True)
+    raw0 = raw.copy()
+    raw1 = raw.copy()
+    raw2 = RawArray(raw[:, :][0], raw.info)
+    assert_raises(ValueError, raw.append, raw2)
+
+
+def test_parse_annotation():
+    """Test parsing the tal channel"""
+
+    # test the parser
+    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
+             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
+             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
+             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
+             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
+             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
+    annot = [a for a in iterbytes(annot)]
+    annot[1::2] = [a * 256 for a in annot[1::2]]
+    tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
+    events = edfmodule._parse_tal_channel(tal_channel)
+    assert_equal(events, [[180.0, 0, 'Lights off'],
+                          [180.0, 0, 'Close door'],
+                          [180.0, 0, 'Lights off'],
+                          [180.0, 0, 'Close door'],
+                          [3.14, 4.2, 'nothing'],
+                          [1800.2, 25.5, 'Apnea']])
+
+
+def test_edf_annotations():
+    """Test if events are detected correctly in a typical MNE workflow."""
+
+    # test an actual file
+    raw = read_raw_edf(edf_path, preload=True)
+    edf_events = find_events(raw, output='step', shortest_event=0,
+                             stim_channel='STI 014')
+
+    # onset, duration, id
+    events = [[0.1344, 0.2560, 2],
+              [0.3904, 1.0000, 2],
+              [2.0000, 0.0000, 3],
+              [2.5000, 2.5000, 2]]
+    events = np.array(events)
+    events[:, :2] *= 512  # convert time to samples
+    events = np.array(events, dtype=int)
+    events[:, 1] -= 1
+    events[events[:, 1] <= 0, 1] = 1
+    events[:, 1] += events[:, 0]
+
+    onsets = events[:, [0, 2]]
+    offsets = events[:, [1, 2]]
+
+    events = np.zeros((2 * events.shape[0], 3), dtype=int)
+    events[0::2, [0, 2]] = onsets
+    events[1::2, [0, 1]] = offsets
+
+    assert_array_equal(edf_events, events)
+
+
+def test_write_annotations():
+    """Test writing raw files when annotations were parsed."""
+    tempdir = _TempDir()
+    raw1 = read_raw_edf(edf_path, preload=True)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
+    raw11 = Raw(raw1_file, preload=True)
+    data1, times1 = raw1[:, :]
+    data11, times11 = raw11[:, :]
+
+    assert_array_almost_equal(data1, data11)
+    assert_array_almost_equal(times1, times11)
+    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
+
+    assert_raises(RuntimeError, read_raw_edf, edf_path, preload=False)
+
+
+def test_edf_stim_channel():
+    """Test stim channel for edf file"""
+    raw = read_raw_edf(edf_stim_channel_path, preload=True,
+                       stim_channel=-1)
+    true_data = np.loadtxt(edf_txt_stim_channel_path).T
+
+    # the EDF writer pads the data if the file is too small
+    _, ns = true_data.shape
+    edf_data = raw._data[:, :ns]
+
+    # assert stim channels are equal
+    assert_array_equal(true_data[-1], edf_data[-1])
+
+    # assert data are equal
+    assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test edf Raw Pandas exporter"""
+    for path in [edf_path, bdf_path]:
+        raw = read_raw_edf(path, stim_channel=None, preload=True)
+        _, times = raw[0, :10]
+        df = raw.to_data_frame()
+        assert_true((df.columns == raw.ch_names).all())
+        assert_array_equal(np.round(times * 1e3), df.index.values[:10])
+        df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
+        assert_true('time' in df.index.names)
+        assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/__init__.py
new file mode 100644
index 0000000..59f9db1
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/__init__.py
@@ -0,0 +1,5 @@
+"""EGI module for conversion to FIF"""
+
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+
+from .egi import read_raw_egi, _combine_triggers
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/egi.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/egi.py
new file mode 100644
index 0000000..7b38a5b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/egi.py
@@ -0,0 +1,330 @@
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+#          simplified BSD-3 license
+
+import datetime
+import time
+import warnings
+
+import numpy as np
+
+from ..base import _BaseRaw, _check_update_montage
+from ..meas_info import _empty_info
+from ..constants import FIFF
+from ...utils import verbose, logger
+
+
+def _read_header(fid):
+    """Read EGI binary header"""
+
+    version = np.fromfile(fid, np.int32, 1)[0]
+
+    # EGI headers are big-endian, so on a little-endian machine a plausible
+    # version number only appears after byte-swapping
+    if version > 6 & ~np.bitwise_and(version, 6):
+        version = version.byteswap().astype(np.uint32)
+    else:
+        raise ValueError('Watch out. This does not seem to be a simple '
+                         'binary EGI file.')
+
+    def my_fread(*x, **y):
+        return np.fromfile(*x, **y)[0]
+
+    info = dict(
+        version=version,
+        year=my_fread(fid, '>i2', 1),
+        month=my_fread(fid, '>i2', 1),
+        day=my_fread(fid, '>i2', 1),
+        hour=my_fread(fid, '>i2', 1),
+        minute=my_fread(fid, '>i2', 1),
+        second=my_fread(fid, '>i2', 1),
+        millisecond=my_fread(fid, '>i4', 1),
+        samp_rate=my_fread(fid, '>i2', 1),
+        n_channels=my_fread(fid, '>i2', 1),
+        gain=my_fread(fid, '>i2', 1),
+        bits=my_fread(fid, '>i2', 1),
+        value_range=my_fread(fid, '>i2', 1)
+    )
+
+    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
+    precision = np.bitwise_and(version, 6)
+    if precision == 0:
+        raise RuntimeError('Floating point precision is undefined.')
+
+    if unsegmented:
+        info.update(dict(n_categories=0,
+                         n_segments=1,
+                         n_samples=np.fromfile(fid, '>i4', 1)[0],
+                         n_events=np.fromfile(fid, '>i2', 1)[0],
+                         event_codes=[],
+                         category_names=[],
+                         category_lengths=[],
+                         pre_baseline=0))
+        for event in range(info['n_events']):
+            event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1'))
+            info['event_codes'].append(event_codes)
+        info['event_codes'] = np.array(info['event_codes'])
+    else:
+        raise NotImplementedError('Only continuous files are supported')
+
+    info.update(dict(precision=precision, unsegmented=unsegmented))
+
+    return info
+
+
+def _read_events(fid, info):
+    """Read events"""
+    unpack = [info[k] for k in ['n_events', 'n_segments', 'n_channels']]
+    n_events, n_segments, n_channels = unpack
+    n_samples = 1 if info['unsegmented'] else info['n_samples']
+    events = np.zeros([n_events, n_segments * info['n_samples']])
+    dtype, bytesize = {2: ('>i2', 2), 4: ('>f4', 4),
+                       6: ('>f8', 8)}[info['precision']]
+
+    info.update({'dtype': dtype, 'bytesize': bytesize})
+    beg_dat = fid.tell()
+
+    for ii in range(info['n_events']):
+        fid.seek(beg_dat + (int(n_channels) + ii) * bytesize, 0)
+        events[ii] = np.fromfile(fid, dtype, n_samples)
+        fid.seek(int((n_channels + n_events) * bytesize), 1)
+    return events
+
+
+def _read_data(fid, info):
+    """Aux function"""
+    if not info['unsegmented']:
+        raise NotImplementedError('Only continuous files are supported')
+
+    fid.seek(36 + info['n_events'] * 4, 0)  # skip header
+    readsize = (info['n_channels'] + info['n_events']) * info['n_samples']
+    final_shape = (info['n_samples'], info['n_channels'] + info['n_events'])
+    data = np.fromfile(fid, info['dtype'], readsize).reshape(final_shape).T
+    return data
+
+
+def _combine_triggers(data, remapping=None):
+    """Combine binary triggers"""
+    new_trigger = np.zeros(data[0].shape)
+    first = np.nonzero(data[0])[0]
+    for d in data[1:]:
+        if np.intersect1d(d.nonzero()[0], first).any():
+            raise RuntimeError('Events must be mutually exclusive')
+
+    if remapping is None:
+        remapping = np.arange(len(data)) + 1
+
+    for d, event_id in zip(data, remapping):
+        idx = d.nonzero()
+        if np.any(idx):
+            new_trigger[idx] += event_id
+
+    return new_trigger[None]
+
+
+@verbose
+def read_raw_egi(input_fname, montage=None, eog=None, misc=None,
+                 include=None, exclude=None, verbose=None):
+    """Read EGI simple binary as raw object
+
+    Note. The trigger channel names are based on the arbitrary,
+    user-dependent event codes used. However, this function will attempt
+    to generate a synthetic trigger channel named ``STI 014`` in
+    accordance with the general Neuromag / MNE naming pattern.
+    The event_id assignment equals np.arange(n_events - n_excluded) + 1.
+    The resulting `event_id` mapping is stored as an attribute of the
+    resulting raw object but will be ignored when saving to FIF.
+    Note. The trigger channel is artificially constructed based on
+    timestamps received by the Netstation, so triggers have only short
+    durations.
+    This step will fail if events are not mutually exclusive.
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the raw file.
+    montage : str | None | instance of montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Default is None.
+    include : None | list
+       The event channels to be included when creating the synthetic
+       trigger. Defaults to None.
+       Note. Overrides `exclude` parameter.
+    exclude : None | list
+       The event channels to be ignored when creating the synthetic
+       trigger. Defaults to None. If None, channels that have more than
+       one event and the ``sync`` and ``TREV`` channels will be
+       ignored.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Instance of RawEGI
+        A Raw object containing EGI data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
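+
+    Examples
+    --------
+    A minimal usage sketch (``sample.raw`` is a hypothetical path)::
+
+        >>> raw = read_raw_egi('sample.raw')  # doctest: +SKIP
+        >>> print(raw.event_id)  # doctest: +SKIP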
+    """
+    return RawEGI(input_fname, montage, eog, misc, include, exclude, verbose)
+
+
+class RawEGI(_BaseRaw):
+    """Raw object from EGI simple binary file
+    """
+    @verbose
+    def __init__(self, input_fname, montage=None, eog=None, misc=None,
+                 include=None, exclude=None, verbose=None):
+        """docstring for __init__"""
+        if eog is None:
+            eog = []
+        if misc is None:
+            misc = []
+        with open(input_fname, 'rb') as fid:  # 'rb' important for py3k
+            logger.info('Reading EGI header from %s...' % input_fname)
+            egi_info = _read_header(fid)
+            logger.info('    Reading events ...')
+            _read_events(fid, egi_info)  # update info + jump
+            logger.info('    Reading data ...')
+            # reads events as well
+            data = _read_data(fid, egi_info).astype(np.float64)
+            if egi_info['value_range'] != 0 and egi_info['bits'] != 0:
+                cal = egi_info['value_range'] / 2 ** egi_info['bits']
+            else:
+                cal = 1e-6
+            data[:egi_info['n_channels']] = data[:egi_info['n_channels']] * cal
+
+        logger.info('    Assembling measurement info ...')
+
+        if egi_info['n_events'] > 0:
+            event_codes = list(egi_info['event_codes'])
+            egi_events = data[-egi_info['n_events']:]
+
+            if include is None:
+                exclude_list = ['sync', 'TREV'] if exclude is None else exclude
+                exclude_inds = [i for i, k in enumerate(event_codes) if k in
+                                exclude_list]
+                more_excludes = []
+                if exclude is None:
+                    for ii, event in enumerate(egi_events):
+                        if event.sum() <= 1 and event_codes[ii]:
+                            more_excludes.append(ii)
+                if len(exclude_inds) + len(more_excludes) == len(event_codes):
+                    warnings.warn('Did not find any event code with more '
+                                  'than one event.', RuntimeWarning)
+                else:
+                    exclude_inds.extend(more_excludes)
+
+                exclude_inds.sort()
+                include_ = [i for i in np.arange(egi_info['n_events']) if
+                            i not in exclude_inds]
+                include_names = [k for i, k in enumerate(event_codes)
+                                 if i in include_]
+            else:
+                include_ = [i for i, k in enumerate(event_codes)
+                            if k in include]
+                include_names = include
+
+            for kk, v in [('include', include_names), ('exclude', exclude)]:
+                if isinstance(v, list):
+                    for k in v:
+                        if k not in event_codes:
+                            raise ValueError('Could not find event named '
+                                             '"%s"' % k)
+                elif v is not None:
+                    raise ValueError('`%s` must be None or of type list' % kk)
+
+            event_ids = np.arange(len(include_)) + 1
+            try:
+                logger.info('    Synthesizing trigger channel "STI 014" ...')
+                logger.info('    Excluding events {%s} ...' %
+                            ", ".join([k for i, k in enumerate(event_codes)
+                                       if i not in include_]))
+                new_trigger = _combine_triggers(egi_events[include_],
+                                                remapping=event_ids)
+                data = np.concatenate([data, new_trigger])
+            except RuntimeError:
+                logger.info('    Found multiple events at the same time '
+                            'sample. Could not create trigger channel.')
+                new_trigger = None
+
+            self.event_id = dict(zip([e for e in event_codes if e in
+                                      include_names], event_ids))
+        else:
+            # No events
+            self.event_id = None
+            new_trigger = None
+        info = _empty_info()
+        info['hpi_subsystem'] = None
+        info['events'], info['hpi_results'], info['hpi_meas'] = [], [], []
+        info['sfreq'] = float(egi_info['samp_rate'])
+        info['filename'] = input_fname
+        my_time = datetime.datetime(
+            egi_info['year'],
+            egi_info['month'],
+            egi_info['day'],
+            egi_info['hour'],
+            egi_info['minute'],
+            egi_info['second']
+        )
+        my_timestamp = time.mktime(my_time.timetuple())
+        info['meas_date'] = np.array([my_timestamp], dtype=np.float32)
+        info['projs'] = []
+        ch_names = ['EEG %03d' % (i + 1) for i in
+                    range(egi_info['n_channels'])]
+        ch_names.extend(list(egi_info['event_codes']))
+        if new_trigger is not None:
+            ch_names.append('STI 014')  # our new_trigger
+        info['nchan'] = nchan = len(data)
+        info['chs'] = []
+        info['ch_names'] = ch_names
+        info['bads'] = []
+        info['comps'] = []
+        info['custom_ref_applied'] = False
+        for ii, ch_name in enumerate(ch_names):
+            ch_info = {'cal': cal,
+                       'logno': ii + 1,
+                       'scanno': ii + 1,
+                       'range': 1.0,
+                       'unit_mul': 0,
+                       'ch_name': ch_name,
+                       'unit': FIFF.FIFF_UNIT_V,
+                       'coord_frame': FIFF.FIFFV_COORD_HEAD,
+                       'coil_type': FIFF.FIFFV_COIL_EEG,
+                       'kind': FIFF.FIFFV_EEG_CH,
+                       'loc': np.array([0, 0, 0, 1] * 3, dtype='f4')}
+            if ch_name in eog or ii in eog or ii - nchan in eog:
+                ch_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+                ch_info['kind'] = FIFF.FIFFV_EOG_CH
+            if ch_name in misc or ii in misc or ii - nchan in misc:
+                ch_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+                ch_info['kind'] = FIFF.FIFFV_MISC_CH
+
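+            # EGI event-code channels have 4-character names (e.g. 'TRSP'),
+            # so treat them, and the synthetic 'STI 014', as stim channels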
+            if len(ch_name) == 4 or ch_name.startswith('STI'):
+                u = {'unit_mul': 0,
+                     'cal': 1,
+                     'coil_type': FIFF.FIFFV_COIL_NONE,
+                     'unit': FIFF.FIFF_UNIT_NONE,
+                     'kind': FIFF.FIFFV_STIM_CH}
+                ch_info.update(u)
+            info['chs'].append(ch_info)
+
+        _check_update_montage(info, montage)
+        orig_format = {'>i2': 'int', '>f4': 'single',
+                       '>f8': 'double'}[egi_info['dtype']]
+        super(RawEGI, self).__init__(
+            info, data, filenames=[input_fname], orig_format=orig_format,
+            verbose=verbose)
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
+                    % (self.first_samp, self.last_samp,
+                       float(self.first_samp) / self.info['sfreq'],
+                       float(self.last_samp) / self.info['sfreq']))
+        # use information from egi
+        logger.info('Ready.')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/tests/test_egi.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/tests/test_egi.py
new file mode 100644
index 0000000..73274bd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/egi/tests/test_egi.py
@@ -0,0 +1,82 @@
+# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          simplified BSD-3 license
+
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_raises, assert_equal
+
+from mne import find_events, pick_types, concatenate_raws
+from mne.io import read_raw_egi, Raw
+from mne.io.egi import _combine_triggers
+from mne.utils import _TempDir
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(op.realpath(__file__)), 'data')
+egi_fname = op.join(base_dir, 'test_egi.raw')
+
+
+def test_io_egi():
+    """Test importing EGI simple binary files"""
+    # test default
+    tempdir = _TempDir()
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always', category=RuntimeWarning)
+        raw = read_raw_egi(egi_fname, include=None)
+        assert_true('RawEGI' in repr(raw))
+        raw.load_data()  # currently does nothing
+        assert_equal(len(w), 1)
+        assert_true(w[0].category == RuntimeWarning)
+        msg = 'Did not find any event code with more than one event.'
+        assert_true(msg in '%s' % w[0].message)
+
+    include = ['TRSP', 'XXX1']
+    raw = read_raw_egi(egi_fname, include=include)
+    repr(raw)
+    repr(raw.info)
+
+    assert_equal('eeg' in raw, True)
+    out_fname = op.join(tempdir, 'test_egi_raw.fif')
+    raw.save(out_fname)
+
+    raw2 = Raw(out_fname, preload=True)
+    data1, times1 = raw[:10, :]
+    data2, times2 = raw2[:10, :]
+    assert_array_almost_equal(data1, data2, 9)
+    assert_array_almost_equal(times1, times2)
+
+    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
+    assert_equal(len(eeg_chan), 256)
+    picks = pick_types(raw.info, eeg=True)
+    assert_equal(len(picks), 256)
+    assert_equal('STI 014' in raw.ch_names, True)
+
+    events = find_events(raw, stim_channel='STI 014')
+    assert_equal(len(events), 2)  # ground truth
+    assert_equal(np.unique(events[:, 1])[0], 0)
+    assert_true(np.unique(events[:, 0])[0] != 0)
+    assert_true(np.unique(events[:, 2])[0] != 0)
+    triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]])
+
+    # test trigger functionality
+    assert_raises(RuntimeError, _combine_triggers, triggers, None)
+    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
+    events_ids = [12, 24]
+    new_trigger = _combine_triggers(triggers, events_ids)
+    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))
+
+    assert_raises(ValueError, read_raw_egi, egi_fname,
+                  include=['Foo'])
+    assert_raises(ValueError, read_raw_egi, egi_fname,
+                  exclude=['Bar'])
+    for ii, k in enumerate(include, 1):
+        assert_true(k in raw.event_id)
+        assert_true(raw.event_id[k] == ii)
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw.copy(), raw])
+    assert_equal(raw_concat.n_times, 2 * raw.n_times)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/__init__.py
new file mode 100644
index 0000000..1a9952e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/__init__.py
@@ -0,0 +1,2 @@
+from .raw import RawFIF
+from .raw import read_raw_fif
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/raw.py
new file mode 100644
index 0000000..5d1fc42
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/raw.py
@@ -0,0 +1,487 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy
+import warnings
+import os
+import os.path as op
+
+import numpy as np
+
+from ..constants import FIFF
+from ..open import fiff_open, _fiff_get_fid, _get_next_fname
+from ..meas_info import read_meas_info
+from ..tree import dir_tree_find
+from ..tag import read_tag, read_tag_info
+from ..proj import make_eeg_average_ref_proj, _needs_eeg_average_ref_proj
+from ..compensator import get_current_comp, set_current_comp, make_compensator
+from ..base import _BaseRaw, _RawShell, _check_raw_compatibility
+
+from ...utils import check_fname, logger, verbose
+
+
+class RawFIF(_BaseRaw):
+    """Raw data
+
+    Parameters
+    ----------
+    fnames : list or str
+        A list of the raw files to treat as a Raw instance, or a single
+        raw file. For files that have automatically been split, only the
+        name of the first file has to be specified. Filenames should end
+        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
+        raw_tsss.fif or raw_tsss.fif.gz.
+    allow_maxshield : bool (default False)
+        If True, allow loading of data that has been processed with
+        MaxShield. MaxShield-processed data should generally not be
+        loaded directly, but should be processed using SSS first.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    proj : bool
+        Apply the signal space projection (SSP) operators present in
+        the file to the data. Note: Once the projectors have been
+        applied, they can no longer be removed. It is usually not
+        recommended to apply the projectors at this point as they are
+        applied automatically later on (e.g. when computing inverse
+        solutions).
+    compensation : None | int
+        If None the compensation in the data is not modified.
+        If set to n, e.g. 3, apply gradient compensation of grade n as
+        for CTF systems.
+    add_eeg_ref : bool
+        If True, add average EEG reference projector (if it's not already
+        present).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    ch_names : list of string
+        List of channels' names.
+    n_times : int
+        Total number of time points in the raw file.
+    preload : bool
+        Indicates whether raw data are in memory.
+    verbose : bool, str, int, or None
+        See above.
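+
+    Examples
+    --------
+    A minimal usage sketch (``sample_raw.fif`` is a hypothetical path)::
+
+        >>> raw = RawFIF('sample_raw.fif', preload=True)  # doctest: +SKIP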
+    """
+    @verbose
+    def __init__(self, fnames, allow_maxshield=False, preload=False,
+                 proj=False, compensation=None, add_eeg_ref=True,
+                 verbose=None):
+
+        if not isinstance(fnames, list):
+            fnames = [fnames]
+        fnames = [op.realpath(f) for f in fnames]
+        split_fnames = []
+
+        raws = []
+        for ii, fname in enumerate(fnames):
+            do_check_fname = fname not in split_fnames
+            raw, next_fname = self._read_raw_file(fname, allow_maxshield,
+                                                  preload, compensation,
+                                                  do_check_fname)
+            raws.append(raw)
+            if next_fname is not None:
+                if not op.exists(next_fname):
+                    logger.warning('Split raw file detected but next file %s '
+                                   'does not exist.' % next_fname)
+                    continue
+                if next_fname in fnames:
+                    # the user manually specified the split files
+                    logger.info('Note: %s is part of a split raw file. It is '
+                                'not necessary to manually specify the parts '
+                                'in this case; simply construct Raw using '
+                                'the name of the first file.' % next_fname)
+                    continue
+
+                # process this file next
+                fnames.insert(ii + 1, next_fname)
+                split_fnames.append(next_fname)
+
+        _check_raw_compatibility(raws)
+
+        super(RawFIF, self).__init__(
+            copy.deepcopy(raws[0].info), False,
+            [r.first_samp for r in raws], [r.last_samp for r in raws],
+            [r.filename for r in raws], [r._raw_extras for r in raws],
+            copy.deepcopy(raws[0].comp), raws[0]._orig_comp_grade,
+            raws[0].orig_format, None, verbose=verbose)
+
+        # combine information from each raw file to construct self
+        if add_eeg_ref and _needs_eeg_average_ref_proj(self.info):
+            eeg_ref = make_eeg_average_ref_proj(self.info, activate=False)
+            self.add_proj(eeg_ref)
+
+        if preload:
+            self._preload_data(preload)
+        else:
+            self.preload = False
+
+        # setup the SSP projector
+        if proj:
+            self.apply_proj()
+
+    @verbose
+    def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
+                       do_check_fname=True, verbose=None):
+        """Read in header information from a raw file"""
+        logger.info('Opening raw data file %s...' % fname)
+
+        if do_check_fname:
+            check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif',
+                                       'raw_tsss.fif', 'raw.fif.gz',
+                                       'raw_sss.fif.gz', 'raw_tsss.fif.gz'))
+
+        #   Read in the whole file if preload is on and .fif.gz (saves time)
+        ext = os.path.splitext(fname)[1].lower()
+        whole_file = preload if '.gz' in ext else False
+        ff, tree, _ = fiff_open(fname, preload=whole_file)
+        with ff as fid:
+            #   Read the measurement info
+
+            info, meas = read_meas_info(fid, tree)
+
+            #   Locate the data of interest
+            raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
+            if len(raw_node) == 0:
+                raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
+                if len(raw_node) == 0:
+                    raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
+                    msg = ('This file contains raw Internal Active '
+                           'Shielding data. It may be distorted. Elekta '
+                           'recommends it be run through MaxFilter to '
+                           'produce reliable results. Consider closing '
+                           'the file and running MaxFilter on the data.')
+                    if len(raw_node) == 0:
+                        raise ValueError('No raw data in %s' % fname)
+                    elif allow_maxshield:
+                        info['maxshield'] = True
+                        warnings.warn(msg)
+                    else:
+                        msg += (' Use allow_maxshield=True if you are sure you'
+                                ' want to load the data despite this warning.')
+                        raise ValueError(msg)
+
+            if len(raw_node) == 1:
+                raw_node = raw_node[0]
+
+            #   Set up the output structure
+            info['filename'] = fname
+
+            #   Process the directory
+            directory = raw_node['directory']
+            nent = raw_node['nent']
+            nchan = int(info['nchan'])
+            first = 0
+            first_samp = 0
+            first_skip = 0
+
+            #   Get first sample tag if it is there
+            if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, directory[first].pos)
+                first_samp = int(tag.data)
+                first += 1
+
+            #   Omit initial skip
+            if directory[first].kind == FIFF.FIFF_DATA_SKIP:
+                # This first skip can be applied only after we know the bufsize
+                tag = read_tag(fid, directory[first].pos)
+                first_skip = int(tag.data)
+                first += 1
+
+            raw = _RawShell()
+            raw.filename = fname
+            raw.first_samp = first_samp
+
+            #   Go through the remaining tags in the directory
+            raw_extras = list()
+            nskip = 0
+            orig_format = None
+            for k in range(first, nent):
+                ent = directory[k]
+                if ent.kind == FIFF.FIFF_DATA_SKIP:
+                    tag = read_tag(fid, ent.pos)
+                    nskip = int(tag.data)
+                elif ent.kind == FIFF.FIFF_DATA_BUFFER:
+                    #   Figure out the number of samples in this buffer
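+                    #   nsamp = buffer size in bytes / (bytes per sample *
+                    #   nchan), e.g. 2 bytes per sample for 16-bit formats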
+                    if ent.type == FIFF.FIFFT_DAU_PACK16:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_SHORT:
+                        nsamp = ent.size // (2 * nchan)
+                    elif ent.type == FIFF.FIFFT_FLOAT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_DOUBLE:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_INT:
+                        nsamp = ent.size // (4 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                        nsamp = ent.size // (8 * nchan)
+                    elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                        nsamp = ent.size // (16 * nchan)
+                    else:
+                        raise ValueError('Cannot handle data buffers of type '
+                                         '%d' % ent.type)
+                    if orig_format is None:
+                        if ent.type == FIFF.FIFFT_DAU_PACK16:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_SHORT:
+                            orig_format = 'short'
+                        elif ent.type == FIFF.FIFFT_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_DOUBLE:
+                            orig_format = 'double'
+                        elif ent.type == FIFF.FIFFT_INT:
+                            orig_format = 'int'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                            orig_format = 'single'
+                        elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                            orig_format = 'double'
+
+                    #  Do we have an initial skip pending?
+                    if first_skip > 0:
+                        first_samp += nsamp * first_skip
+                        raw.first_samp = first_samp
+                        first_skip = 0
+
+                    #  Do we have a skip pending?
+                    if nskip > 0:
+                        raw_extras.append(dict(
+                            ent=None, first=first_samp, nsamp=nskip * nsamp,
+                            last=first_samp + nskip * nsamp - 1))
+                        first_samp += nskip * nsamp
+                        nskip = 0
+
+                    #  Add a data buffer
+                    raw_extras.append(dict(ent=ent, first=first_samp,
+                                           last=first_samp + nsamp - 1,
+                                           nsamp=nsamp))
+                    first_samp += nsamp
+
+            next_fname = _get_next_fname(fid, fname, tree)
+
+        raw.last_samp = first_samp - 1
+        raw.orig_format = orig_format
+
+        #   Add the calibration factors
+        cals = np.zeros(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+
+        raw._cals = cals
+        raw._raw_extras = raw_extras
+        raw.comp = None
+        raw._orig_comp_grade = None
+
+        #   Set up the CTF compensator
+        current_comp = get_current_comp(info)
+        if current_comp is not None:
+            logger.info('Current compensation grade : %d' % current_comp)
+
+        if compensation is not None:
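+            # Build a matrix taking the data from its current grade to the
+            # requested one; make_compensator may return None (e.g. when no
+            # grade change is needed), in which case nothing is modified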
+            raw.comp = make_compensator(info, current_comp, compensation)
+            if raw.comp is not None:
+                logger.info('Appropriate compensator added to change to '
+                            'grade %d.' % (compensation))
+                raw._orig_comp_grade = current_comp
+                set_current_comp(info, compensation)
+
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    raw.first_samp, raw.last_samp,
+                    float(raw.first_samp) / info['sfreq'],
+                    float(raw.last_samp) / info['sfreq']))
+
+        # store the original buffer size
+        info['buffer_size_sec'] = (np.median([r['nsamp']
+                                              for r in raw_extras]) /
+                                   info['sfreq'])
+
+        raw.info = info
+        raw.verbose = verbose
+
+        logger.info('Ready.')
+
+        return raw, next_fname
+
+    @property
+    def _dtype(self):
+        """Get the dtype to use to store data from disk"""
+        if self._dtype_ is not None:
+            return self._dtype_
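+        # Lazily determine the dtype by peeking at the first real data
+        # buffer tag on disk: complex buffers are stored as complex128,
+        # everything else as float64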
+        dtype = None
+        for raw_extra, filename in zip(self._raw_extras, self._filenames):
+            for this in raw_extra:
+                if this['ent'] is not None:
+                    with _fiff_get_fid(filename) as fid:
+                        fid.seek(this['ent'].pos, 0)
+                        tag = read_tag_info(fid)
+                        if tag is not None:
+                            if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT,
+                                            FIFF.FIFFT_COMPLEX_DOUBLE):
+                                dtype = np.complex128
+                            else:
+                                dtype = np.float64
+                    if dtype is not None:
+                        break
+            if dtype is not None:
+                break
+        if dtype is None:
+            raise RuntimeError('bug in reading')
+        self._dtype_ = dtype
+        return dtype
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a segment of data from a file"""
+        with _fiff_get_fid(self._filenames[fi]) as fid:
+            for this in self._raw_extras[fi]:
+                #  Do we need this buffer
+                if this['last'] >= start:
+                    #  The picking logic is a bit complicated
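+                    #  Four cases (cf. the debug letters below):
+                    #  W: the request spans this whole buffer
+                    #  M: the request starts and stops inside it (middle)
+                    #  E: it starts inside and runs past the end
+                    #  B: it started earlier and stops inside (beginning)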
+                    if stop > this['last'] and start < this['first']:
+                        #    We need the whole buffer
+                        first_pick = 0
+                        last_pick = this['nsamp']
+                        logger.debug('W')
+
+                    elif start >= this['first']:
+                        first_pick = start - this['first']
+                        if stop <= this['last']:
+                            #   Something from the middle
+                            last_pick = this['nsamp'] + stop - this['last']
+                            logger.debug('M')
+                        else:
+                            #   From the middle to the end
+                            last_pick = this['nsamp']
+                            logger.debug('E')
+                    else:
+                        #    From the beginning to the middle
+                        first_pick = 0
+                        last_pick = stop - this['first'] + 1
+                        logger.debug('B')
+
+                    #   Now we are ready to pick
+                    picksamp = last_pick - first_pick
+                    if picksamp > 0:
+                        # only read data if it exists
+                        if this['ent'] is not None:
+                            one = read_tag(fid, this['ent'].pos,
+                                           shape=(this['nsamp'],
+                                                  self.info['nchan']),
+                                           rlims=(first_pick, last_pick)).data
+                            one.shape = (picksamp, self.info['nchan'])
+                            one = one.T.astype(data.dtype)
+                            data_view = data[:, offset:(offset + picksamp)]
+                            if mult is not None:
+                                data_view[:] = np.dot(mult[fi], one)
+                            else:  # cals is not None
+                                if isinstance(idx, slice):
+                                    data_view[:] = one[idx]
+                                else:
+                                    # faster to iterate than doing
+                                    # one = one[idx]
+                                    for ii, ix in enumerate(idx):
+                                        data_view[ii] = one[ix]
+                                data_view *= cals
+                        offset += picksamp
+
+                #   Done?
+                if this['last'] >= stop:
+                    break
+
+    def fix_mag_coil_types(self):
+        """Fix Elekta magnetometer coil types
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object. Operates in place.
+
+        Notes
+        -----
+        This function changes magnetometer coil types 3022 (T1: SQ20483N) and
+        3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
+        records in the info structure.
+
+        Neuromag Vectorview systems can contain magnetometers with two
+        different coil sizes (3022 and 3023 vs. 3024). The systems
+        incorporating coils of type 3024 were introduced last and are used at
+        the majority of MEG sites. At some sites with 3024 magnetometers,
+        the data files still define the magnetometers as type 3022 to
+        ensure compatibility with older versions of the Neuromag software.
+        Coil type 3024 is fully supported in the MNE software as well as in
+        the present version of the Neuromag software, so it is now safe to
+        upgrade data files to use the true coil type.
+
+        .. note:: The effect of the difference between the coil sizes on the
+                  current estimates computed by the MNE software is very small.
+                  Therefore the use of mne_fix_mag_coil_types is not mandatory.
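+
+        A minimal usage sketch (the file name is hypothetical)::
+
+            raw = Raw('sample_raw.fif')  # hypothetical file name
+            raw.fix_mag_coil_types()  # operates in place, returns self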
+        """
+        from ...channels import fix_mag_coil_types
+        fix_mag_coil_types(self.info)
+        return self
+
+
+def read_raw_fif(fnames, allow_maxshield=False, preload=False,
+                 proj=False, compensation=None, add_eeg_ref=True,
+                 verbose=None):
+    """Reader function for Raw FIF data
+
+    Parameters
+    ----------
+    fnames : list | str
+        A list of the raw files to treat as a Raw instance, or a single
+        raw file. For files that have automatically been split, only the
+        name of the first file has to be specified. Filenames should end
+        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
+        raw_tsss.fif or raw_tsss.fif.gz.
+    allow_maxshield : bool (default False)
+        If True, allow loading of data that has been processed with
+        Maxshield. Maxshield-processed data should generally not be loaded
+        directly, but should be processed using SSS first.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    proj : bool
+        Apply the signal space projection (SSP) operators present in
+        the file to the data. Note: Once the projectors have been
+        applied, they can no longer be removed. It is usually not
+        recommended to apply the projectors at this point as they are
+        applied automatically later on (e.g. when computing inverse
+        solutions).
+    compensation : None | int
+        If None the compensation in the data is not modified.
+        If set to n, e.g. 3, apply gradient compensation of grade n as
+        for CTF systems.
+    add_eeg_ref : bool
+        If True, add average EEG reference projector (if it's not already
+        present).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawFIF
+        A Raw object containing FIF data.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
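+
+    A minimal usage sketch (the file name is hypothetical)::
+
+        raw = read_raw_fif('sample_audvis_raw.fif', preload=True)
+        data, times = raw[:, :]  # all channels, all samples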
+    """
+    return RawFIF(fnames=fnames, allow_maxshield=allow_maxshield,
+                  preload=preload, proj=proj, compensation=compensation,
+                  add_eeg_ref=add_eeg_ref, verbose=verbose)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/tests/test_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/tests/test_raw.py
new file mode 100644
index 0000000..e3f561e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/fiff/tests/test_raw.py
@@ -0,0 +1,1188 @@
+from __future__ import print_function
+
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import glob
+from copy import deepcopy
+import warnings
+import itertools as itt
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose, assert_equal)
+from nose.tools import assert_true, assert_raises, assert_not_equal
+
+from mne.datasets import testing
+from mne.io.constants import FIFF
+from mne.io import Raw, RawArray, concatenate_raws, read_raw_fif
+from mne.io.tests.test_raw import _test_concat
+from mne import (concatenate_events, find_events, equalize_channels,
+                 compute_proj_raw, pick_types, pick_channels, create_info)
+from mne.utils import (_TempDir, requires_pandas, slow_test,
+                       requires_mne, run_subprocess, run_tests_if_main)
+from mne.externals.six.moves import zip, cPickle as pickle
+from mne.io.proc_history import _get_sss_rank
+from mne.io.pick import _picks_by_type
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
+test_fif_fname = op.join(base_dir, 'test_raw.fif')
+test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
+ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
+ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
+fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
+bad_file_works = op.join(base_dir, 'test_bads.txt')
+bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
+hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
+hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+
+
+def test_fix_types():
+    """Test fixing of channel types
+    """
+    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
+                          (ctf_fname, False)):
+        raw = Raw(fname)
+        mag_picks = pick_types(raw.info, meg='mag')
+        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
+        # we don't actually have any files suffering from this problem, so
+        # fake it
+        if change:
+            for ii in mag_picks:
+                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
+        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        raw.fix_mag_coil_types()
+        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        if not change:
+            assert_array_equal(orig_types, new_types)
+        else:
+            assert_array_equal(orig_types[other_picks], new_types[other_picks])
+            assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
+            assert_true((new_types[mag_picks] ==
+                         FIFF.FIFFV_COIL_VV_MAG_T3).all())
+
+
+def test_concat():
+    """Test RawFIF concatenation
+    """
+    # we trim the file to save lots of memory and some time
+    tempdir = _TempDir()
+    raw = read_raw_fif(test_fif_fname)
+    raw.crop(0, 2., copy=False)
+    test_name = op.join(tempdir, 'test_raw.fif')
+    raw.save(test_name)
+    # now run the standard test
+    _test_concat(read_raw_fif, test_name)
+
+
+@testing.requires_testing_data
+def test_hash_raw():
+    """Test hashing raw objects
+    """
+    raw = read_raw_fif(fif_fname)
+    assert_raises(RuntimeError, raw.__hash__)
+    raw = Raw(fif_fname).crop(0, 0.5, False)
+    raw.load_data()
+    raw_2 = Raw(fif_fname).crop(0, 0.5, False)
+    raw_2.load_data()
+    assert_equal(hash(raw), hash(raw_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
+
+    raw_2._data[0, 0] -= 1
+    assert_not_equal(hash(raw), hash(raw_2))
+
+
+@testing.requires_testing_data
+def test_subject_info():
+    """Test reading subject information
+    """
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 1, False)
+    assert_true(raw.info['subject_info'] is None)
+    # fake some subject data
+    keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
+            'hand']
+    vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
+    subject_info = dict()
+    for key, val in zip(keys, vals):
+        subject_info[key] = val
+    raw.info['subject_info'] = subject_info
+    out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
+    raw.save(out_fname, overwrite=True)
+    raw_read = Raw(out_fname)
+    for key in keys:
+        assert_equal(subject_info[key], raw_read.info['subject_info'][key])
+    raw_read.anonymize()
+    assert_true(raw_read.info.get('subject_info') is None)
+    out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
+    raw_read.save(out_fname_anon, overwrite=True)
+    raw_read = Raw(out_fname_anon)
+    assert_true(raw_read.info.get('subject_info') is None)
+
+
+@testing.requires_testing_data
+def test_copy_append():
+    """Test raw copying and appending combinations
+    """
+    raw = Raw(fif_fname, preload=True).copy()
+    raw_full = Raw(fif_fname)
+    raw_full.append(raw)
+    data = raw_full[:, :][0]
+    assert_equal(data.shape[1], 2 * raw._data.shape[1])
+
+
+@slow_test
+@testing.requires_testing_data
+def test_rank_estimation():
+    """Test raw rank estimation
+    """
+    iter_tests = itt.product(
+        [fif_fname, hp_fif_fname],  # sss
+        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
+    )
+    for fname, scalings in iter_tests:
+        raw = Raw(fname)
+        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
+                                                        meg_combined=True)
+        n_meg = len(picks_meg)
+        n_eeg = len(picks_eeg)
+
+        raw = Raw(fname, preload=True)
+        if 'proc_history' not in raw.info:
+            expected_rank = n_meg + n_eeg
+        else:
+            mf = raw.info['proc_history'][0]['max_info']
+            expected_rank = _get_sss_rank(mf) + n_eeg
+        assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
+
+        assert_array_equal(raw.estimate_rank(picks=picks_eeg,
+                                             scalings=scalings),
+                           n_eeg)
+
+        raw = Raw(fname, preload=False)
+        if 'sss' in fname:
+            tstart, tstop = 0., 30.
+            raw.add_proj(compute_proj_raw(raw))
+            raw.apply_proj()
+        else:
+            tstart, tstop = 10., 20.
+
+        raw.apply_proj()
+        n_proj = len(raw.info['projs'])
+
+        assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
+                                             scalings=scalings),
+                           expected_rank - (1 if 'sss' in fname else n_proj))
+
+
+@testing.requires_testing_data
+def test_output_formats():
+    """Test saving and loading raw data using multiple formats
+    """
+    tempdir = _TempDir()
+    formats = ['short', 'int', 'single', 'double']
+    tols = [1e-4, 1e-7, 1e-7, 1e-15]
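+    # tolerances loosen as the on-disk precision drops, from 64-bit
+    # 'double' down to 16-bit 'short'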
+
+    # let's fake a raw file with different formats
+    raw = Raw(test_fif_fname).crop(0, 1, copy=False)
+
+    temp_file = op.join(tempdir, 'raw.fif')
+    for ii, (fmt, tol) in enumerate(zip(formats, tols)):
+        # Let's test the overwriting error throwing while we're at it
+        if ii > 0:
+            assert_raises(IOError, raw.save, temp_file, fmt=fmt)
+        raw.save(temp_file, fmt=fmt, overwrite=True)
+        raw2 = Raw(temp_file)
+        raw2_data = raw2[:, :][0]
+        assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
+        assert_equal(raw2.orig_format, fmt)
+
+
+def _compare_combo(raw, new, times, n_times):
+    for ti in times:  # let's do a subset of points for speed
+        orig = raw[:, ti % n_times][0]
+        # these are almost_equals because of possible dtype differences
+        assert_allclose(orig, new[:, ti][0])
+
+
+@slow_test
+@testing.requires_testing_data
+def test_multiple_files():
+    """Test loading multiple files simultaneously
+    """
+    # split file
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 10, False)
+    raw.load_data()
+    raw.load_data()  # test no operation
+    split_size = 3.  # in seconds
+    sfreq = raw.info['sfreq']
+    nsamp = (raw.last_samp - raw.first_samp)
+    tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+    assert_equal(raw.n_times, len(raw.times))
+
+    # going in reverse order so the last fname is the first file (needed later)
+    raws = [None] * len(tmins)
+    for ri in range(len(tmins) - 1, -1, -1):
+        fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
+        raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
+        raws[ri] = Raw(fname)
+    events = [find_events(r, stim_channel='STI 014') for r in raws]
+    last_samps = [r.last_samp for r in raws]
+    first_samps = [r.first_samp for r in raws]
+
+    # test concatenation of split file
+    assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
+    all_raw_1, events1 = concatenate_raws(raws, preload=False,
+                                          events_list=events)
+    assert_equal(raw.first_samp, all_raw_1.first_samp)
+    assert_equal(raw.last_samp, all_raw_1.last_samp)
+    assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
+    raws[0] = Raw(fname)
+    all_raw_2 = concatenate_raws(raws, preload=True)
+    assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
+
+    # test proper event treatment for split files
+    events2 = concatenate_events(events, first_samps, last_samps)
+    events3 = find_events(all_raw_2, stim_channel='STI 014')
+    assert_array_equal(events1, events2)
+    assert_array_equal(events1, events3)
+
+    # test various methods of combining files
+    raw = Raw(fif_fname, preload=True)
+    n_times = raw.n_times
+    # make sure that all our data match
+    times = list(range(0, 2 * n_times, 999))
+    # add potentially problematic points
+    times.extend([n_times - 1, n_times, 2 * n_times - 1])
+
+    raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
+    _compare_combo(raw, raw_combo0, times, n_times)
+    raw_combo = Raw([fif_fname, fif_fname], preload=False)
+    _compare_combo(raw, raw_combo, times, n_times)
+    raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
+    _compare_combo(raw, raw_combo, times, n_times)
+    assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
+    assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
+    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
+    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
+
+    # with all data preloaded, result should be preloaded
+    raw_combo = Raw(fif_fname, preload=True)
+    raw_combo.append(Raw(fif_fname, preload=True))
+    assert_true(raw_combo.preload is True)
+    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    # with any data not preloaded, don't set result as preloaded
+    raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
+                                  Raw(fif_fname, preload=False)])
+    assert_true(raw_combo.preload is False)
+    assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
+                       find_events(raw_combo0, stim_channel='STI 014'))
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    # user should be able to force data to be preloaded upon concat
+    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
+                                  Raw(fif_fname, preload=True)],
+                                 preload=True)
+    assert_true(raw_combo.preload is True)
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
+                                  Raw(fif_fname, preload=True)],
+                                 preload='memmap3.dat')
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
+                                  Raw(fif_fname, preload=True)],
+                                 preload='memmap4.dat')
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
+                                  Raw(fif_fname, preload=False)],
+                                 preload='memmap5.dat')
+    _compare_combo(raw, raw_combo, times, n_times)
+
+    # verify that combining raws with different projectors throws an exception
+    raw.add_proj([], remove_existing=True)
+    assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
+
+    # now test event treatment for concatenated raw files
+    events = [find_events(raw, stim_channel='STI 014'),
+              find_events(raw, stim_channel='STI 014')]
+    last_samps = [raw.last_samp, raw.last_samp]
+    first_samps = [raw.first_samp, raw.first_samp]
+    events = concatenate_events(events, first_samps, last_samps)
+    events2 = find_events(raw_combo0, stim_channel='STI 014')
+    assert_array_equal(events, events2)
+
+    # check out the len method
+    assert_equal(len(raw), raw.n_times)
+    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
+
+
+@testing.requires_testing_data
+def test_split_files():
+    """Test writing and reading of split raw files
+    """
+    tempdir = _TempDir()
+    raw_1 = Raw(fif_fname, preload=True)
+    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # tol: sfreq rounding
+    split_fname = op.join(tempdir, 'split_raw.fif')
+    raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
+
+    raw_2 = Raw(split_fname)
+    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # tol: sfreq rounding
+    data_1, times_1 = raw_1[:, :]
+    data_2, times_2 = raw_2[:, :]
+    assert_array_equal(data_1, data_2)
+    assert_array_equal(times_1, times_2)
+
+    # test the case where the silly user specifies the split files
+    fnames = [split_fname]
+    fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        raw_2 = Raw(fnames)
+    data_2, times_2 = raw_2[:, :]
+    assert_array_equal(data_1, data_2)
+    assert_array_equal(times_1, times_2)
+
+
+def test_load_bad_channels():
+    """Test reading/writing of bad channels
+    """
+    tempdir = _TempDir()
+    # Load correctly marked file (manually done in mne_process_raw)
+    raw_marked = Raw(fif_bad_marked_fname)
+    correct_bads = raw_marked.info['bads']
+    raw = Raw(test_fif_fname)
+    # Make sure it starts clean
+    assert_array_equal(raw.info['bads'], [])
+
+    # Test normal case
+    raw.load_bad_channels(bad_file_works)
+    # Write it out, read it in, and check
+    raw.save(op.join(tempdir, 'foo_raw.fif'))
+    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+    assert_equal(correct_bads, raw_new.info['bads'])
+    # Reset it
+    raw.info['bads'] = []
+
+    # Test bad case
+    assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
+
+    # Test forcing the bad case
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        raw.load_bad_channels(bad_file_wrong, force=True)
+        n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
+        assert_equal(n_found, 1)  # there could be other irrelevant errors
+        # write it out, read it in, and check
+        raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
+        raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+        assert_equal(correct_bads, raw_new.info['bads'])
+
+    # Check that bad channels are cleared
+    raw.load_bad_channels(None)
+    raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
+    raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
+    assert_equal([], raw_new.info['bads'])
+
+
+@slow_test
+@testing.requires_testing_data
+def test_io_raw():
+    """Test IO for raw data (Neuromag + CTF + gz)
+    """
+    tempdir = _TempDir()
+    # test unicode io
+    for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
+        with Raw(fif_fname) as r:
+            assert_true('Raw' in repr(r))
+            desc1 = r.info['description'] = chars.decode('utf-8')
+            temp_file = op.join(tempdir, 'raw.fif')
+            r.save(temp_file, overwrite=True)
+            with Raw(temp_file) as r2:
+                desc2 = r2.info['description']
+            assert_equal(desc1, desc2)
+
+    # Let's construct a simple test for IO first
+    raw = Raw(fif_fname).crop(0, 3.5, False)
+    raw.load_data()
+    # put in some data that we know the values of
+    data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
+    raw._data[:, :] = data
+    # save it somewhere
+    fname = op.join(tempdir, 'test_copy_raw.fif')
+    raw.save(fname, buffer_size_sec=1.0)
+    # read it in, make sure the whole thing matches
+    raw = Raw(fname)
+    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
+    # let's read portions across the 1-sec tag boundary, too
+    inds = raw.time_as_index([1.75, 2.25])
+    sl = slice(inds[0], inds[1])
+    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
+
+    # now let's do some real I/O
+    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
+    fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
+    for fname_in, fname_out in zip(fnames_in, fnames_out):
+        fname_out = op.join(tempdir, fname_out)
+        raw = Raw(fname_in)
+
+        nchan = raw.info['nchan']
+        ch_names = raw.info['ch_names']
+        meg_channels_idx = [k for k in range(nchan)
+                            if ch_names[k][0] == 'M']
+        n_channels = 100
+        meg_channels_idx = meg_channels_idx[:n_channels]
+        start, stop = raw.time_as_index([0, 5])
+        data, times = raw[meg_channels_idx, start:(stop + 1)]
+        meg_ch_names = [ch_names[k] for k in meg_channels_idx]
+
+        # Set up pick list: MEG + STI 014 - bad channels
+        include = ['STI 014']
+        include += meg_ch_names
+        picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
+                           misc=True, ref_meg=True, include=include,
+                           exclude='bads')
+
+        # Writing with drop_small_buffer True
+        raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
+                 drop_small_buffer=True, overwrite=True)
+        raw2 = Raw(fname_out)
+
+        sel = pick_channels(raw2.ch_names, meg_ch_names)
+        data2, times2 = raw2[sel, :]
+        assert_true(times2.max() <= 3)
+
+        # Writing
+        raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
+
+        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
+            assert_equal(len(raw.info['dig']), 146)
+
+        raw2 = Raw(fname_out)
+
+        sel = pick_channels(raw2.ch_names, meg_ch_names)
+        data2, times2 = raw2[sel, :]
+
+        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
+        assert_allclose(times, times2)
+        assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
+
+        # check transformations
+        for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
+            if raw.info[trans] is None:
+                assert_true(raw2.info[trans] is None)
+            else:
+                assert_array_equal(raw.info[trans]['trans'],
+                                   raw2.info[trans]['trans'])
+
+                # check transformation 'from' and 'to'
+                if trans.startswith('dev'):
+                    from_id = FIFF.FIFFV_COORD_DEVICE
+                else:
+                    from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+                if trans[4:8] == 'head':
+                    to_id = FIFF.FIFFV_COORD_HEAD
+                else:
+                    to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+                for raw_ in [raw, raw2]:
+                    assert_equal(raw_.info[trans]['from'], from_id)
+                    assert_equal(raw_.info[trans]['to'], to_id)
+
+        if fname_in == fif_fname or fname_in == fif_fname + '.gz':
+            assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        raw.save(raw_badname)
+        Raw(raw_badname)
+    assert_true(len(w) > 0)  # len(w) should be 2 but Travis sometimes has more
+
+
+@testing.requires_testing_data
+def test_io_complex():
+    """Test IO with complex data types
+    """
+    tempdir = _TempDir()
+    dtypes = [np.complex64, np.complex128]
+
+    raw = Raw(fif_fname, preload=True)
+    picks = np.arange(5)
+    start, stop = raw.time_as_index([0, 5])
+
+    data_orig, _ = raw[picks, start:stop]
+
+    for di, dtype in enumerate(dtypes):
+        imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
+                             data_orig.shape[1]), dtype)
+
+        raw_cp = raw.copy()
+        raw_cp._data = np.array(raw_cp._data, dtype)
+        raw_cp._data[picks, start:stop] += imag_rand
+        # this should throw an error because it's complex
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
+                        overwrite=True)
+            # warning gets thrown on every instance b/c simplefilter('always')
+            assert_equal(len(w), 1)
+
+        raw2 = Raw(op.join(tempdir, 'raw.fif'))
+        raw2_data, _ = raw2[picks, :]
+        n_samp = raw2_data.shape[1]
+        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
+        # with preloading
+        raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
+        raw2_data, _ = raw2[picks, :]
+        n_samp = raw2_data.shape[1]
+        assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
+
+
+@testing.requires_testing_data
+def test_getitem():
+    """Test getitem/indexing of Raw
+    """
+    for preload in [False, True, 'memmap.dat']:
+        raw = Raw(fif_fname, preload=preload)
+        data, times = raw[0, :]
+        data1, times1 = raw[0]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+        data, times = raw[0:2, :]
+        data1, times1 = raw[0:2]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+        data1, times1 = raw[[0, 1]]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+
+
+@testing.requires_testing_data
+def test_proj():
+    """Test SSP proj operations
+    """
+    tempdir = _TempDir()
+    for proj in [True, False]:
+        raw = Raw(fif_fname, preload=False, proj=proj)
+        assert_true(all(p['active'] == proj for p in raw.info['projs']))
+
+        data, times = raw[0:2, :]
+        data1, times1 = raw[0:2]
+        assert_array_equal(data, data1)
+        assert_array_equal(times, times1)
+
+        # test adding / deleting proj
+        if proj:
+            assert_raises(ValueError, raw.add_proj, [],
+                          remove_existing=True)
+            assert_raises(ValueError, raw.del_proj, 0)
+        else:
+            projs = deepcopy(raw.info['projs'])
+            n_proj = len(raw.info['projs'])
+            raw.del_proj(0)
+            assert_equal(len(raw.info['projs']), n_proj - 1)
+            raw.add_proj(projs, remove_existing=False)
+            assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
+            raw.add_proj(projs, remove_existing=True)
+            assert_equal(len(raw.info['projs']), n_proj)
+
+    # test apply_proj() with and without preload
+    for preload in [True, False]:
+        raw = Raw(fif_fname, preload=preload, proj=False)
+        data, times = raw[:, 0:2]
+        raw.apply_proj()
+        data_proj_1 = np.dot(raw._projector, data)
+
+        # load the file again without proj
+        raw = Raw(fif_fname, preload=preload, proj=False)
+
+        # write the file with proj. activated, make sure proj has been applied
+        raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
+        raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
+        data_proj_2, _ = raw2[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_true(all(p['active'] for p in raw2.info['projs']))
+
+        # read orig file with proj. active
+        raw2 = Raw(fif_fname, preload=preload, proj=True)
+        data_proj_2, _ = raw2[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_true(all(p['active'] for p in raw2.info['projs']))
+
+        # test that apply_proj works
+        raw.apply_proj()
+        data_proj_2, _ = raw[:, 0:2]
+        assert_allclose(data_proj_1, data_proj_2)
+        assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
+
+    tempdir = _TempDir()
+    out_fname = op.join(tempdir, 'test_raw.fif')
+    raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
+    raw.pick_types(meg=False, eeg=True)
+    raw.info['projs'] = [raw.info['projs'][-1]]
+    raw._data.fill(0)
+    raw._data[-1] = 1.
+    raw.save(out_fname)
+    raw = read_raw_fif(out_fname, proj=True, preload=False)
+    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
+
+
+@testing.requires_testing_data
+def test_preload_modify():
+    """Test preloading and modifying data
+    """
+    tempdir = _TempDir()
+    for preload in [False, True, 'memmap.dat']:
+        raw = Raw(fif_fname, preload=preload)
+
+        nsamp = raw.last_samp - raw.first_samp + 1
+        picks = pick_types(raw.info, meg='grad', exclude='bads')
+
+        data = np.random.randn(len(picks), nsamp // 2)
+
+        try:
+            raw[picks, :nsamp // 2] = data
+        except RuntimeError as err:
+            if not preload:
+                continue
+            else:
+                raise err
+
+        tmp_fname = op.join(tempdir, 'raw.fif')
+        raw.save(tmp_fname, overwrite=True)
+
+        raw_new = Raw(tmp_fname)
+        data_new, _ = raw_new[picks, :nsamp // 2]
+
+        assert_allclose(data, data_new)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_filter():
+    """Test filtering (FIR and IIR) and Raw.apply_function interface
+    """
+    raw = Raw(fif_fname).crop(0, 7, False)
+    raw.load_data()
+    sig_dec = 11
+    sig_dec_notch = 12
+    sig_dec_notch_fit = 12
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+
+    raw_lp = raw.copy()
+    raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
+
+    raw_hp = raw.copy()
+    raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
+
+    raw_bp = raw.copy()
+    raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
+
+    raw_bs = raw.copy()
+    raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
+
+    data, _ = raw[picks, :]
+
+    lp_data, _ = raw_lp[picks, :]
+    hp_data, _ = raw_hp[picks, :]
+    bp_data, _ = raw_bp[picks, :]
+    bs_data, _ = raw_bs[picks, :]
+
+    assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
+    assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
+
+    raw_lp_iir = raw.copy()
+    raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
+    raw_hp_iir = raw.copy()
+    raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
+    raw_bp_iir = raw.copy()
+    raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
+    lp_data_iir, _ = raw_lp_iir[picks, :]
+    hp_data_iir, _ = raw_hp_iir[picks, :]
+    bp_data_iir, _ = raw_bp_iir[picks, :]
+    summation = lp_data_iir + hp_data_iir + bp_data_iir
+    assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
+                              sig_dec)
+
+    # make sure we didn't touch other channels
+    data, _ = raw[picks_meg[4:], :]
+    bp_data, _ = raw_bp[picks_meg[4:], :]
+    assert_array_equal(data, bp_data)
+    bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
+    assert_array_equal(data, bp_data_iir)
+
+    # do a very simple check on line filtering
+    raw_bs = raw.copy()
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
+        data_bs, _ = raw_bs[picks, :]
+        raw_notch = raw.copy()
+        raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
+    data_notch, _ = raw_notch[picks, :]
+    assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
+
+    # now use the sinusoidal fitting
+    raw_notch = raw.copy()
+    raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
+    data_notch, _ = raw_notch[picks, :]
+    data, _ = raw[picks, :]
+    assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
+
+
+@testing.requires_testing_data
+def test_crop():
+    """Test cropping raw files
+    """
+    # split a concatenated file to test a difficult case
+    raw = Raw([fif_fname, fif_fname], preload=False)
+    split_size = 10.  # in seconds
+    sfreq = raw.info['sfreq']
+    nsamp = (raw.last_samp - raw.first_samp + 1)
+
+    # do an annoying case (off-by-one splitting)
+    tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
+    tmins = np.sort(tmins)
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.crop(tmin, tmax, True)
+    all_raw_2 = concatenate_raws(raws, preload=False)
+    assert_equal(raw.first_samp, all_raw_2.first_samp)
+    assert_equal(raw.last_samp, all_raw_2.last_samp)
+    assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
+
+    tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
+    tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
+    tmaxs /= sfreq
+    tmins /= sfreq
+
+    # going in reverse order so the last fname is the first file (needed later)
+    raws = [None] * len(tmins)
+    for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
+        raws[ri] = raw.copy()
+        raws[ri].crop(tmin, tmax, False)
+    # test concatenation of split file
+    all_raw_1 = concatenate_raws(raws, preload=False)
+
+    all_raw_2 = raw.crop(0, None, True)
+    for ar in [all_raw_1, all_raw_2]:
+        assert_equal(raw.first_samp, ar.first_samp)
+        assert_equal(raw.last_samp, ar.last_samp)
+        assert_array_equal(raw[:, :][0], ar[:, :][0])
+
+
+@testing.requires_testing_data
+def test_resample():
+    """Test resample (with I/O and multiple files)
+    """
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 3, False)
+    raw.load_data()
+    raw_resamp = raw.copy()
+    sfreq = raw.info['sfreq']
+    # test parallel on upsample
+    raw_resamp.resample(sfreq * 2, n_jobs=2)
+    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
+    raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
+    raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
+    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
+    assert_equal(raw.n_times, raw_resamp.n_times / 2)
+    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
+    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
+    # test non-parallel on downsample
+    raw_resamp.resample(sfreq, n_jobs=1)
+    assert_equal(raw_resamp.info['sfreq'], sfreq)
+    assert_equal(raw._data.shape, raw_resamp._data.shape)
+    assert_equal(raw.first_samp, raw_resamp.first_samp)
+    assert_equal(raw.last_samp, raw_resamp.last_samp)
+    # upsampling then downsampling doubles resampling error, but this still
+    # works (hooray). Note that the stim channels had to be sub-sampled
+    # without filtering to be accurately preserved
+    # note we have to treat MEG and EEG+STIM channels differently (tols)
+    assert_allclose(raw._data[:306, 200:-200],
+                    raw_resamp._data[:306, 200:-200],
+                    rtol=1e-2, atol=1e-12)
+    assert_allclose(raw._data[306:, 200:-200],
+                    raw_resamp._data[306:, 200:-200],
+                    rtol=1e-2, atol=1e-7)
+
+    # now check multiple file support w/resampling, as order of operations
+    # (concat, resample) should not affect our data
+    raw1 = raw.copy()
+    raw2 = raw.copy()
+    raw3 = raw.copy()
+    raw4 = raw.copy()
+    raw1 = concatenate_raws([raw1, raw2])
+    raw1.resample(10.)
+    raw3.resample(10.)
+    raw4.resample(10.)
+    raw3 = concatenate_raws([raw3, raw4])
+    assert_array_equal(raw1._data, raw3._data)
+    assert_array_equal(raw1._first_samps, raw3._first_samps)
+    assert_array_equal(raw1._last_samps, raw3._last_samps)
+    assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
+    assert_equal(raw1.first_samp, raw3.first_samp)
+    assert_equal(raw1.last_samp, raw3.last_samp)
+    assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
+
+    # test resampling of stim channel
+
+    # basic decimation
+    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(8.)._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation of multiple stim channels
+    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
+    assert_allclose(raw.resample(8.)._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0],
+                     [1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation that could potentially drop events if the decimation is
+    # done naively
+    stim = [0, 0, 0, 1, 1, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(4.)._data,
+                    [[0, 1, 1, 0]])
+
+    # two events are merged in this case (warning)
+    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        raw.resample(8.)
+        assert_true(len(w) == 1)
+
+    # events are dropped in this case (warning)
+    stim = [0, 1, 1, 0, 0, 1, 1, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        raw.resample(4.)
+        assert_true(len(w) == 1)
+
+    # test resampling events: this should no longer give a warning
+    stim = [0, 1, 1, 0, 0, 1, 1, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    events = find_events(raw)
+    raw, events = raw.resample(4., events=events)
+    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
+
+    # test copy flag
+    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    raw_resampled = raw.resample(4., copy=True)
+    assert_true(raw_resampled is not raw)
+    raw_resampled = raw.resample(4., copy=False)
+    assert_true(raw_resampled is raw)
+
+    # resample should still work even when no stim channel is present
+    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
+    raw.resample(10)
+    assert_true(len(raw) == 10)
+
+
+@testing.requires_testing_data
+def test_hilbert():
+    """Test computation of analytic signal using hilbert
+    """
+    raw = Raw(fif_fname, preload=True)
+    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
+    picks = picks_meg[:4]
+
+    raw_filt = raw.copy()
+    raw_filt.filter(10, 20)
+    raw_filt_2 = raw_filt.copy()
+
+    raw2 = raw.copy()
+    raw3 = raw.copy()
+    raw.apply_hilbert(picks)
+    raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
+
+    # Test custom n_fft
+    raw_filt.apply_hilbert(picks)
+    raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
+    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
+    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
+                    atol=1e-13, rtol=1e-2)
+    assert_raises(ValueError, raw3.apply_hilbert, picks,
+                  n_fft=raw3.n_times - 100)
+
+    env = np.abs(raw._data[picks, :])
+    assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
+
+
+@testing.requires_testing_data
+def test_raw_copy():
+    """Test Raw copy
+    """
+    raw = Raw(fif_fname, preload=True)
+    data, _ = raw[:, :]
+    copied = raw.copy()
+    copied_data, _ = copied[:, :]
+    assert_array_equal(data, copied_data)
+    assert_equal(sorted(raw.__dict__.keys()),
+                 sorted(copied.__dict__.keys()))
+
+    raw = Raw(fif_fname, preload=False)
+    data, _ = raw[:, :]
+    copied = raw.copy()
+    copied_data, _ = copied[:, :]
+    assert_array_equal(data, copied_data)
+    assert_equal(sorted(raw.__dict__.keys()),
+                 sorted(copied.__dict__.keys()))
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test raw Pandas exporter"""
+    raw = Raw(test_fif_fname, preload=True)
+    _, times = raw[0, :10]
+    df = raw.to_data_frame()
+    assert_true((df.columns == raw.ch_names).all())
+    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
+    df = raw.to_data_frame(index=None)
+    assert_true('time' in df.index.names)
+    assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
+    assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
+
+
+@testing.requires_testing_data
+def test_raw_index_as_time():
+    """ Test index as time conversion"""
+    raw = Raw(fif_fname, preload=True)
+    t0 = raw.index_as_time([0], True)[0]
+    t1 = raw.index_as_time([100], False)[0]
+    t2 = raw.index_as_time([100], True)[0]
+    assert_equal(t2 - t1, t0)
+    # ensure we can go back and forth
+    t3 = raw.index_as_time(raw.time_as_index([0], True), True)
+    assert_array_almost_equal(t3, [0.0], 2)
+    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
+    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
+    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
+    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
+    i0 = raw.time_as_index(raw.index_as_time([0], True), True)
+    assert_equal(i0[0], 0)
+    i1 = raw.time_as_index(raw.index_as_time([100], True), True)
+    assert_equal(i1[0], 100)
+    # Have to add small amount of time because we truncate via int casting
+    i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
+    assert_equal(i1[0], 100)
+
+
+def test_add_channels():
+    """Test raw splitting / re-appending channel types
+    """
+    raw = Raw(test_fif_fname).crop(0, 1).load_data()
+    raw_nopre = Raw(test_fif_fname, preload=False)
+    raw_eeg_meg = raw.pick_types(meg=True, eeg=True, copy=True)
+    raw_eeg = raw.pick_types(meg=False, eeg=True, copy=True)
+    raw_meg = raw.pick_types(meg=True, eeg=False, copy=True)
+    raw_stim = raw.pick_types(meg=False, eeg=False, stim=True, copy=True)
+    raw_new = raw_meg.add_channels([raw_eeg, raw_stim], copy=True)
+    assert_true(all(ch in raw_new.ch_names
+                    for ch in raw_stim.ch_names + raw_meg.ch_names))
+    raw_new = raw_meg.add_channels([raw_eeg], copy=True)
+
+    assert_true(all(ch in raw_new.ch_names for ch in raw.ch_names))
+    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
+    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
+    assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
+
+    # Now test errors
+    raw_badsf = raw_eeg.copy()
+    raw_badsf.info['sfreq'] = 3.1415927
+    raw_eeg = raw_eeg.crop(.5)
+
+    assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
+    assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
+    assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
+    assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
+    assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
+
+
+@testing.requires_testing_data
+def test_raw_time_as_index():
+    """ Test time as index conversion"""
+    raw = Raw(fif_fname, preload=True)
+    first_samp = raw.time_as_index([0], True)[0]
+    assert_equal(raw.first_samp, -first_samp)
+
+
+@testing.requires_testing_data
+def test_save():
+    """ Test saving raw"""
+    tempdir = _TempDir()
+    raw = Raw(fif_fname, preload=False)
+    # can't write over file being read
+    assert_raises(ValueError, raw.save, fif_fname)
+    raw = Raw(fif_fname, preload=True)
+    # can't overwrite file without overwrite=True
+    assert_raises(IOError, raw.save, fif_fname)
+
+    # test abspath support
+    new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
+    raw.save(op.join(tempdir, new_fname), overwrite=True)
+    new_raw = Raw(op.join(tempdir, new_fname), preload=False)
+    assert_raises(ValueError, new_raw.save, new_fname)
+    # make sure we can overwrite the file we loaded when preload=True
+    new_raw = Raw(op.join(tempdir, new_fname), preload=True)
+    new_raw.save(op.join(tempdir, new_fname), overwrite=True)
+    os.remove(new_fname)
+
+
+@testing.requires_testing_data
+def test_with_statement():
+    """ Test with statement """
+    for preload in [True, False]:
+        with Raw(fif_fname, preload=preload) as raw_:
+            print(raw_)
+
+
+def test_compensation_raw():
+    """Test Raw compensation
+    """
+    tempdir = _TempDir()
+    raw1 = Raw(ctf_comp_fname, compensation=None)
+    assert_true(raw1.comp is None)
+    data1, times1 = raw1[:, :]
+    raw2 = Raw(ctf_comp_fname, compensation=3)
+    data2, times2 = raw2[:, :]
+    assert_true(raw2.comp is None)  # unchanged (data come with grade 3)
+    assert_array_equal(times1, times2)
+    assert_array_equal(data1, data2)
+    raw3 = Raw(ctf_comp_fname, compensation=1)
+    data3, times3 = raw3[:, :]
+    assert_true(raw3.comp is not None)
+    assert_array_equal(times1, times3)
+    # make sure it's different with a different compensation:
+    assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
+    assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
+
+    # Try IO with compensation
+    temp_file = op.join(tempdir, 'raw.fif')
+
+    raw1.save(temp_file, overwrite=True)
+    raw4 = Raw(temp_file)
+    data4, times4 = raw4[:, :]
+    assert_array_equal(times1, times4)
+    assert_array_equal(data1, data4)
+
+    # Now save the file that has modified compensation
+    # and make sure we get the same data as the input, i.e. the
+    # compensation is undone
+    raw3.save(temp_file, overwrite=True)
+    raw5 = Raw(temp_file)
+    data5, times5 = raw5[:, :]
+    assert_array_equal(times1, times5)
+    assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
+
+
+@requires_mne
+def test_compensation_raw_mne():
+    """Test Raw compensation by comparing with MNE
+    """
+    tempdir = _TempDir()
+
+    def compensate_mne(fname, grad):
+        tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
+        cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
+               '--grad', str(grad), '--projoff', '--filteroff']
+        run_subprocess(cmd)
+        return Raw(tmp_fname, preload=True)
+
+    for grad in [0, 2, 3]:
+        raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
+        raw_c = compensate_mne(ctf_comp_fname, grad)
+        assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
+
+
+@testing.requires_testing_data
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    raw = Raw(fif_fname, preload=True)
+    drop_ch = raw.ch_names[:3]
+    ch_names = raw.ch_names[3:]
+
+    ch_names_orig = raw.ch_names
+    dummy = raw.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, raw.ch_names)
+    assert_equal(len(ch_names_orig), raw._data.shape[0])
+
+    raw.drop_channels(drop_ch)
+    assert_equal(ch_names, raw.ch_names)
+    assert_equal(len(ch_names), len(raw._cals))
+    assert_equal(len(ch_names), raw._data.shape[0])
+
+
+@testing.requires_testing_data
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    # preload is True
+
+    raw = Raw(fif_fname, preload=True)
+    ch_names = raw.ch_names[:3]
+
+    ch_names_orig = raw.ch_names
+    dummy = raw.pick_channels(ch_names, copy=True)  # copy is True
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, raw.ch_names)
+    assert_equal(len(ch_names_orig), raw._data.shape[0])
+
+    raw.pick_channels(ch_names, copy=False)  # copy is False
+    assert_equal(ch_names, raw.ch_names)
+    assert_equal(len(ch_names), len(raw._cals))
+    assert_equal(len(ch_names), raw._data.shape[0])
+    assert_raises(ValueError, raw.pick_channels, ch_names[0])
+
+    raw = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, raw.pick_channels, ch_names)
+    assert_raises(RuntimeError, raw.drop_channels, ch_names)
+
+
+@testing.requires_testing_data
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    raw1 = Raw(fif_fname, preload=True)
+
+    raw2 = raw1.copy()
+    ch_names = raw1.ch_names[2:]
+    raw1.drop_channels(raw1.ch_names[:1])
+    raw2.drop_channels(raw2.ch_names[1:2])
+    my_comparison = [raw1, raw2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/__init__.py
new file mode 100644
index 0000000..a3d74cc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/__init__.py
@@ -0,0 +1,8 @@
+"""KIT module for conversion to FIF"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from .kit import read_raw_kit, read_epochs_kit
+from .coreg import read_mrk
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/constants.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/constants.py
new file mode 100644
index 0000000..7941223
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/constants.py
@@ -0,0 +1,99 @@
+"""KIT constants"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from ..constants import Bunch
+
+
+KIT = Bunch()
+
+# byte values
+KIT.SHORT = 2
+KIT.INT = 4
+KIT.DOUBLE = 8
+KIT.STRING = 128
+
+# pointer locations
+KIT.AMPLIFIER_INFO = 112
+KIT.BASIC_INFO = 16
+KIT.CHAN_SENS = 80
+KIT.RAW_OFFSET = 144
+KIT.AVE_OFFSET = 160
+KIT.SAMPLE_INFO = 128
+KIT.MRK_INFO = 192
+KIT.CHAN_LOC_OFFSET = 64
+
+# parameters
+KIT.VOLTAGE_RANGE = 5.
+KIT.CALIB_FACTOR = 1.0  # mne_manual p.272
+KIT.RANGE = 1.  # mne_manual p.272
+KIT.UNIT_MUL = 0  # default is 0 mne_manual p.273
+
+# gain: 0:x1, 1:x2, 2:x5, 3:x10, 4:x20, 5:x50, 6:x100, 7:x200
+KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
+# BEF options: 0:THRU, 1:50Hz, 2:60Hz, 3:50Hz
+KIT.BEFS = [0, 50, 60, 50]
+
+# coreg constants
+KIT.DIG_POINTS = 10000
+
+# create system specific dicts
+KIT_NY = Bunch(**KIT)
+KIT_AD = Bunch(**KIT)
+
+# NYU-system channel information
+KIT_NY.NCHAN = 192
+KIT_NY.NMEGCHAN = 157
+KIT_NY.NREFCHAN = 3
+KIT_NY.NMISCCHAN = 32
+KIT_NY.N_SENS = KIT_NY.NMEGCHAN + KIT_NY.NREFCHAN
+# 12-bit A-to-D converter, one bit used for the sign; range +/- 2048
+KIT_NY.DYNAMIC_RANGE = 2 ** 12 / 2
+# amplifier information
+KIT_NY.GAIN1_BIT = 11  # stored in Bit 11-12
+KIT_NY.GAIN1_MASK = 2 ** 11 + 2 ** 12
+KIT_NY.GAIN2_BIT = 0  # stored in Bit 0-2
+KIT_NY.GAIN2_MASK = 2 ** 0 + 2 ** 1 + 2 ** 2  # (0x0007)
+KIT_NY.GAIN3_BIT = None
+KIT_NY.GAIN3_MASK = None
+KIT_NY.HPF_BIT = 4  # stored in Bit 4-5
+KIT_NY.HPF_MASK = 2 ** 4 + 2 ** 5
+KIT_NY.LPF_BIT = 8  # stored in Bit 8-10
+KIT_NY.LPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
+KIT_NY.BEF_BIT = 14  # stored in Bit 14-15
+KIT_NY.BEF_MASK = 2 ** 14 + 2 ** 15
+# HPF options: 0:0Hz, 1:1Hz, 2:3Hz
+KIT_NY.HPFS = [0, 1, 3]
+# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
+#              6:1,000Hz, 7:2,000Hz
+KIT_NY.LPFS = [10, 20, 50, 100, 200, 500, 1000, 2000]
+
+
+# AD-system channel information
+KIT_AD.NCHAN = 256
+KIT_AD.NMEGCHAN = 208
+KIT_AD.NREFCHAN = 16
+KIT_AD.NMISCCHAN = 32
+KIT_AD.N_SENS = KIT_AD.NMEGCHAN + KIT_AD.NREFCHAN
+# 16-bit A-to-D converter, one bit used for the sign; range +/- 32768
+KIT_AD.DYNAMIC_RANGE = 2 ** 16 / 2
+# amplifier information
+KIT_AD.GAIN1_BIT = 12  # stored in Bit 12-14
+KIT_AD.GAIN1_MASK = 2 ** 12 + 2 ** 13 + 2 ** 14
+KIT_AD.GAIN2_BIT = 28  # stored in Bit 28-30
+KIT_AD.GAIN2_MASK = 2 ** 28 + 2 ** 29 + 2 ** 30
+KIT_AD.GAIN3_BIT = 24  # stored in Bit 24-26
+KIT_AD.GAIN3_MASK = 2 ** 24 + 2 ** 25 + 2 ** 26
+KIT_AD.HPF_BIT = 8  # stored in Bit 8-10
+KIT_AD.HPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
+KIT_AD.LPF_BIT = 16  # stored in Bit 16-18
+KIT_AD.LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
+KIT_AD.BEF_BIT = 0  # stored in Bit 0-1
+KIT_AD.BEF_MASK = 2 ** 0 + 2 ** 1
+# HPF options: 0:0Hz, 1:0.03Hz, 2:0.1Hz, 3:0.3Hz, 4:1Hz, 5:3Hz, 6:10Hz, 7:30Hz
+KIT_AD.HPFS = [0, 0.03, 0.1, 0.3, 1, 3, 10, 30]
+# LPF options: 0:10Hz, 1:20Hz, 2:50Hz, 3:100Hz, 4:200Hz, 5:500Hz,
+#              6:1,000Hz, 7:10,000Hz
+KIT_AD.LPFS = [10, 20, 50, 100, 200, 500, 1000, 10000]
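+
+# A sketch of how the packed amplifier word is decoded (this mirrors
+# get_kit_info() in kit.py; ``KIT_SYS`` stands for whichever system dict
+# applies and ``amp_data`` for the integer read from the file):
+#
+#     gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data) >>
+#                           KIT_SYS.GAIN1_BIT]
+#     lowpass = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data) >>
+#                            KIT_SYS.LPF_BIT]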
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/coreg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/coreg.py
new file mode 100644
index 0000000..48b56cd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/coreg.py
@@ -0,0 +1,87 @@
+"""Coordinate Point Extractor for KIT system"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from os import SEEK_CUR, path as op
+import re
+from struct import unpack
+import numpy as np
+from .constants import KIT
+from ..meas_info import _read_dig_points
+from ...externals.six.moves import cPickle as pickle
+
+
+def read_mrk(fname):
+    """Marker Point Extraction in MEG space directly from sqd
+
+    Parameters
+    ----------
+    fname : str
+        Absolute path to Marker file.
+        File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.
+
+    Returns
+    -------
+    mrk_points : numpy.array, shape = (n_points, 3)
+        Marker points in MEG space [m].
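+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical:
+
+    >>> mrk_points = read_mrk('marker_meas.sqd')  # doctest: +SKIP
+    >>> mrk_points.shape  # doctest: +SKIP
+    (5, 3)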
+    """
+    ext = op.splitext(fname)[-1]
+    if ext in ('.sqd', '.mrk'):
+        with open(fname, 'rb', buffering=0) as fid:
+            fid.seek(KIT.MRK_INFO)
+            mrk_offset = unpack('i', fid.read(KIT.INT))[0]
+            fid.seek(mrk_offset)
+            # skips match_done, meg_to_mri and mri_to_meg
+            fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR)
+            mrk_count = unpack('i', fid.read(KIT.INT))[0]
+            pts = []
+            for _ in range(mrk_count):
+                # skips mri/meg mrk_type and done, mri_marker
+                fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR)
+                pts.append(np.fromfile(fid, dtype='d', count=3))
+                mrk_points = np.array(pts)
+    elif ext == '.txt':
+        mrk_points = _read_dig_points(fname)
+    elif ext == '.pickled':
+        with open(fname, 'rb') as fid:
+            food = pickle.load(fid)
+        try:
+            mrk_points = food['mrk']
+        except Exception:
+            err = ("%r does not contain marker points." % fname)
+            raise ValueError(err)
+    else:
+        err = ('KIT marker file must be *.sqd, *.txt or *.pickled, '
+               'not *%s.' % ext)
+        raise ValueError(err)
+
+    # check output
+    mrk_points = np.asarray(mrk_points)
+    if mrk_points.shape != (5, 3):
+        err = ("%r is no marker file, shape is "
+               "%s" % (fname, mrk_points.shape))
+        raise ValueError(err)
+    return mrk_points
+
+
+def read_sns(fname):
+    """Sensor coordinate extraction in MEG space
+
+    Parameters
+    ----------
+    fname : str
+        Absolute path to sensor definition file.
+
+    Returns
+    -------
+    locs : numpy.array, shape = (n_points, 3)
+        Sensor coil location.
+    """
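+    # each line of sns.txt is expected to look like
+    # "<index>,<name>,x,y,z,theta,phi"; the regex captures the five numeric
+    # fields (inferred from the pattern below, not from a format spec)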
+    p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
+                   r'([\.\-0-9]+),([\.\-0-9]+),' +
+                   r'([\.\-0-9]+),([\.\-0-9]+)')
+    with open(fname) as fid:
+        locs = np.array(p.findall(fid.read()), dtype=float)
+    return locs
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/kit.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/kit.py
new file mode 100644
index 0000000..df0eb35
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/kit.py
@@ -0,0 +1,827 @@
+"""Conversion tool from SQD to FIF
+
+RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
+
+"""
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from os import SEEK_CUR, path as op
+from struct import unpack
+import time
+
+import numpy as np
+from scipy import linalg
+
+from ..pick import pick_types
+from ...coreg import fit_matched_points, _decimate_points
+from ...utils import verbose, logger
+from ...transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
+                           get_ras_to_neuromag_trans, Transform)
+from ..base import _BaseRaw
+from ...epochs import _BaseEpochs
+from ..constants import FIFF
+from ..meas_info import _empty_info, _read_dig_points, _make_dig_points
+from .constants import KIT, KIT_NY, KIT_AD
+from .coreg import read_mrk
+from ...externals.six import string_types
+from ...event import read_events
+
+
+class RawKIT(_BaseRaw):
+    """Raw object from KIT SQD file adapted from bti/raw.py
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    stim : list of int | '<' | '>'
+        Channel-value correspondence when converting KIT trigger channels to a
+        Neuromag-style stim channel. For '<', the largest values are assigned
+        to the first channel (default). For '>', the largest values are
+        assigned to the last channel. Can also be specified as a list of
+        trigger channel indexes.
+    slope : '+' | '-'
+        How to interpret values on KIT trigger channels when synthesizing a
+        Neuromag-style stim channel. With '+', a positive slope (low-to-high)
+        is interpreted as an event. With '-', a negative slope (high-to-low)
+        is interpreted as an event.
+    stimthresh : float
+        The threshold level for accepting voltage changes in KIT trigger
+        channels as a trigger event. If None, stim must also be set to None.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the head shape surface points. elp
+    refers to the points in head space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
+    """
+    @verbose
+    def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
+                 slope='-', stimthresh=1, preload=False, verbose=None):
+        logger.info('Extracting SQD Parameters from %s...' % input_fname)
+        input_fname = op.abspath(input_fname)
+        self.preload = False
+        logger.info('Creating Raw.info structure...')
+        info, kit_info = get_kit_info(input_fname)
+        kit_info['slope'] = slope
+        kit_info['stimthresh'] = stimthresh
+        if kit_info['acq_type'] != 1:
+            err = 'SQD file contains epochs, not raw data. Wrong reader.'
+            raise TypeError(err)
+        logger.info('Creating Info structure...')
+
+        last_samps = [kit_info['n_samples'] - 1]
+        self._raw_extras = [kit_info]
+        self._set_stimchannels(info, stim)
+        super(RawKIT, self).__init__(
+            info, preload, last_samps=last_samps, filenames=[input_fname],
+            raw_extras=self._raw_extras, verbose=verbose)
+
+        if isinstance(mrk, list):
+            mrk = [read_mrk(marker) if isinstance(marker, string_types)
+                   else marker for marker in mrk]
+            mrk = np.mean(mrk, axis=0)
+        if (mrk is not None and elp is not None and hsp is not None):
+            dig_points, dev_head_t = _set_dig_kit(mrk, elp, hsp)
+            self.info['dig'] = dig_points
+            self.info['dev_head_t'] = dev_head_t
+        elif (mrk is not None or elp is not None or hsp is not None):
+            raise ValueError('mrk, elp and hsp need to be provided as a group '
+                             '(all or none)')
+
+        logger.info('Ready.')
+
+    def read_stim_ch(self, buffer_size=1e5):
+        """Read events from data
+
+        Parameters
+        ----------
+        buffer_size : int
+            The size of the chunks by which the data are scanned.
+
+        Returns
+        -------
+        stim_ch : array, shape (1, n_samples)
+            The synthesized stim channel.
+        """
+        buffer_size = int(buffer_size)
+        start = int(self.first_samp)
+        stop = int(self.last_samp + 1)
+
+        pick = pick_types(self.info, meg=False, ref_meg=False,
+                          stim=True, exclude=[])
+        stim_ch = np.empty((1, stop), dtype=np.int)
+        for b_start in range(start, stop, buffer_size):
+            b_stop = b_start + buffer_size
+            x = self[pick, b_start:b_stop][0]
+            stim_ch[:, b_start:b_start + x.shape[1]] = x
+
+        return stim_ch
+
+    def _set_stimchannels(self, info, stim='<'):
+        """Specify how the trigger channel is synthesized from analog channels.
+
+        Has to be done before loading data. For a RawKIT instance that has been
+        created with preload=True, this method will raise a
+        NotImplementedError.
+
+        Parameters
+        ----------
+        info : instance of MeasInfo
+            The measurement info.
+        stim : list of int | '<' | '>'
+            Can be submitted as a list of trigger channels.
+            If a list is not specified, the default triggers extracted from
+            misc channels will be used with the specified directionality.
+            '<' means the largest values are assigned to the first channel
+            in sequence.
+            '>' means the largest values are assigned to the last channel
+            in sequence.
+        """
+        if stim is not None:
+            if isinstance(stim, str):
+                picks = pick_types(info, meg=False, ref_meg=False,
+                                   misc=True, exclude=[])[:8]
+                if stim == '<':
+                    stim = picks[::-1]
+                elif stim == '>':
+                    stim = picks
+                else:
+                    raise ValueError("stim needs to be list of int, '>' or "
+                                     "'<', not %r" % str(stim))
+            elif np.max(stim) >= self._raw_extras[0]['nchan']:
+                raise ValueError('Tried to set stim channel %i, but sqd file '
+                                 'only has %i channels'
+                                 % (np.max(stim),
+                                    self._raw_extras[0]['nchan']))
+            # modify info
+            info['nchan'] = self._raw_extras[0]['nchan'] + 1
+            ch_name = 'STI 014'
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = info['nchan']
+            chan_info['scanno'] = info['nchan']
+            chan_info['range'] = 1.0
+            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+            chan_info['unit_mul'] = 0
+            chan_info['ch_name'] = ch_name
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['loc'] = np.zeros(12)
+            chan_info['kind'] = FIFF.FIFFV_STIM_CH
+            info['chs'].append(chan_info)
+            info['ch_names'].append(ch_name)
+        if self.preload:
+            err = "Can't change stim channel after preloading data"
+            raise NotImplementedError(err)
+
+        self._raw_extras[0]['stim'] = stim
+
+    @verbose
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
+        # cals are all unity, so can be ignored
+
+        # RawFIF and RawEDF think of "stop" differently, easiest to increment
+        # here and refactor later
+        stop += 1
+        with open(self._filenames[fi], 'rb', buffering=0) as fid:
+            # extract data
+            data_offset = KIT.RAW_OFFSET
+            fid.seek(data_offset)
+            # data offset info
+            data_offset = unpack('i', fid.read(KIT.INT))[0]
+            nchan = self._raw_extras[fi]['nchan']
+            buffer_size = stop - start
+            count = buffer_size * nchan
+            pointer = start * nchan * KIT.SHORT
+            fid.seek(data_offset + pointer)
+            data_ = np.fromfile(fid, dtype='h', count=count)
+
+        # amplifier applies only to the sensor channels
+        data_.shape = (buffer_size, nchan)
+        n_sens = self._raw_extras[fi]['n_sens']
+        sensor_gain = self._raw_extras[fi]['sensor_gain'].copy()
+        sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
+                                self._raw_extras[fi]['amp_gain'])
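+        # scale raw A/D counts into physical units: VOLTAGE_RANGE over
+        # DYNAMIC_RANGE is the per-count step, scaled per channel by the
+        # sensor gain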
+        conv_factor = np.array((KIT.VOLTAGE_RANGE /
+                                self._raw_extras[fi]['DYNAMIC_RANGE']) *
+                               sensor_gain)
+        data_ = conv_factor[:, np.newaxis] * data_.T
+
+        # Create a synthetic channel
+        if self._raw_extras[fi]['stim'] is not None:
+            trig_chs = data_[self._raw_extras[fi]['stim'], :]
+            if self._raw_extras[fi]['slope'] == '+':
+                trig_chs = trig_chs > self._raw_extras[0]['stimthresh']
+            elif self._raw_extras[fi]['slope'] == '-':
+                trig_chs = trig_chs < self._raw_extras[0]['stimthresh']
+            else:
+                raise ValueError("slope needs to be '+' or '-'")
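+            # weight each trigger line by a power of two so that the summed
+            # stim channel encodes the binary pattern of simultaneously
+            # active triggers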
+            trig_vals = np.array(
+                2 ** np.arange(len(self._raw_extras[0]['stim'])), ndmin=2).T
+            trig_chs = trig_chs * trig_vals
+            stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
+            data_ = np.vstack((data_, stim_ch))
+        data[:, offset:offset + (stop - start)] = \
+            np.dot(mult, data_) if mult is not None else data_[idx]
+
+
+class EpochsKIT(_BaseEpochs):
+    """Epochs Array object from KIT SQD file
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    events : str | array, shape (n_events, 3)
+        Path to events file. If array, it is the events typically returned
+        by the read_events function. If some events don't match the events
+        of interest as specified by event_id, they will be marked as 'IGNORED'
+        in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a
+        dict is created with string integer names corresponding to the
+        event id integers.
+    tmin : float
+        Start time before event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the head shape surface points. elp
+    refers to the points in head space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
+    See Also
+    --------
+    mne.Epochs : Documentation of attribute and methods.
+    """
+    @verbose
+    def __init__(self, input_fname, events, event_id=None, tmin=0,
+                 baseline=None,  reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, mrk=None, elp=None, hsp=None, verbose=None):
+
+        if isinstance(events, string_types):
+            events = read_events(events)
+        if isinstance(mrk, list):
+            mrk = [read_mrk(marker) if isinstance(marker, string_types)
+                   else marker for marker in mrk]
+            mrk = np.mean(mrk, axis=0)
+
+        if (mrk is not None and elp is not None and hsp is not None):
+            dig_points, dev_head_t = _set_dig_kit(mrk, elp, hsp)
+            self.info['dig'] = dig_points
+            self.info['dev_head_t'] = dev_head_t
+        elif (mrk is not None or elp is not None or hsp is not None):
+            err = ("mrk, elp and hsp need to be provided as a group (all or "
+                   "none)")
+            raise ValueError(err)
+
+        logger.info('Extracting KIT Parameters from %s...' % input_fname)
+        input_fname = op.abspath(input_fname)
+        self.info, kit_info = get_kit_info(input_fname)
+        self._raw_extras = [kit_info]
+        if len(events) != self._raw_extras[0]['n_epochs']:
+            raise ValueError('Event list does not match number of epochs.')
+
+        if self._raw_extras[0]['acq_type'] == 3:
+            self._raw_extras[0]['data_offset'] = KIT.RAW_OFFSET
+            self._raw_extras[0]['data_length'] = KIT.INT
+            self._raw_extras[0]['dtype'] = 'h'
+        else:
+            err = ('SQD file contains raw data, not epochs or average. '
+                   'Wrong reader.')
+            raise TypeError(err)
+
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+
+        for key, val in event_id.items():
+            if val not in events[:, 2]:
+                raise ValueError('No matching events found for %s '
+                                 '(event id %i)' % (key, val))
+
+        self._filename = input_fname
+        data = self._read_kit_data()
+        assert data.shape == (self._raw_extras[0]['n_epochs'],
+                              self.info['nchan'],
+                              self._raw_extras[0]['frame_length'])
+        tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
+        super(EpochsKIT, self).__init__(self.info, data, events, event_id,
+                                        tmin, tmax, baseline,
+                                        reject=reject, flat=flat,
+                                        reject_tmin=reject_tmin,
+                                        reject_tmax=reject_tmax,
+                                        verbose=verbose)
+        logger.info('Ready.')
+
+    def _read_kit_data(self):
+        """Read epochs data
+
+        Returns
+        -------
+        data : array, shape (n_epochs, n_channels, n_samples)
+            The epochs data matrix.
+        """
+        #  Initial checks
+        epoch_length = self._raw_extras[0]['frame_length']
+        n_epochs = self._raw_extras[0]['n_epochs']
+        n_samples = self._raw_extras[0]['n_samples']
+
+        with open(self._filename, 'rb', buffering=0) as fid:
+            # extract data
+            data_offset = self._raw_extras[0]['data_offset']
+            dtype = self._raw_extras[0]['dtype']
+            fid.seek(data_offset)
+            # data offset info
+            data_offset = unpack('i', fid.read(KIT.INT))[0]
+            nchan = self._raw_extras[0]['nchan']
+            count = n_samples * nchan
+            fid.seek(data_offset)
+            data = np.fromfile(fid, dtype=dtype, count=count)
+            data = data.reshape((n_samples, nchan))
+        # amplifier applies only to the sensor channels
+        n_sens = self._raw_extras[0]['n_sens']
+        sensor_gain = np.copy(self._raw_extras[0]['sensor_gain'])
+        sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
+                                self._raw_extras[0]['amp_gain'])
+        conv_factor = np.array((KIT.VOLTAGE_RANGE /
+                                self._raw_extras[0]['DYNAMIC_RANGE']) *
+                               sensor_gain, ndmin=2)
+        data = conv_factor * data
+        # reshape
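+        # (nchan, n_samples) -> split each channel's samples into epochs of
+        # frame_length and move the epoch axis first:
+        # (n_epochs, nchan, epoch_length)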
+        data = data.T
+        data = data.reshape((nchan, n_epochs, epoch_length))
+        data = data.transpose((1, 0, 2))
+
+        return data
+
+
+def _set_dig_kit(mrk, elp, hsp, auto_decimate=True):
+    """Add landmark points and head shape data to the KIT instance
+
+    Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
+    ALS coordinate system.
+
+    Parameters
+    ----------
+    mrk : None | str | array_like, shape = (5, 3)
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more
+        than 10,000 points are in the head shape, they are automatically
+        decimated.
+    auto_decimate : bool
+        Decimate hsp points for head shape files with more than 10,000
+        points.
+
+    Returns
+    -------
+    dig_points : list
+        List of digitizer points for info['dig'].
+    dev_head_t : dict
+        A dictionary describing the device-head transformation.
+    """
+    if isinstance(hsp, string_types):
+        hsp = _read_dig_points(hsp)
+    n_pts = len(hsp)
+    if n_pts > KIT.DIG_POINTS:
+        hsp = _decimate_points(hsp, res=5)
+        n_new = len(hsp)
+        msg = ("The selected head shape contained {n_in} points, which is "
+               "more than recommended ({n_rec}), and was automatically "
+               "downsampled to {n_new} points. The preferred way to "
+               "downsample is using FastScan."
+               ).format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
+        logger.warning(msg)
+
+    if isinstance(elp, string_types):
+        elp_points = _read_dig_points(elp)
+        if len(elp_points) != 8:
+            err = ("File %r should contain 8 points; got shape "
+                   "%s." % (elp, elp_points.shape))
+            raise ValueError(err)
+        elp = elp_points
+
+    elif len(elp) != 8:
+        err = ("ELP should contain 8 points; got shape "
+               "%s." % (elp.shape,))
+        raise ValueError(err)
+    if isinstance(mrk, string_types):
+        mrk = read_mrk(mrk)
+
+    hsp = apply_trans(als_ras_trans_mm, hsp)
+    elp = apply_trans(als_ras_trans_mm, elp)
+    mrk = apply_trans(als_ras_trans, mrk)
+
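+    # the first three elp points are the fiducials (nasion, LPA, RPA); the
+    # remaining five are the HPI coil positions that get matched to mrk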
+    nasion, lpa, rpa = elp[:3]
+    nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+    elp = apply_trans(nmtrans, elp)
+    hsp = apply_trans(nmtrans, hsp)
+
+    # device head transform
+    trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
+
+    nasion, lpa, rpa = elp[:3]
+    elp = elp[3:]
+
+    dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp)
+    dev_head_t = Transform('meg', 'head', trans)
+
+    return dig_points, dev_head_t
+
+
+def get_kit_info(rawfile):
+    """Extracts all the information from the sqd file.
+
+    Parameters
+    ----------
+    rawfile : str
+        KIT file to be read.
+
+    Returns
+    -------
+    info : instance of Info
+        An Info for the instance.
+    sqd : dict
+        A dict containing all the sqd parameter settings.
+    """
+    sqd = dict()
+    sqd['rawfile'] = rawfile
+    with open(rawfile, 'rb', buffering=0) as fid:  # buffering=0 for np bug
+        fid.seek(KIT.BASIC_INFO)
+        basic_offset = unpack('i', fid.read(KIT.INT))[0]
+        fid.seek(basic_offset)
+        # skips version, revision, sysid
+        fid.seek(KIT.INT * 3, SEEK_CUR)
+        # basic info
+        sysname = unpack('128s', fid.read(KIT.STRING))
+        sysname = sysname[0].decode().split('\n')[0]
+        fid.seek(KIT.STRING, SEEK_CUR)  # skips modelname
+        sqd['nchan'] = unpack('i', fid.read(KIT.INT))[0]
+
+        if sysname == 'New York University Abu Dhabi':
+            KIT_SYS = KIT_AD
+        elif sysname == 'NYU 160ch System since Jan24 2009':
+            KIT_SYS = KIT_NY
+        else:
+            raise NotImplementedError
+
+        # channel locations
+        fid.seek(KIT_SYS.CHAN_LOC_OFFSET)
+        chan_offset = unpack('i', fid.read(KIT.INT))[0]
+        chan_size = unpack('i', fid.read(KIT.INT))[0]
+
+        fid.seek(chan_offset)
+        sensors = []
+        for i in range(KIT_SYS.N_SENS):
+            fid.seek(chan_offset + chan_size * i)
+            sens_type = unpack('i', fid.read(KIT.INT))[0]
+            if sens_type == 1:
+                # magnetometer
+                # x,y,z,theta,phi,coilsize
+                sensors.append(np.fromfile(fid, dtype='d', count=6))
+            elif sens_type == 2:
+                # axialgradiometer
+                # x,y,z,theta,phi,baseline,coilsize
+                sensors.append(np.fromfile(fid, dtype='d', count=7))
+            elif sens_type == 3:
+                # planargradiometer
+                # x,y,z,theta,phi,btheta,bphi,baseline,coilsize
+                sensors.append(np.fromfile(fid, dtype='d', count=9))
+            elif sens_type == 257:
+                # reference channels
+                sensors.append(np.zeros(7))
+                sqd['i'] = sens_type
+        sqd['sensor_locs'] = np.array(sensors)
+
+        # amplifier gain
+        fid.seek(KIT_SYS.AMPLIFIER_INFO)
+        amp_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(amp_offset)
+        amp_data = unpack('i', fid.read(KIT_SYS.INT))[0]
+
+        gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data) >>
+                              KIT_SYS.GAIN1_BIT]
+        gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data) >>
+                              KIT_SYS.GAIN2_BIT]
+        if KIT_SYS.GAIN3_BIT:
+            gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data) >>
+                                  KIT_SYS.GAIN3_BIT]
+            sqd['amp_gain'] = gain1 * gain2 * gain3
+        else:
+            sqd['amp_gain'] = gain1 * gain2
+
+        # filter settings
+        sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data) >>
+                                      KIT_SYS.LPF_BIT]
+        sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data) >>
+                                       KIT_SYS.HPF_BIT]
+        sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data) >>
+                                    KIT_SYS.BEF_BIT]
+
+        # only the sensor channels require gain. the additional misc channels
+        # (trigger channels, audio and voice channels) are passed
+        # through unaffected
+
+        fid.seek(KIT_SYS.CHAN_SENS)
+        sens_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(sens_offset)
+        sens = np.fromfile(fid, dtype='d', count=sqd['nchan'] * 2)
+        sensitivities = (np.reshape(sens, (sqd['nchan'], 2))
+                         [:KIT_SYS.N_SENS, 1])
+        sqd['sensor_gain'] = np.ones(KIT_SYS.NCHAN)
+        sqd['sensor_gain'][:KIT_SYS.N_SENS] = sensitivities
+
+        fid.seek(KIT_SYS.SAMPLE_INFO)
+        acqcond_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
+        fid.seek(acqcond_offset)
+        acq_type = unpack('i', fid.read(KIT_SYS.INT))[0]
+        sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
+        if acq_type == 1:
+            fid.read(KIT_SYS.INT)  # initialized estimate of samples
+            sqd['n_samples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+        elif acq_type == 2 or acq_type == 3:
+            sqd['frame_length'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['pretrigger_length'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['average_count'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['n_epochs'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
+        else:
+            err = ("Your file is neither continuous nor epoched data. "
+                   "What type of file is it?!")
+            raise TypeError(err)
+        sqd['n_sens'] = KIT_SYS.N_SENS
+        sqd['nmegchan'] = KIT_SYS.NMEGCHAN
+        sqd['nmiscchan'] = KIT_SYS.NMISCCHAN
+        sqd['DYNAMIC_RANGE'] = KIT_SYS.DYNAMIC_RANGE
+        sqd['acq_type'] = acq_type
+
+        # Create raw.info dict for raw fif object with SQD data
+        info = _empty_info()
+        info.update(meas_date=int(time.time()), lowpass=sqd['lowpass'],
+                    highpass=sqd['highpass'], sfreq=float(sqd['sfreq']),
+                    filename=rawfile, nchan=sqd['nchan'])
+
+        # Creates a list of dicts of meg channels for raw.info
+        logger.info('Setting channel info structure...')
+        ch_names = {}
+        ch_names['MEG'] = ['MEG %03d' % ch for ch
+                           in range(1, sqd['n_sens'] + 1)]
+        ch_names['MISC'] = ['MISC %03d' % ch for ch
+                            in range(1, sqd['nmiscchan'] + 1)]
+        locs = sqd['sensor_locs']
+        chan_locs = apply_trans(als_ras_trans, locs[:, :3])
+        chan_angles = locs[:, 3:]
+        info['chs'] = []
+        for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
+                                          chan_angles), 1):
+            ch_name, ch_loc, ch_angles = ch_info
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idx
+            chan_info['scanno'] = idx
+            chan_info['range'] = KIT.RANGE
+            chan_info['unit_mul'] = KIT.UNIT_MUL
+            chan_info['ch_name'] = ch_name
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+            chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+            if idx <= sqd['nmegchan']:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
+                chan_info['kind'] = FIFF.FIFFV_MEG_CH
+            else:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
+                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+
+            # create three orthogonal vectors
+            # ch_angles[0]: theta, ch_angles[1]: phi
+            ch_angles = np.radians(ch_angles)
+            x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
+            y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
+            z = np.cos(ch_angles[0])
+            vec_z = np.array([x, y, z])
+            length = linalg.norm(vec_z)
+            vec_z /= length
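+            # seed vec_x along the coordinate axis with the smallest vec_z
+            # component, orthogonalize it against vec_z, and complete the
+            # triad with a cross product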
+            vec_x = np.zeros(vec_z.size, dtype=np.float)
+            if vec_z[1] < vec_z[2]:
+                if vec_z[0] < vec_z[1]:
+                    vec_x[0] = 1.0
+                else:
+                    vec_x[1] = 1.0
+            elif vec_z[0] < vec_z[2]:
+                vec_x[0] = 1.0
+            else:
+                vec_x[2] = 1.0
+            vec_x -= np.sum(vec_x * vec_z) * vec_z
+            length = linalg.norm(vec_x)
+            vec_x /= length
+            vec_y = np.cross(vec_z, vec_x)
+            # transform to Neuromag like coordinate space
+            vecs = np.vstack((vec_x, vec_y, vec_z))
+            vecs = apply_trans(als_ras_trans, vecs)
+            chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
+            info['chs'].append(chan_info)
+
+        # label trigger and misc channels
+        for idy, ch_name in enumerate(ch_names['MISC'],
+                                      sqd['n_sens'] + 1):
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idy
+            chan_info['scanno'] = idy
+            chan_info['range'] = 1.0
+            chan_info['unit'] = FIFF.FIFF_UNIT_V
+            chan_info['unit_mul'] = 0
+            chan_info['ch_name'] = ch_name
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['loc'] = np.zeros(12)
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+            info['chs'].append(chan_info)
+
+        info['ch_names'] = ch_names['MEG'] + ch_names['MISC']
+
+    return info, sqd
+
+
+def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
+                 slope='-', stimthresh=1, preload=False, verbose=None):
+    """Reader function for KIT conversion to FIF
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    stim : list of int | '<' | '>'
+        Channel-value correspondence when converting KIT trigger channels to a
+        Neuromag-style stim channel. For '<', the largest values are assigned
+        to the first channel (default). For '>', the largest values are
+        assigned to the last channel. Can also be specified as a list of
+        trigger channel indexes.
+    slope : '+' | '-'
+        How to interpret values on KIT trigger channels when synthesizing a
+        Neuromag-style stim channel. With '+', a positive slope (low-to-high)
+        is interpreted as an event. With '-', a negative slope (high-to-low)
+        is interpreted as an event.
+    stimthresh : float
+        The threshold level for accepting voltage changes in KIT trigger
+        channels as a trigger event.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Instance of RawKIT
+        A Raw object containing KIT data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
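+
+    Examples
+    --------
+    A minimal sketch; all file names are hypothetical:
+
+    >>> raw = read_raw_kit('data.sqd', mrk='marker.sqd', elp='points.txt',
+    ...                    hsp='headshape.txt', stim='<')  # doctest: +SKIP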
+    """
+    return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
+                  stim=stim, slope=slope, stimthresh=stimthresh,
+                  preload=preload, verbose=verbose)
+
+
+def read_epochs_kit(input_fname, events, event_id=None,
+                    mrk=None, elp=None, hsp=None, verbose=None):
+    """Reader function for KIT epochs files
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    events : array, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and a
+        dict is created with string integer names corresponding to the
+        event id integers.
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The epochs.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
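+
+    Examples
+    --------
+    A minimal sketch; file names are hypothetical:
+
+    >>> from mne import read_events
+    >>> events = read_events('data-eve.txt')  # doctest: +SKIP
+    >>> epochs = read_epochs_kit('data-epoch.raw', events,
+    ...                          event_id=dict(auditory=1))  # doctest: +SKIP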
+    """
+    epochs = EpochsKIT(input_fname=input_fname, events=events,
+                       event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
+                       verbose=verbose)
+    return epochs
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/__init__.py
new file mode 100644
index 0000000..aba6507
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/__init__.py
@@ -0,0 +1,3 @@
+import os.path as op
+
+data_dir = op.join(op.dirname(__file__), 'data')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_coreg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_coreg.py
new file mode 100644
index 0000000..f117d99
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_coreg.py
@@ -0,0 +1,30 @@
+# Authors: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import inspect
+import os
+
+from numpy.testing import assert_array_equal
+
+from mne.io.kit import read_mrk
+from mne.io.meas_info import _write_dig_points
+from mne.utils import _TempDir
+
+
+FILE = inspect.getfile(inspect.currentframe())
+parent_dir = os.path.dirname(os.path.abspath(FILE))
+data_dir = os.path.join(parent_dir, 'data')
+mrk_fname = os.path.join(data_dir, 'test_mrk.sqd')
+
+
+def test_io_mrk():
+    """Test IO for mrk files"""
+    tempdir = _TempDir()
+    pts = read_mrk(mrk_fname)
+
+    # txt
+    path = os.path.join(tempdir, 'mrk.txt')
+    _write_dig_points(path, pts)
+    pts_2 = read_mrk(path)
+    assert_array_equal(pts, pts_2, "read/write mrk to text")
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_kit.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_kit.py
new file mode 100644
index 0000000..72b3028
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/kit/tests/test_kit.py
@@ -0,0 +1,163 @@
+"""Data and Channel Location Equivalence Tests"""
+from __future__ import print_function
+
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import inspect
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_equal, assert_raises, assert_true
+import scipy.io
+
+from mne import pick_types, concatenate_raws, Epochs, read_events
+from mne.utils import _TempDir, run_tests_if_main
+from mne.io import Raw
+from mne.io import read_raw_kit, read_epochs_kit
+from mne.io.kit.coreg import read_sns
+from mne.io.tests.test_raw import _test_concat
+
+FILE = inspect.getfile(inspect.currentframe())
+parent_dir = op.dirname(op.abspath(FILE))
+data_dir = op.join(parent_dir, 'data')
+sqd_path = op.join(data_dir, 'test.sqd')
+epochs_path = op.join(data_dir, 'test-epoch.raw')
+events_path = op.join(data_dir, 'test-eve.txt')
+mrk_path = op.join(data_dir, 'test_mrk.sqd')
+mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
+mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
+elp_path = op.join(data_dir, 'test_elp.txt')
+hsp_path = op.join(data_dir, 'test_hsp.txt')
+
+
+def test_concat():
+    """Test raw KIT file concatenation
+    """
+    _test_concat(read_raw_kit, sqd_path)
+
+
+def test_data():
+    """Test reading raw kit files
+    """
+    assert_raises(TypeError, read_raw_kit, epochs_path)
+    assert_raises(TypeError, read_epochs_kit, sqd_path)
+    assert_raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_path)
+    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
+                  list(range(200, 190, -1)))
+    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
+                  list(range(167, 159, -1)), '*', 1, True)
+    # check functionality
+    _ = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_path,
+                     hsp_path)
+    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
+                          stim=list(range(167, 159, -1)), slope='+',
+                          stimthresh=1, preload=True)
+    assert_true('RawKIT' in repr(raw_py))
+
+    # Binary file only stores the sensor channels
+    py_picks = pick_types(raw_py.info, exclude='bads')
+    raw_bin = op.join(data_dir, 'test_bin_raw.fif')
+    raw_bin = Raw(raw_bin, preload=True)
+    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
+    data_bin, _ = raw_bin[bin_picks]
+    data_py, _ = raw_py[py_picks]
+
+    # this .mat was generated using the Yokogawa MEG Reader
+    data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
+    data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
+    data_Ykgw = data_Ykgw[py_picks]
+
+    assert_array_almost_equal(data_py, data_Ykgw)
+
+    py_picks = pick_types(raw_py.info, stim=True, ref_meg=False,
+                          exclude='bads')
+    data_py, _ = raw_py[py_picks]
+    assert_array_almost_equal(data_py, data_bin)
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+
+def test_epochs():
+    """Test reading epochs from KIT files
+    """
+    raw = read_raw_kit(sqd_path, stim=None)
+    events = read_events(events_path)
+    raw_epochs = Epochs(raw, events, None, tmin=0, tmax=.099, baseline=None)
+    data1 = raw_epochs.get_data()
+    epochs = read_epochs_kit(epochs_path, events_path)
+    data11 = epochs.get_data()
+    assert_array_equal(data1, data11)
+
+
+def test_read_segment():
+    """Test writing raw kit files when preload is False
+    """
+    tempdir = _TempDir()
+    raw1 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=False)
+    raw1_file = op.join(tempdir, 'test1-raw.fif')
+    raw1.save(raw1_file, buffer_size_sec=.1, overwrite=True)
+    raw2 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=True)
+    raw2_file = op.join(tempdir, 'test2-raw.fif')
+    raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
+    data1, times1 = raw1[0, 0:1]
+
+    raw1 = Raw(raw1_file, preload=True)
+    raw2 = Raw(raw2_file, preload=True)
+    assert_array_equal(raw1._data, raw2._data)
+    data2, times2 = raw2[0, 0:1]
+    assert_array_almost_equal(data1, data2)
+    assert_array_almost_equal(times1, times2)
+    raw3 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=True)
+    assert_array_almost_equal(raw1._data, raw3._data)
+    raw4 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=False)
+    raw4.load_data()
+    buffer_fname = op.join(tempdir, 'buffer')
+    assert_array_almost_equal(raw1._data, raw4._data)
+    raw5 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=buffer_fname)
+    assert_array_almost_equal(raw1._data, raw5._data)
+
+
+def test_ch_loc():
+    """Test raw kit loc
+    """
+    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<')
+    raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'))
+
+    ch_py = raw_py._raw_extras[0]['sensor_locs'][:, :5]
+    # ch locs stored as m, not mm
+    ch_py[:, :3] *= 1e3
+    ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
+    assert_array_almost_equal(ch_py, ch_sns, 2)
+
+    assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
+                              raw_bin.info['dev_head_t']['trans'], 4)
+    for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
+        if bin_ch['ch_name'].startswith('MEG'):
+            # the stored ch locs have more precision than the sns.txt
+            assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)
+
+    # test when more than one marker file provided
+    mrks = [mrk_path, mrk2_path, mrk3_path]
+    read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
+
+
+def test_stim_ch():
+    """Test raw kit stim ch
+    """
+    raw = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                       slope='+', preload=True)
+    stim_pick = pick_types(raw.info, meg=False, ref_meg=False,
+                           stim=True, exclude='bads')
+    stim1, _ = raw[stim_pick]
+    stim2 = np.array(raw.read_stim_ch(), ndmin=2)
+    assert_array_equal(stim1, stim2)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/matrix.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/matrix.py
new file mode 100644
index 0000000..caecafa
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/matrix.py
@@ -0,0 +1,130 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .constants import FIFF
+from .tag import find_tag, has_tag
+from .write import (write_int, start_block, end_block, write_float_matrix,
+                    write_name_list)
+from ..utils import logger, verbose
+
+
+def _transpose_named_matrix(mat, copy=True):
+    """Transpose a named matrix (in place only if copy=False)
+    """
+    if copy is True:
+        mat = mat.copy()
+    mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow']
+    mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names']
+    mat['data'] = mat['data'].T
+    return mat
+
+
+@verbose
+def _read_named_matrix(fid, node, matkind, indent='    ', verbose=None):
+    """Read named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The opened file descriptor.
+    node : dict
+        The node in the tree.
+    matkind : int
+        The type of matrix.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    mat : dict
+        The matrix data.
+    """
+    #   Descend one level if necessary
+    if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX:
+        for k in range(node['nchild']):
+            if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX:
+                if has_tag(node['children'][k], matkind):
+                    node = node['children'][k]
+                    break
+        else:
+            logger.info(indent + 'Desired named matrix (kind = %d) not '
+                        'available' % matkind)
+            return None
+    else:
+        if not has_tag(node, matkind):
+            logger.info(indent + 'Desired named matrix (kind = %d) not '
+                        'available' % matkind)
+            return None
+
+    #   Read everything we need
+    tag = find_tag(fid, node, matkind)
+    if tag is None:
+        raise ValueError('Matrix data missing')
+    else:
+        data = tag.data
+
+    nrow, ncol = data.shape
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
+    if tag is not None and tag.data != nrow:
+        raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW '
+                         'tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
+    if tag is not None and tag.data != ncol:
+        raise ValueError('Number of columns in matrix data and '
+                         'FIFF_MNE_NCOL tag do not match')
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
+    row_names = tag.data.split(':') if tag is not None else []
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
+    col_names = tag.data.split(':') if tag is not None else []
+
+    mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names,
+               data=data)
+    return mat
+
+
+def write_named_matrix(fid, kind, mat):
+    """Write named matrix from the given node
+
+    Parameters
+    ----------
+    fid : file
+        The opened file descriptor.
+    kind : int
+        The tag kind to use when writing the data matrix.
+    mat : dict
+        The named matrix to write, with keys 'nrow', 'ncol', 'row_names',
+        'col_names', and 'data'.
+    """
+    # let's save ourselves from disaster
+    n_tot = mat['nrow'] * mat['ncol']
+    if mat['data'].size != n_tot:
+        ratio = n_tot / float(mat['data'].size)
+        if n_tot < mat['data'].size and ratio > 0:
+            ratio = 1 / ratio
+        raise ValueError('Cannot write matrix: row (%i) and column (%i) '
+                         'total element (%i) mismatch with data size (%i), '
+                         'appears to be off by a factor of %gx'
+                         % (mat['nrow'], mat['ncol'], n_tot,
+                            mat['data'].size, ratio))
+    start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
+    write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow'])
+    write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol'])
+
+    if len(mat['row_names']) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat['row_names']) != mat['nrow']:
+            raise ValueError('len(mat["row_names"]) != mat["nrow"]')
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names'])
+
+    if len(mat['col_names']) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat['col_names']) != mat['ncol']:
+            raise ValueError('len(mat["col_names"]) != mat["ncol"]')
+        write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names'])
+
+    write_float_matrix(fid, kind, mat['data'])
+    end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
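+
+# Sketch of the expected ``mat`` layout (illustrative only): 'data' must be
+# an array whose size equals nrow * ncol.
+#
+#     >>> import numpy as np
+#     >>> mat = dict(nrow=2, ncol=3, row_names=['a', 'b'],
+#     ...            col_names=['x', 'y', 'z'], data=np.zeros((2, 3)))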
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/meas_info.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/meas_info.py
new file mode 100644
index 0000000..f8f3928
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/meas_info.py
@@ -0,0 +1,1408 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from warnings import warn
+from copy import deepcopy
+from datetime import datetime as dt
+import os.path as op
+
+import numpy as np
+from scipy import linalg
+
+from .pick import channel_type
+from .constants import FIFF
+from .open import fiff_open
+from .tree import dir_tree_find
+from .tag import read_tag, find_tag
+from .proj import _read_proj, _write_proj, _uniquify_projs
+from .ctf import read_ctf_comp, write_ctf_comp
+from .write import (start_file, end_file, start_block, end_block,
+                    write_string, write_dig_point, write_float, write_int,
+                    write_coord_trans, write_ch_info, write_name_list,
+                    write_julian, write_float_matrix)
+from .proc_history import _read_proc_history, _write_proc_history
+from ..utils import logger, verbose
+from ..fixes import Counter
+from .. import __version__
+from ..externals.six import b, BytesIO, string_types, text_type
+
+
+_kind_dict = dict(
+    eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),
+    mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),
+    grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),
+    misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),
+    stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+)
+
+
+def _summarize_str(st):
+    """Aux function"""
+    return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'
+
+
+class Info(dict):
+    """Information about the recording.
+
+    This data structure behaves like a dictionary. It contains all meta-data
+    that is available for a recording.
+
+    The attributes listed below are the possible dictionary entries:
+
+    Attributes
+    ----------
+    bads : list of str
+        List of bad (noisy/broken) channels, by name. These channels will by
+        default be ignored by many processing steps.
+    ch_names : list of str
+        The names of the channels.
+    chs : list of dict
+        A list of channel information structures.
+        See: :ref:`faq` for details.
+    comps : list of dict
+        CTF software gradient compensation data.
+        See: :ref:`faq` for details.
+    custom_ref_applied : bool
+        Whether a custom (=other than average) reference has been applied to
+        the EEG data. This flag is checked by some algorithms that require an
+        average reference to be set.
+    events : list of dict
+        Event list, usually extracted from the stim channels.
+        See: :ref:`faq` for details.
+    hpi_results : list of dict
+        Head position indicator (HPI) digitization points.
+        See: :ref:`faq` for details.
+    meas_date : list of int
+        The first element of this list is a POSIX timestamp (seconds since
+        1970-01-01 00:00:00) denoting the date and time at which the
+        measurement was taken; the second element holds the additional
+        microseconds.
+    nchan : int
+        Number of channels.
+    projs : list of dict
+        List of SSP operators that operate on the data.
+        See: :ref:`faq` for details.
+    sfreq : float
+        Sampling frequency in Hertz.
+        See: :ref:`faq` for details.
+    acq_pars : str | None
+        MEG system acquisition parameters.
+    acq_stim : str | None
+        Stimulus-related acquisition parameters (contents of the
+        FIFF_DACQ_STIM tag).
+    buffer_size_sec : float | None
+        Buffer size (in seconds) when reading the raw data in chunks.
+    ctf_head_t : dict | None
+        The transformation from 4D/CTF head coordinates to Neuromag head
+        coordinates. This is only present in 4D/CTF data.
+        See: :ref:`faq` for details.
+    description : str | None
+        String description of the recording.
+    dev_ctf_t : dict | None
+        The transformation from device coordinates to 4D/CTF head coordinates.
+        This is only present in 4D/CTF data.
+        See: :ref:`faq` for details.
+    dev_head_t : dict | None
+        The device to head transformation.
+        See: :ref:`faq` for details.
+    dig : list of dict | None
+        The Polhemus digitization data in head coordinates.
+        See: :ref:`faq` for details.
+    experimenter : str | None
+        Name of the person that ran the experiment.
+    file_id : dict | None
+        The fif ID datastructure of the measurement file.
+        See: :ref:`faq` for details.
+    filename : str | None
+        The name of the file that provided the raw data.
+    highpass : float | None
+        Highpass corner frequency in Hertz. Zero indicates a DC recording.
+    hpi_meas : list of dict | None
+        HPI measurements (e.g., coil frequencies, channel counts, and
+        per-coil fit data).
+    hpi_subsystem : dict | None
+        Information about the HPI subsystem (e.g., the number of coils and
+        the event channel used for cHPI measurements).
+    line_freq : float | None
+        Frequency of the power line in Hertz.
+    lowpass : float | None
+        Lowpass corner frequency in Hertz.
+    meas_id : dict | None
+        The ID assigned to this measurement by the acquisition system or during
+        file conversion.
+        See: :ref:`faq` for details.
+    proj_id : int | None
+        ID number of the project the experiment belongs to.
+    proj_name : str | None
+        Name of the project the experiment belongs to.
+    subject_info : dict | None
+        Information about the subject.
+        See: :ref:`subject_info` for details
+    proc_history : list of dict | None | not present in dict
+        The SSS info, the CTC correction and the calibrations from the SSS
+        processing logs inside of a raw file.
+        See: :ref:`faq` for details.
+    """
+
+    def copy(self):
+        """Copy the instance
+
+        Returns
+        -------
+        info : instance of Info
+            The copied info.
+        """
+        return Info(super(Info, self).copy())
+
+    def __repr__(self):
+        """Summarize info instead of printing all"""
+        strs = ['<Info | %s non-empty fields']
+        non_empty = 0
+        for k, v in self.items():
+            if k in ['bads', 'ch_names']:
+                entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)
+                        if v else '0 items')
+                if len(entr) >= 56:
+                    # get rid of half-printed ch names
+                    entr = _summarize_str(entr)
+            elif k == 'filename' and v:
+                path, fname = op.split(v)
+                entr = path[:10] + '.../' + fname
+            elif k == 'projs' and v:
+                entr = ', '.join(p['desc'] + ': o%s' %
+                                 {0: 'ff', 1: 'n'}[p['active']] for p in v)
+                if len(entr) >= 56:
+                    entr = _summarize_str(entr)
+            elif k == 'meas_date' and np.iterable(v):
+                # only the first entry in meas_date is meaningful
+                entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
+            else:
+                this_len = (len(v) if hasattr(v, '__len__') else
+                            ('%s' % v if v is not None else None))
+                entr = (('%d items' % this_len) if isinstance(this_len, int)
+                        else ('%s' % this_len if this_len else ''))
+            if entr:
+                non_empty += 1
+                entr = ' | ' + entr
+            if k == 'chs':
+                ch_types = [channel_type(self, idx) for idx in range(len(v))]
+                ch_counts = Counter(ch_types)
+                entr += " (%s)" % ', '.join("%s: %d" % (ch_type.upper(), count)
+                                            for ch_type, count
+                                            in ch_counts.items())
+            strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))
+        strs_non_empty = sorted(s for s in strs if '|' in s)
+        strs_empty = sorted(s for s in strs if '|' not in s)
+        st = '\n    '.join(strs_non_empty + strs_empty)
+        st += '\n>'
+        st %= non_empty
+        return st
+
+    def _anonymize(self):
+        if self.get('subject_info') is not None:
+            del self['subject_info']
+
+    def _check_consistency(self):
+        """Do some self-consistency checks and datatype tweaks"""
+        missing = [bad for bad in self['bads'] if bad not in self['ch_names']]
+        if len(missing) > 0:
+            raise RuntimeError('bad channel(s) %s listed in info["bads"] do '
+                               'not exist in info["ch_names"]' % (missing,))
+        chs = [ch['ch_name'] for ch in self['chs']]
+        if len(self['ch_names']) != len(chs) or any(
+                ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \
+                self['nchan'] != len(chs):
+            raise RuntimeError('info channel name inconsistency detected, '
+                               'please notify mne-python developers')
+        # make sure we have the proper datatypes
+        for key in ('sfreq', 'highpass', 'lowpass'):
+            if self.get(key) is not None:
+                self[key] = float(self[key])
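+
+    # Usage sketch (illustrative only): Info behaves like a plain dict.
+    #
+    #     >>> info = _empty_info()  # module-level helper defined below
+    #     >>> info['nchan']
+    #     0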
+
+
+def read_fiducials(fname):
+    """Read fiducials from a fiff file
+
+    Parameters
+    ----------
+    fname : str
+        The filename to read.
+
+    Returns
+    -------
+    pts : list of dicts
+        List of digitizer points (each point in a dict).
+    coord_frame : int
+        The coordinate frame of the points (one of
+        mne.io.constants.FIFF.FIFFV_COORD_...)
+    """
+    fid, tree, _ = fiff_open(fname)
+    with fid:
+        isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)
+        isotrak = isotrak[0]
+        pts = []
+        coord_frame = FIFF.FIFFV_COORD_UNKNOWN
+        for k in range(isotrak['nent']):
+            kind = isotrak['directory'][k].kind
+            pos = isotrak['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                tag = read_tag(fid, pos)
+                pts.append(tag.data)
+            elif kind == FIFF.FIFF_MNE_COORD_FRAME:
+                tag = read_tag(fid, pos)
+                coord_frame = tag.data[0]
+
+    if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:
+        err = ("No coordinate frame was found in the file %r, it is probably "
+               "not a valid fiducials file." % fname)
+        raise ValueError(err)
+
+    # coord_frame is not stored in the tag
+    for pt in pts:
+        pt['coord_frame'] = coord_frame
+
+    return pts, coord_frame
+
+
+def write_fiducials(fname, pts, coord_frame=0):
+    """Write fiducials to a fiff file
+
+    Parameters
+    ----------
+    fname : str
+        Destination file name.
+    pts : iterator of dict
+        Iterator through digitizer points. Each point is a dictionary with
+        the keys 'kind', 'ident' and 'r'.
+    coord_frame : int
+        The coordinate frame of the points (one of
+        mne.io.constants.FIFF.FIFFV_COORD_...)
+    """
+    pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
+    bad_frames = pts_frames - set((coord_frame,))
+    if len(bad_frames) > 0:
+        err = ("Points have coord_frame entries that are incompatible with "
+               "coord_frame=%i: %s." % (coord_frame, str(tuple(bad_frames))))
+        raise ValueError(err)
+
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_ISOTRAK)
+    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
+    for pt in pts:
+        write_dig_point(fid, pt)
+
+    end_block(fid, FIFF.FIFFB_ISOTRAK)
+    end_file(fid)
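+
+# Round-trip sketch (illustrative only; file names are hypothetical):
+#
+#     >>> pts, frame = read_fiducials('subject-fiducials.fif')  # doctest: +SKIP
+#     >>> write_fiducials('copy-fiducials.fif', pts, frame)  # doctest: +SKIP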
+
+
+def _read_dig_points(fname, comments='%'):
+    """Read digitizer data from file.
+
+    This function can read space-delimited text files of digitizer data.
+
+    Parameters
+    ----------
+    fname : str
+        The filepath of space delimited file with points.
+    comments : str
+        The character used to indicate the start of a comment;
+        Default: '%'.
+
+    Returns
+    -------
+    dig_points : np.ndarray, shape (n_points, 3)
+        Array of dig points.
+    """
+    dig_points = np.loadtxt(fname, comments=comments, ndmin=2)
+    if dig_points.shape[-1] != 3:
+        err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)
+        raise ValueError(err)
+
+    return dig_points
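+
+# Input sketch (illustrative only; the file name is hypothetical): the file
+# holds one "x y z" triplet per line, with '%' starting a comment.
+#
+#     >>> pts = _read_dig_points('headshape.txt')  # doctest: +SKIP
+#     >>> pts.shape  # doctest: +SKIP
+#     (n_points, 3)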
+
+
+def _write_dig_points(fname, dig_points):
+    """Write points to file
+
+    Parameters
+    ----------
+    fname : str
+        Path to the file to write. The kind of file to write is determined
+        based on the extension: '.txt' for tab separated text file.
+    dig_points : numpy.ndarray, shape (n_points, 3)
+        Points.
+    """
+    _, ext = op.splitext(fname)
+    dig_points = np.asarray(dig_points)
+    if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):
+        err = ("Points must be of shape (n_points, 3), "
+               "not %s" % (dig_points.shape,))
+        raise ValueError(err)
+
+    if ext == '.txt':
+        with open(fname, 'wb') as fid:
+            version = __version__
+            now = dt.now().strftime("%I:%M%p on %B %d, %Y")
+            fid.write(b("% Ascii 3D points file created by mne-python version "
+                        "{version} at {now}\n".format(version=version,
+                                                      now=now)))
+            fid.write(b("% {N} 3D points, "
+                        "x y z per line\n".format(N=len(dig_points))))
+            np.savetxt(fid, dig_points, delimiter='\t', newline='\n')
+    else:
+        msg = "Unrecognized extension: %r. Need '.txt'." % ext
+        raise ValueError(msg)
+
+
+def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
+                     dig_points=None):
+    """Constructs digitizer info for the info.
+
+    Parameters
+    ----------
+    nasion : array-like | numpy.ndarray, shape (3,) | None
+        Point designated as the nasion point.
+    lpa : array-like |  numpy.ndarray, shape (3,) | None
+        Point designated as the left auricular point.
+    rpa : array-like |  numpy.ndarray, shape (3,) | None
+        Point designated as the right auricular point.
+    hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
+        Points designated as head position indicator points.
+    dig_points : array-like | numpy.ndarray, shape (n_points, 3) | None
+        Points designated as the headshape points.
+
+    Returns
+    -------
+    dig : list
+        List of digitizer points to be added to the info['dig'].
+    """
+    dig = []
+    if nasion is not None:
+        nasion = np.asarray(nasion)
+        if nasion.shape == (3,):
+            dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame':  FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('Nasion should have the shape (3,) instead of %s'
+                   % (nasion.shape,))
+            raise ValueError(msg)
+    if lpa is not None:
+        lpa = np.asarray(lpa)
+        if lpa.shape == (3,):
+            dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame':  FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('LPA should have the shape (3,) instead of %s'
+                   % (lpa.shape,))
+            raise ValueError(msg)
+    if rpa is not None:
+        rpa = np.asarray(rpa)
+        if rpa.shape == (3,):
+            dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame':  FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('RPA should have the shape (3,) instead of %s'
+                   % (rpa.shape,))
+            raise ValueError(msg)
+    if hpi is not None:
+        hpi = np.asarray(hpi)
+        if hpi.shape[1] == 3:
+            for idx, point in enumerate(hpi):
+                dig.append({'r': point, 'ident': idx,
+                            'kind': FIFF.FIFFV_POINT_HPI,
+                            'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('HPI should have the shape (n_points, 3) instead of '
+                   '%s' % (hpi.shape,))
+            raise ValueError(msg)
+    if dig_points is not None:
+        dig_points = np.asarray(dig_points)
+        if dig_points.shape[1] == 3:
+            for idx, point in enumerate(dig_points):
+                dig.append({'r': point, 'ident': idx,
+                            'kind': FIFF.FIFFV_POINT_EXTRA,
+                            'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('Points should have the shape (n_points, 3) instead of '
+                   '%s' % (dig_points.shape,))
+            raise ValueError(msg)
+
+    return dig
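+
+# Usage sketch (illustrative only; coordinates are placeholder values in
+# meters, head coordinate frame):
+#
+#     >>> dig = _make_dig_points(nasion=[0., 0.1, 0.], lpa=[-0.08, 0., 0.],
+#     ...                        rpa=[0.08, 0., 0.])
+#     >>> len(dig)
+#     3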
+
+
+@verbose
+def read_info(fname, verbose=None):
+    """Read measurement info from a file
+
+    Parameters
+    ----------
+    fname : str
+        File name.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    info : instance of mne.io.meas_info.Info
+       Info on dataset.
+    """
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        info = read_meas_info(fid, tree)[0]
+    return info
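+
+# Usage sketch (illustrative only; the file name is hypothetical):
+#
+#     >>> info = read_info('sample_raw.fif')  # doctest: +SKIP
+#     >>> info['nchan']  # doctest: +SKIP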
+
+
+def read_bad_channels(fid, node):
+    """Read bad channels
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+
+    node : dict
+        The node of the FIF tree that contains info on the bad channels.
+
+    Returns
+    -------
+    bads : list
+        A list of bad channel names.
+    """
+    nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    bads = []
+    if len(nodes) > 0:
+        for node in nodes:
+            tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)
+            if tag is not None and tag.data is not None:
+                bads = tag.data.split(':')
+    return bads
+
+
+@verbose
+def read_meas_info(fid, tree, verbose=None):
+    """Read the measurement info
+
+    Parameters
+    ----------
+    fid : file
+        Open file descriptor.
+    tree : tree
+        FIF tree structure.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    info : instance of mne.io.meas_info.Info
+       Info on dataset.
+    meas : dict
+        Node in tree that contains the info.
+    """
+
+    #   Find the desired blocks
+    meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
+    if len(meas) == 0:
+        raise ValueError('Could not find measurement data')
+    if len(meas) > 1:
+        raise ValueError('Cannot read more than 1 measurement data block')
+    meas = meas[0]
+
+    meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
+    if len(meas_info) == 0:
+        raise ValueError('Could not find measurement info')
+    if len(meas_info) > 1:
+        raise ValueError('Cannot read more than 1 measurement info block')
+    meas_info = meas_info[0]
+
+    #   Read measurement info
+    dev_head_t = None
+    ctf_head_t = None
+    dev_ctf_t = None
+    meas_date = None
+    highpass = None
+    lowpass = None
+    nchan = None
+    sfreq = None
+    chs = []
+    experimenter = None
+    description = None
+    proj_id = None
+    proj_name = None
+    line_freq = None
+    custom_ref_applied = False
+    p = 0
+    for k in range(meas_info['nent']):
+        kind = meas_info['directory'][k].kind
+        pos = meas_info['directory'][k].pos
+        if kind == FIFF.FIFF_NCHAN:
+            tag = read_tag(fid, pos)
+            nchan = int(tag.data)
+        elif kind == FIFF.FIFF_SFREQ:
+            tag = read_tag(fid, pos)
+            sfreq = float(tag.data)
+        elif kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+            p += 1
+        elif kind == FIFF.FIFF_LOWPASS:
+            tag = read_tag(fid, pos)
+            lowpass = float(tag.data)
+        elif kind == FIFF.FIFF_HIGHPASS:
+            tag = read_tag(fid, pos)
+            highpass = float(tag.data)
+        elif kind == FIFF.FIFF_MEAS_DATE:
+            tag = read_tag(fid, pos)
+            meas_date = tag.data
+        elif kind == FIFF.FIFF_COORD_TRANS:
+            tag = read_tag(fid, pos)
+            cand = tag.data
+
+            if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
+                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                dev_head_t = cand
+            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
+                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                ctf_head_t = cand
+            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \
+                    cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
+                dev_ctf_t = cand
+        elif kind == FIFF.FIFF_EXPERIMENTER:
+            tag = read_tag(fid, pos)
+            experimenter = tag.data
+        elif kind == FIFF.FIFF_DESCRIPTION:
+            tag = read_tag(fid, pos)
+            description = tag.data
+        elif kind == FIFF.FIFF_PROJ_ID:
+            tag = read_tag(fid, pos)
+            proj_id = tag.data
+        elif kind == FIFF.FIFF_PROJ_NAME:
+            tag = read_tag(fid, pos)
+            proj_name = tag.data
+        elif kind == FIFF.FIFF_LINE_FREQ:
+            tag = read_tag(fid, pos)
+            line_freq = float(tag.data)
+        elif kind == FIFF.FIFF_CUSTOM_REF:
+            tag = read_tag(fid, pos)
+            custom_ref_applied = bool(tag.data)
+
+    # Check that we have everything we need
+    if nchan is None:
+        raise ValueError('Number of channels is not defined')
+
+    if sfreq is None:
+        raise ValueError('Sampling frequency is not defined')
+
+    if len(chs) == 0:
+        raise ValueError('Channel information not defined')
+
+    if len(chs) != nchan:
+        raise ValueError('Incorrect number of channel definitions found')
+
+    if dev_head_t is None or ctf_head_t is None:
+        hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
+        if len(hpi_result) == 1:
+            hpi_result = hpi_result[0]
+            for k in range(hpi_result['nent']):
+                kind = hpi_result['directory'][k].kind
+                pos = hpi_result['directory'][k].pos
+                if kind == FIFF.FIFF_COORD_TRANS:
+                    tag = read_tag(fid, pos)
+                    cand = tag.data
+                    if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and
+                            cand['to'] == FIFF.FIFFV_COORD_HEAD and
+                            dev_head_t is None):
+                        dev_head_t = cand
+                    elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and
+                          cand['to'] == FIFF.FIFFV_COORD_HEAD and
+                          ctf_head_t is None):
+                        ctf_head_t = cand
+
+    #   Locate the Polhemus data
+    isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
+    dig = None
+    if len(isotrak) == 0:
+        logger.info('Isotrak not found')
+    elif len(isotrak) > 1:
+        warn('Multiple Isotrak found')
+    else:
+        isotrak = isotrak[0]
+        dig = []
+        for k in range(isotrak['nent']):
+            kind = isotrak['directory'][k].kind
+            pos = isotrak['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                tag = read_tag(fid, pos)
+                dig.append(tag.data)
+                dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+
+    #   Locate the acquisition information
+    acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
+    acq_pars = None
+    acq_stim = None
+    if len(acqpars) == 1:
+        acqpars = acqpars[0]
+        for k in range(acqpars['nent']):
+            kind = acqpars['directory'][k].kind
+            pos = acqpars['directory'][k].pos
+            if kind == FIFF.FIFF_DACQ_PARS:
+                tag = read_tag(fid, pos)
+                acq_pars = tag.data
+            elif kind == FIFF.FIFF_DACQ_STIM:
+                tag = read_tag(fid, pos)
+                acq_stim = tag.data
+
+    #   Load the SSP data
+    projs = _read_proj(fid, meas_info)
+
+    #   Load the CTF compensation data
+    comps = read_ctf_comp(fid, meas_info, chs)
+
+    #   Load the bad channel list
+    bads = read_bad_channels(fid, meas_info)
+
+    #
+    #   Put the data together
+    #
+    if tree['id'] is not None:
+        info = Info(file_id=tree['id'])
+    else:
+        info = Info(file_id=None)
+
+    #   Locate events list
+    events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)
+    evs = list()
+    for event in events:
+        ev = dict()
+        for k in range(event['nent']):
+            kind = event['directory'][k].kind
+            pos = event['directory'][k].pos
+            if kind == FIFF.FIFF_EVENT_CHANNELS:
+                ev['channels'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_EVENT_LIST:
+                ev['list'] = read_tag(fid, pos).data
+        evs.append(ev)
+    info['events'] = evs
+
+    #   Locate HPI result
+    hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
+    hrs = list()
+    for hpi_result in hpi_results:
+        hr = dict()
+        hr['dig_points'] = []
+        for k in range(hpi_result['nent']):
+            kind = hpi_result['directory'][k].kind
+            pos = hpi_result['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                hr['dig_points'].append(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:
+                hr['order'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_COILS_USED:
+                hr['used'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:
+                hr['moments'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:
+                hr['goodness'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:
+                hr['good_limit'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:
+                hr['dist_limit'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:
+                hr['accept'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_COORD_TRANS:
+                hr['coord_trans'] = read_tag(fid, pos).data
+        hrs.append(hr)
+    info['hpi_results'] = hrs
+
+    #   Locate HPI Measurement
+    hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)
+    hms = list()
+    for hpi_meas in hpi_meass:
+        hm = dict()
+        for k in range(hpi_meas['nent']):
+            kind = hpi_meas['directory'][k].kind
+            pos = hpi_meas['directory'][k].pos
+            if kind == FIFF.FIFF_CREATOR:
+                hm['creator'] = text_type(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_SFREQ:
+                hm['sfreq'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_NCHAN:
+                hm['nchan'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_NAVE:
+                hm['nave'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_NCOIL:
+                hm['ncoil'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_FIRST_SAMPLE:
+                hm['first_samp'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_LAST_SAMPLE:
+                hm['last_samp'] = int(read_tag(fid, pos).data)
+        hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)
+        hcs = []
+        for hpi_coil in hpi_coils:
+            hc = dict()
+            for k in range(hpi_coil['nent']):
+                kind = hpi_coil['directory'][k].kind
+                pos = hpi_coil['directory'][k].pos
+                if kind == FIFF.FIFF_HPI_COIL_NO:
+                    hc['number'] = int(read_tag(fid, pos).data)
+                elif kind == FIFF.FIFF_EPOCH:
+                    hc['epoch'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_SLOPES:
+                    hc['slopes'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_CORR_COEFF:
+                    hc['corr_coeff'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_COIL_FREQ:
+                    hc['coil_freq'] = read_tag(fid, pos).data
+            hcs.append(hc)
+        hm['hpi_coils'] = hcs
+        hms.append(hm)
+    info['hpi_meas'] = hms
+
+    subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
+    si = None
+    if len(subject_info) == 1:
+        subject_info = subject_info[0]
+        si = dict()
+        for k in range(subject_info['nent']):
+            kind = subject_info['directory'][k].kind
+            pos = subject_info['directory'][k].pos
+            if kind == FIFF.FIFF_SUBJ_ID:
+                tag = read_tag(fid, pos)
+                si['id'] = int(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_HIS_ID:
+                tag = read_tag(fid, pos)
+                si['his_id'] = text_type(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
+                tag = read_tag(fid, pos)
+                si['last_name'] = text_type(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
+                tag = read_tag(fid, pos)
+                si['first_name'] = text_type(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:
+                tag = read_tag(fid, pos)
+                si['middle_name'] = text_type(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
+                tag = read_tag(fid, pos)
+                si['birthday'] = tag.data
+            elif kind == FIFF.FIFF_SUBJ_SEX:
+                tag = read_tag(fid, pos)
+                si['sex'] = int(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_HAND:
+                tag = read_tag(fid, pos)
+                si['hand'] = int(tag.data)
+    info['subject_info'] = si
+
+    hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)
+    hs = None
+    if len(hpi_subsystem) == 1:
+        hpi_subsystem = hpi_subsystem[0]
+        hs = dict()
+        for k in range(hpi_subsystem['nent']):
+            kind = hpi_subsystem['directory'][k].kind
+            pos = hpi_subsystem['directory'][k].pos
+            if kind == FIFF.FIFF_HPI_NCOIL:
+                tag = read_tag(fid, pos)
+                hs['ncoil'] = int(tag.data)
+            elif kind == FIFF.FIFF_EVENT_CHANNEL:
+                tag = read_tag(fid, pos)
+                hs['event_channel'] = text_type(tag.data)
+            hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)
+            hc = []
+            for coil in hpi_coils:
+                this_coil = dict()
+                for j in range(coil['nent']):
+                    kind = coil['directory'][j].kind
+                    pos = coil['directory'][j].pos
+                    if kind == FIFF.FIFF_EVENT_BITS:
+                        tag = read_tag(fid, pos)
+                        this_coil['event_bits'] = np.array(tag.data)
+                hc.append(this_coil)
+            hs['hpi_coils'] = hc
+    info['hpi_subsystem'] = hs
+
+    #   Read processing history
+    _read_proc_history(fid, tree, info)
+
+    #  Make the most appropriate selection for the measurement id
+    if meas_info['parent_id'] is None:
+        if meas_info['id'] is None:
+            if meas['id'] is None:
+                if meas['parent_id'] is None:
+                    info['meas_id'] = info['file_id']
+                else:
+                    info['meas_id'] = meas['parent_id']
+            else:
+                info['meas_id'] = meas['id']
+        else:
+            info['meas_id'] = meas_info['id']
+    else:
+        info['meas_id'] = meas_info['parent_id']
+
+    info['experimenter'] = experimenter
+    info['description'] = description
+    info['proj_id'] = proj_id
+    info['proj_name'] = proj_name
+
+    if meas_date is None:
+        info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
+    else:
+        info['meas_date'] = meas_date
+
+    info['nchan'] = nchan
+    info['sfreq'] = sfreq
+    info['highpass'] = highpass if highpass is not None else 0
+    info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
+    info['line_freq'] = line_freq
+
+    #   Add the channel information and make a list of channel names
+    #   for convenience
+    info['chs'] = chs
+    info['ch_names'] = [ch['ch_name'] for ch in chs]
+
+    #
+    #  Add the coordinate transformations
+    #
+    info['dev_head_t'] = dev_head_t
+    info['ctf_head_t'] = ctf_head_t
+    info['dev_ctf_t'] = dev_ctf_t
+    if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:
+        from ..transforms import Transform
+        head_ctf_trans = linalg.inv(ctf_head_t['trans'])
+        dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
+        info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)
+
+    #   All kinds of auxiliary stuff
+    info['dig'] = dig
+    info['bads'] = bads
+    info['projs'] = projs
+    info['comps'] = comps
+    info['acq_pars'] = acq_pars
+    info['acq_stim'] = acq_stim
+    info['custom_ref_applied'] = custom_ref_applied
+    info._check_consistency()
+
+    return info, meas
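+
+# Usage sketch (illustrative only; the file name is hypothetical):
+# read_meas_info is typically driven through fiff_open, as in read_info above.
+#
+#     >>> fid, tree, _ = fiff_open('sample_raw.fif')  # doctest: +SKIP
+#     >>> info, meas = read_meas_info(fid, tree)  # doctest: +SKIP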
+
+
+def write_meas_info(fid, info, data_type=None, reset_range=True):
+    """Write measurement info into a file id (from a fif file)
+
+    Parameters
+    ----------
+    fid : file
+        Open file descriptor.
+    info : instance of mne.io.meas_info.Info
+        The measurement info structure.
+    data_type : int
+        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
+        raw data.
+    reset_range : bool
+        If True, info['chs'][k]['range'] will be set to unity.
+
+    Notes
+    -----
+    Tags are written in a particular order for compatibility with maxfilter.
+    """
+    info._check_consistency()
+
+    # Measurement info
+    start_block(fid, FIFF.FIFFB_MEAS_INFO)
+
+    for event in info['events']:
+        start_block(fid, FIFF.FIFFB_EVENTS)
+        if event.get('channels') is not None:
+            write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])
+        if event.get('list') is not None:
+            write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])
+        end_block(fid, FIFF.FIFFB_EVENTS)
+
+    #   HPI Result
+    for hpi_result in info['hpi_results']:
+        start_block(fid, FIFF.FIFFB_HPI_RESULT)
+        for d in hpi_result['dig_points']:
+            write_dig_point(fid, d)
+        if 'order' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,
+                      hpi_result['order'])
+        if 'used' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])
+        if 'moments' in hpi_result:
+            write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,
+                               hpi_result['moments'])
+        if 'goodness' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,
+                        hpi_result['goodness'])
+        if 'good_limit' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
+                        hpi_result['good_limit'])
+        if 'dist_limit' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,
+                        hpi_result['dist_limit'])
+        if 'accept' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])
+        if 'coord_trans' in hpi_result:
+            write_coord_trans(fid, hpi_result['coord_trans'])
+        end_block(fid, FIFF.FIFFB_HPI_RESULT)
+
+    #   HPI Measurement
+    for hpi_meas in info['hpi_meas']:
+        start_block(fid, FIFF.FIFFB_HPI_MEAS)
+        if hpi_meas.get('creator') is not None:
+            write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])
+        if hpi_meas.get('sfreq') is not None:
+            write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])
+        if hpi_meas.get('nchan') is not None:
+            write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])
+        if hpi_meas.get('nave') is not None:
+            write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])
+        if hpi_meas.get('ncoil') is not None:
+            write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])
+        if hpi_meas.get('first_samp') is not None:
+            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])
+        if hpi_meas.get('last_samp') is not None:
+            write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])
+        for hpi_coil in hpi_meas['hpi_coils']:
+            start_block(fid, FIFF.FIFFB_HPI_COIL)
+            if hpi_coil.get('number') is not None:
+                write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])
+            if hpi_coil.get('epoch') is not None:
+                write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])
+            if hpi_coil.get('slopes') is not None:
+                write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])
+            if hpi_coil.get('corr_coeff') is not None:
+                write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,
+                            hpi_coil['corr_coeff'])
+            if hpi_coil.get('coil_freq') is not None:
+                write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,
+                            hpi_coil['coil_freq'])
+            end_block(fid, FIFF.FIFFB_HPI_COIL)
+        end_block(fid, FIFF.FIFFB_HPI_MEAS)
+
+    #   Polhemus data
+    if info['dig'] is not None:
+        start_block(fid, FIFF.FIFFB_ISOTRAK)
+        for d in info['dig']:
+            write_dig_point(fid, d)
+
+        end_block(fid, FIFF.FIFFB_ISOTRAK)
+
+    #   megacq parameters
+    if info['acq_pars'] is not None or info['acq_stim'] is not None:
+        start_block(fid, FIFF.FIFFB_DACQ_PARS)
+        if info['acq_pars'] is not None:
+            write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
+
+        if info['acq_stim'] is not None:
+            write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
+
+        end_block(fid, FIFF.FIFFB_DACQ_PARS)
+
+    #   Coordinate transformations if the HPI result block was not there
+    if info['dev_head_t'] is not None:
+        write_coord_trans(fid, info['dev_head_t'])
+
+    if info['ctf_head_t'] is not None:
+        write_coord_trans(fid, info['ctf_head_t'])
+
+    if info['dev_ctf_t'] is not None:
+        write_coord_trans(fid, info['dev_ctf_t'])
+
+    #   Projectors
+    _write_proj(fid, info['projs'])
+
+    #   CTF compensation info
+    write_ctf_comp(fid, info['comps'])
+
+    #   Bad channels
+    if len(info['bads']) > 0:
+        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
+        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
+
+    #   General
+    if info.get('experimenter') is not None:
+        write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
+    if info.get('description') is not None:
+        write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
+    if info.get('proj_id') is not None:
+        write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
+    if info.get('proj_name') is not None:
+        write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
+    if info.get('meas_date') is not None:
+        write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
+    write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
+    write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
+    write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
+    write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
+    if info.get('line_freq') is not None:
+        write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
+    if data_type is not None:
+        write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
+    if info.get('custom_ref_applied'):
+        write_int(fid, FIFF.FIFF_CUSTOM_REF, info['custom_ref_applied'])
+
+    #  Channel information
+    for k, c in enumerate(info['chs']):
+        #   Scan numbers may have been messed up
+        c = deepcopy(c)
+        c['scanno'] = k + 1
+        # for float/double, the "range" param is unnecessary
+        if reset_range is True:
+            c['range'] = 1.0
+        write_ch_info(fid, c)
+
+    # Subject information
+    if info.get('subject_info') is not None:
+        start_block(fid, FIFF.FIFFB_SUBJECT)
+        si = info['subject_info']
+        if si.get('id') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
+        if si.get('his_id') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
+        if si.get('last_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
+        if si.get('first_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
+        if si.get('middle_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])
+        if si.get('birthday') is not None:
+            write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
+        if si.get('sex') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
+        if si.get('hand') is not None:
+            write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
+        end_block(fid, FIFF.FIFFB_SUBJECT)
+
+    if info.get('hpi_subsystem') is not None:
+        hs = info['hpi_subsystem']
+        start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
+        if hs.get('ncoil') is not None:
+            write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])
+        if hs.get('event_channel') is not None:
+            write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])
+        if hs.get('hpi_coils') is not None:
+            for coil in hs['hpi_coils']:
+                start_block(fid, FIFF.FIFFB_HPI_COIL)
+                if coil.get('event_bits') is not None:
+                    write_int(fid, FIFF.FIFF_EVENT_BITS,
+                              coil['event_bits'])
+                end_block(fid, FIFF.FIFFB_HPI_COIL)
+        end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
+
+    end_block(fid, FIFF.FIFFB_MEAS_INFO)
+
+    #   Processing history
+    _write_proc_history(fid, info)
+
+
+def write_info(fname, info, data_type=None, reset_range=True):
+    """Write measurement info in fif file.
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file. Should end with -info.fif.
+    info : instance of mne.io.meas_info.Info
+        The measurement info structure
+    data_type : int
+        The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
+        5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
+        raw data.
+    reset_range : bool
+        If True, info['chs'][k]['range'] will be set to unity.
+    """
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_meas_info(fid, info, data_type, reset_range)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
+
+
+def _is_equal_dict(dicts):
+    """Aux function"""
+    tests = zip(*[d.items() for d in dicts])
+    is_equal = []
+    for d in tests:
+        k0, v0 = d[0]
+        is_equal.append(all(np.all(k == k0) and
+                        np.all(v == v0) for k, v in d))
+    return all(is_equal)
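+
+# Behavior sketch (illustrative only): dicts compare equal item-by-item,
+# relying on matching iteration order.
+#
+#     >>> _is_equal_dict([dict(a=1), dict(a=1)])
+#     True
+#     >>> _is_equal_dict([dict(a=1), dict(a=2)])
+#     False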
+
+
+@verbose
+def _merge_dict_values(dicts, key, verbose=None):
+    """Merge things together
+
+    Fork for {'dict', 'list', 'array', 'other'}
+    and consider cases where one or all are of the same type.
+    """
+    values = [d[key] for d in dicts]
+    msg = ("Don't know how to merge '%s'. Make sure values are "
+           "compatible." % key)
+
+    def _flatten(lists):
+        return [item for sublist in lists for item in sublist]
+
+    def _check_isinstance(values, kind, func):
+        return func([isinstance(v, kind) for v in values])
+
+    def _where_isinstance(values, kind):
+        """Aux function"""
+        return np.where([isinstance(v, kind) for v in values])[0]
+
+    # list
+    if _check_isinstance(values, list, all):
+        lists = (d[key] for d in dicts)
+        return (_uniquify_projs(_flatten(lists)) if key == 'projs'
+                else _flatten(lists))
+    elif _check_isinstance(values, list, any):
+        idx = _where_isinstance(values, list)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            lists = (d[key] for d in dicts if isinstance(d[key], list))
+            return _flatten(lists)
+    # dict
+    elif _check_isinstance(values, dict, all):
+        is_qual = _is_equal_dict(values)
+        if is_qual:
+            return values[0]
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, dict, any):
+        idx = _where_isinstance(values, dict)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # ndarray
+    elif _check_isinstance(values, np.ndarray, all):
+        is_qual = all(np.all(values[0] == x) for x in values[1:])
+        if is_qual:
+            return values[0]
+        elif key == 'meas_date':
+            logger.info('Found multiple entries for %s. '
+                        'Setting value to `None`' % key)
+            return None
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, np.ndarray, any):
+        idx = _where_isinstance(values, np.ndarray)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # other
+    else:
+        unique_values = set(values)
+        if len(unique_values) == 1:
+            return list(values)[0]
+        elif isinstance(list(unique_values)[0], BytesIO):
+            logger.info('Found multiple StringIO instances. '
+                        'Setting value to `None`')
+            return None
+        elif isinstance(list(unique_values)[0], string_types):
+            logger.info('Found multiple filenames. '
+                        'Setting value to `None`')
+            return None
+        else:
+            raise RuntimeError(msg)
+
+
+@verbose
+def _merge_info(infos, verbose=None):
+    """Merge multiple measurement info dictionaries.
+
+     - Fields that are present in only one info object will be used in the
+       merged info.
+     - Fields that are present in multiple info objects and are the same
+       will be used in the merged info.
+     - Fields that are present in multiple info objects and are different
+       will result in a None value in the merged info.
+     - Channels will be concatenated. If multiple info objects contain
+       channels with the same name, an exception is raised.
+
+    Parameters
+    ----------
+    infos : list of instance of Info
+        Info objects to merge into one info object.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    info : instance of Info
+        The merged info object.
+    """
+    for info in infos:
+        info._check_consistency()
+    info = Info()
+    ch_names = _merge_dict_values(infos, 'ch_names')
+    duplicates = set([ch for ch in ch_names if ch_names.count(ch) > 1])
+    if len(duplicates) > 0:
+        msg = ("The following channels are present in more than one input "
+               "measurement info objects: %s" % list(duplicates))
+        raise ValueError(msg)
+    info['nchan'] = len(ch_names)
+    info['ch_names'] = ch_names
+    info['chs'] = []
+    for this_info in infos:
+        info['chs'].extend(this_info['chs'])
+
+    transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']
+    for trans_name in transforms:
+        trans = [i[trans_name] for i in infos if i[trans_name]]
+        if len(trans) == 0:
+            info[trans_name] = None
+        elif len(trans) == 1:
+            info[trans_name] = trans[0]
+        elif all(np.all(trans[0]['trans'] == x['trans']) and
+                 trans[0]['from'] == x['from'] and
+                 trans[0]['to'] == x['to']
+                 for x in trans[1:]):
+            info[trans_name] = trans[0]
+        else:
+            msg = ("Measurement infos provide mutually inconsistent %s" %
+                   trans_name)
+            raise ValueError(msg)
+    other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',
+                    'comps', 'custom_ref_applied', 'description', 'dig',
+                    'experimenter', 'file_id', 'filename', 'highpass',
+                    'hpi_results', 'hpi_meas', 'hpi_subsystem', 'events',
+                    'line_freq', 'lowpass', 'meas_date', 'meas_id',
+                    'proj_id', 'proj_name', 'projs', 'sfreq',
+                    'subject_info']
+
+    for k in other_fields:
+        info[k] = _merge_dict_values(infos, k)
+    info._check_consistency()
+    return info
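+
+# Usage sketch (illustrative only, hypothetical channel names): merging two
+# infos with disjoint channel sets concatenates the channels.
+#
+#     >>> info1 = create_info(['EEG 001'], 1000., 'eeg')
+#     >>> info2 = create_info(['EEG 002'], 1000., 'eeg')
+#     >>> _merge_info([info1, info2])['nchan']
+#     2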
+
+
+def create_info(ch_names, sfreq, ch_types=None, montage=None):
+    """Create a basic Info instance suitable for use with create_raw
+
+    Parameters
+    ----------
+    ch_names : list of str | int
+        Channel names. If an int, a list of channel names will be created
+        from range(ch_names)
+    sfreq : float
+        Sample rate of the data.
+    ch_types : list of str | str
+        Channel types. If None, data are assumed to be misc.
+        Currently supported fields are "mag", "grad", "eeg", and "misc".
+        If str, then all channels are assumed to be of the same type.
+    montage : None | str | Montage | DigMontage | list
+        A montage containing channel positions. If str or Montage is
+        specified, the channel info will be updated with the channel
+        positions. Default is None. If DigMontage is specified, the
+        digitizer information will be updated. A list of unique montages
+        can be specified and applied to the info. See also the documentation of
+        :func:`mne.channels.read_montage` for more information.
+
+    Notes
+    -----
+    The info dictionary will be sparsely populated to enable functionality
+    within the rest of the package. Advanced functionality such as source
+    localization can only be obtained through substantial, proper
+    modifications of the info structure (not recommended).
+
+    Note that the MEG device-to-head transform ``info['dev_head_t']`` will
+    be initialized to the identity transform.
+    """
+    if isinstance(ch_names, int):
+        ch_names = list(np.arange(ch_names).astype(str))
+    if not isinstance(ch_names, (list, tuple)):
+        raise TypeError('ch_names must be a list, tuple, or int')
+    sfreq = float(sfreq)
+    if sfreq <= 0:
+        raise ValueError('sfreq must be positive')
+    nchan = len(ch_names)
+    if ch_types is None:
+        ch_types = ['misc'] * nchan
+    if isinstance(ch_types, string_types):
+        ch_types = [ch_types] * nchan
+    if len(ch_types) != nchan:
+        raise ValueError('ch_types and ch_names must be the same length')
+    info = _empty_info()
+    info['meas_date'] = np.array([0, 0], np.int32)
+    info['sfreq'] = sfreq
+    info['ch_names'] = ch_names
+    info['nchan'] = nchan
+    loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)
+    for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):
+        if not isinstance(name, string_types):
+            raise TypeError('each entry in ch_names must be a string')
+        if not isinstance(kind, string_types):
+            raise TypeError('each entry in ch_types must be a string')
+        if kind not in _kind_dict:
+            raise KeyError('kind must be one of %s, not %s'
+                           % (list(_kind_dict.keys()), kind))
+        kind = _kind_dict[kind]
+        chan_info = dict(loc=loc, unit_mul=0, range=1., cal=1.,
+                         kind=kind[0], coil_type=kind[1],
+                         unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+                         ch_name=name, scanno=ci + 1, logno=ci + 1)
+        info['chs'].append(chan_info)
+    if montage is not None:
+        from ..channels.montage import (Montage, DigMontage, _set_montage,
+                                        read_montage)
+        if not isinstance(montage, list):
+            montage = [montage]
+        for montage_ in montage:
+            if isinstance(montage_, (Montage, DigMontage)):
+                _set_montage(info, montage_)
+            elif isinstance(montage_, string_types):
+                montage_ = read_montage(montage_)
+                _set_montage(info, montage_)
+            else:
+                raise TypeError('Montage must be an instance of Montage, '
+                                'DigMontage, a list of montages, or filepath, '
+                                'not %s.' % type(montage))
+    return info
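+
+# Usage sketch (illustrative only, hypothetical channel names):
+#
+#     >>> info = create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=1000.,
+#     ...                    ch_types='eeg')
+#     >>> info['nchan']
+#     2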
+
+
+RAW_INFO_FIELDS = (
+    'acq_pars', 'acq_stim', 'bads', 'buffer_size_sec', 'ch_names', 'chs',
+    'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',
+    'dev_head_t', 'dig', 'experimenter', 'events',
+    'file_id', 'filename', 'highpass', 'hpi_meas', 'hpi_results',
+    'hpi_subsystem', 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'nchan',
+    'proj_id', 'proj_name', 'projs', 'sfreq', 'subject_info',
+)
+
+
+def _empty_info():
+    """Create an empty info dictionary"""
+    from ..transforms import Transform
+    _none_keys = (
+        'acq_pars', 'acq_stim', 'buffer_size_sec', 'ctf_head_t', 'description',
+        'dev_ctf_t', 'dig', 'experimenter',
+        'file_id', 'filename', 'highpass', 'hpi_subsystem', 'line_freq',
+        'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',
+        'subject_info',
+    )
+    _list_keys = (
+        'bads', 'ch_names', 'chs', 'comps', 'events', 'hpi_meas',
+        'hpi_results', 'projs',
+    )
+    info = Info()
+    for k in _none_keys:
+        info[k] = None
+    for k in _list_keys:
+        info[k] = list()
+    info['custom_ref_applied'] = False
+    info['nchan'] = info['sfreq'] = 0
+    info['dev_head_t'] = Transform('meg', 'head', np.eye(4))
+    assert set(info.keys()) == set(RAW_INFO_FIELDS)
+    info._check_consistency()
+    return info
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/open.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/open.py
new file mode 100644
index 0000000..bcc1ce0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/open.py
@@ -0,0 +1,251 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six import string_types
+import numpy as np
+import os.path as op
+from io import BytesIO
+
+from .tag import read_tag_info, read_tag, read_big, Tag
+from .tree import make_dir_tree, dir_tree_find
+from .constants import FIFF
+from ..utils import logger, verbose
+from ..externals import six
+from ..fixes import gzip_open
+
+
+def _fiff_get_fid(fname):
+    """Helper to open a FIF file with no additional parsing"""
+    if isinstance(fname, string_types):
+        if op.splitext(fname)[1].lower() == '.gz':
+            logger.debug('Using gzip')
+            fid = gzip_open(fname, "rb")  # Open in binary mode
+        else:
+            logger.debug('Using normal I/O')
+            fid = open(fname, "rb")  # Open in binary mode
+    else:
+        fid = fname
+        fid.seek(0)
+    return fid
+
+
+def _get_next_fname(fid, fname, tree):
+    """Auxiliary function to get the next filename in split files."""
+    nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF)
+    next_fname = None
+    for nodes in nodes_list:
+        next_fname = None
+        for ent in nodes['directory']:
+            if ent.kind == FIFF.FIFF_REF_ROLE:
+                tag = read_tag(fid, ent.pos)
+                role = int(tag.data)
+                if role != FIFF.FIFFV_ROLE_NEXT_FILE:
+                    next_fname = None
+                    break
+            if ent.kind == FIFF.FIFF_REF_FILE_NAME:
+                tag = read_tag(fid, ent.pos)
+                next_fname = op.join(op.dirname(fname), tag.data)
+            if ent.kind == FIFF.FIFF_REF_FILE_NUM:
+                # Some files don't have the name, just the number. So
+                # we construct the name from the current name.
+                if next_fname is not None:
+                    continue
+                next_num = read_tag(fid, ent.pos).data
+                path, base = op.split(fname)
+                idx = base.find('.')
+                idx2 = base.rfind('-')
+                if idx2 < 0 and next_num == 1:
+                    # this is the first file, which may not be numbered
+                    next_fname = op.join(
+                        path, '%s-%d.%s' % (base[:idx], next_num,
+                                            base[idx + 1:]))
+                    continue
+                num_str = base[idx2 + 1:idx]
+                if not num_str.isdigit():
+                    continue
+                next_fname = op.join(path, '%s-%d.%s' % (base[:idx2],
+                                     next_num, base[idx + 1:]))
+        if next_fname is not None:
+            break
+    return next_fname
+
+
+@verbose
+def fiff_open(fname, preload=False, verbose=None):
+    """Open a FIF file.
+
+    Parameters
+    ----------
+    fname : string | fid
+        Name of the fif file, or an opened file (will seek back to 0).
+    preload : bool
+        If True, all data from the file is read into a memory buffer. This
+        requires more memory, but can be faster for I/O operations that require
+        frequent seeks.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fid : file
+        The file descriptor of the open file
+    tree : fif tree
+        The tree is a complex structure filled with dictionaries,
+        lists and tags.
+    directory : list
+        A list of tags.
+    """
+    fid = _fiff_get_fid(fname)
+    # do preloading of entire file
+    if preload:
+        # note that BytesIO objects instantiated this way are read-only,
+        # but that's okay here since we are using mode "rb" anyway
+        fid_old = fid
+        fid = BytesIO(read_big(fid_old))
+        fid_old.close()
+
+    tag = read_tag_info(fid)
+
+    #   Check that this looks like a fif file
+    if tag.kind != FIFF.FIFF_FILE_ID:
+        raise ValueError('file does not start with a file id tag')
+
+    if tag.type != FIFF.FIFFT_ID_STRUCT:
+        raise ValueError('file does not start with a file id tag')
+
+    if tag.size != 20:
+        raise ValueError('file does not start with a file id tag')
+
+    tag = read_tag(fid)
+
+    if tag.kind != FIFF.FIFF_DIR_POINTER:
+        raise ValueError('file does not have a directory pointer')
+
+    #   Read or create the directory tree
+    logger.debug('    Creating tag directory for %s...' % fname)
+
+    dirpos = int(tag.data)
+    if dirpos > 0:
+        tag = read_tag(fid, dirpos)
+        directory = tag.data
+    else:
+        fid.seek(0, 0)
+        directory = list()
+        while tag.next >= 0:
+            pos = fid.tell()
+            tag = read_tag_info(fid)
+            if tag is None:
+                break  # HACK : to fix file ending with empty tag...
+            else:
+                tag.pos = pos
+                directory.append(tag)
+
+    tree, _ = make_dir_tree(fid, directory)
+
+    logger.debug('[done]')
+
+    #   Back to the beginning
+    fid.seek(0)
+
+    return fid, tree, directory
+
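+# A minimal usage sketch (the file name is hypothetical; any FIF file
+# works):
+#
+#     fid, tree, directory = fiff_open('sample_audvis_raw.fif')
+#     print('%d tags in the file' % len(directory))
+#     fid.close()
+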
+
+@verbose
+def show_fiff(fname, indent='    ', read_limit=np.inf, max_str=30,
+              output=str, verbose=None):
+    """Show FIFF information
+
+    This function is similar to mne_show_fiff.
+
+    Parameters
+    ----------
+    fname : str
+        Filename to evaluate.
+    indent : str
+        How to indent the lines.
+    read_limit : int
+        Max number of bytes of data to read from a tag. Can be np.inf
+        to always read all data (helps test read completion).
+    max_str : int
+        Max number of characters of string representation to print for
+        each tag's data.
+    output : type
+        Either str or list. str is a convenience output for printing.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    if output not in [list, str]:
+        raise ValueError('output must be list or str')
+    f, tree, directory = fiff_open(fname)
+    with f as fid:
+        out = _show_tree(fid, tree['children'][0], indent=indent, level=0,
+                         read_limit=read_limit, max_str=max_str)
+    if output == str:
+        out = '\n'.join(out)
+    return out
+
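+# Usage sketch: print a human-readable dump of a FIF file's structure
+# (hypothetical path):
+#
+#     print(show_fiff('sample_audvis_raw.fif', indent='  '))
+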
+
+def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']):
+    """Helper to find matching values"""
+    vals = [k for k, v in six.iteritems(FIFF)
+            if v == value and any(fmt in k for fmt in fmts) and
+            not any(exc in k for exc in exclude)]
+    return vals
+
+
+def _show_tree(fid, tree, indent, level, read_limit, max_str):
+    """Helper for showing FIFF"""
+    this_idt = indent * level
+    next_idt = indent * (level + 1)
+    # print block-level information
+    out = [this_idt + str(tree['block'][0]) + ' = ' +
+           '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))]
+    if tree['directory'] is not None:
+        kinds = [ent.kind for ent in tree['directory']] + [-1]
+        sizes = [ent.size for ent in tree['directory']]
+        poss = [ent.pos for ent in tree['directory']]
+        counter = 0
+        good = True
+        for k, kn, size, pos in zip(kinds[:-1], kinds[1:], sizes, poss):
+            tag = Tag(k, size, 0, pos)
+            if read_limit is None or size <= read_limit:
+                try:
+                    tag = read_tag(fid, pos)
+                except Exception:
+                    good = False
+
+            if kn == k:
+                # don't print if the next item is the same type (count 'em)
+                counter += 1
+            else:
+                # find the tag type
+                this_type = _find_type(k, fmts=['FIFF_'])
+                # prepend a count if necessary
+                prepend = 'x' + str(counter + 1) + ': ' if counter > 0 else ''
+                postpend = ''
+                # print tag data nicely
+                if tag.data is not None:
+                    postpend = ' = ' + str(tag.data)[:max_str]
+                    if isinstance(tag.data, np.ndarray):
+                        if tag.data.size > 1:
+                            postpend += ' ... array size=' + str(tag.data.size)
+                    elif isinstance(tag.data, dict):
+                        postpend += ' ... dict len=' + str(len(tag.data))
+                    elif isinstance(tag.data, string_types):
+                        postpend += ' ... str len=' + str(len(tag.data))
+                    elif isinstance(tag.data, (list, tuple)):
+                        postpend += ' ... list len=' + str(len(tag.data))
+                    else:
+                        postpend += ' ... (unknown type)'
+                postpend = '>' * 20 + 'BAD' if not good else postpend
+                out += [next_idt + prepend + str(k) + ' = ' +
+                        '/'.join(this_type) + ' (' + str(size) + ')' +
+                        postpend]
+                counter = 0
+                good = True
+
+    # deal with children
+    for branch in tree['children']:
+        out += _show_tree(fid, branch, indent, level + 1, read_limit, max_str)
+    return out
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/pick.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/pick.py
new file mode 100644
index 0000000..027445f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/pick.py
@@ -0,0 +1,623 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+import re
+
+import numpy as np
+
+from .constants import FIFF
+from ..utils import logger, verbose
+from ..externals.six import string_types
+
+
+def channel_type(info, idx):
+    """Get channel type
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info
+    idx : int
+        Index of channel
+
+    Returns
+    -------
+    type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg' |
+           'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst' | 'misc' |
+           'seeg' | 'chpi'
+        Type of channel
+    """
+    kind = info['chs'][idx]['kind']
+    if kind == FIFF.FIFFV_MEG_CH:
+        if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:
+            return 'grad'
+        elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:
+            return 'mag'
+    elif kind == FIFF.FIFFV_REF_MEG_CH:
+        return 'ref_meg'
+    elif kind == FIFF.FIFFV_EEG_CH:
+        return 'eeg'
+    elif kind == FIFF.FIFFV_STIM_CH:
+        return 'stim'
+    elif kind == FIFF.FIFFV_EOG_CH:
+        return 'eog'
+    elif kind == FIFF.FIFFV_EMG_CH:
+        return 'emg'
+    elif kind == FIFF.FIFFV_ECG_CH:
+        return 'ecg'
+    elif kind == FIFF.FIFFV_RESP_CH:
+        return 'resp'
+    elif kind == FIFF.FIFFV_MISC_CH:
+        return 'misc'
+    elif kind == FIFF.FIFFV_EXCI_CH:
+        return 'exci'
+    elif kind == FIFF.FIFFV_IAS_CH:
+        return 'ias'
+    elif kind == FIFF.FIFFV_SYST_CH:
+        return 'syst'
+    elif kind == FIFF.FIFFV_SEEG_CH:
+        return 'seeg'
+    elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
+                  FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
+                  FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
+                  FIFF.FIFFV_HPI_MOV]:
+        return 'chpi'  # channels relative to head position monitoring
+    raise ValueError('Unknown channel type')
+
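+# Sketch (assumes `info` is a measurement-info dict from an MEG
+# recording):
+#
+#     channel_type(info, 0)   # -> e.g. 'grad' for a planar gradiometer
+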
+
+def pick_channels(ch_names, include, exclude=[]):
+    """Pick channels by names
+
+    Returns the indices of the good channels in ch_names.
+
+    Parameters
+    ----------
+    ch_names : list of string
+        List of channels.
+    include : list of string
+        List of channels to include (if empty include all available).
+    exclude : list of string
+        List of channels to exclude (if empty do not exclude any channel).
+        Defaults to [].
+
+    See Also
+    --------
+    pick_channels_regexp, pick_types
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
+    """
+    if len(np.unique(ch_names)) != len(ch_names):
+        raise RuntimeError('ch_names is not a unique list, picking is unsafe')
+    _check_excludes_includes(include)
+    _check_excludes_includes(exclude)
+    sel = []
+    for k, name in enumerate(ch_names):
+        if (len(include) == 0 or name in include) and name not in exclude:
+            sel.append(k)
+    # np.unique also sorts, so no separate sorting step is needed
+    return np.unique(sel)
+
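+# Worked example (follows directly from the code above): include=[]
+# keeps everything that is not excluded, and the result is sorted:
+#
+#     pick_channels(['MEG 0111', 'MEG 0112', 'EEG 001'],
+#                   include=[], exclude=['EEG 001'])
+#     # -> array([0, 1])
+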
+
+def pick_channels_regexp(ch_names, regexp):
+    """Pick channels using regular expression
+
+    Returns the indices of the good channels in ch_names.
+
+    Parameters
+    ----------
+    ch_names : list of string
+        List of channels
+
+    regexp : string
+        The regular expression. See python standard module for regular
+        expressions.
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
+
+    See Also
+    --------
+    pick_channels
+
+    Examples
+    --------
+    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
+    [0]
+    >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')
+    [0, 1, 2]
+    """
+    r = re.compile(regexp)
+    return [k for k, name in enumerate(ch_names) if r.match(name)]
+
+
+def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
+               emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
+               exci=False, ias=False, syst=False, seeg=False,
+               include=[], exclude='bads', selection=None):
+    """Pick channels by type and names
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    meg : bool | str
+        If True include all MEG channels. If False include none. If a
+        string, it can be 'mag', 'grad', 'planar1' or 'planar2' to select
+        only magnetometers, all gradiometers, or a specific type of
+        gradiometer, respectively.
+    eeg : bool
+        If True include EEG channels.
+    stim : bool
+        If True include stimulus channels.
+    eog : bool
+        If True include EOG channels.
+    ecg : bool
+        If True include ECG channels.
+    emg : bool
+        If True include EMG channels.
+    ref_meg : bool | str
+        If True include CTF / 4D reference channels. If 'auto', the reference
+        channels are only included if compensations are present.
+    misc : bool
+        If True include miscellaneous analog channels.
+    resp : bool
+        If True include response-trigger channel. For some MEG systems this
+        is separate from the stim channel.
+    chpi : bool
+        If True include continuous HPI coil channels.
+    exci : bool
+        If True include flux excitation channels (these used to be
+        stimulus channels).
+    ias : bool
+        Internal Active Shielding data (maybe on Triux only).
+    syst : bool
+        System status channel information (on Triux systems only).
+    seeg : bool
+        Stereotactic EEG channels
+    include : list of string
+        List of additional channels to include. If empty do not include any.
+    exclude : list of string | str
+        List of channels to exclude. If 'bads' (default), exclude channels
+        in info['bads'].
+    selection : list of string
+        Restrict sensor channels (MEG, EEG) to this list of channel names.
+
+    Returns
+    -------
+    sel : array of int
+        Indices of good channels.
+    """
+    # NOTE: Changes to this function's signature should also be changed in
+    # PickChannelsMixin
+    from .meas_info import Info
+    if not isinstance(info, Info):
+        raise TypeError('info must be an instance of Info, not %s'
+                        % type(info))
+    info._check_consistency()
+    nchan = info['nchan']
+    pick = np.zeros(nchan, dtype=np.bool)
+
+    if exclude is None:
+        raise ValueError('exclude must be a list of strings or "bads"')
+    elif exclude == 'bads':
+        exclude = info.get('bads', [])
+    elif not isinstance(exclude, (list, tuple)):
+        raise ValueError('exclude must either be "bads" or a list of strings.'
+                         ' If only one channel is to be excluded, use '
+                         '[ch_name] instead of passing ch_name.')
+
+    if isinstance(ref_meg, string_types):
+        if ref_meg != 'auto':
+            raise ValueError('ref_meg has to be either a bool or \'auto\'')
+
+        ref_meg = ('comps' in info and info['comps'] is not None and
+                   len(info['comps']) > 0)
+
+    for k in range(nchan):
+        kind = info['chs'][k]['kind']
+        if kind == FIFF.FIFFV_MEG_CH:
+            if meg is True:
+                pick[k] = True
+            elif info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T_M:
+                if meg == 'grad':
+                    pick[k] = True
+                elif meg == 'planar1' and info['ch_names'][k].endswith('2'):
+                    pick[k] = True
+                elif meg == 'planar2' and info['ch_names'][k].endswith('3'):
+                    pick[k] = True
+            elif (meg == 'mag' and
+                  info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T):
+                pick[k] = True
+        elif kind == FIFF.FIFFV_EEG_CH and eeg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_STIM_CH and stim:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_EOG_CH and eog:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_ECG_CH and ecg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_EMG_CH and emg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_MISC_CH and misc:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_RESP_CH and resp:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_SYST_CH and syst:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_SEEG_CH and seeg:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_IAS_CH and ias:
+            pick[k] = True
+        elif kind == FIFF.FIFFV_EXCI_CH and exci:
+            pick[k] = True
+        elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
+                      FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
+                      FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
+                      FIFF.FIFFV_HPI_MOV] and chpi:
+            pick[k] = True
+
+    # restrict channels to selection if provided
+    if selection is not None:
+        # the selection only restricts these types of channels
+        sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
+                    FIFF.FIFFV_EEG_CH]
+        for k in np.where(pick == True)[0]:  # noqa
+            if (info['chs'][k]['kind'] in sel_kind and
+                    info['ch_names'][k] not in selection):
+                pick[k] = False
+
+    myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
+    myinclude += include
+
+    if len(myinclude) == 0:
+        sel = []
+    else:
+        sel = pick_channels(info['ch_names'], myinclude, exclude)
+
+    return sel
+
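+# Sketch (assumes `info` is a populated Info from an MEG recording;
+# 'STI 014' is an illustrative stimulus-channel name):
+#
+#     picks = pick_types(info, meg='grad', eeg=False, stim=False,
+#                        include=['STI 014'], exclude='bads')
+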
+
+def pick_info(info, sel=[], copy=True):
+    """Restrict an info structure to a selection of channels
+
+    Parameters
+    ----------
+    info : dict
+        Info structure from evoked or raw data.
+    sel : list of int | None
+        Indices of channels to include. If None, all channels are kept.
+    copy : bool
+        If copy is False, info is modified in place.
+
+    Returns
+    -------
+    res : dict
+        Info structure restricted to a selection of channels.
+    """
+    info._check_consistency()
+    if copy:
+        info = deepcopy(info)
+    if sel is None:
+        return info
+    elif len(sel) == 0:
+        raise ValueError('No channels match the selection.')
+
+    info['chs'] = [info['chs'][k] for k in sel]
+    info['ch_names'] = [info['ch_names'][k] for k in sel]
+    info['nchan'] = len(sel)
+    info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
+
+    comps = deepcopy(info['comps'])
+    for c in comps:
+        row_idx = [k for k, n in enumerate(c['data']['row_names'])
+                   if n in info['ch_names']]
+        row_names = [c['data']['row_names'][i] for i in row_idx]
+        rowcals = c['rowcals'][row_idx]
+        c['rowcals'] = rowcals
+        c['data']['nrow'] = len(row_names)
+        c['data']['row_names'] = row_names
+        c['data']['data'] = c['data']['data'][row_idx]
+    info['comps'] = comps
+
+    return info
+
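+# Sketch: restrict an Info to its first ten channels (copies by default):
+#
+#     info_10 = pick_info(info, sel=list(range(10)))
+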
+
+def _has_kit_refs(info, picks):
+    """Helper to determine if KIT ref channels are chosen
+
+    This is currently only used by make_forward_solution, which cannot
+    run when KIT reference channels are included.
+    """
+    for p in picks:
+        if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG:
+            return True
+    return False
+
+
+def pick_channels_evoked(orig, include=[], exclude='bads'):
+    """Pick channels from evoked data
+
+    Parameters
+    ----------
+    orig : Evoked object
+        One evoked dataset.
+    include : list of string, (optional)
+        List of channels to include (if empty, include all available).
+    exclude : list of string | str
+        List of channels to exclude. If empty, do not exclude any.
+        If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'.
+
+    Returns
+    -------
+    res : instance of Evoked
+        Evoked data restricted to selected channels. If include and
+        exclude are empty it returns orig without copy.
+    """
+    if len(include) == 0 and len(exclude) == 0:
+        return orig
+
+    exclude = _check_excludes_includes(exclude, info=orig.info,
+                                       allow_bads=True)
+    sel = pick_channels(orig.info['ch_names'], include=include,
+                        exclude=exclude)
+
+    if len(sel) == 0:
+        raise ValueError('No channels match the selection.')
+
+    res = deepcopy(orig)
+    #
+    #   Modify the measurement info
+    #
+    res.info = pick_info(res.info, sel)
+    #
+    #   Create the reduced data set
+    #
+    res.data = res.data[sel, :]
+
+    return res
+
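+# Sketch (assumes `evoked` was loaded, e.g. with mne.read_evokeds; the
+# channel names are illustrative):
+#
+#     ev_sel = pick_channels_evoked(evoked,
+#                                   include=['MEG 0111', 'MEG 0112'])
+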
+
+@verbose
+def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
+    """Pick channels from forward operator
+
+    Parameters
+    ----------
+    orig : dict
+        A forward solution.
+    include : list of string
+        List of channels to include (if empty, include all available).
+        Defaults to [].
+    exclude : list of string | 'bads'
+        Channels to exclude (if empty, do not exclude any). Defaults to [].
+        If 'bads', then exclude bad channels in orig.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    res : dict
+        Forward solution restricted to selected channels. If include and
+        exclude are empty it returns orig without copy.
+    """
+    orig['info']._check_consistency()
+    if len(include) == 0 and len(exclude) == 0:
+        return orig
+    exclude = _check_excludes_includes(exclude,
+                                       info=orig['info'], allow_bads=True)
+
+    # Allow for possibility of channel ordering in forward solution being
+    # different from that of the M/EEG file it is based on.
+    sel_sol = pick_channels(orig['sol']['row_names'], include=include,
+                            exclude=exclude)
+    sel_info = pick_channels(orig['info']['ch_names'], include=include,
+                             exclude=exclude)
+
+    fwd = deepcopy(orig)
+
+    # Check that forward solution and original data file agree on #channels
+    if len(sel_sol) != len(sel_info):
+        raise ValueError('Forward solution and functional data appear to '
+                         'have different channel names, please check.')
+
+    #   Do we have something?
+    nuse = len(sel_sol)
+    if nuse == 0:
+        raise ValueError('Nothing remains after picking')
+
+    logger.info('    %d out of %d channels remain after picking'
+                % (nuse, fwd['nchan']))
+
+    #   Pick the correct rows of the forward operator using sel_sol
+    fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
+    fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
+    fwd['sol']['nrow'] = nuse
+
+    ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
+    fwd['nchan'] = nuse
+    fwd['sol']['row_names'] = ch_names
+
+    # Pick the appropriate channel names from the info-dict using sel_info
+    fwd['info']['ch_names'] = [fwd['info']['ch_names'][k] for k in sel_info]
+    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
+    fwd['info']['nchan'] = nuse
+    fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
+
+    if fwd['sol_grad'] is not None:
+        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
+        fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
+        fwd['sol_grad']['nrow'] = nuse
+        fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
+                                        for k in sel_sol]
+
+    return fwd
+
+
+def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,
+                       include=[], exclude=[]):
+    """Pick by channel type and names from a forward operator
+
+    Parameters
+    ----------
+    orig : dict
+        A forward solution
+    meg : bool | str
+        If True include all MEG channels. If False include none. If a
+        string, it can be 'mag' or 'grad' to select only magnetometers
+        or gradiometers, respectively.
+    eeg : bool
+        If True include EEG channels
+    ref_meg : bool
+        If True include CTF / 4D reference channels
+    seeg : bool
+        If True include stereotactic EEG channels
+    include : list of string
+        List of additional channels to include. If empty do not include any.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in orig['info']['bads'].
+
+    Returns
+    -------
+    res : dict
+        Forward solution restricted to selected channel types.
+    """
+    info = orig['info']
+    sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg,
+                     include=include, exclude=exclude)
+    if len(sel) == 0:
+        raise ValueError('No valid channels found')
+    include_ch_names = [info['ch_names'][k] for k in sel]
+    return pick_channels_forward(orig, include_ch_names)
+
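+# Sketch (assumes `fwd` was loaded with mne.read_forward_solution):
+#
+#     fwd_mag = pick_types_forward(fwd, meg='mag', eeg=False)
+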
+
+def channel_indices_by_type(info):
+    """Get indices of channels by type
+    """
+    idx = dict(grad=[], mag=[], eeg=[], seeg=[], eog=[], ecg=[], stim=[],
+               emg=[], ref_meg=[], misc=[], resp=[], chpi=[], exci=[], ias=[],
+               syst=[])
+    for k, ch in enumerate(info['chs']):
+        for key in idx.keys():
+            if channel_type(info, k) == key:
+                idx[key].append(k)
+
+    return idx
+
+
+def pick_channels_cov(orig, include=[], exclude='bads'):
+    """Pick channels from covariance matrix
+
+    Parameters
+    ----------
+    orig : Covariance
+        A covariance.
+    include : list of string, (optional)
+        List of channels to include (if empty, include all available).
+    exclude : list of string, (optional) | 'bads'
+        Channels to exclude (if empty, do not exclude any). Defaults to 'bads'.
+
+    Returns
+    -------
+    res : dict
+        Covariance solution restricted to selected channels.
+    """
+    exclude = orig['bads'] if exclude == 'bads' else exclude
+    sel = pick_channels(orig['names'], include=include, exclude=exclude)
+    res = deepcopy(orig)
+    res['dim'] = len(sel)
+    if not res['diag']:
+        res['data'] = orig['data'][sel][:, sel]
+    else:
+        res['data'] = orig['data'][sel]
+    res['names'] = [orig['names'][k] for k in sel]
+    res['bads'] = [name for name in orig['bads'] if name in res['names']]
+    res['eig'] = None
+    res['eigvec'] = None
+    return res
+
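+# Sketch (assumes `cov` was loaded, e.g. with mne.read_cov); with the
+# default exclude='bads' the recorded bad channels are dropped:
+#
+#     cov_clean = pick_channels_cov(cov, include=[])
+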
+
+def _picks_by_type(info, meg_combined=False, ref_meg=False):
+    """Get data channel indices as separate list of tuples
+    Parameters
+    ----------
+    info : instance of mne.measuerment_info.Info
+        The info.
+    meg_combined : bool
+        Whether to return combined picks for grad and mag.
+    ref_meg : bool
+        If True include CTF / 4D reference channels
+
+    Returns
+    -------
+    picks_list : list of tuples
+        The list of tuples of picks and the type string.
+    """
+    from ..channels.channels import _contains_ch_type
+    picks_list = []
+    has_mag, has_grad, has_eeg = [_contains_ch_type(info, k)
+                                  for k in ('mag', 'grad', 'eeg')]
+    if has_mag and (meg_combined is not True or not has_grad):
+        picks_list.append(
+            ('mag', pick_types(info, meg='mag', eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_grad and (meg_combined is not True or not has_mag):
+        picks_list.append(
+            ('grad', pick_types(info, meg='grad', eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_mag and has_grad and meg_combined is True:
+        picks_list.append(
+            ('meg', pick_types(info, meg=True, eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_eeg:
+        picks_list.append(
+            ('eeg', pick_types(info, meg=False, eeg=True, stim=False,
+             ref_meg=ref_meg))
+        )
+    return picks_list
+
+
+def _check_excludes_includes(chs, info=None, allow_bads=False):
+    """Ensure that inputs to exclude/include are list-like or "bads".
+
+    Parameters
+    ----------
+    chs : list | tuple | str
+        The channels passed to include or exclude.
+    info : instance of Info | None
+        The measurement info. Only used when allow_bads is True and
+        chs is "bads".
+    allow_bads : bool
+        Allow the user to supply "bads" as a string for auto exclusion.
+
+    Returns
+    -------
+    chs : list
+        Channels to be included/excluded. If allow_bads and chs == "bads",
+        this will be the bad channels found in 'info'.
+    """
+    from .meas_info import Info
+    if not isinstance(chs, (list, tuple, np.ndarray)):
+        if allow_bads is True:
+            if not isinstance(info, Info):
+                raise ValueError('Supply an info object if allow_bads is true')
+            elif chs != 'bads':
+                raise ValueError('If chs is a string, it must be "bads"')
+            else:
+                chs = info['bads']
+        else:
+            raise ValueError(
+                'include/exclude must be list, tuple, ndarray, or "bads". ' +
+                'You provided type {0}'.format(type(chs)))
+    return chs
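+
+# Sketch: with allow_bads=True the string 'bads' resolves against info:
+#
+#     exclude = _check_excludes_includes('bads', info=info, allow_bads=True)
+#     # exclude is now info['bads']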
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proc_history.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proc_history.py
new file mode 100644
index 0000000..50d065f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proc_history.py
@@ -0,0 +1,290 @@
+# -*- coding: utf-8 -*-
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: Simplified BSD
+
+import numpy as np
+from scipy.sparse import csc_matrix
+import warnings
+
+from .open import read_tag
+from .tree import dir_tree_find
+from .write import (start_block, end_block, write_int, write_float,
+                    write_string, write_float_matrix, write_int_matrix,
+                    write_float_sparse_rcs, write_id)
+from .constants import FIFF
+from ..externals.six import text_type
+
+
+_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id',
+              'date', 'experimenter', 'creator']
+_proc_ids = [FIFF.FIFF_PARENT_FILE_ID,
+             FIFF.FIFF_BLOCK_ID,
+             FIFF.FIFF_PARENT_BLOCK_ID,
+             FIFF.FIFF_MEAS_DATE,
+             FIFF.FIFF_EXPERIMENTER,
+             FIFF.FIFF_CREATOR]
+_proc_writers = [write_id, write_id, write_id,
+                 write_int, write_string, write_string]
+_proc_casters = [dict, dict, dict,
+                 np.array, text_type, text_type]
+
+
+def _read_proc_history(fid, tree, info):
+    """Read processing history from fiff file
+
+    This function reads the SSS info, the CTC correction and the
+    calibrations from the SSS processing logs inside of a raw file
+    (cf. MaxFilter v2.2 manual (October 2010), page 21):
+
+    104 = {                 900 = proc. history
+      104 = {               901 = proc. record
+        103 = block ID
+        204 = date
+        212 = scientist
+        113 = creator program
+        104 = {             502 = SSS info
+          264 = SSS task
+          263 = SSS coord frame
+          265 = SSS origin
+          266 = SSS ins.order
+          267 = SSS outs.order
+          268 = SSS nr chnls
+          269 = SSS components
+          278 = SSS nfree
+          243 = HPI g limit    0.98
+          244 = HPI dist limit 0.005
+        105 = }             502 = SSS info
+        104 = {             504 = MaxST info
+          264 = SSS task
+          272 = SSST subspace correlation
+          279 = SSST buffer length
+        105 = }
+        104 = {             501 = CTC correction
+          103 = block ID
+          204 = date
+          113 = creator program
+          800 = CTC matrix
+          3417 = proj item chs
+        105 = }             501 = CTC correction
+        104 = {             503 = SSS finecalib.
+          270 = SSS cal chnls
+          271 = SSS cal coeff
+        105 = }             503 = SSS finecalib.
+      105 = }               901 = proc. record
+    105 = }                 900 = proc. history
+    """
+    proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY)
+    out = list()
+    if len(proc_history) > 0:
+        proc_history = proc_history[0]
+        proc_records = dir_tree_find(proc_history,
+                                     FIFF.FIFFB_PROCESSING_RECORD)
+        for proc_record in proc_records:
+            record = dict()
+            for i_ent in range(proc_record['nent']):
+                kind = proc_record['directory'][i_ent].kind
+                pos = proc_record['directory'][i_ent].pos
+                for key, id_, cast in zip(_proc_keys, _proc_ids,
+                                          _proc_casters):
+                    if kind == id_:
+                        tag = read_tag(fid, pos)
+                        record[key] = cast(tag.data)
+                        break
+                else:
+                    warnings.warn('Unknown processing history item %s' % kind)
+            record['max_info'] = _read_maxfilter_record(fid, proc_record)
+            smartshields = dir_tree_find(proc_record,
+                                         FIFF.FIFFB_SMARTSHIELD)
+            if len(smartshields) > 0:
+                # XXX should eventually populate this
+                ss = [dict() for _ in range(len(smartshields))]
+                record['smartshield'] = ss
+            if len(record['max_info']) > 0:
+                out.append(record)
+        if len(proc_records) > 0:
+            info['proc_history'] = out
+
+
+def _write_proc_history(fid, info):
+    """Write processing history to file"""
+    if 'proc_history' not in info:
+        return
+    if len(info['proc_history']) > 0:
+        start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
+        for record in info['proc_history']:
+            start_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
+            for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers):
+                if key in record:
+                    writer(fid, id_, record[key])
+            _write_maxfilter_record(fid, record['max_info'])
+            if 'smartshield' in record:
+                for ss in record['smartshield']:
+                    start_block(fid, FIFF.FIFFB_SMARTSHIELD)
+                    # XXX should eventually populate this
+                    end_block(fid, FIFF.FIFFB_SMARTSHIELD)
+            end_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
+        end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
+
+
+_sss_info_keys = ('job', 'frame', 'origin', 'in_order',
+                  'out_order', 'nchan', 'components', 'nfree',
+                  'hpi_g_limit', 'hpi_dist_limit')
+_sss_info_ids = (FIFF.FIFF_SSS_JOB,
+                 FIFF.FIFF_SSS_FRAME,
+                 FIFF.FIFF_SSS_ORIGIN,
+                 FIFF.FIFF_SSS_ORD_IN,
+                 FIFF.FIFF_SSS_ORD_OUT,
+                 FIFF.FIFF_SSS_NMAG,
+                 FIFF.FIFF_SSS_COMPONENTS,
+                 FIFF.FIFF_SSS_NFREE,
+                 FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
+                 FIFF.FIFF_HPI_FIT_DIST_LIMIT)
+_sss_info_writers = (write_int, write_int, write_float, write_int,
+                     write_int, write_int, write_int, write_int,
+                     write_float, write_float)
+_sss_info_casters = (int, int, np.array, int,
+                     int, int, np.array, int,
+                     float, float)
+
+_max_st_keys = ('job', 'subspcorr', 'buflen')
+_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR,
+               FIFF.FIFF_SSS_ST_LENGTH)
+_max_st_writers = (write_int, write_float, write_float)
+_max_st_casters = (int, float, float)
+
+_sss_ctc_keys = ('parent_file_id', 'block_id', 'parent_block_id',
+                 'date', 'creator', 'decoupler')
+_sss_ctc_ids = (FIFF.FIFF_PARENT_FILE_ID,
+                FIFF.FIFF_BLOCK_ID,
+                FIFF.FIFF_PARENT_BLOCK_ID,
+                FIFF.FIFF_MEAS_DATE,
+                FIFF.FIFF_CREATOR,
+                FIFF.FIFF_DECOUPLER_MATRIX)
+_sss_ctc_writers = (write_id, write_id, write_id,
+                    write_int, write_string, write_float_sparse_rcs)
+_sss_ctc_casters = (dict, dict, dict,
+                    np.array, text_type, csc_matrix)
+
+_sss_cal_keys = ('cal_chans', 'cal_corrs')
+_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS)
+_sss_cal_writers = (write_int_matrix, write_float_matrix)
+_sss_cal_casters = (np.array, np.array)
+
+
+def _read_maxfilter_record(fid, tree):
+    """Read maxfilter processing record from file"""
+    sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO)  # 502
+    sss_info = dict()
+    if len(sss_info_block) > 0:
+        sss_info_block = sss_info_block[0]
+        for i_ent in range(sss_info_block['nent']):
+            kind = sss_info_block['directory'][i_ent].kind
+            pos = sss_info_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_info_keys, _sss_info_ids,
+                                      _sss_info_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_info[key] = cast(tag.data)
+                    break
+
+    max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO)  # 504
+    max_st = dict()
+    if len(max_st_block) > 0:
+        max_st_block = max_st_block[0]
+        for i_ent in range(max_st_block['nent']):
+            kind = max_st_block['directory'][i_ent].kind
+            pos = max_st_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_max_st_keys, _max_st_ids,
+                                      _max_st_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    max_st[key] = cast(tag.data)
+                    break
+
+    sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER)  # 501
+    sss_ctc = dict()
+    if len(sss_ctc_block) > 0:
+        sss_ctc_block = sss_ctc_block[0]
+        for i_ent in range(sss_ctc_block['nent']):
+            kind = sss_ctc_block['directory'][i_ent].kind
+            pos = sss_ctc_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids,
+                                      _sss_ctc_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_ctc[key] = cast(tag.data)
+                    break
+            else:
+                if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST:
+                    tag = read_tag(fid, pos)
+                    sss_ctc['proj_items_chs'] = tag.data.split(':')
+
+    sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL)  # 503
+    sss_cal = dict()
+    if len(sss_cal_block) > 0:
+        sss_cal_block = sss_cal_block[0]
+        for i_ent in range(sss_cal_block['nent']):
+            kind = sss_cal_block['directory'][i_ent].kind
+            pos = sss_cal_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids,
+                                      _sss_cal_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_cal[key] = cast(tag.data)
+                    break
+
+    max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc,
+                    sss_cal=sss_cal, max_st=max_st)
+    return max_info
+
+
+def _write_maxfilter_record(fid, record):
+    """Write maxfilter processing record to file"""
+    sss_info = record['sss_info']
+    if len(sss_info) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_INFO)
+        for key, id_, writer in zip(_sss_info_keys, _sss_info_ids,
+                                    _sss_info_writers):
+            if key in sss_info:
+                writer(fid, id_, sss_info[key])
+        end_block(fid, FIFF.FIFFB_SSS_INFO)
+
+    max_st = record['max_st']
+    if len(max_st) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_ST_INFO)
+        for key, id_, writer in zip(_max_st_keys, _max_st_ids,
+                                    _max_st_writers):
+            if key in max_st:
+                writer(fid, id_, max_st[key])
+        end_block(fid, FIFF.FIFFB_SSS_ST_INFO)
+
+    sss_ctc = record['sss_ctc']
+    if len(sss_ctc) > 0:  # dict has entries
+        start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
+        for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids,
+                                    _sss_ctc_writers):
+            if key in sss_ctc:
+                writer(fid, id_, sss_ctc[key])
+        if 'proj_items_chs' in sss_ctc:
+            write_string(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
+                         ':'.join(sss_ctc['proj_items_chs']))
+        end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
+
+    sss_cal = record['sss_cal']
+    if len(sss_cal) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_CAL)
+        for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids,
+                                    _sss_cal_writers):
+            if key in sss_cal:
+                writer(fid, id_, sss_cal[key])
+        end_block(fid, FIFF.FIFFB_SSS_CAL)
+
+
+def _get_sss_rank(sss):
+    """Get SSS rank"""
+    inside = sss['sss_info']['in_order']
+    nfree = (inside + 1) ** 2 - 1
+    nfree -= (len(sss['sss_info']['components'][:nfree]) -
+              sss['sss_info']['components'][:nfree].sum())
+    return nfree
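+
+# Sketch (assumes raw.info['proc_history'] was populated, e.g. by
+# _read_proc_history above):
+#
+#     rank = _get_sss_rank(raw.info['proc_history'][0]['max_info'])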
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proj.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proj.py
new file mode 100644
index 0000000..0ab52e2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/proj.py
@@ -0,0 +1,723 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+from math import sqrt
+import numpy as np
+from scipy import linalg
+from itertools import count
+import warnings
+
+from .tree import dir_tree_find
+from .tag import find_tag
+from .constants import FIFF
+from .pick import pick_types
+from .write import (write_int, write_float, write_string, write_name_list,
+                    write_float_matrix, end_block, start_block)
+from ..utils import logger, verbose
+from ..externals.six import string_types
+
+
+class Projection(dict):
+    """Projection vector
+
+    A basic class providing a readable repr for projection vectors.
+    """
+    def __repr__(self):
+        s = "%s" % self['desc']
+        s += ", active : %s" % self['active']
+        s += ", n_channels : %s" % self['data']['ncol']
+        return "<Projection  |  %s>" % s
+
+
+class ProjMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+
+    Notes
+    -----
+    This mixin adds a proj attribute as a property to data containers.
+    It is True if at least one proj is present and all of them are active.
+    The projs might not be applied yet if data are not preloaded. In
+    this case it's the _projector attribute that does the job.
+    If a private _data attribute is present then the projs applied
+    to it are the ones marked as active.
+
+    A proj parameter passed in the constructor of Raw or Epochs calls
+    apply_proj, so afterwards the .proj attribute is True.
+
+    Once you have applied the projs, they stay active for the rest of
+    the pipeline.
+
+    The suggested pipeline is proj=True in Epochs (it is cheaper than
+    applying the projectors to Raw).
+
+    When you use delayed SSP in Epochs, the projs are applied when the
+    get_data() method is called. They are not applied to evoked._data
+    unless you call apply_proj(). The rationale is that epoch rejection
+    should be done on the projected data even though the data are not
+    stored in projected form.
+    """
+    @property
+    def proj(self):
+        return (len(self.info['projs']) > 0 and
+                all(p['active'] for p in self.info['projs']))
+
+    def add_proj(self, projs, remove_existing=False):
+        """Add SSP projection vectors
+
+        Parameters
+        ----------
+        projs : list
+            List with projection vectors.
+        remove_existing : bool
+            Remove the projection vectors currently in the file.
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+            The data container.
+        """
+        if isinstance(projs, Projection):
+            projs = [projs]
+
+        if (not isinstance(projs, list) or
+                not all(isinstance(p, Projection) for p in projs)):
+            raise ValueError('Only projs can be added. You supplied '
+                             'something else.')
+
+        # mark proj as inactive, as they have not been applied
+        projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
+        if remove_existing:
+            # we cannot remove the proj if they are active
+            if any(p['active'] for p in self.info['projs']):
+                raise ValueError('Cannot remove projectors that have '
+                                 'already been applied')
+            self.info['projs'] = projs
+        else:
+            self.info['projs'].extend(projs)
+
+        return self
+
+    def apply_proj(self):
+        """Apply the signal space projection (SSP) operators to the data.
+
+        Notes
+        -----
+        Once the projectors have been applied, they can no longer be
+        removed. It is usually not recommended to apply the projectors at
+        too early stages, as they are applied automatically later on
+        (e.g. when computing inverse solutions).
+        Hint: using the copy method individual projection vectors
+        can be tested without affecting the original data.
+        With evoked data, consider the following example::
+
+            projs_a = mne.read_proj('proj_a.fif')
+            projs_b = mne.read_proj('proj_b.fif')
+            # add the first, copy, apply and see ...
+            evoked.add_proj(projs_a).copy().apply_proj().plot()
+            # add the second, copy, apply and see ...
+            evoked.add_proj(projs_b).copy().apply_proj().plot()
+            # drop the first and see again
+            evoked.copy().del_proj(0).apply_proj().plot()
+            evoked.apply_proj()  # finally keep both
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+            The instance.
+        """
+        from ..epochs import _BaseEpochs
+        from .base import _BaseRaw
+        if self.info['projs'] is None or len(self.info['projs']) == 0:
+            logger.info('No projector specified for this dataset. '
+                        'Please consider the method self.add_proj.')
+            return self
+
+        # Exit delayed mode if you apply proj
+        if isinstance(self, _BaseEpochs) and self._do_delayed_proj:
+            logger.info('Leaving delayed SSP mode.')
+            self._do_delayed_proj = False
+
+        if all(p['active'] for p in self.info['projs']):
+            logger.info('Projections have already been applied. '
+                        'Setting proj attribute to True.')
+            return self
+
+        _projector, info = setup_proj(deepcopy(self.info), activate=True,
+                                      verbose=self.verbose)
+        # let's not raise a RuntimeError here, otherwise interactive
+        # plotting would not be fun
+        if _projector is None:
+            logger.info('The projections don\'t apply to these data.'
+                        ' Doing nothing.')
+            return self
+
+        self._projector, self.info = _projector, info
+        if isinstance(self, _BaseRaw):
+            if self.preload:
+                self._data = np.dot(self._projector, self._data)
+        elif isinstance(self, _BaseEpochs):
+            if self.preload:
+                for ii, e in enumerate(self._data):
+                    self._data[ii] = self._project_epoch(e)
+            else:
+                self.load_data()  # will automatically apply
+        else:  # Evoked
+            self.data = np.dot(self._projector, self.data)
+        logger.info('SSP projectors applied...')
+        return self
+
+    def del_proj(self, idx):
+        """Remove SSP projection vector
+
+        Note: The projection vector can only be removed if it is inactive
+              (has not been applied to the data).
+
+        Parameters
+        ----------
+        idx : int
+            Index of the projector to remove.
+
+        Returns
+        -------
+        self : instance of Raw | Epochs | Evoked
+        """
+        if self.info['projs'][idx]['active']:
+            raise ValueError('Cannot remove projectors that have already '
+                             'been applied')
+
+        self.info['projs'].pop(idx)
+
+        return self
+
+    def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
+        """Plot SSP vector
+
+        Parameters
+        ----------
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted. If None
+            (default), it will return all channel types present. If a list of
+            ch_types is provided, it will return multiple figures.
+        layout : None | Layout | List of Layouts
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct
+            layout file is inferred from the data; if no appropriate layout
+            file was found, the layout is automatically generated from the
+            sensor locations. Or a list of Layout if projections
+            are from different sensor types.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of projectors. If instance of Axes,
+            there must be only one projector. Defaults to None.
+
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure distributing one image per channel across sensor topography.
+        """
+        if self.info['projs'] is not None and len(self.info['projs']) != 0:
+            from ..viz.topomap import plot_projs_topomap
+            from ..channels.layout import find_layout
+            if layout is None:
+                layout = []
+                if ch_type is None:
+                    ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
+                elif isinstance(ch_type, string_types):
+                    ch_type = [ch_type]
+                for ch in ch_type:
+                    if ch in self:
+                        layout.append(find_layout(self.info, ch, exclude=[]))
+                    else:
+                        err = 'Channel type %s is not found in info.' % ch
+                        warnings.warn(err)
+            fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
+        else:
+            raise ValueError("Info is missing projs. Nothing to plot.")
+
+        return fig
+
+
+def _proj_equal(a, b):
+    """ Test if two projectors are equal """
+
+    equal = (a['active'] == b['active'] and
+             a['kind'] == b['kind'] and
+             a['desc'] == b['desc'] and
+             a['data']['col_names'] == b['data']['col_names'] and
+             a['data']['row_names'] == b['data']['row_names'] and
+             a['data']['ncol'] == b['data']['ncol'] and
+             a['data']['nrow'] == b['data']['nrow'] and
+             np.all(a['data']['data'] == b['data']['data']))
+    return equal
+
+
+@verbose
+def _read_proj(fid, node, verbose=None):
+    """Read spatial projections from a FIF file.
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor of the open file.
+    node : tree node
+        The node of the tree where to look.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The list of projections.
+    """
+    projs = list()
+
+    #   Locate the projection data
+    nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
+    if len(nodes) == 0:
+        return projs
+
+    tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
+    if tag is not None:
+        global_nchan = int(tag.data)
+
+    items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
+    for i in range(len(items)):
+
+        #   Find all desired tags in one item
+        item = items[i]
+        tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
+        if tag is not None:
+            nchan = int(tag.data)
+        else:
+            nchan = global_nchan
+
+        tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
+        if tag is not None:
+            desc = tag.data
+        else:
+            tag = find_tag(fid, item, FIFF.FIFF_NAME)
+            if tag is not None:
+                desc = tag.data
+            else:
+                raise ValueError('Projection item description missing')
+
+        # XXX : is this useful ?
+        # tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
+        # if tag is not None:
+        #     namelist = tag.data
+        # else:
+        #     raise ValueError('Projection item channel list missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
+        if tag is not None:
+            kind = int(tag.data)
+        else:
+            raise ValueError('Projection item kind missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
+        if tag is not None:
+            nvec = int(tag.data)
+        else:
+            raise ValueError('Number of projection vectors not specified')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
+        if tag is not None:
+            names = tag.data.split(':')
+        else:
+            raise ValueError('Projection item channel list missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
+        if tag is not None:
+            data = tag.data
+        else:
+            raise ValueError('Projection item data missing')
+
+        tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
+        if tag is not None:
+            active = bool(tag.data)
+        else:
+            active = False
+
+        # handle the case when data is transposed for some reason
+        if data.shape[0] == len(names) and data.shape[1] == nvec:
+            data = data.T
+
+        if data.shape[1] != len(names):
+            raise ValueError('Number of channel names does not match the '
+                             'size of data matrix')
+
+        #   Use exactly the same fields in data as in a named matrix
+        one = Projection(kind=kind, active=active, desc=desc,
+                         data=dict(nrow=nvec, ncol=nchan, row_names=None,
+                                   col_names=names, data=data))
+
+        projs.append(one)
+
+    if len(projs) > 0:
+        logger.info('    Read a total of %d projection items:' % len(projs))
+        for k in range(len(projs)):
+            if projs[k]['active']:
+                misc = 'active'
+            else:
+                misc = ' idle'
+            logger.info('        %s (%d x %d) %s'
+                        % (projs[k]['desc'], projs[k]['data']['nrow'],
+                           projs[k]['data']['ncol'], misc))
+
+    return projs
+
+
+###############################################################################
+# Write
+
+def _write_proj(fid, projs):
+    """Write a projection operator to a file.
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor of the open file.
+    projs : list
+        The list of projection items to write.
+    """
+    start_block(fid, FIFF.FIFFB_PROJ)
+
+    for proj in projs:
+        start_block(fid, FIFF.FIFFB_PROJ_ITEM)
+        write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
+        write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
+                        proj['data']['col_names'])
+        write_string(fid, FIFF.FIFF_NAME, proj['desc'])
+        write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
+        if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
+            write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
+
+        write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
+        write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
+        write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
+                           proj['data']['data'])
+        end_block(fid, FIFF.FIFFB_PROJ_ITEM)
+
+    end_block(fid, FIFF.FIFFB_PROJ)
+
+
+###############################################################################
+# Utils
+
+def make_projector(projs, ch_names, bads=[], include_active=True):
+    """Create an SSP operator from SSP projection vectors
+
+    Parameters
+    ----------
+    projs : list
+        List of projection vectors.
+    ch_names : list of strings
+        List of channels to include in the projection matrix.
+    bads : list of strings
+        Some bad channels to exclude. If bad channels were marked
+        in the raw file when projs were calculated using mne-python,
+        they should not need to be included here as they will
+        have been automatically omitted from the projectors.
+    include_active : bool
+        Also include projectors that are already active.
+
+    Returns
+    -------
+    proj : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    nproj : int
+        The number of items in the projector.
+    U : array
+        The orthogonal basis of the projection vectors (optional).
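+
+    Examples
+    --------
+    A minimal sketch; ``raw`` is assumed to be an already-loaded Raw
+    instance with projectors in its info:
+
+    >>> proj, nproj, U = make_projector(raw.info['projs'],
+    ...                                 raw.info['ch_names'],
+    ...                                 raw.info['bads'])  # doctest: +SKIP
+    >>> clean_data = np.dot(proj, raw[:, :][0])  # apply SSP  # doctest: +SKIP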
+    """
+    nchan = len(ch_names)
+    if nchan == 0:
+        raise ValueError('No channel names specified')
+
+    default_return = (np.eye(nchan, nchan), 0, [])
+
+    #   Check trivial cases first
+    if projs is None:
+        return default_return
+
+    nvec = 0
+    nproj = 0
+    for p in projs:
+        if not p['active'] or include_active:
+            nproj += 1
+            nvec += p['data']['nrow']
+
+    if nproj == 0:
+        return default_return
+
+    #   Pick the appropriate entries
+    vecs = np.zeros((nchan, nvec))
+    nvec = 0
+    nonzero = 0
+    for k, p in enumerate(projs):
+        if not p['active'] or include_active:
+            if (len(p['data']['col_names']) !=
+                    len(np.unique(p['data']['col_names']))):
+                raise ValueError('Channel name list in projection item %d'
+                                 ' contains duplicate items' % k)
+
+            # Get the two selection vectors to pick correct elements from
+            # the projection vectors omitting bad channels
+            sel = []
+            vecsel = []
+            for c, name in enumerate(ch_names):
+                if name in p['data']['col_names'] and name not in bads:
+                    sel.append(c)
+                    vecsel.append(p['data']['col_names'].index(name))
+
+            # If there is something to pick, pick it
+            if len(sel) > 0:
+                nrow = p['data']['nrow']
+                vecs[sel, nvec:nvec + nrow] = p['data']['data'][:, vecsel].T
+
+            # Rescale for better detection of small singular values
+            for v in range(p['data']['nrow']):
+                psize = sqrt(np.sum(vecs[:, nvec + v] * vecs[:, nvec + v]))
+                if psize > 0:
+                    vecs[:, nvec + v] /= psize
+                    nonzero += 1
+
+            nvec += p['data']['nrow']
+
+    #   Check whether all of the vectors are exactly zero
+    if nonzero == 0:
+        return default_return
+
+    # Reorthogonalize the vectors
+    U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
+
+    # Throw away the linearly dependent vectors
+    nproj = np.sum((S / S[0]) > 1e-2)
+    U = U[:, :nproj]
+
+    # Here is the celebrated result
+    proj = np.eye(nchan, nchan) - np.dot(U, U.T)
+
+    return proj, nproj, U
+
+
+def make_projector_info(info, include_active=True):
+    """Make an SSP operator using the measurement info
+
+    Calls make_projector on good channels.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    include_active : bool
+        Also include projectors that are already active.
+
+    Returns
+    -------
+    proj : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    nproj : int
+        The number of items in the projector.
+    """
+    proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
+                                    info['bads'], include_active)
+    return proj, nproj
+
+
+ at verbose
+def activate_proj(projs, copy=True, verbose=None):
+    """Set all projections to active
+
+    Useful before passing them to make_projector.
+
+    Parameters
+    ----------
+    projs : list
+        The projectors.
+    copy : bool
+        If True, operate on a copy of projs; otherwise modify them in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The projectors.
+    """
+    if copy:
+        projs = deepcopy(projs)
+
+    #   Activate the projection items
+    for proj in projs:
+        proj['active'] = True
+
+    logger.info('%d projection items activated' % len(projs))
+
+    return projs
+
+
+ at verbose
+def deactivate_proj(projs, copy=True, verbose=None):
+    """Set all projections to inactive
+
+    Useful before saving raw data without projectors applied.
+
+    Parameters
+    ----------
+    projs : list
+        The projectors.
+    copy : bool
+        If True, operate on a copy of projs; otherwise modify them in place.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        The projectors.
+    """
+    if copy:
+        projs = deepcopy(projs)
+
+    #   Deactivate the projection items
+    for proj in projs:
+        proj['active'] = False
+
+    logger.info('%d projection items deactivated' % len(projs))
+
+    return projs
+
+
+ at verbose
+def make_eeg_average_ref_proj(info, activate=True, verbose=None):
+    """Create an EEG average reference SSP projection vector
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    activate : bool
+        If True projections are activated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eeg_proj : instance of Projection
+        The SSP/PCA projector.
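+
+    Examples
+    --------
+    A minimal sketch; ``raw`` is assumed to be a Raw instance with EEG
+    channels:
+
+    >>> eeg_proj = make_eeg_average_ref_proj(raw.info)  # doctest: +SKIP
+    >>> raw.add_proj(eeg_proj)  # doctest: +SKIP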
+    """
+    if info.get('custom_ref_applied', False):
+        raise RuntimeError('Cannot add an average EEG reference projection '
+                           'since a custom reference has been applied to the '
+                           'data earlier.')
+
+    logger.info("Adding average EEG reference projection.")
+    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+    ch_names = info['ch_names']
+    eeg_names = [ch_names[k] for k in eeg_sel]
+    n_eeg = len(eeg_sel)
+    if n_eeg == 0:
+        raise ValueError('Cannot create EEG average reference projector '
+                         '(no EEG data found)')
+    vec = np.ones((1, n_eeg)) / n_eeg
+    eeg_proj_data = dict(col_names=eeg_names, row_names=None,
+                         data=vec, nrow=1, ncol=n_eeg)
+    eeg_proj = Projection(active=activate, data=eeg_proj_data,
+                          desc='Average EEG reference',
+                          kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
+    return eeg_proj
+
+
+def _has_eeg_average_ref_proj(projs):
+    """Determine if a list of projectors has an average EEG ref"""
+    for proj in projs:
+        if (proj['desc'] == 'Average EEG reference' or
+                proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
+            return True
+    return False
+
+
+def _needs_eeg_average_ref_proj(info):
+    """Determine if the EEG needs an averge EEG reference
+
+    This returns True if no custom reference has been applied and no average
+    reference projection is present in the list of projections.
+    """
+    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+    return (len(eeg_sel) > 0 and
+            not info['custom_ref_applied'] and
+            not _has_eeg_average_ref_proj(info['projs']))
+
+
+ at verbose
+def setup_proj(info, add_eeg_ref=True, activate=True,
+               verbose=None):
+    """Set up projection for Raw and Epochs
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    activate : bool
+        If True projections are activated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projector : array of shape [n_channels, n_channels]
+        The projection operator to apply to the data.
+    info : dict
+        The modified measurement info (Warning: info is modified in place).
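+
+    Examples
+    --------
+    A minimal sketch; ``raw`` is assumed to be an already-loaded Raw
+    instance (note that ``raw.info`` is modified in place):
+
+    >>> projector, info = setup_proj(raw.info)  # doctest: +SKIP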
+    """
+    # Add an average EEG reference proj if necessary
+    if _needs_eeg_average_ref_proj(info) and add_eeg_ref:
+        eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
+        info['projs'].append(eeg_proj)
+
+    # Create the projector
+    projector, nproj = make_projector_info(info)
+    if nproj == 0:
+        if verbose:
+            logger.info('The projection vectors do not apply to these '
+                        'channels')
+        projector = None
+    else:
+        logger.info('Created an SSP operator (subspace dimension = %d)'
+                    % nproj)
+
+    # The projection items have been activated
+    if activate:
+        info['projs'] = activate_proj(info['projs'], copy=False)
+
+    return projector, info
+
+
+def _uniquify_projs(projs):
+    """Aux function"""
+    final_projs = []
+    for proj in projs:  # flatten
+        if not any(_proj_equal(p, proj) for p in final_projs):
+            final_projs.append(proj)
+
+    my_count = count(len(final_projs))
+
+    def sorter(x):
+        """sort in a nice way"""
+        digits = [s for s in x['desc'] if s.isdigit()]
+        if digits:
+            sort_idx = int(digits[-1])
+        else:
+            sort_idx = next(my_count)
+        return (sort_idx, x['desc'])
+
+    return sorted(final_projs, key=sorter)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/reference.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/reference.py
new file mode 100644
index 0000000..1fc0455
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/reference.py
@@ -0,0 +1,387 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .constants import FIFF
+from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
+from .pick import pick_types
+from .base import _BaseRaw
+from ..evoked import Evoked
+from ..epochs import Epochs
+from ..utils import logger
+
+
+def _apply_reference(inst, ref_from, ref_to=None, copy=True):
+    """Apply a custom EEG referencing scheme.
+
+    Calculates a reference signal by taking the mean of a set of channels and
+    applies the reference to another set of channels. Input data can be in the
+    form of Raw, Epochs or Evoked.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Data containing the EEG channels and reference channel(s).
+    ref_from : list of str
+        The names of the channels to use to construct the reference. If an
+        empty list is specified, the data is assumed to already have a proper
+        reference and MNE will not attempt any re-referencing of the data.
+    ref_to : list of str | None
+        The names of the channels to apply the reference to. By default,
+        all EEG channels are chosen.
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        The data with EEG channels rereferenced.
+    ref_data : array, shape (n_times,)
+        Array of reference data subtracted from EEG channels.
+
+    Notes
+    -----
+    1. Do not use this function to apply an average reference. By default, an
+       average reference projection has already been added upon loading raw
+       data.
+
+    2. If the reference is applied to any EEG channels, this function removes
+       any pre-existing average reference projections.
+
+    3. During source localization, the EEG signal should have an average
+       reference.
+
+    4. The data must be preloaded.
+
+    See Also
+    --------
+    set_eeg_reference : Convenience function for creating an EEG reference.
+    set_bipolar_reference : Convenience function for creating a bipolar
+                            reference.
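+
+    Examples
+    --------
+    A minimal sketch, re-referencing all EEG channels to the mean of two
+    hypothetical mastoid channels; ``raw`` is assumed to be preloaded and
+    to contain channels named 'M1' and 'M2':
+
+    >>> raw_ref, ref_data = _apply_reference(raw, ['M1', 'M2'])  # doctest: +SKIP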
+    """
+    # Check to see that data is preloaded
+    if not isinstance(inst, Evoked) and not inst.preload:
+        raise RuntimeError('Data needs to be preloaded. Use '
+                           'preload=True (or string) in the constructor.')
+
+    eeg_idx = pick_types(inst.info, eeg=True, meg=False, ref_meg=False)
+
+    if ref_to is None:
+        ref_to = [inst.ch_names[i] for i in eeg_idx]
+
+    if copy:
+        inst = inst.copy()
+
+    # After referencing, existing SSPs might not be valid anymore.
+    for i, proj in enumerate(inst.info['projs']):
+        if (not proj['active'] and
+            len([ch for ch in (ref_from + ref_to)
+                 if ch in proj['data']['col_names']]) > 0):
+
+            # Remove any average reference projections, apply any other types
+            if proj['desc'] == 'Average EEG reference' or \
+                    proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF:
+                logger.info('Removing existing average EEG reference '
+                            'projection.')
+                del inst.info['projs'][i]
+            else:
+                logger.info(
+                    'Inactive signal space projection (SSP) operators are '
+                    'present that operate on sensors involved in the current '
+                    'referencing scheme. Applying them now. Be aware that '
+                    'after re-referencing, these operators will be invalid.')
+                inst.apply_proj()
+            break
+
+    ref_from = [inst.ch_names.index(ch) for ch in ref_from]
+    ref_to = [inst.ch_names.index(ch) for ch in ref_to]
+
+    if isinstance(inst, Evoked):
+        data = inst.data
+    else:
+        data = inst._data
+
+    # Compute reference
+    if len(ref_from) > 0:
+        ref_data = data[..., ref_from, :].mean(-2)
+
+        if isinstance(inst, Epochs):
+            data[:, ref_to, :] -= ref_data[:, np.newaxis, :]
+        else:
+            data[ref_to] -= ref_data
+    else:
+        ref_data = None
+
+    # If the reference touches EEG electrodes, note in the info that a non-CAR
+    # has been applied.
+    if len(np.intersect1d(ref_to, eeg_idx)) > 0:
+        inst.info['custom_ref_applied'] = True
+
+    return inst, ref_data
+
+
+def add_reference_channels(inst, ref_channels, copy=True):
+    """Add reference channels to data that consists of all zeros.
+
+    Adds reference channels to data that were not included during recording.
+    This is useful when you need to re-reference your data to different
+    channel. These added channels will consist of all zeros.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Instance of Raw or Epochs with EEG channels and reference channel(s).
+    ref_channels : str | list of str
+        Name of the electrode(s) which served as the reference in the
+        recording. If a name is provided, a corresponding channel is added
+        and its data is set to 0. This is useful for later re-referencing.
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with added EEG reference channels.
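+
+    Examples
+    --------
+    A minimal sketch; the recording reference is assumed to have been a
+    channel named 'Cz' that was not saved with the data:
+
+    >>> raw = add_reference_channels(raw, 'Cz')  # doctest: +SKIP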
+    """
+    # Check to see that data is preloaded
+    if not isinstance(inst, Evoked) and not inst.preload:
+        raise RuntimeError('Data needs to be preloaded.')
+    if isinstance(ref_channels, str):
+        ref_channels = [ref_channels]
+    elif not isinstance(ref_channels, list):
+        raise ValueError("`ref_channels` should be either str or list of str. "
+                         "%s was provided." % type(ref_channels))
+    for ch in ref_channels:
+        if ch in inst.info['ch_names']:
+            raise ValueError("Channel %s already specified in inst." % ch)
+
+    if copy:
+        inst = inst.copy()
+
+    if isinstance(inst, Evoked):
+        data = inst.data
+        refs = np.zeros((len(ref_channels), data.shape[1]))
+        data = np.vstack((data, refs))
+        inst.data = data
+    elif isinstance(inst, _BaseRaw):
+        data = inst._data
+        refs = np.zeros((len(ref_channels), data.shape[1]))
+        data = np.vstack((data, refs))
+        inst._data = data
+    elif isinstance(inst, Epochs):
+        data = inst._data
+        x, y, z = data.shape
+        refs = np.zeros((x * len(ref_channels), z))
+        data = np.vstack((data.reshape((x * y, z), order='F'), refs))
+        data = data.reshape(x, y + len(ref_channels), z, order='F')
+        inst._data = data
+    else:
+        raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
+                        % type(inst))
+    nchan = len(inst.info['ch_names'])
+    for ch in ref_channels:
+        chan_info = {'ch_name': ch,
+                     'coil_type': FIFF.FIFFV_COIL_EEG,
+                     'kind': FIFF.FIFFV_EEG_CH,
+                     'logno': nchan + 1,
+                     'scanno': nchan + 1,
+                     'cal': 1,
+                     'range': 1.,
+                     'unit_mul': 0.,
+                     'unit': FIFF.FIFF_UNIT_V,
+                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
+                     'loc': np.zeros(12)}
+        inst.info['chs'].append(chan_info)
+    inst.info['ch_names'].extend(ref_channels)
+    inst.info['nchan'] = len(inst.info['ch_names'])
+    if isinstance(inst, _BaseRaw):
+        inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
+
+    return inst
+
+
+def set_eeg_reference(inst, ref_channels=None, copy=True):
+    """Rereference EEG channels to new reference channel(s).
+
+    If multiple reference channels are specified, they will be averaged. If
+    no reference channels are specified, an average reference will be applied.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Instance of Raw or Epochs with EEG channels and reference channel(s).
+    ref_channels : list of str | None
+        The names of the channels to use to construct the reference. If None is
+        specified here, an average reference will be applied in the form of an
+        SSP projector. If an empty list is specified, the data is assumed to
+        already have a proper reference and MNE will not attempt any
+        re-referencing of the data. Defaults to an average reference (None).
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with EEG channels re-referenced.
+    ref_data : array
+        Array of reference data subtracted from EEG channels.
+
+    Notes
+    -----
+    1. If a reference is requested that is not the average reference, this
+       function removes any pre-existing average reference projections.
+
+    2. During source localization, the EEG signal should have an average
+       reference.
+
+    3. In order to apply a reference other than an average reference, the data
+       must be preloaded.
+
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    set_bipolar_reference : Convenience function for creating bipolar
+                            references.
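+
+    Examples
+    --------
+    A minimal sketch; ``raw`` is assumed to be preloaded, and the mastoid
+    channel names 'M1' and 'M2' are hypothetical:
+
+    >>> raw_car, _ = set_eeg_reference(raw)  # average reference  # doctest: +SKIP
+    >>> raw_m, ref_data = set_eeg_reference(raw, ['M1', 'M2'])  # doctest: +SKIP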
+    """
+    if ref_channels is None:
+        # CAR requested
+        if _has_eeg_average_ref_proj(inst.info['projs']):
+            logger.warning('An average reference projection was already '
+                           'added. The data has been left untouched.')
+            return inst, None
+        else:
+            inst.info['custom_ref_applied'] = False
+            inst.add_proj(make_eeg_average_ref_proj(inst.info, activate=False))
+            return inst, None
+    else:
+        logger.info('Applying a custom EEG reference.')
+        return _apply_reference(inst, ref_channels, copy=copy)
+
+
+def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
+                          copy=True):
+    """Rereference selected channels using a bipolar referencing scheme.
+
+    A bipolar reference takes the difference between two channels (the anode
+    minus the cathode) and adds it as a new virtual channel. The original
+    channels will be dropped.
+
+    Multiple anodes and cathodes can be specified, in which case multiple
+    virtual channels will be created. The 1st cathode will be subtracted
+    from the 1st anode, the 2nd cathode from the 2nd anode, etc.
+
+    By default, the virtual channels will be annotated with channel info of
+    the anodes, their locations set to (0, 0, 0) and coil types set to
+    EEG_BIPOLAR.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Data containing the unreferenced channels.
+    anode : str | list of str
+        The name(s) of the channel(s) to use as anode in the bipolar reference.
+    cathode : str | list of str
+        The name(s) of the channel(s) to use as cathode in the bipolar
+        reference.
+    ch_name : str | list of str | None
+        The channel name(s) for the virtual channel(s) containing the resulting
+        signal. By default, bipolar channels are named after the anode and
+        cathode, but it is recommended to supply a more meaningful name.
+    ch_info : dict | list of dict | None
+        This parameter can be used to supply a dictionary (or a dictionary for
+        each bipolar channel) containing channel information to merge in,
+        overwriting the default values. Defaults to None.
+    copy : bool
+        Whether to operate on a copy of the data (True) or modify it in-place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with the specified channels re-referenced.
+
+    Notes
+    -----
+    1. If the anodes contain any EEG channels, this function removes
+       any pre-existing average reference projections.
+
+    2. During source localization, the EEG signal should have an average
+       reference.
+
+    3. The data must be preloaded.
+
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    set_eeg_reference : Convenience function for creating an EEG reference.
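+
+    Examples
+    --------
+    A minimal sketch, combining two hypothetical EOG electrodes into a
+    single bipolar channel; ``raw`` is assumed to be preloaded:
+
+    >>> raw = set_bipolar_reference(raw, 'EOG_L', 'EOG_R',
+    ...                             ch_name='HEOG')  # doctest: +SKIP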
+    """
+    if not isinstance(anode, list):
+        anode = [anode]
+
+    if not isinstance(cathode, list):
+        cathode = [cathode]
+
+    if len(anode) != len(cathode):
+        raise ValueError('Number of anodes must equal the number of cathodes.')
+
+    if ch_name is None:
+        ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)]
+    elif not isinstance(ch_name, list):
+        ch_name = [ch_name]
+    if len(ch_name) != len(anode):
+        raise ValueError('Number of channel names must equal the number of '
+                         'anodes/cathodes.')
+
+    # Check for duplicate channel names (it is allowed to give the name of the
+    # anode or cathode channel, as they will be replaced).
+    for ch, a, c in zip(ch_name, anode, cathode):
+        if ch not in [a, c] and ch in inst.ch_names:
+            raise ValueError('There is already a channel named "%s", please '
+                             'specify a different name for the bipolar '
+                             'channel using the ch_name parameter.' % ch)
+
+    if ch_info is None:
+        ch_info = [{} for an in anode]
+    elif not isinstance(ch_info, list):
+        ch_info = [ch_info]
+    if len(ch_info) != len(anode):
+        raise ValueError('Number of channel info dictionaries must equal the '
+                         'number of anodes/cathodes.')
+
+    # Merge specified and anode channel information dictionaries
+    new_ch_info = []
+    for an, ci in zip(anode, ch_info):
+        new_info = inst.info['chs'][inst.ch_names.index(an)].copy()
+
+        # Set channel location and coil type
+        new_info['loc'] = np.zeros(12)
+        new_info['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
+
+        new_info.update(ci)
+        new_ch_info.append(new_info)
+
+    if copy:
+        inst = inst.copy()
+
+    # Perform bipolar referencing
+    for an, ca, name, info in zip(anode, cathode, ch_name, new_ch_info):
+        inst, _ = _apply_reference(inst, [ca], [an], copy=False)
+        an_idx = inst.ch_names.index(an)
+        inst.info['chs'][an_idx] = info
+        inst.info['chs'][an_idx]['ch_name'] = name
+        inst.info['ch_names'][an_idx] = name
+        logger.info('Bipolar channel added as "%s".' % name)
+
+    # Drop cathode channels
+    inst.drop_channels(cathode)
+
+    return inst
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tag.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tag.py
new file mode 100644
index 0000000..1f95733
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tag.py
@@ -0,0 +1,518 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+import gzip
+import numpy as np
+
+from .constants import FIFF
+
+from ..externals.six import text_type
+from ..externals.jdcal import jd2jcal
+
+
+class Tag(object):
+    """Tag in FIF tree structure
+
+    Parameters
+    ----------
+    kind : int
+        Kind of Tag.
+    type_ : int
+        Type of Tag.
+    size : int
+        Size in bytes.
+    next : int
+        Position of next Tag.
+    pos : int
+        Position of Tag in the original file.
+    """
+
+    def __init__(self, kind, type_, size, next, pos=None):
+        self.kind = int(kind)
+        self.type = int(type_)
+        self.size = int(size)
+        self.next = int(next)
+        self.pos = pos if pos is not None else next
+        self.pos = int(self.pos)
+        self.data = None
+
+    def __repr__(self):
+        out = ("kind: %s - type: %s - size: %s - next: %s - pos: %s"
+               % (self.kind, self.type, self.size, self.next, self.pos))
+        if hasattr(self, 'data'):
+            out += " - data: %s" % self.data
+        out += "\n"
+        return out
+
+    def __cmp__(self, tag):
+        is_equal = (self.kind == tag.kind and
+                    self.type == tag.type and
+                    self.size == tag.size and
+                    self.next == tag.next and
+                    self.pos == tag.pos and
+                    self.data == tag.data)
+        if is_equal:
+            return 0
+        else:
+            return 1
+
+
+def read_big(fid, size=None):
+    """Function to read large chunks of data (>16MB) Windows-friendly
+
+    Parameters
+    ----------
+    fid : file
+        Open file to read from.
+    size : int or None
+        Number of bytes to read. If None, the whole file is read.
+
+    Returns
+    -------
+    buf : bytes
+        The data.
+
+    Notes
+    -----
+    Windows cannot handle reading large chunks of data in a single call,
+    so we have to do it piece-wise, possibly related to:
+       http://stackoverflow.com/questions/4226941
+
+    Examples
+    --------
+    This code should work for normal files and .gz files:
+
+        >>> import numpy as np
+        >>> import gzip, os, tempfile, shutil
+        >>> fname = tempfile.mkdtemp()
+        >>> fname_gz = os.path.join(fname, 'temp.gz')
+        >>> fname = os.path.join(fname, 'temp.bin')
+        >>> randgen = np.random.RandomState(9)
+        >>> x = randgen.randn(3000000)  # > 16MB data
+        >>> with open(fname, 'wb') as fid: x.tofile(fid)
+        >>> with open(fname, 'rb') as fid: y = np.fromstring(read_big(fid))
+        >>> assert np.all(x == y)
+        >>> fid_gz = gzip.open(fname_gz, 'wb')
+        >>> _ = fid_gz.write(x.tostring())
+        >>> fid_gz.close()
+        >>> fid_gz = gzip.open(fname_gz, 'rb')
+        >>> y = np.fromstring(read_big(fid_gz))
+        >>> assert np.all(x == y)
+        >>> fid_gz.close()
+        >>> shutil.rmtree(os.path.dirname(fname))
+
+    """
+    # buf_size is chosen as a largest working power of 2 (16 MB):
+    buf_size = 16777216
+    if size is None:
+        # it's not possible to get .gz uncompressed file size
+        if not isinstance(fid, gzip.GzipFile):
+            size = os.fstat(fid.fileno()).st_size - fid.tell()
+
+    if size is not None:
+        # Use pre-buffering method
+        segments = np.r_[np.arange(0, size, buf_size), size]
+        buf = bytearray(b' ' * size)
+        for start, end in zip(segments[:-1], segments[1:]):
+            data = fid.read(int(end - start))
+            if len(data) != end - start:
+                raise ValueError('Read error')
+            buf[start:end] = data
+        buf = bytes(buf)
+    else:
+        # Use presumably less efficient concatenating method
+        buf = [b'']
+        new = fid.read(buf_size)
+        while len(new) > 0:
+            buf.append(new)
+            new = fid.read(buf_size)
+        buf = b''.join(buf)
+
+    return buf
+
+
+def read_tag_info(fid):
+    """Read Tag info (or header)
+    """
+    s = fid.read(4 * 4)
+    if len(s) == 0:
+        return None
+    tag = Tag(*np.fromstring(s, '>i4'))
+    if tag.next == 0:
+        fid.seek(tag.size, 1)
+    elif tag.next > 0:
+        fid.seek(tag.next, 0)
+    return tag
+
+
+def _fromstring_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
+    """Helper for getting a range of rows from a large tag"""
+    if shape is not None:
+        item_size = np.dtype(dtype).itemsize
+        if not len(shape) == 2:
+            raise ValueError('Only implemented for 2D matrices')
+        want_shape = np.prod(shape)
+        have_shape = tag_size // item_size
+        if want_shape != have_shape:
+            raise ValueError('Wrong shape specified, requested %s have %s'
+                             % (want_shape, have_shape))
+        if not len(rlims) == 2:
+            raise ValueError('rlims must have two elements')
+        n_row_out = rlims[1] - rlims[0]
+        if n_row_out <= 0:
+            raise ValueError('rlims must yield at least one output')
+        row_size = item_size * shape[1]
+        # # of bytes to skip at the beginning, # to read, where to end
+        start_skip = int(rlims[0] * row_size)
+        read_size = int(n_row_out * row_size)
+        end_pos = int(fid.tell() + tag_size)
+        # Move the pointer ahead to the read point
+        fid.seek(start_skip, 1)
+        # Do the reading
+        out = np.fromstring(fid.read(read_size), dtype=dtype)
+        # Move the pointer ahead to the end of the tag
+        fid.seek(end_pos)
+    else:
+        out = np.fromstring(fid.read(tag_size), dtype=dtype)
+    return out
+
+
+def _loc_to_coil_trans(loc):
+    """Helper to convert loc vector to coil_trans"""
+    # deal with nasty OSX Anaconda bug by casting to float64
+    loc = loc.astype(np.float64)
+    coil_trans = np.concatenate([loc.reshape(4, 3).T[:, [1, 2, 3, 0]],
+                                 np.array([0, 0, 0, 1]).reshape(1, 4)])
+    return coil_trans
+
+
+def _coil_trans_to_loc(coil_trans):
+    """Helper to convert coil_trans to loc"""
+    coil_trans = coil_trans.astype(np.float64)
+    return np.roll(coil_trans.T[:, :3], 1, 0).flatten()
+
+
+def _loc_to_eeg_loc(loc):
+    """Helper to convert a loc to an EEG loc"""
+    if loc[3:6].any():
+        return np.array([loc[0:3], loc[3:6]]).T
+    else:
+        return loc[0:3][:, np.newaxis].copy()
+
+
+def read_tag(fid, pos=None, shape=None, rlims=None):
+    """Read a Tag from a file at a given position
+
+    Parameters
+    ----------
+    fid : file
+        The open FIF file descriptor.
+    pos : int
+        The position of the Tag in the file.
+    shape : tuple | None
+        If tuple, the shape of the stored matrix. Only to be used with
+        data stored as a vector (not implemented for matrices yet).
+    rlims : tuple | None
+        If tuple, the first (inclusive) and last (exclusive) rows to retrieve.
+        Note that data are assumed to be stored row-major in the file. Only to
+        be used with data stored as a vector (not implemented for matrices
+        yet).
+
+    Returns
+    -------
+    tag : Tag
+        The Tag read.
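+
+    Examples
+    --------
+    A minimal sketch, reading the tag that a directory entry points at;
+    ``fid`` and ``tree`` are assumed to come from ``fiff_open``:
+
+    >>> entry = tree['directory'][0]  # doctest: +SKIP
+    >>> tag = read_tag(fid, entry.pos)  # doctest: +SKIP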
+    """
+    if pos is not None:
+        fid.seek(pos, 0)
+
+    s = fid.read(4 * 4)
+
+    tag = Tag(*np.fromstring(s, dtype='>i4,>u4,>i4,>i4')[0])
+
+    #
+    #   The magic hexadecimal values
+    #
+    is_matrix = 4294901760  # ffff0000
+    matrix_coding_dense = 16384      # 4000
+    matrix_coding_CCS = 16400      # 4010
+    matrix_coding_RCS = 16416      # 4020
+    data_type = 65535      # ffff
+    #
+    if tag.size > 0:
+        matrix_coding = is_matrix & tag.type
+        if matrix_coding != 0:
+            matrix_coding = matrix_coding >> 16
+
+            # This should be easy to implement (see _fromstring_rows)
+            # if we need it, but for now, it's not...
+            if shape is not None:
+                raise ValueError('Row reading not implemented for matrices '
+                                 'yet')
+
+            #   Matrices
+            if matrix_coding == matrix_coding_dense:
+                # Find dimensions and return to the beginning of tag data
+                pos = fid.tell()
+                fid.seek(tag.size - 4, 1)
+                ndim = int(np.fromstring(fid.read(4), dtype='>i4'))
+                fid.seek(-(ndim + 1) * 4, 1)
+                dims = np.fromstring(fid.read(4 * ndim), dtype='>i4')[::-1]
+                #
+                # Back to where the data start
+                #
+                fid.seek(pos, 0)
+
+                if ndim > 3:
+                    raise Exception('Only 2 or 3-dimensional matrices are '
+                                    'supported at this time')
+
+                matrix_type = data_type & tag.type
+
+                if matrix_type == FIFF.FIFFT_INT:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>i4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_JULIAN:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>i4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_FLOAT:
+                    tag.data = np.fromstring(read_big(fid, 4 * dims.prod()),
+                                             dtype='>f4').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_DOUBLE:
+                    tag.data = np.fromstring(read_big(fid, 8 * dims.prod()),
+                                             dtype='>f8').reshape(dims)
+                elif matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:
+                    data = np.fromstring(read_big(fid, 4 * 2 * dims.prod()),
+                                         dtype='>f4')
+                    # Note: we need the non-conjugate transpose here
+                    tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)
+                elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                    data = np.fromstring(read_big(fid, 8 * 2 * dims.prod()),
+                                         dtype='>f8')
+                    # Note: we need the non-conjugate transpose here
+                    tag.data = (data[::2] + 1j * data[1::2]).reshape(dims)
+                else:
+                    raise Exception('Cannot handle matrix of type %d yet'
+                                    % matrix_type)
+
+            elif matrix_coding in (matrix_coding_CCS, matrix_coding_RCS):
+                from scipy import sparse
+                # Find dimensions and return to the beginning of tag data
+                pos = fid.tell()
+                fid.seek(tag.size - 4, 1)
+                ndim = int(np.fromstring(fid.read(4), dtype='>i4'))
+                fid.seek(-(ndim + 2) * 4, 1)
+                dims = np.fromstring(fid.read(4 * (ndim + 1)), dtype='>i4')
+                if ndim != 2:
+                    raise Exception('Only two-dimensional matrices are '
+                                    'supported at this time')
+
+                # Back to where the data start
+                fid.seek(pos, 0)
+                nnz = int(dims[0])
+                nrow = int(dims[1])
+                ncol = int(dims[2])
+                sparse_data = np.fromstring(fid.read(4 * nnz), dtype='>f4')
+                shape = (dims[1], dims[2])
+                if matrix_coding == matrix_coding_CCS:
+                    #    CCS
+                    tmp_indices = fid.read(4 * nnz)
+                    sparse_indices = np.fromstring(tmp_indices, dtype='>i4')
+                    tmp_ptrs = fid.read(4 * (ncol + 1))
+                    sparse_ptrs = np.fromstring(tmp_ptrs, dtype='>i4')
+                    if (sparse_ptrs[-1] > len(sparse_indices) or
+                            np.any(sparse_ptrs < 0)):
+                        # There was a bug in MNE-C that caused some data to be
+                        # stored without byte swapping
+                        sparse_indices = np.concatenate(
+                            (np.fromstring(tmp_indices[:4 * (nrow + 1)],
+                                           dtype='>i4'),
+                             np.fromstring(tmp_indices[4 * (nrow + 1):],
+                                           dtype='<i4')))
+                        sparse_ptrs = np.fromstring(tmp_ptrs, dtype='<i4')
+                    tag.data = sparse.csc_matrix((sparse_data, sparse_indices,
+                                                 sparse_ptrs), shape=shape)
+                else:
+                    #    RCS
+                    sparse_indices = np.fromstring(fid.read(4 * nnz),
+                                                   dtype='>i4')
+                    sparse_ptrs = np.fromstring(fid.read(4 * (nrow + 1)),
+                                                dtype='>i4')
+                    tag.data = sparse.csr_matrix((sparse_data, sparse_indices,
+                                                 sparse_ptrs), shape=shape)
+            else:
+                raise Exception('Cannot handle other than dense or sparse '
+                                'matrices yet')
+        else:
+            #   All other data types
+
+            #   Simple types
+            if tag.type == FIFF.FIFFT_BYTE:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">B1",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_SHORT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_INT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_USHORT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">u2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_UINT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">u4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_FLOAT:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f4",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_DOUBLE:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f8",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_STRING:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">c",
+                                            shape=shape, rlims=rlims)
+
+                # Always decode to unicode.
+                td = tag.data.tostring().decode('utf-8', 'ignore')
+                tag.data = text_type(td)
+
+            elif tag.type == FIFF.FIFFT_DAU_PACK16:
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">i2",
+                                            shape=shape, rlims=rlims)
+            elif tag.type == FIFF.FIFFT_COMPLEX_FLOAT:
+                # data gets stored twice as large
+                if shape is not None:
+                    shape = (shape[0], shape[1] * 2)
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f4",
+                                            shape=shape, rlims=rlims)
+                tag.data = tag.data[::2] + 1j * tag.data[1::2]
+            elif tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:
+                # data gets stored twice as large
+                if shape is not None:
+                    shape = (shape[0], shape[1] * 2)
+                tag.data = _fromstring_rows(fid, tag.size, dtype=">f8",
+                                            shape=shape, rlims=rlims)
+                tag.data = tag.data[::2] + 1j * tag.data[1::2]
+            #
+            #   Structures
+            #
+            elif tag.type == FIFF.FIFFT_ID_STRUCT:
+                tag.data = dict()
+                tag.data['version'] = int(np.fromstring(fid.read(4),
+                                                        dtype=">i4"))
+                tag.data['machid'] = np.fromstring(fid.read(8), dtype=">i4")
+                tag.data['secs'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['usecs'] = int(np.fromstring(fid.read(4),
+                                                      dtype=">i4"))
+            elif tag.type == FIFF.FIFFT_DIG_POINT_STRUCT:
+                tag.data = dict()
+                tag.data['kind'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data['ident'] = int(np.fromstring(fid.read(4),
+                                                      dtype=">i4"))
+                tag.data['r'] = np.fromstring(fid.read(12), dtype=">f4")
+                tag.data['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
+            elif tag.type == FIFF.FIFFT_COORD_TRANS_STRUCT:
+                from ..transforms import Transform
+                fro = int(np.fromstring(fid.read(4), dtype=">i4"))
+                to = int(np.fromstring(fid.read(4), dtype=">i4"))
+                rot = np.fromstring(fid.read(36), dtype=">f4").reshape(3, 3)
+                move = np.fromstring(fid.read(12), dtype=">f4")
+                trans = np.r_[np.c_[rot, move],
+                              np.array([[0], [0], [0], [1]]).T]
+                tag.data = Transform(fro, to, trans)
+                # Skip over the inverse transformation
+                fid.seek(12 * 4, 1)
+            elif tag.type == FIFF.FIFFT_CH_INFO_STRUCT:
+                d = tag.data = dict()
+                d['scanno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['logno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['kind'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['range'] = float(np.fromstring(fid.read(4), dtype=">f4"))
+                d['cal'] = float(np.fromstring(fid.read(4), dtype=">f4"))
+                d['coil_type'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                #
+                #   Read the coil coordinate system definition
+                #
+                d['loc'] = np.fromstring(fid.read(48), dtype=">f4")
+                # deal with nasty OSX Anaconda bug by casting to float64
+                d['loc'] = d['loc'].astype(np.float64)
+                #
+                #   Convert loc into a more useful format
+                #
+                kind = d['kind']
+                if kind in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
+                    d['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                elif d['kind'] == FIFF.FIFFV_EEG_CH:
+                    d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                else:
+                    d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
+                #
+                #   Unit and exponent
+                #
+                d['unit'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['unit_mul'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                #
+                #   Handle the channel name
+                #
+                ch_name = np.fromstring(fid.read(16), dtype=">c")
+                ch_name = ch_name[:np.argmax(ch_name == b'')].tostring()
+                d['ch_name'] = ch_name.decode()
+            elif tag.type == FIFF.FIFFT_OLD_PACK:
+                offset = float(np.fromstring(fid.read(4), dtype=">f4"))
+                scale = float(np.fromstring(fid.read(4), dtype=">f4"))
+                tag.data = np.fromstring(fid.read(tag.size - 8), dtype=">h2")
+                tag.data = scale * tag.data + offset
+            elif tag.type == FIFF.FIFFT_DIR_ENTRY_STRUCT:
+                tag.data = list()
+                for _ in range(tag.size // 16 - 1):
+                    s = fid.read(4 * 4)
+                    tag.data.append(Tag(*np.fromstring(
+                        s, dtype='>i4,>u4,>i4,>i4')[0]))
+            elif tag.type == FIFF.FIFFT_JULIAN:
+                tag.data = int(np.fromstring(fid.read(4), dtype=">i4"))
+                tag.data = jd2jcal(tag.data)
+            else:
+                raise Exception('Unimplemented tag data type %s' % tag.type)
+
+    if tag.next != FIFF.FIFFV_NEXT_SEQ:
+        # f.seek(tag.next,0)
+        fid.seek(tag.next, 1)  # XXX : fix? pb when tag.next < 0
+
+    return tag
+
+
+def find_tag(fid, node, findkind):
+    """Find Tag in an open FIF file descriptor
+
+    Parameters
+    ----------
+    fid : file-like
+        Open file.
+    node : dict
+        Node to search.
+    findkind : int
+        Tag kind to find.
+
+    Returns
+    -------
+    tag : instance of Tag
+        The first tag found.
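+
+    Examples
+    --------
+    A minimal sketch; ``fid`` and ``meas_info`` are assumed to come from
+    ``fiff_open`` and ``dir_tree_find``:
+
+    >>> tag = find_tag(fid, meas_info, FIFF.FIFF_NCHAN)  # doctest: +SKIP
+    >>> nchan = int(tag.data) if tag is not None else None  # doctest: +SKIP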
+    """
+    for p in range(node['nent']):
+        if node['directory'][p].kind == findkind:
+            return read_tag(fid, node['directory'][p].pos)
+    return None
+
+
+def has_tag(node, kind):
+    """Does the node contains a Tag of a given kind?
+    """
+    for d in node['directory']:
+        if d.kind == kind:
+            return True
+    return False
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/__init__.py
new file mode 100644
index 0000000..aba6507
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/__init__.py
@@ -0,0 +1,3 @@
+import os.path as op
+
+data_dir = op.join(op.dirname(__file__), 'data')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_apply_function.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_apply_function.py
new file mode 100644
index 0000000..7adfede
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_apply_function.py
@@ -0,0 +1,58 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+from nose.tools import assert_equal, assert_raises
+
+from mne import create_info
+from mne.io import RawArray
+from mne.utils import logger, set_log_file, slow_test, _TempDir
+
+
+def bad_1(x):
+    return  # bad return type
+
+
+def bad_2(x):
+    return x[:-1]  # bad shape
+
+
+def printer(x):
+    logger.info('exec')
+    return x
+
+
+ at slow_test
+def test_apply_function_verbose():
+    """Test apply function verbosity
+    """
+    n_chan = 2
+    n_times = 3
+    ch_names = [str(ii) for ii in range(n_chan)]
+    raw = RawArray(np.zeros((n_chan, n_times)),
+                   create_info(ch_names, 1., 'mag'))
+    # test return types in both code paths (parallel / 1 job)
+    assert_raises(TypeError, raw.apply_function, bad_1,
+                  None, None, 1)
+    assert_raises(ValueError, raw.apply_function, bad_2,
+                  None, None, 1)
+    assert_raises(TypeError, raw.apply_function, bad_1,
+                  None, None, 2)
+    assert_raises(ValueError, raw.apply_function, bad_2,
+                  None, None, 2)
+
+    # check our arguments
+    tempdir = _TempDir()
+    test_name = op.join(tempdir, 'test.log')
+    set_log_file(test_name)
+    try:
+        raw.apply_function(printer, None, None, 1, verbose=False)
+        with open(test_name) as fid:
+            assert_equal(len(fid.readlines()), 0)
+        raw.apply_function(printer, None, None, 1, verbose=True)
+        with open(test_name) as fid:
+            assert_equal(len(fid.readlines()), n_chan)
+    finally:
+        set_log_file(None)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_compensator.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_compensator.py
new file mode 100644
index 0000000..bc15630
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_compensator.py
@@ -0,0 +1,72 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from nose.tools import assert_true
+import numpy as np
+from numpy.testing import assert_allclose
+
+from mne import Epochs, read_evokeds, pick_types
+from mne.io.compensator import make_compensator, get_current_comp
+from mne.io import Raw
+from mne.utils import _TempDir, requires_mne, run_subprocess
+
+base_dir = op.join(op.dirname(__file__), 'data')
+ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
+
+
+def test_compensation():
+    """Test compensation
+    """
+    tempdir = _TempDir()
+    raw = Raw(ctf_comp_fname, compensation=None)
+    comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
+    assert_true(comp1.shape == (340, 340))
+    comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
+    assert_true(comp2.shape == (311, 340))
+
+    # make sure that changing the comp doesn't modify the original data
+    raw2 = Raw(ctf_comp_fname, compensation=2)
+    assert_true(get_current_comp(raw2.info) == 2)
+    fname = op.join(tempdir, 'ctf-raw.fif')
+    raw2.save(fname)
+    raw2 = Raw(fname, compensation=None)
+    data, _ = raw[:, :]
+    data2, _ = raw2[:, :]
+    assert_allclose(data, data2, rtol=1e-9, atol=1e-20)
+    for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']):
+        assert_true(ch1['coil_type'] == ch2['coil_type'])
+
+
+ at requires_mne
+def test_compensation_mne():
+    """Test comensation by comparing with MNE
+    """
+    tempdir = _TempDir()
+
+    def make_evoked(fname, comp):
+        raw = Raw(fname, compensation=comp)
+        picks = pick_types(raw.info, meg=True, ref_meg=True)
+        events = np.array([[0, 0, 1]], dtype=np.int)
+        evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks).average()
+        return evoked
+
+    def compensate_mne(fname, comp):
+        tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
+        cmd = ['mne_compensate_data', '--in', fname,
+               '--out', tmp_fname, '--grad', str(comp)]
+        run_subprocess(cmd)
+        return read_evokeds(tmp_fname)[0]
+
+    # save evoked response with default compensation
+    fname_default = op.join(tempdir, 'ctf_default-ave.fif')
+    make_evoked(ctf_comp_fname, None).save(fname_default)
+
+    for comp in [0, 1, 2, 3]:
+        evoked_py = make_evoked(ctf_comp_fname, comp)
+        evoked_c = compensate_mne(fname_default, comp)
+        picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
+        picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
+        assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
+                        rtol=1e-3, atol=1e-17)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_meas_info.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_meas_info.py
new file mode 100644
index 0000000..4c81bfb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_meas_info.py
@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+
+import os.path as op
+
+from nose.tools import assert_false, assert_equal, assert_raises, assert_true
+import numpy as np
+from numpy.testing import assert_array_equal, assert_allclose
+
+from mne import Epochs, read_events
+from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc,
+                    _loc_to_coil_trans, Raw, read_info, write_info)
+from mne.io.constants import FIFF
+from mne.io.meas_info import (Info, create_info, _write_dig_points,
+                              _read_dig_points, _make_dig_points)
+from mne.utils import _TempDir, run_tests_if_main
+from mne.channels.montage import read_montage, read_dig_montage
+
+base_dir = op.join(op.dirname(__file__), 'data')
+fiducials_fname = op.join(base_dir, 'fsaverage-fiducials.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+chpi_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+kit_data_dir = op.join(op.dirname(__file__), '..', 'kit', 'tests', 'data')
+hsp_fname = op.join(kit_data_dir, 'test_hsp.txt')
+elp_fname = op.join(kit_data_dir, 'test_elp.txt')
+
+
+def test_coil_trans():
+    """Test loc<->coil_trans functions"""
+    rng = np.random.RandomState(0)
+    x = rng.randn(4, 4)
+    x[3] = [0, 0, 0, 1]
+    assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x)
+    x = rng.randn(12)
+    assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x)
+
+
+def test_make_info():
+    """Test some create_info properties
+    """
+    n_ch = 1
+    info = create_info(n_ch, 1000., 'eeg')
+    coil_types = set([ch['coil_type'] for ch in info['chs']])
+    assert_true(FIFF.FIFFV_COIL_EEG in coil_types)
+
+    assert_raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)
+    assert_raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)
+    assert_raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types=['eeg', 'eeg'])
+    assert_raises(TypeError, create_info, ch_names=[np.array([1])],
+                  sfreq=1000)
+    assert_raises(TypeError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types=np.array([1]))
+    assert_raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types='awesome')
+    assert_raises(TypeError, create_info, ['Test Ch'], sfreq=1000,
+                  ch_types=None, montage=np.array([1]))
+    m = read_montage('biosemi32')
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=m)
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    d = read_dig_montage(hsp_fname, None, elp_fname, names, unit='m',
+                         transform=False)
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=d)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
+
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=[d, m])
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=[d, 'biosemi32'])
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
+
+
+def test_fiducials_io():
+    """Test fiducials i/o"""
+    tempdir = _TempDir()
+    pts, coord_frame = read_fiducials(fiducials_fname)
+    assert_equal(pts[0]['coord_frame'], FIFF.FIFFV_COORD_MRI)
+    assert_equal(pts[0]['ident'], FIFF.FIFFV_POINT_CARDINAL)
+
+    temp_fname = op.join(tempdir, 'test.fif')
+    write_fiducials(temp_fname, pts, coord_frame)
+    pts_1, coord_frame_1 = read_fiducials(temp_fname)
+    assert_equal(coord_frame, coord_frame_1)
+    for pt, pt_1 in zip(pts, pts_1):
+        assert_equal(pt['kind'], pt_1['kind'])
+        assert_equal(pt['ident'], pt_1['ident'])
+        assert_equal(pt['coord_frame'], pt_1['coord_frame'])
+        assert_array_equal(pt['r'], pt_1['r'])
+
+    # test safeguards
+    pts[0]['coord_frame'] += 1
+    assert_raises(ValueError, write_fiducials, temp_fname, pts, coord_frame)
+
+
+def test_info():
+    """Test info object"""
+    raw = Raw(raw_fname)
+    event_id, tmin, tmax = 1, -0.2, 0.5
+    events = read_events(event_name)
+    event_id = int(events[0, 2])
+    epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None,
+                    baseline=(None, 0))
+
+    evoked = epochs.average()
+
+    events = read_events(event_name)
+
+    # Test subclassing was successful.
+    info = Info(a=7, b='aaaaa')
+    assert_true('a' in info)
+    assert_true('b' in info)
+    info[42] = 'foo'
+    assert_true(info[42] == 'foo')
+
+    # test info attribute in API objects
+    for obj in [raw, epochs, evoked]:
+        assert_true(isinstance(obj.info, Info))
+        info_str = '%s' % obj.info
+        assert_equal(len(info_str.split('\n')), (len(obj.info.keys()) + 2))
+        assert_true(all(k in info_str for k in obj.info.keys()))
+
+
+def test_read_write_info():
+    """Test IO of info
+    """
+    tempdir = _TempDir()
+    info = read_info(raw_fname)
+    temp_file = op.join(tempdir, 'info.fif')
+    # check for bug `#1198`
+    info['dev_head_t']['trans'] = np.eye(4)
+    t1 = info['dev_head_t']['trans']
+    write_info(temp_file, info)
+    info2 = read_info(temp_file)
+    t2 = info2['dev_head_t']['trans']
+    assert_true(len(info['chs']) == len(info2['chs']))
+    assert_array_equal(t1, t2)
+    # proc_history (e.g., GH#1875)
+    creator = u'é'
+    info = read_info(chpi_fname)
+    info['proc_history'][0]['creator'] = creator
+    info['hpi_meas'][0]['creator'] = creator
+    info['subject_info']['his_id'] = creator
+    write_info(temp_file, info)
+    info = read_info(temp_file)
+    assert_equal(info['proc_history'][0]['creator'], creator)
+    assert_equal(info['hpi_meas'][0]['creator'], creator)
+    assert_equal(info['subject_info']['his_id'], creator)
+
+
+def test_io_dig_points():
+    """Test Writing for dig files"""
+    tempdir = _TempDir()
+    points = _read_dig_points(hsp_fname)
+
+    dest = op.join(tempdir, 'test.txt')
+    dest_bad = op.join(tempdir, 'test.mne')
+    assert_raises(ValueError, _write_dig_points, dest, points[:, :2])
+    assert_raises(ValueError, _write_dig_points, dest_bad, points)
+    _write_dig_points(dest, points)
+    points1 = _read_dig_points(dest)
+    err = "Dig points diverged after writing and reading."
+    assert_array_equal(points, points1, err)
+
+    points2 = np.array([[-106.93, 99.80], [99.80, 68.81]])
+    np.savetxt(dest, points2, delimiter='\t', newline='\n')
+    assert_raises(ValueError, _read_dig_points, dest)
+
+
+def test_make_dig_points():
+    """Test application of Polhemus HSP to info"""
+    dig_points = _read_dig_points(hsp_fname)
+    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
+    assert_false(info['dig'])
+
+    info['dig'] = _make_dig_points(dig_points=dig_points)
+    assert_true(info['dig'])
+    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])
+
+    dig_points = _read_dig_points(elp_fname)
+    nasion, lpa, rpa = dig_points[:3]
+    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
+    assert_false(info['dig'])
+
+    info['dig'] = _make_dig_points(nasion, lpa, rpa, dig_points[3:], None)
+    assert_true(info['dig'])
+    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
+    assert_array_equal(info['dig'][idx]['r'],
+                       np.array([1.3930, 13.1613, -4.6967]))
+    assert_raises(ValueError, _make_dig_points, nasion[:2])
+    assert_raises(ValueError, _make_dig_points, None, lpa[:2])
+    assert_raises(ValueError, _make_dig_points, None, None, rpa[:2])
+    assert_raises(ValueError, _make_dig_points, None, None, None,
+                  dig_points[:, :2])
+    assert_raises(ValueError, _make_dig_points, None, None, None, None,
+                  dig_points[:, :2])
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_pick.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_pick.py
new file mode 100644
index 0000000..80e2767
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_pick.py
@@ -0,0 +1,186 @@
+from nose.tools import assert_equal, assert_raises
+from numpy.testing import assert_array_equal
+import numpy as np
+import os.path as op
+
+from mne import (pick_channels_regexp, pick_types, Epochs,
+                 read_forward_solution, rename_channels,
+                 pick_info, pick_channels, __file__)
+
+from mne.io.meas_info import create_info
+from mne.io.array import RawArray
+from mne.io.pick import (channel_indices_by_type, channel_type,
+                         pick_types_forward, _picks_by_type)
+from mne.io.constants import FIFF
+from mne.io import Raw
+from mne.datasets import testing
+from mne.forward.tests import test_forward
+from mne.utils import run_tests_if_main
+
+
+def test_pick_channels_regexp():
+    """Test pick with regular expression
+    """
+    ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333']
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0])
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2])
+    assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2])
+
+
+def test_pick_seeg():
+    """Test picking with SEEG
+    """
+    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
+    types = 'mag mag eeg eeg seeg seeg seeg'.split()
+    info = create_info(names, 1024., types)
+    idx = channel_indices_by_type(info)
+    assert_array_equal(idx['mag'], [0, 1])
+    assert_array_equal(idx['eeg'], [2, 3])
+    assert_array_equal(idx['seeg'], [4, 5, 6])
+    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
+    for i, t in enumerate(types):
+        assert_equal(channel_type(info, i), types[i])
+    raw = RawArray(np.zeros((len(names), 10)), info)
+    events = np.array([[1, 0, 0], [2, 0, 0]])
+    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
+    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
+    e_seeg = evoked.pick_types(meg=False, seeg=True, copy=True)
+    for l, r in zip(e_seeg.ch_names, names[4:]):
+        assert_equal(l, r)
+
+
+def _check_fwd_n_chan_consistent(fwd, n_expected):
+    n_ok = len(fwd['info']['ch_names'])
+    n_sol = fwd['sol']['data'].shape[0]
+    assert_equal(n_expected, n_sol)
+    assert_equal(n_expected, n_ok)
+
+
+@testing.requires_testing_data
+def test_pick_forward_seeg():
+    """Test picking forward with SEEG
+    """
+    fwd = read_forward_solution(test_forward.fname_meeg)
+    counts = channel_indices_by_type(fwd['info'])
+    for key in counts.keys():
+        counts[key] = len(counts[key])
+    counts['meg'] = counts['mag'] + counts['grad']
+    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
+    # should raise exception related to emptiness
+    assert_raises(ValueError, pick_types_forward, fwd, meg=False, eeg=False,
+                  seeg=True)
+    # change last chan from EEG to sEEG
+    seeg_name = 'OTp1'
+    rename_channels(fwd['info'], {'EEG 060': seeg_name})
+    for ch in fwd['info']['chs']:
+        if ch['ch_name'] == seeg_name:
+            ch['kind'] = FIFF.FIFFV_SEEG_CH
+            ch['coil_type'] = FIFF.FIFFV_COIL_EEG
+    fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name']
+    counts['eeg'] -= 1
+    counts['seeg'] += 1
+    # repick & check
+    fwd_seeg = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
+    assert_equal(fwd_seeg['sol']['row_names'], [seeg_name])
+    assert_equal(fwd_seeg['info']['ch_names'], [seeg_name])
+    # should work fine
+    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
+    _check_fwd_n_chan_consistent(fwd_, counts['seeg'])
+
+
+def test_picks_by_channels():
+    """Test creating pick_lists"""
+
+    rng = np.random.RandomState(909)
+
+    test_data = rng.random_sample((4, 2000))
+    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
+    ch_types = ['grad', 'mag', 'mag', 'eeg']
+    sfreq = 250.0
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+    raw = RawArray(test_data, info)
+
+    pick_list = _picks_by_type(raw.info)
+    assert_equal(len(pick_list), 3)
+    assert_equal(pick_list[0][0], 'mag')
+    pick_list2 = _picks_by_type(raw.info, meg_combined=False)
+    assert_equal(len(pick_list), len(pick_list2))
+    assert_equal(pick_list2[0][0], 'mag')
+
+    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
+    assert_equal(len(pick_list), len(pick_list2) + 1)
+    assert_equal(pick_list2[0][0], 'meg')
+
+    test_data = rng.random_sample((4, 2000))
+    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
+    ch_types = ['mag', 'mag', 'mag', 'mag']
+    sfreq = 250.0
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+    raw = RawArray(test_data, info)
+
+    # Make sure checks for list input work.
+    assert_raises(ValueError, pick_channels, ch_names, 'MEG 001')
+    assert_raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi')
+
+    pick_list = _picks_by_type(raw.info)
+    assert_equal(len(pick_list), 1)
+    assert_equal(pick_list[0][0], 'mag')
+    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
+    assert_equal(len(pick_list), len(pick_list2))
+    assert_equal(pick_list2[0][0], 'mag')
+
+
+def test_clean_info_bads():
+    """Test cleaning info['bads'] when bad_channels are excluded """
+
+    raw_file = op.join(op.dirname(__file__), 'io', 'tests', 'data',
+                       'test_raw.fif')
+    raw = Raw(raw_file)
+
+    # select eeg channels
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+
+    # select 3 eeg channels as bads
+    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
+    eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]
+
+    # select meg channels
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+
+    # select randomly 3 meg channels as bads
+    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
+    meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]
+
+    # simulate the bad channels
+    raw.info['bads'] = eeg_bad_ch + meg_bad_ch
+
+    # simulate the call to pick_info excluding the bad eeg channels
+    info_eeg = pick_info(raw.info, picks_eeg)
+
+    # simulate the call to pick_info excluding the bad meg channels
+    info_meg = pick_info(raw.info, picks_meg)
+
+    assert_equal(info_eeg['bads'], eeg_bad_ch)
+    assert_equal(info_meg['bads'], meg_bad_ch)
+
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['bads'] += ['EEG 053']
+    assert_raises(RuntimeError, info._check_consistency)
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['ch_names'][0] += 'f'
+    assert_raises(RuntimeError, info._check_consistency)
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['nchan'] += 1
+    assert_raises(RuntimeError, info._check_consistency)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_proc_history.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_proc_history.py
new file mode 100644
index 0000000..555b08d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_proc_history.py
@@ -0,0 +1,47 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: Simplified BSD
+
+import numpy as np
+import os.path as op
+from mne import io
+from mne.io.constants import FIFF
+from mne.io.proc_history import _get_sss_rank
+from nose.tools import assert_true, assert_equal
+
+base_dir = op.join(op.dirname(__file__), 'data')
+raw_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+
+
+def test_maxfilter_io():
+    """test maxfilter io"""
+    raw = io.Raw(raw_fname)
+    mf = raw.info['proc_history'][1]['max_info']
+
+    assert_equal(mf['sss_info']['frame'], FIFF.FIFFV_COORD_HEAD)
+    # based on manual 2.0, rev. 5.0 page 23
+    assert_true(5 <= mf['sss_info']['in_order'] <= 11)
+    assert_true(mf['sss_info']['out_order'] <= 5)
+    assert_true(mf['sss_info']['nchan'] > len(mf['sss_info']['components']))
+
+    assert_equal(raw.ch_names[:mf['sss_info']['nchan']],
+                 mf['sss_ctc']['proj_items_chs'])
+    assert_equal(mf['sss_ctc']['decoupler'].shape,
+                 (mf['sss_info']['nchan'], mf['sss_info']['nchan']))
+    assert_equal(np.unique(np.diag(mf['sss_ctc']['decoupler'].toarray())),
+                 np.array([1.], dtype=np.float32))
+
+    assert_equal(mf['sss_cal']['cal_corrs'].shape, (306, 14))
+    assert_equal(mf['sss_cal']['cal_chans'].shape, (306, 2))
+    vv_coils = [v for k, v in FIFF.items() if 'FIFFV_COIL_VV' in k]
+    assert_true(all(k in vv_coils
+                    for k in set(mf['sss_cal']['cal_chans'][:, 1])))
+
+
+def test_maxfilter_get_rank():
+    """test maxfilter rank lookup"""
+    raw = io.Raw(raw_fname)
+    mf = raw.info['proc_history'][0]['max_info']
+    rank1 = mf['sss_info']['nfree']
+    rank2 = _get_sss_rank(mf)
+    assert_equal(rank1, rank2)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_raw.py
new file mode 100644
index 0000000..9d79349
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_raw.py
@@ -0,0 +1,51 @@
+# Generic tests that all raw classes should run
+from os import path as op
+from numpy.testing import assert_allclose
+
+from mne.datasets import testing
+from mne.io import Raw
+
+
+def _test_concat(reader, *args):
+    """Test concatenation of raw classes that allow not preloading"""
+    data = None
+    for preload in (True, False):
+        raw1 = reader(*args, preload=preload)
+        raw2 = reader(*args, preload=preload)
+        raw1.append(raw2)
+        raw1.load_data()
+        if data is None:
+            data = raw1[:, :][0]
+        assert_allclose(data, raw1[:, :][0])
+    for first_preload in (True, False):
+        raw = reader(*args, preload=first_preload)
+        data = raw[:, :][0]
+        for preloads in ((True, True), (True, False), (False, False)):
+            for last_preload in (True, False):
+                print(first_preload, preloads, last_preload)
+                raw1 = raw.crop(0, 0.4999)
+                if preloads[0]:
+                    raw1.load_data()
+                raw2 = raw.crop(0.5, None)
+                if preloads[1]:
+                    raw2.load_data()
+                raw1.append(raw2)
+                if last_preload:
+                    raw1.load_data()
+                assert_allclose(data, raw1[:, :][0])
+
+
+@testing.requires_testing_data
+def test_time_index():
+    """Test indexing of raw times"""
+    raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                        'data', 'test_raw.fif')
+    raw = Raw(raw_fname)
+
+    # Test original (non-rounding) indexing behavior
+    orig_inds = raw.time_as_index(raw.times)
+    assert(len(set(orig_inds)) != len(orig_inds))
+
+    # Test new (rounding) indexing behavior
+    new_inds = raw.time_as_index(raw.times, use_rounding=True)
+    assert(len(set(new_inds)) == len(new_inds))
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_reference.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_reference.py
new file mode 100644
index 0000000..7ce82d5
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tests/test_reference.py
@@ -0,0 +1,307 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+import os.path as op
+import numpy as np
+
+from nose.tools import assert_true, assert_equal, assert_raises
+from numpy.testing import assert_array_equal, assert_allclose
+
+from mne import pick_types, Evoked, Epochs, read_events
+from mne.io.constants import FIFF
+from mne.io import (set_eeg_reference, set_bipolar_reference,
+                    add_reference_channels)
+from mne.io.proj import _has_eeg_average_ref_proj
+from mne.io.reference import _apply_reference
+from mne.datasets import testing
+from mne.io import Raw
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
+ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
+
+
+def _test_reference(raw, reref, ref_data, ref_from):
+    """Helper function to test whether a reference has been correctly
+    applied."""
+    # Separate EEG channels from other channel types
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
+                             stim=True, exclude='bads')
+
+    # Calculate indices of the reference channels
+    picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
+
+    # Get data
+    if isinstance(raw, Evoked):
+        _data = raw.data
+        _reref = reref.data
+    else:
+        _data = raw._data
+        _reref = reref._data
+
+    # Check that the ref has been properly computed
+    assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
+
+    # Get the raw EEG data and other channel data
+    raw_eeg_data = _data[..., picks_eeg, :]
+    raw_other_data = _data[..., picks_other, :]
+
+    # Get the rereferenced EEG data
+    reref_eeg_data = _reref[..., picks_eeg, :]
+    reref_other_data = _reref[..., picks_other, :]
+
+    # Undo rereferencing of EEG channels
+    if isinstance(raw, Epochs):
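+        # epochs data are 3-D (epochs x channels x times), so give the
+        # reference signal a channel axis for broadcasting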
+        unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
+    else:
+        unref_eeg_data = reref_eeg_data + ref_data
+
+    # Check that both the EEG data and the other channel data match the
+    # original
+    assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
+    assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
+
+
+@testing.requires_testing_data
+def test_apply_reference():
+    """Test base function for rereferencing"""
+    raw = Raw(fif_fname, preload=True)
+
+    # Rereference raw data by creating a copy of original data
+    reref, ref_data = _apply_reference(raw, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # The CAR reference projection should have been removed by the function
+    assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
+
+    # Test that disabling the reference does not break anything
+    reref, ref_data = _apply_reference(raw, [])
+    assert_array_equal(raw._data, reref._data)
+
+    # Test that data is modified in place when copy=False
+    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'],
+                                       copy=False)
+    assert_true(raw is reref)
+
+    # Test re-referencing Epochs object
+    raw = Raw(fif_fname, preload=False, add_eeg_ref=False)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    reref, ref_data = _apply_reference(epochs, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test re-referencing Evoked object
+    evoked = epochs.average()
+    reref, ref_data = _apply_reference(evoked, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test invalid input
+    raw_np = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
+
+
+@testing.requires_testing_data
+def test_set_eeg_reference():
+    """Test rereference eeg data"""
+    raw = Raw(fif_fname, preload=True)
+    raw.info['projs'] = []
+
+    # Test setting an average reference
+    assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
+    reref, ref_data = set_eeg_reference(raw)
+    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
+    assert_true(ref_data is None)
+
+    # Test setting an average reference when one was already present
+    reref, ref_data = set_eeg_reference(raw, copy=False)
+    assert_true(ref_data is None)
+
+    # Rereference raw data by creating a copy of original data
+    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test that data is modified in place when copy=False
+    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
+                                        copy=False)
+    assert_true(raw is reref)
+
+
+@testing.requires_testing_data
+def test_set_bipolar_reference():
+    """Test bipolar referencing"""
+    raw = Raw(fif_fname, preload=True)
+    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
+                                  {'kind': FIFF.FIFFV_EOG_CH,
+                                   'extra': 'some extra value'})
+    assert_true(reref.info['custom_ref_applied'])
+
+    # Compare result to a manual calculation
+    a = raw.pick_channels(['EEG 001', 'EEG 002'], copy=True)
+    a = a._data[0, :] - a._data[1, :]
+    b = reref.pick_channels(['bipolar'], copy=True)._data[0, :]
+    assert_allclose(a, b)
+
+    # Original channels should be replaced by a virtual one
+    assert_true('EEG 001' not in reref.ch_names)
+    assert_true('EEG 002' not in reref.ch_names)
+    assert_true('bipolar' in reref.ch_names)
+
+    # Check channel information
+    bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
+    an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
+    for key in bp_info:
+        if key == 'loc':
+            assert_array_equal(bp_info[key], 0)
+        elif key == 'coil_type':
+            assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
+        elif key == 'kind':
+            assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
+        else:
+            assert_equal(bp_info[key], an_info[key])
+    assert_equal(bp_info['extra'], 'some extra value')
+
+    # Minimalist call
+    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
+    assert_true('EEG 001-EEG 002' in reref.ch_names)
+
+    # Test creating a bipolar reference that doesn't involve EEG channels:
+    # it should not set the custom_ref_applied flag
+    reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
+                                  ch_info={'kind': FIFF.FIFFV_MEG_CH})
+    assert_true(not reref.info['custom_ref_applied'])
+    assert_true('MEG 0111-MEG 0112' in reref.ch_names)
+
+    # Test a battery of invalid inputs
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', 'bipolar',
+                  ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', ch_name='EEG 003')
+
+
+@testing.requires_testing_data
+def test_add_reference():
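+    """Test adding reference channels to Raw, Epochs and Evoked"""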
+    raw = Raw(fif_fname, preload=True)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    # check if channel already exists
+    assert_raises(ValueError, add_reference_channels,
+                  raw, raw.info['ch_names'][0])
+    # add reference channel to Raw
+    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
+    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
+    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
+
+    orig_nchan = raw.info['nchan']
+    raw = add_reference_channels(raw, 'Ref', copy=False)
+    assert_array_equal(raw._data, raw_ref._data)
+    assert_equal(raw.info['nchan'], orig_nchan + 1)
+
+    ref_idx = raw.ch_names.index('Ref')
+    ref_data, _ = raw[ref_idx]
+    assert_array_equal(ref_data, 0)
+
+    # add two reference channels to Raw
+    raw = Raw(fif_fname, preload=True)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    assert_raises(ValueError, add_reference_channels, raw,
+                  raw.info['ch_names'][0])
+    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
+    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
+    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
+
+    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
+    ref_idx = raw.ch_names.index('M1')
+    ref_idy = raw.ch_names.index('M2')
+    ref_data, _ = raw[[ref_idx, ref_idy]]
+    assert_array_equal(ref_data, 0)
+
+    # add reference channel to epochs
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
+    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
+    ref_idx = epochs_ref.ch_names.index('Ref')
+    ref_data = epochs_ref.get_data()[:, ref_idx, :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
+    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
+                       epochs_ref.get_data()[:, picks_eeg, :])
+
+    # add two reference channels to epochs
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
+    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
+    ref_idx = epochs_ref.ch_names.index('M1')
+    ref_idy = epochs_ref.ch_names.index('M2')
+    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
+    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
+                       epochs_ref.get_data()[:, picks_eeg, :])
+
+    # add reference channel to evoked
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    evoked = epochs.average()
+    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
+    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
+    ref_idx = evoked_ref.ch_names.index('Ref')
+    ref_data = evoked_ref.data[ref_idx, :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
+    assert_array_equal(evoked.data[picks_eeg, :],
+                       evoked_ref.data[picks_eeg, :])
+
+    # add two reference channels to evoked
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    evoked = epochs.average()
+    evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
+    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
+    ref_idx = evoked_ref.ch_names.index('M1')
+    ref_idy = evoked_ref.ch_names.index('M2')
+    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
+    assert_array_equal(evoked.data[picks_eeg, :],
+                       evoked_ref.data[picks_eeg, :])
+
+    # Test invalid inputs
+    raw_np = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
+    assert_raises(ValueError, add_reference_channels, raw, 1)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tree.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tree.py
new file mode 100644
index 0000000..dccfd4e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/tree.py
@@ -0,0 +1,158 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .constants import FIFF
+from .tag import Tag
+from .tag import read_tag
+from .write import write_id, start_block, end_block, _write
+from ..utils import logger, verbose
+
+
+def dir_tree_find(tree, kind):
+    """Find nodes of the given kind from a directory tree structure
+
+    Parameters
+    ----------
+    tree : dict
+        Directory tree.
+    kind : int
+        Kind to find.
+
+    Returns
+    -------
+    nodes : list
+        List of matching nodes.
+    """
+    nodes = []
+
+    if isinstance(tree, list):
+        for t in tree:
+            nodes += dir_tree_find(t, kind)
+    else:
+        #   Am I desirable myself?
+        if tree['block'] == kind:
+            nodes.append(tree)
+
+        #   Search the subtrees
+        for child in tree['children']:
+            nodes += dir_tree_find(child, kind)
+    return nodes
+
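+# Illustrative call (FIFFB_MEAS_INFO is one of the block constants defined in
+# mne.io.constants): info_nodes = dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)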
+
+@verbose
+def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
+    """Create the directory tree structure
+    """
+    FIFF_BLOCK_START = 104
+    FIFF_BLOCK_END = 105
+    FIFF_FILE_ID = 100
+    FIFF_BLOCK_ID = 103
+    FIFF_PARENT_BLOCK_ID = 110
+
+    if directory[start].kind == FIFF_BLOCK_START:
+        tag = read_tag(fid, directory[start].pos)
+        block = tag.data
+    else:
+        block = 0
+
+    logger.debug('    ' * indent + 'start { %d' % block)
+
+    this = start
+
+    tree = dict()
+    tree['block'] = block
+    tree['id'] = None
+    tree['parent_id'] = None
+    tree['nent'] = 0
+    tree['nchild'] = 0
+    tree['directory'] = directory[this]
+    tree['children'] = []
+
+    while this < len(directory):
+        if directory[this].kind == FIFF_BLOCK_START:
+            if this != start:
+                child, this = make_dir_tree(fid, directory, this, indent + 1)
+                tree['nchild'] += 1
+                tree['children'].append(child)
+        elif directory[this].kind == FIFF_BLOCK_END:
+            tag = read_tag(fid, directory[start].pos)
+            if tag.data == block:
+                break
+        else:
+            tree['nent'] += 1
+            if tree['nent'] == 1:
+                tree['directory'] = list()
+            tree['directory'].append(directory[this])
+
+            #  Add the id information if available
+            if block == 0:
+                if directory[this].kind == FIFF_FILE_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['id'] = tag.data
+            else:
+                if directory[this].kind == FIFF_BLOCK_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['id'] = tag.data
+                elif directory[this].kind == FIFF_PARENT_BLOCK_ID:
+                    tag = read_tag(fid, directory[this].pos)
+                    tree['parent_id'] = tag.data
+
+        this += 1
+
+    # Eliminate the empty directory
+    if tree['nent'] == 0:
+        tree['directory'] = None
+
+    logger.debug('    ' * (indent + 1) + 'block = %d nent = %d nchild = %d'
+                 % (tree['block'], tree['nent'], tree['nchild']))
+    logger.debug('    ' * indent + 'end } %d' % block)
+    last = this
+    return tree, last
+
+
+###############################################################################
+# Writing
+
+def copy_tree(fidin, in_id, nodes, fidout):
+    """Copies directory subtrees from fidin to fidout"""
+
+    if len(nodes) <= 0:
+        return
+
+    if not isinstance(nodes, list):
+        nodes = [nodes]
+
+    for node in nodes:
+        start_block(fidout, node['block'])
+        if node['id'] is not None:
+            if in_id is not None:
+                write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id)
+
+            write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id)
+            write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id'])
+
+        if node['directory'] is not None:
+            for d in node['directory']:
+                #   Do not copy these tags
+                if d.kind == FIFF.FIFF_BLOCK_ID or \
+                        d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \
+                        d.kind == FIFF.FIFF_PARENT_FILE_ID:
+                    continue
+
+                #   Read and write tags, pass data through transparently
+                fidin.seek(d.pos, 0)
+
+                s = fidin.read(4 * 4)
+                tag = Tag(*np.fromstring(s, dtype=('>i4,>I4,>i4,>i4'))[0])
+                tag.data = np.fromstring(fidin.read(tag.size), dtype='>B')
+
+                _write(fidout, tag.data, tag.kind, 1, tag.type, '>B')
+
+        for child in node['children']:
+            copy_tree(fidin, in_id, child, fidout)
+
+        end_block(fidout, node['block'])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/write.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/write.py
new file mode 100644
index 0000000..da090fb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/io/write.py
@@ -0,0 +1,395 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six import string_types, b
+import time
+import numpy as np
+from scipy import linalg
+import os.path as op
+import re
+import uuid
+
+from .constants import FIFF
+from ..utils import logger
+from ..externals.jdcal import jcal2jd
+from ..fixes import gzip_open
+
+
+def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
+    if isinstance(data, np.ndarray):
+        data_size *= data.size
+
+    # XXX for string types the data size is used as
+    # computed in ``write_string``.
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_TYPE, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(data, dtype=dtype).tostring())
+
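+# Each tag written by _write is a 16-byte big-endian header (kind, type,
+# data_size, next) followed by the payload; the write_* helpers below differ
+# only in payload dtype and in how data_size is computed.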
+
+def _get_split_size(split_size):
+    """Convert human-readable bytes to machine-readable bytes."""
+    if isinstance(split_size, string_types):
+        exp = dict(MB=20, GB=30).get(split_size[-2:], None)
+        if exp is None:
+            raise ValueError('split_size has to end with either'
+                             '"MB" or "GB"')
+        split_size = int(float(split_size[:-2]) * 2 ** exp)
+
+    if split_size > 2147483648:
+        raise ValueError('split_size cannot be larger than 2GB')
+    return split_size
+
+
+def write_int(fid, kind, data):
+    """Writes a 32-bit integer tag to a fif file"""
+    data_size = 4
+    data = np.array(data, dtype='>i4').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_INT, '>i4')
+
+
+def write_double(fid, kind, data):
+    """Writes a double-precision floating point tag to a fif file"""
+    data_size = 8
+    data = np.array(data, dtype='>f8').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, '>f8')
+
+
+def write_float(fid, kind, data):
+    """Writes a single-precision floating point tag to a fif file"""
+    data_size = 4
+    data = np.array(data, dtype='>f4').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, '>f4')
+
+
+def write_dau_pack16(fid, kind, data):
+    """Writes a dau_pack16 tag to a fif file"""
+    data_size = 2
+    data = np.array(data, dtype='>i2').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, '>i2')
+
+
+def write_complex64(fid, kind, data):
+    """Writes a 64 bit complex floating point tag to a fif file"""
+    data_size = 8
+    data = np.array(data, dtype='>c8').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, '>c8')
+
+
+def write_complex128(fid, kind, data):
+    """Writes a 128 bit complex floating point tag to a fif file"""
+    data_size = 16
+    data = np.array(data, dtype='>c16').T
+    _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_DOUBLE, '>c16')
+
+
+def write_julian(fid, kind, data):
+    """Writes a Julian-formatted date to a FIF file"""
+    assert len(data) == 3
+    data_size = 4
+    jd = np.sum(jcal2jd(*data))
+    data = np.array(jd, dtype='>i4')
+    _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, '>i4')
+
+
+def write_string(fid, kind, data):
+    """Writes a string tag"""
+
+    str_data = data.encode('utf-8')  # Use unicode or bytes depending on Py2/3
+    data_size = len(str_data)  # therefore compute size here
+    my_dtype = '>a'  # py2/3 compatible on writing -- don't ask me why
+    _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, my_dtype)
+
+
+def write_name_list(fid, kind, data):
+    """Writes a colon-separated list of names
+
+    Parameters
+    ----------
+    data : list of strings
+        List of names to write as a single colon-separated string.
+    """
+    write_string(fid, kind, ':'.join(data))
+
+
+def write_float_matrix(fid, kind, mat):
+    """Writes a single-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_FLOAT = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
+
+    data_size = 4 * mat.size + 4 * (mat.ndim + 1)
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_FLOAT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>f4').tostring())
+
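+    # FIFF stores the matrix dimensions after the data: the shape reversed,
+    # with the number of dimensions itself as the final entry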
+    dims = np.empty(mat.ndim + 1, dtype=np.int32)
+    dims[:mat.ndim] = mat.shape[::-1]
+    dims[-1] = mat.ndim
+    fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
+
+
+def write_double_matrix(fid, kind, mat):
+    """Writes a double-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_DOUBLE = FIFF.FIFFT_DOUBLE | FIFFT_MATRIX
+
+    data_size = 8 * mat.size + 4 * (mat.ndim + 1)
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_DOUBLE, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>f8').tostring())
+
+    dims = np.empty(mat.ndim + 1, dtype=np.int32)
+    dims[:mat.ndim] = mat.shape[::-1]
+    dims[-1] = mat.ndim
+    fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
+
+
+def write_int_matrix(fid, kind, mat):
+    """Writes integer 32 matrix tag"""
+    FIFFT_MATRIX = 1 << 30
+    FIFFT_MATRIX_INT = FIFF.FIFFT_INT | FIFFT_MATRIX
+
+    data_size = 4 * mat.size + 4 * 3
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_INT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(mat, dtype='>i4').tostring())
+
+    dims = np.empty(3, dtype=np.int32)
+    dims[0] = mat.shape[1]
+    dims[1] = mat.shape[0]
+    dims[2] = 2
+    fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
+
+
+def get_machid():
+    """Get (mostly) unique machine ID
+
+    Returns
+    -------
+    ids : array (length 2, int32)
+        The machine identifier used in MNE.
+    """
+    mac = b('%012x' % uuid.getnode())  # byte conversion for Py3
+    mac = re.findall(b'..', mac)  # split string
+    mac += [b'00', b'00']  # add two more fields
+
+    # Convert to integer in reverse-order (for some reason)
+    from codecs import encode
+    mac = b''.join([encode(h, 'hex_codec') for h in mac[::-1]])
+    ids = np.flipud(np.fromstring(mac, np.int32, count=2))
+    return ids
+
+
+def write_id(fid, kind, id_=None):
+    """Writes fiff id"""
+    id_ = _generate_meas_id() if id_ is None else id_
+
+    FIFFT_ID_STRUCT = 31
+    FIFFV_NEXT_SEQ = 0
+
+    data_size = 5 * 4                       # The id comprises five integers
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_ID_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    # Collect the bits together for one write
+    data = np.empty(5, dtype=np.int32)
+    data[0] = id_['version']
+    data[1] = id_['machid'][0]
+    data[2] = id_['machid'][1]
+    data[3] = id_['secs']
+    data[4] = id_['usecs']
+    fid.write(np.array(data, dtype='>i4').tostring())
+
+
+def start_block(fid, kind):
+    """Writes a FIFF_BLOCK_START tag"""
+    write_int(fid, FIFF.FIFF_BLOCK_START, kind)
+
+
+def end_block(fid, kind):
+    """Writes a FIFF_BLOCK_END tag"""
+    write_int(fid, FIFF.FIFF_BLOCK_END, kind)
+
+
+def start_file(fname, id_=None):
+    """Opens a fif file for writing and writes the compulsory header tags
+
+    Parameters
+    ----------
+    fname : string | fid
+        The name of the file to open. It is recommended
+        that the name ends with .fif or .fif.gz. Can also be an
+        already opened file.
+    id_ : dict | None
+        ID to use for the FIFF_FILE_ID.
+    """
+    if isinstance(fname, string_types):
+        if op.splitext(fname)[1].lower() == '.gz':
+            logger.debug('Writing using gzip')
+            # defaults to compression level 9, which is barely smaller but much
+            # slower. 2 offers a good compromise.
+            fid = gzip_open(fname, "wb", compresslevel=2)
+        else:
+            logger.debug('Writing using normal I/O')
+            fid = open(fname, "wb")
+    else:
+        logger.debug('Writing using %s I/O' % type(fname))
+        fid = fname
+        fid.seek(0)
+    #   Write the compulsory items
+    write_id(fid, FIFF.FIFF_FILE_ID, id_)
+    write_int(fid, FIFF.FIFF_DIR_POINTER, -1)
+    write_int(fid, FIFF.FIFF_FREE_LIST, -1)
+    return fid
+
+
+def check_fiff_length(fid, close=True):
+    """Ensure our file hasn't grown too large to work properly"""
+    if fid.tell() > 2147483648:  # 2 ** 31, FIFF uses signed 32-bit locations
+        if close:
+            fid.close()
+        raise IOError('FIFF file exceeded 2GB limit, please split file or '
+                      'save to a different format')
+
+
+def end_file(fid):
+    """Writes the closing tags to a fif file and closes the file"""
+    data_size = 0
+    fid.write(np.array(FIFF.FIFF_NOP, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_NONE, dtype='>i4').tostring())
+    check_fiff_length(fid)
+    fid.close()
+
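+# Typical write sequence (illustrative sketch; FIFFB_MEAS and FIFF_NCHAN are
+# existing constants in mne.io.constants):
+#     fid = start_file('test-ave.fif')
+#     start_block(fid, FIFF.FIFFB_MEAS)
+#     write_int(fid, FIFF.FIFF_NCHAN, 306)
+#     end_block(fid, FIFF.FIFFB_MEAS)
+#     end_file(fid)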
+
+def write_coord_trans(fid, trans):
+    """Writes a coordinate transformation structure"""
+    data_size = 4 * 2 * 12 + 4 * 2
+    fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+    fid.write(np.array(trans['from'], dtype='>i4').tostring())
+    fid.write(np.array(trans['to'], dtype='>i4').tostring())
+
+    #   The transform...
+    rot = trans['trans'][:3, :3]
+    move = trans['trans'][:3, 3]
+    fid.write(np.array(rot, dtype='>f4').tostring())
+    fid.write(np.array(move, dtype='>f4').tostring())
+
+    #   ...and its inverse
+    trans_inv = linalg.inv(trans['trans'])
+    rot = trans_inv[:3, :3]
+    move = trans_inv[:3, 3]
+    fid.write(np.array(rot, dtype='>f4').tostring())
+    fid.write(np.array(move, dtype='>f4').tostring())
+
+
+def write_ch_info(fid, ch):
+    """Writes a channel information record to a fif file"""
+    data_size = 4 * 13 + 4 * 7 + 16
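+    # = 20 four-byte fields (scanno..coil_type, the 12 'loc' values, unit and
+    # unit_mul) plus the fixed 16-byte channel name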
+
+    fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    #   Start writing fiffChInfoRec
+    fid.write(np.array(ch['scanno'], dtype='>i4').tostring())
+    fid.write(np.array(ch['logno'], dtype='>i4').tostring())
+    fid.write(np.array(ch['kind'], dtype='>i4').tostring())
+    fid.write(np.array(ch['range'], dtype='>f4').tostring())
+    fid.write(np.array(ch['cal'], dtype='>f4').tostring())
+    fid.write(np.array(ch['coil_type'], dtype='>i4').tostring())
+    fid.write(np.array(ch['loc'], dtype='>f4').tostring())  # writing 12 values
+
+    #   unit and unit multiplier
+    fid.write(np.array(ch['unit'], dtype='>i4').tostring())
+    fid.write(np.array(ch['unit_mul'], dtype='>i4').tostring())
+
+    #   Finally channel name
+    if len(ch['ch_name']):
+        ch_name = ch['ch_name'][:15]
+    else:
+        ch_name = ch['ch_name']
+
+    fid.write(np.array(ch_name, dtype='>c').tostring())
+    if len(ch_name) < 16:
+        fid.write(b('\0') * (16 - len(ch_name)))
+
+
+def write_dig_point(fid, dig):
+    """Writes a digitizer data point into a fif file"""
+    data_size = 5 * 4
+
+    fid.write(np.array(FIFF.FIFF_DIG_POINT, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    #   Start writing fiffDigPointRec
+    fid.write(np.array(dig['kind'], dtype='>i4').tostring())
+    fid.write(np.array(dig['ident'], dtype='>i4').tostring())
+    fid.write(np.array(dig['r'][:3], dtype='>f4').tostring())
+
+
+def write_float_sparse_rcs(fid, kind, mat):
+    """Writes a single-precision floating-point matrix tag"""
+    FIFFT_MATRIX = 16416 << 16
+    FIFFT_MATRIX_FLOAT_RCS = FIFF.FIFFT_FLOAT | FIFFT_MATRIX
+
+    nnzm = mat.nnz
+    nrow = mat.shape[0]
+    data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4
+
+    fid.write(np.array(kind, dtype='>i4').tostring())
+    fid.write(np.array(FIFFT_MATRIX_FLOAT_RCS, dtype='>i4').tostring())
+    fid.write(np.array(data_size, dtype='>i4').tostring())
+    fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype='>i4').tostring())
+
+    fid.write(np.array(mat.data, dtype='>f4').tostring())
+    fid.write(np.array(mat.indices, dtype='>i4').tostring())
+    fid.write(np.array(mat.indptr, dtype='>i4').tostring())
+
+    dims = [nnzm, mat.shape[0], mat.shape[1], 2]
+    fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
+
+
+def _generate_meas_id():
+    """Helper to generate a new meas_id dict"""
+    id_ = dict()
+    id_['version'] = (1 << 16) | 2
+    id_['machid'] = get_machid()
+    id_['secs'], id_['usecs'] = _date_now()
+    return id_
+
+
+def _date_now():
+    """Helper to get date in secs, usecs"""
+    now = time.time()
+    # Get date in secs/usecs (as in `fill_measurement_info` in
+    # mne/forward/forward.py)
+    date_arr = np.array([np.floor(now), 1e6 * (now - np.floor(now))],
+                        dtype='int32')
+
+    return date_arr
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/label.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/label.py
new file mode 100644
index 0000000..8452a31
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/label.py
@@ -0,0 +1,1961 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+from collections import defaultdict
+from colorsys import hsv_to_rgb, rgb_to_hsv
+from os import path as op
+import os
+import copy as cp
+import re
+
+import numpy as np
+from scipy import linalg, sparse
+
+from .fixes import digitize, in1d
+from .utils import get_subjects_dir, _check_subject, logger, verbose
+from .source_estimate import (morph_data, SourceEstimate,
+                              spatial_src_connectivity)
+from .source_space import add_source_space_distances
+from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
+from .source_space import SourceSpaces
+from .parallel import parallel_func, check_n_jobs
+from .stats.cluster_level import _find_clusters
+from .externals.six import b, string_types
+from .externals.six.moves import zip, xrange
+
+
+def _blend_colors(color_1, color_2):
+    """Blend two colors in HSV space
+
+    Parameters
+    ----------
+    color_1, color_2 : None | tuple
+        RGBA tuples with values between 0 and 1. None if no color is available.
+        If both colors are None, the output is None. If only one is None, the
+        output is the other color.
+
+    Returns
+    -------
+    color : None | tuple
+        RGBA tuple of the combined color. Saturation, value and alpha are
+        averaged, whereas the new hue is the angle halfway between
+        the two input colors' hues.
+    """
+    if color_1 is None and color_2 is None:
+        return None
+    elif color_1 is None:
+        return color_2
+    elif color_2 is None:
+        return color_1
+
+    r_1, g_1, b_1, a_1 = color_1
+    h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
+    r_2, g_2, b_2, a_2 = color_2
+    h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
+    hue_diff = abs(h_1 - h_2)
+    if hue_diff < 0.5:
+        h = min(h_1, h_2) + hue_diff / 2.
+    else:
+        h = max(h_1, h_2) + (1. - hue_diff) / 2.
+        h %= 1.
+    s = (s_1 + s_2) / 2.
+    v = (v_1 + v_2) / 2.
+    r, g, b = hsv_to_rgb(h, s, v)
+    a = (a_1 + a_2) / 2.
+    color = (r, g, b, a)
+    return color
+
+
+def _split_colors(color, n):
+    """Create n colors in HSV space that occupy a gradient in value
+
+    Parameters
+    ----------
+    color : tuple
+        RGBA tuple with values between 0 and 1.
+    n : int >= 2
+        Number of colors on the gradient.
+
+    Returns
+    -------
+    colors : tuple of tuples, len = n
+        N RGBA tuples that occupy a gradient in value (low to high) but share
+        saturation and hue with the input color.
+    """
+    r, g, b, a = color
+    h, s, v = rgb_to_hsv(r, g, b)
+    gradient_range = np.sqrt(n / 10.)
+    if v > 0.5:
+        v_max = min(0.95, v + gradient_range / 2)
+        v_min = max(0.05, v_max - gradient_range)
+    else:
+        v_min = max(0.05, v - gradient_range / 2)
+        v_max = min(0.95, v_min + gradient_range)
+
+    hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
+    rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
+    rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
+    return tuple(rgba_colors)
+
+
+def _n_colors(n, bytes_=False, cmap='hsv'):
+    """Produce a list of n unique RGBA color tuples based on a colormap
+
+    Parameters
+    ----------
+    n : int
+        Number of colors.
+    bytes_ : bool
+        Return colors as integers values between 0 and 255 (instead of floats
+        between 0 and 1).
+    cmap : str
+        Which colormap to use.
+
+    Returns
+    -------
+    colors : array, shape (n, 4)
+        RGBA color values.
+    """
+    n_max = 2 ** 10
+    if n > n_max:
+        raise NotImplementedError("Can't produce more than %i unique "
+                                  "colors" % n_max)
+
+    from matplotlib.cm import get_cmap
+    cm = get_cmap(cmap, n_max)
+    pos = np.linspace(0, 1, n, False)
+    colors = cm(pos, bytes=bytes_)
+    if bytes_:
+        # make sure colors are unique
+        for ii, c in enumerate(colors):
+            if np.any(np.all(colors[:ii] == c, 1)):
+                raise RuntimeError('Could not get %d unique colors from %s '
+                                   'colormap. Try using a different colormap.'
+                                   % (n, cmap))
+    return colors
+
+
+class Label(object):
+    """A FreeSurfer/MNE label with vertices restricted to one hemisphere
+
+    Labels can be combined with the ``+`` operator:
+
+        * Duplicate vertices are removed.
+        * If duplicate vertices have conflicting position values, an error
+          is raised.
+        * Values of duplicate vertices are summed.
+
+
+    Parameters
+    ----------
+    vertices : array (length N)
+        vertex indices (0 based).
+    pos : array (N by 3) | None
+        locations in meters. If None, then zeros are used.
+    values : array (length N) | None
+        values at the vertices. If None, then ones are used.
+    hemi : 'lh' | 'rh'
+        Hemisphere to which the label applies.
+    comment : str
+        Kept as information but not used by the object itself.
+    name : str
+        Kept as information but not used by the object itself.
+    filename : str
+        Kept as information but not used by the object itself.
+    subject : str | None
+        Name of the subject the label is from.
+    color : None | matplotlib color
+        Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    color : None | tuple
+        Default label color, represented as RGBA tuple with values between 0
+        and 1.
+    comment : str
+        Comment from the first line of the label file.
+    hemi : 'lh' | 'rh'
+        Hemisphere.
+    name : None | str
+        A name for the label. It is OK to change that attribute manually.
+    pos : array, shape = (n_pos, 3)
+        Locations in meters.
+    subject : str | None
+        Subject name. It is best practice to set this to the proper
+        value on initialization, but it can also be set manually.
+    values : array, len = n_pos
+        Values at the vertices.
+    verbose : bool, str, int, or None
+        See above.
+    vertices : array, len = n_pos
+        Vertex indices (0 based).
+    """
+    @verbose
+    def __init__(self, vertices, pos=None, values=None, hemi=None, comment="",
+                 name=None, filename=None, subject=None, color=None,
+                 verbose=None):
+        # check parameters
+        if not isinstance(hemi, string_types):
+            raise ValueError('hemi must be a string, not %s' % type(hemi))
+        vertices = np.asarray(vertices)
+        if np.any(np.diff(vertices.astype(int)) <= 0):
+            raise ValueError('Vertices must be in strictly increasing order.')
+
+        if color is not None:
+            from matplotlib.colors import colorConverter
+            color = colorConverter.to_rgba(color)
+
+        if values is None:
+            values = np.ones(len(vertices))
+        else:
+            values = np.asarray(values)
+
+        if pos is None:
+            pos = np.zeros((len(vertices), 3))
+        else:
+            pos = np.asarray(pos)
+
+        if not (len(vertices) == len(values) == len(pos)):
+            raise ValueError("vertices, values and pos need to have same "
+                             "length (number of vertices)")
+
+        # name
+        if name is None and filename is not None:
+            name = op.basename(filename[:-6])
+
+        self.vertices = vertices
+        self.pos = pos
+        self.values = values
+        self.hemi = hemi
+        self.comment = comment
+        self.verbose = verbose
+        self.subject = _check_subject(None, subject, False)
+        self.color = color
+        self.name = name
+        self.filename = filename
+
+    def __setstate__(self, state):
+        self.vertices = state['vertices']
+        self.pos = state['pos']
+        self.values = state['values']
+        self.hemi = state['hemi']
+        self.comment = state['comment']
+        self.verbose = state['verbose']
+        self.subject = state.get('subject', None)
+        self.color = state.get('color', None)
+        self.name = state['name']
+        self.filename = state['filename']
+
+    def __getstate__(self):
+        out = dict(vertices=self.vertices,
+                   pos=self.pos,
+                   values=self.values,
+                   hemi=self.hemi,
+                   comment=self.comment,
+                   verbose=self.verbose,
+                   subject=self.subject,
+                   color=self.color,
+                   name=self.name,
+                   filename=self.filename)
+        return out
+
+    def __repr__(self):
+        name = 'unknown, ' if self.subject is None else self.subject + ', '
+        name += repr(self.name) if self.name is not None else "unnamed"
+        n_vert = len(self)
+        return "<Label  |  %s, %s : %i vertices>" % (name, self.hemi, n_vert)
+
+    def __len__(self):
+        return len(self.vertices)
+
+    def __add__(self, other):
+        if isinstance(other, BiHemiLabel):
+            return other + self
+        elif isinstance(other, Label):
+            if self.subject != other.subject:
+                raise ValueError('Label subject parameters must match, got '
+                                 '"%s" and "%s". Consider setting the '
+                                 'subject parameter on initialization, or '
+                                 'setting label.subject manually before '
+                                 'combining labels.' % (self.subject,
+                                                        other.subject))
+            if self.hemi != other.hemi:
+                name = '%s + %s' % (self.name, other.name)
+                if self.hemi == 'lh':
+                    lh, rh = self.copy(), other.copy()
+                else:
+                    lh, rh = other.copy(), self.copy()
+                color = _blend_colors(self.color, other.color)
+                return BiHemiLabel(lh, rh, name, color)
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        # check for overlap
+        duplicates = np.intersect1d(self.vertices, other.vertices)
+        n_dup = len(duplicates)
+        if n_dup:
+            self_dup = [np.where(self.vertices == d)[0][0]
+                        for d in duplicates]
+            other_dup = [np.where(other.vertices == d)[0][0]
+                         for d in duplicates]
+            if not np.all(self.pos[self_dup] == other.pos[other_dup]):
+                err = ("Labels %r and %r: vertices overlap but differ in "
+                       "position values" % (self.name, other.name))
+                raise ValueError(err)
+
+            isnew = np.array([v not in duplicates for v in other.vertices])
+
+            vertices = np.hstack((self.vertices, other.vertices[isnew]))
+            pos = np.vstack((self.pos, other.pos[isnew]))
+
+            # find position of other's vertices in new array
+            tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
+            n_self = len(self.values)
+            n_other = len(other.values)
+            new_len = n_self + n_other - n_dup
+            values = np.zeros(new_len, dtype=self.values.dtype)
+            values[:n_self] += self.values
+            values[tgt_idx] += other.values
+        else:
+            vertices = np.hstack((self.vertices, other.vertices))
+            pos = np.vstack((self.pos, other.pos))
+            values = np.hstack((self.values, other.values))
+
+        indcs = np.argsort(vertices)
+        vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
+
+        comment = "%s + %s" % (self.comment, other.comment)
+
+        name0 = self.name if self.name else 'unnamed'
+        name1 = other.name if other.name else 'unnamed'
+        name = "%s + %s" % (name0, name1)
+
+        color = _blend_colors(self.color, other.color)
+        verbose = self.verbose or other.verbose
+
+        label = Label(vertices, pos, values, self.hemi, comment, name, None,
+                      self.subject, color, verbose)
+        return label
+
+    def __sub__(self, other):
+        if isinstance(other, BiHemiLabel):
+            if self.hemi == 'lh':
+                return self - other.lh
+            else:
+                return self - other.rh
+        elif isinstance(other, Label):
+            if self.subject != other.subject:
+                raise ValueError('Label subject parameters must match, got '
+                                 '"%s" and "%s". Consider setting the '
+                                 'subject parameter on initialization, or '
+                                 'setting label.subject manually before '
+                                 'combining labels.' % (self.subject,
+                                                        other.subject))
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        if self.hemi == other.hemi:
+            keep = in1d(self.vertices, other.vertices, True, invert=True)
+        else:
+            keep = np.arange(len(self.vertices))
+
+        name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
+        return Label(self.vertices[keep], self.pos[keep], self.values[keep],
+                     self.hemi, self.comment, name, None, self.subject,
+                     self.color, self.verbose)
+
+    def save(self, filename):
+        """Write to disk as FreeSurfer \*.label file
+
+        Parameters
+        ----------
+        filename : string
+            Path to label file to produce.
+
+        Notes
+        -----
+        Note that due to file specification limitations, the Label's subject
+        and color attributes are not saved to disk.
+        """
+        write_label(filename, self)
+
+    def copy(self):
+        """Copy the label instance.
+
+        Returns
+        -------
+        label : instance of Label
+            The copied label.
+        """
+        return cp.deepcopy(self)
+
+    def fill(self, src, name=None):
+        """Fill the surface between sources for a label defined in source space
+
+        Parameters
+        ----------
+        src : SourceSpaces
+            Source space in which the label was defined. If a source space is
+            provided, the label is expanded to fill in surface vertices that
+            lie between the vertices included in the source space. For the
+            added vertices, ``pos`` is filled in with positions from the
+            source space, and ``values`` is filled in from the closest source
+            space vertex.
+        name : None | str
+            Name for the new Label (default is self.name).
+
+        Returns
+        -------
+        label : Label
+            The label covering the same vertices in source space but also
+            including intermediate surface vertices.
+        """
+        # find source space patch info
+        if self.hemi == 'lh':
+            hemi_src = src[0]
+        elif self.hemi == 'rh':
+            hemi_src = src[1]
+        else:
+            raise ValueError("label hemi must be 'lh' or 'rh', got %r"
+                             % (self.hemi,))
+
+        if not np.all(in1d(self.vertices, hemi_src['vertno'])):
+            msg = "Source space does not contain all of the label's vertices"
+            raise ValueError(msg)
+
+        nearest = hemi_src['nearest']
+        if nearest is None:
+            logger.warning("Computing patch info for source space, this can "
+                           "take a while. In order to avoid this in the "
+                           "future, run mne.add_source_space_distances() on "
+                           "the source space and save it.")
+            add_source_space_distances(src)
+            nearest = hemi_src['nearest']
+
+        # find new vertices
+        include = in1d(nearest, self.vertices, False)
+        vertices = np.nonzero(include)[0]
+
+        # values
+        nearest_in_label = digitize(nearest[vertices], self.vertices, True)
+        values = self.values[nearest_in_label]
+        # pos
+        pos = hemi_src['rr'][vertices]
+
+        if name is None:
+            name = self.name
+        label = Label(vertices, pos, values, self.hemi, self.comment, name,
+                      None, self.subject, self.color)
+        return label
+
+    @verbose
+    def smooth(self, subject=None, smooth=2, grade=None,
+               subjects_dir=None, n_jobs=1, copy=True, verbose=None):
+        """Smooth the label
+
+        Useful for filling in labels made in a
+        decimated source space for display.
+
+        Parameters
+        ----------
+        subject : str | None
+            The name of the subject used. If None, the value will be
+            taken from self.subject.
+        smooth : int
+            Number of iterations for the smoothing of the surface data.
+            Cannot be None here since not all vertices are used. For a
+            grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
+            label.
+        grade : int, list (of two arrays), array, or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            values will be morphed to the set of vertices specified in grade[0]
+            and grade[1], assuming that these are vertices for the left and
+            right hemispheres. Note that specifying the vertices (e.g.,
+            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. If one array is used, it is assumed
+            that all vertices belong to the hemisphere of the label. To create
+            a label filling the surface, use None.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        copy : bool
+            If False, smoothing is done in-place.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        label : instance of Label
+            The smoothed label.
+
+        Notes
+        -----
+        This function will set label.pos to be all zeros. If the positions
+        on the new surface are required, consider using mne.read_surface
+        with label.vertices.
+        """
+        subject = _check_subject(self.subject, subject)
+        return self.morph(subject, subject, smooth, grade, subjects_dir,
+                          n_jobs, copy)
+
+    @verbose
+    def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
+              subjects_dir=None, n_jobs=1, copy=True, verbose=None):
+        """Morph the label
+
+        Useful for transforming a label from one subject to another.
+
+        Parameters
+        ----------
+        subject_from : str | None
+            The name of the subject of the current label. If None, the
+            initial subject will be taken from self.subject.
+        subject_to : str
+            The name of the subject to morph the label to. This will
+            be put in label.subject of the output label file.
+        smooth : int
+            Number of iterations for the smoothing of the surface data.
+            Cannot be None here since not all vertices are used.
+        grade : int, list (of two arrays), array, or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            values will be morphed to the set of vertices specified in grade[0]
+            and grade[1], assuming that these are vertices for the left and
+            right hemispheres. Note that specifying the vertices (e.g.,
+            ``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. If one array is used, it is assumed
+            that all vertices belong to the hemisphere of the label. To create
+            a label filling the surface, use None.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        copy : bool
+            If False, the morphing is done in-place.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        label : instance of Label
+            The morphed label.
+
+        Notes
+        -----
+        This function will set label.pos to be all zeros. If the positions
+        on the new surface are required, consider using ``mne.read_surface``
+        with ``label.vertices``.
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        if not isinstance(subject_to, string_types):
+            raise TypeError('"subject_to" must be entered as a string')
+        if not isinstance(smooth, int):
+            raise TypeError('smooth must be an integer')
+        if np.all(self.values == 0):
+            raise ValueError('Morphing label with all zero values will result '
+                             'in the label having no vertices. Consider using '
+                             'something like label.values.fill(1.0).')
+        if isinstance(grade, np.ndarray):
+            if self.hemi == 'lh':
+                grade = [grade, np.array([], int)]
+            else:
+                grade = [np.array([], int), grade]
+        if self.hemi == 'lh':
+            vertices = [self.vertices, np.array([], int)]
+        else:
+            vertices = [np.array([], int), self.vertices]
+        data = self.values[:, np.newaxis]
+        stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
+                             subject=subject_from)
+        stc = morph_data(subject_from, subject_to, stc, grade=grade,
+                         smooth=smooth, subjects_dir=subjects_dir,
+                         warn=False, n_jobs=n_jobs)
+        inds = np.nonzero(stc.data)[0]
+        if copy is True:
+            label = self.copy()
+        else:
+            label = self
+        label.values = stc.data[inds, :].ravel()
+        label.pos = np.zeros((len(inds), 3))
+        if label.hemi == 'lh':
+            label.vertices = stc.vertices[0][inds]
+        else:
+            label.vertices = stc.vertices[1][inds]
+        label.subject = subject_to
+        return label
+
+    def split(self, parts=2, subject=None, subjects_dir=None,
+              freesurfer=False):
+        """Split the Label into two or more parts
+
+        Parameters
+        ----------
+        parts : int >= 2 | tuple of str
+            A sequence of strings specifying label names for the new labels
+            (from posterior to anterior), or the number of new labels to create
+            (default is 2). If a number is specified, names of the new labels
+            will be the input label's name with div1, div2 etc. appended.
+        subject : None | str
+            Subject which this label belongs to (needed to locate surface file;
+            should only be specified if it is not specified in the label).
+        subjects_dir : None | str
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        freesurfer : bool
+            By default (``False``) ``split_label`` uses an algorithm that is
+            slightly optimized for performance and numerical precision. Set
+            ``freesurfer`` to ``True`` in order to replicate label splits from
+            FreeSurfer's ``mris_divide_parcellation``.
+
+        Returns
+        -------
+        labels : list of Label (len = n_parts)
+            The labels, starting from the lowest to the highest end of the
+            projection axis.
+
+        Notes
+        -----
+        Works by finding the label's principal eigen-axis on the spherical
+        surface, projecting all label vertex coordinates onto this axis and
+        dividing them at regular spatial intervals.
+        """
+        return split_label(self, parts, subject, subjects_dir, freesurfer)
+
+    def get_vertices_used(self, vertices=None):
+        """Get the source space's vertices inside the label
+
+        Parameters
+        ----------
+        vertices : ndarray of int, shape (n_vertices,) | None
+            The set of vertices to compare the label to. If None,
+            ``np.arange(10242)`` is used. Defaults to None.
+
+        Returns
+        -------
+        label_verts : ndarray of int, shape (n_label_vertices,)
+            The vertices of the label that are used by the data.
+        """
+        if vertices is None:
+            vertices = np.arange(10242)
+
+        label_verts = vertices[in1d(vertices, self.vertices)]
+        return label_verts
+
+    def get_tris(self, tris, vertices=None):
+        """Get the source space's triangles inside the label
+
+        Parameters
+        ----------
+        tris : ndarray of int, shape (n_tris, 3)
+            The set of triangles corresponding to the vertices in a
+            source space.
+        vertices : ndarray of int, shape (n_vertices,) | None
+            The set of vertices to compare the label to. If None,
+            ``np.arange(10242)`` is used. Defaults to None.
+
+        Returns
+        -------
+        label_tris : ndarray of int, shape (n_tris, 3)
+            The subset of tris used by the label.
+        """
+        vertices_ = self.get_vertices_used(vertices)
+        selection = np.all(in1d(tris, vertices_).reshape(tris.shape),
+                           axis=1)
+        label_tris = tris[selection]
+        if len(np.unique(label_tris)) < len(vertices_):
+            logger.info('Surprising label structure. Trying to repair '
+                        'triangles.')
+            dropped_vertices = np.setdiff1d(vertices_, label_tris)
+            n_dropped = len(dropped_vertices)
+            assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
+
+            # put missing vertices in as degenerate single-vertex triangles
+            add_tris = (dropped_vertices +
+                        np.zeros((len(dropped_vertices), 3), dtype=int).T)
+
+            label_tris = np.r_[label_tris, add_tris.T]
+            assert len(np.unique(label_tris)) == len(vertices_)
+
+        return label_tris
+
+
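+# Editor's note: a minimal usage sketch (illustration only, not part of the
+# library API). Two toy labels on the same hemisphere and subject are
+# combined with ``+``; vertex numbers are arbitrary, positions default to
+# zeros, and the shared vertex 3 has its values summed as documented above.
+def _example_label_add():
+    a = Label([1, 2, 3], hemi='lh', subject='sample')
+    b = Label([3, 4], hemi='lh', subject='sample')
+    return a + b  # vertices [1, 2, 3, 4]; value at vertex 3 is 1 + 1 = 2
+
+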
+class BiHemiLabel(object):
+    """A freesurfer/MNE label with vertices in both hemispheres
+
+    Parameters
+    ----------
+    lh : Label
+        Label for the left hemisphere.
+    rh : Label
+        Label for the right hemisphere.
+    name : None | str
+        Name for the label.
+    color : None | matplotlib color
+        Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
+        Note that due to file specification limitations, the color isn't saved
+        to or loaded from files written to disk.
+
+    Attributes
+    ----------
+    lh : Label
+        Label for the left hemisphere.
+    rh : Label
+        Label for the right hemisphere.
+    name : None | str
+        A name for the label. It is OK to change that attribute manually.
+    subject : str | None
+        Subject the label is from.
+    """
+
+    def __init__(self, lh, rh, name=None, color=None):
+        if lh.subject != rh.subject:
+            raise ValueError('lh.subject (%s) and rh.subject (%s) must '
+                             'agree' % (lh.subject, rh.subject))
+        self.lh = lh
+        self.rh = rh
+        self.name = name
+        self.subject = lh.subject
+        self.color = color
+        self.hemi = 'both'
+
+    def __repr__(self):
+        temp = "<BiHemiLabel  |  %s, lh : %i vertices,  rh : %i vertices>"
+        name = 'unknown, ' if self.subject is None else self.subject + ', '
+        name += repr(self.name) if self.name is not None else "unnamed"
+        return temp % (name, len(self.lh), len(self.rh))
+
+    def __len__(self):
+        return len(self.lh) + len(self.rh)
+
+    def __add__(self, other):
+        if isinstance(other, Label):
+            if other.hemi == 'lh':
+                lh = self.lh + other
+                rh = self.rh
+            else:
+                lh = self.lh
+                rh = self.rh + other
+        elif isinstance(other, BiHemiLabel):
+            lh = self.lh + other.lh
+            rh = self.rh + other.rh
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        name = '%s + %s' % (self.name, other.name)
+        color = _blend_colors(self.color, other.color)
+        return BiHemiLabel(lh, rh, name, color)
+
+    def __sub__(self, other):
+        if isinstance(other, Label):
+            if other.hemi == 'lh':
+                lh = self.lh - other
+                rh = self.rh
+            else:
+                rh = self.rh - other
+                lh = self.lh
+        elif isinstance(other, BiHemiLabel):
+            lh = self.lh - other.lh
+            rh = self.rh - other.rh
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        if len(lh.vertices) == 0:
+            return rh
+        elif len(rh.vertices) == 0:
+            return lh
+        else:
+            name = '%s - %s' % (self.name, other.name)
+            return BiHemiLabel(lh, rh, name, self.color)
+
+
+def read_label(filename, subject=None, color=None):
+    """Read FreeSurfer Label file
+
+    Parameters
+    ----------
+    filename : string
+        Path to label file.
+    subject : str | None
+        Name of the subject the data are defined for.
+        It is good practice to set this attribute to avoid combining
+        incompatible labels and SourceEstimates (e.g., ones from other
+        subjects). Note that due to file specification limitations, the
+        subject name isn't saved to or loaded from files written to disk.
+    color : None | matplotlib color
+        Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
+        Note that due to file specification limitations, the color isn't saved
+        to or loaded from files written to disk.
+
+    Returns
+    -------
+    label : Label
+        Instance of Label object with attributes:
+
+            - ``comment``: comment from the first line of the label file
+            - ``vertices``: vertex indices (0 based, column 1)
+            - ``pos``: locations in meters (columns 2 - 4 divided by 1000)
+            - ``values``: values at the vertices (column 5)
+
+    See Also
+    --------
+    read_labels_from_annot
+    """
+    if subject is not None and not isinstance(subject, string_types):
+        raise TypeError('subject must be a string')
+
+    # find hemi
+    basename = op.basename(filename)
+    if basename.endswith('lh.label') or basename.startswith('lh.'):
+        hemi = 'lh'
+    elif basename.endswith('rh.label') or basename.startswith('rh.'):
+        hemi = 'rh'
+    else:
+        raise ValueError('Cannot determine the hemisphere from the filename; '
+                         'it should start with "lh." or "rh.", or end with '
+                         '"lh.label" or "rh.label".')
+
+    # find name
+    if basename.startswith(('lh.', 'rh.')):
+        basename_ = basename[3:]
+        if basename_.endswith('.label'):
+            basename_ = basename_[:-6]
+    else:
+        basename_ = basename[:-9]
+    name = "%s-%s" % (basename_, hemi)
+
+    # read the file
+    with open(filename, 'r') as fid:
+        comment = fid.readline().replace('\n', '')[1:]
+        nv = int(fid.readline())
+        data = np.empty((5, nv))
+        for i, line in enumerate(fid):
+            data[:, i] = line.split()
+
+    # let's make sure everything is ordered correctly
+    vertices = np.array(data[0], dtype=np.int32)
+    pos = 1e-3 * data[1:4].T
+    values = data[4]
+    order = np.argsort(vertices)
+    vertices = vertices[order]
+    pos = pos[order]
+    values = values[order]
+
+    label = Label(vertices, pos, values, hemi, comment, name, filename,
+                  subject, color)
+
+    return label
+
+
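+# Editor's note: a minimal usage sketch (illustration only; the file name
+# and subject are hypothetical). A round trip through read_label and
+# Label.save: the hemisphere is inferred from the 'lh.'/'rh.' prefix or
+# the '*lh.label'/'*rh.label' suffix, and subject and color are not stored
+# in the file format itself.
+def _example_label_io():
+    label = read_label('lh.BA1.label', subject='sample')
+    label.save('lh.BA1-copy.label')
+    return label
+
+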
+@verbose
+def write_label(filename, label, verbose=None):
+    """Write a FreeSurfer label
+
+    Parameters
+    ----------
+    filename : string
+        Path to label file to produce.
+    label : Label
+        The label object to save.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Note that due to file specification limitations, the Label's subject and
+    color attributes are not saved to disk.
+
+    See Also
+    --------
+    write_labels_to_annot
+    """
+    hemi = label.hemi
+    path_head, name = op.split(filename)
+    if name.endswith('.label'):
+        name = name[:-6]
+    if not (name.startswith(hemi) or name.endswith(hemi)):
+        name += '-' + hemi
+    filename = op.join(path_head, name) + '.label'
+
+    logger.info('Saving label to : %s' % filename)
+
+    with open(filename, 'wb') as fid:
+        n_vertices = len(label.vertices)
+        data = np.zeros((n_vertices, 5), dtype=float)
+        data[:, 0] = label.vertices
+        data[:, 1:4] = 1e3 * label.pos
+        data[:, 4] = label.values
+        fid.write(b("#%s\n" % label.comment))
+        fid.write(b("%d\n" % n_vertices))
+        for d in data:
+            fid.write(b("%d %f %f %f %f\n" % tuple(d)))
+    return label
+
+
+def split_label(label, parts=2, subject=None, subjects_dir=None,
+                freesurfer=False):
+    """Split a Label into two or more parts
+
+    Parameters
+    ----------
+    label : Label | str
+        Label which is to be split (Label object or path to a label file).
+    parts : int >= 2 | tuple of str
+        A sequence of strings specifying label names for the new labels (from
+        posterior to anterior), or the number of new labels to create (default
+        is 2). If a number is specified, names of the new labels will be the
+        input label's name with div1, div2 etc. appended.
+    subject : None | str
+        Subject which this label belongs to (needed to locate surface file;
+        should only be specified if it is not specified in the label).
+    subjects_dir : None | str
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    freesurfer : bool
+        By default (``False``) ``split_label`` uses an algorithm that is
+        slightly optimized for performance and numerical precision. Set
+        ``freesurfer`` to ``True`` in order to replicate label splits from
+        FreeSurfer's ``mris_divide_parcellation``.
+
+    Returns
+    -------
+    labels : list of Label (len = n_parts)
+        The labels, starting from the lowest to the highest end of the
+        projection axis.
+
+    Notes
+    -----
+    Works by finding the label's principal eigen-axis on the spherical surface,
+    projecting all label vertex coordinates onto this axis and dividing them at
+    regular spatial intervals.
+    """
+    # find the label
+    if isinstance(label, BiHemiLabel):
+        raise TypeError("Can only split labels restricted to one hemisphere.")
+    elif isinstance(label, string_types):
+        label = read_label(label)
+
+    # find the parts
+    if np.isscalar(parts):
+        n_parts = int(parts)
+        if label.name.endswith(('lh', 'rh')):
+            basename = label.name[:-3]
+            name_ext = label.name[-3:]
+        else:
+            basename = label.name
+            name_ext = ''
+        name_pattern = "%s_div%%i%s" % (basename, name_ext)
+        names = tuple(name_pattern % i for i in range(1, n_parts + 1))
+    else:
+        names = parts
+        n_parts = len(names)
+
+    if n_parts < 2:
+        raise ValueError("Can't split label into %i parts" % n_parts)
+
+    # find the subject
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if label.subject is None and subject is None:
+        raise ValueError("The subject needs to be specified.")
+    elif subject is None:
+        subject = label.subject
+    elif label.subject is None:
+        pass
+    elif subject != label.subject:
+        raise ValueError("The label specifies a different subject (%r) from "
+                         "the subject parameter (%r)."
+                         % label.subject, subject)
+
+    # find the spherical surface
+    surf_fname = '.'.join((label.hemi, 'sphere'))
+    surf_path = op.join(subjects_dir, subject, "surf", surf_fname)
+    surface_points, surface_tris = read_surface(surf_path)
+    # find the label coordinates on the surface
+    points = surface_points[label.vertices]
+    center = np.mean(points, axis=0)
+    centered_points = points - center
+
+    # find the label's normal
+    if freesurfer:
+        # find the Freesurfer vertex closest to the center
+        distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
+        i_closest = np.argmin(distance)
+        closest_vertex = label.vertices[i_closest]
+        # find the normal according to freesurfer convention
+        idx = np.any(surface_tris == closest_vertex, axis=1)
+        tris_for_normal = surface_tris[idx]
+        r1 = surface_points[tris_for_normal[:, 0], :]
+        r2 = surface_points[tris_for_normal[:, 1], :]
+        r3 = surface_points[tris_for_normal[:, 2], :]
+        tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
+        normal = np.mean(tri_normals, axis=0)
+        normal /= linalg.norm(normal)
+    else:
+        # Normal of the center
+        normal = center / linalg.norm(center)
+
+    # project all vertex coordinates on the tangential plane for this point
+    q, _ = linalg.qr(normal[:, np.newaxis])
+    tangent_u = q[:, 1:]
+    m_obs = np.dot(centered_points, tangent_u)
+    # find principal eigendirection
+    m_cov = np.dot(m_obs.T, m_obs)
+    w, vr = linalg.eig(m_cov)
+    i = np.argmax(w)
+    eigendir = vr[:, i]
+    # project back into 3d space
+    axis = np.dot(tangent_u, eigendir)
+    # orient them from posterior to anterior
+    if axis[1] < 0:
+        axis *= -1
+
+    # project the label on the axis
+    proj = np.dot(points, axis)
+
+    # assign mark (new label index)
+    proj -= proj.min()
+    proj /= (proj.max() / n_parts)
+    mark = proj // 1
+    mark[mark == n_parts] = n_parts - 1
+
+    # colors
+    if label.color is None:
+        colors = (None,) * n_parts
+    else:
+        colors = _split_colors(label.color, n_parts)
+
+    # construct new labels
+    labels = []
+    for i, name, color in zip(range(n_parts), names, colors):
+        idx = (mark == i)
+        vert = label.vertices[idx]
+        pos = label.pos[idx]
+        values = label.values[idx]
+        hemi = label.hemi
+        comment = label.comment
+        lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
+                    color)
+        labels.append(lbl)
+
+    return labels
+
+
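+# Editor's note: a minimal usage sketch (illustration only; file and subject
+# names are hypothetical). split_label() divides a label into named parts
+# along its principal axis; it needs the subject's ?h.sphere surface, so
+# SUBJECTS_DIR must point at a valid FreeSurfer subjects directory.
+def _example_split_label():
+    label = read_label('lh.BA1.label', subject='sample')
+    return split_label(label, parts=('BA1_post-lh', 'BA1_mid-lh',
+                                     'BA1_ant-lh'), subject='sample')
+
+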
+def label_sign_flip(label, src):
+    """Compute sign for label averaging
+
+    Parameters
+    ----------
+    label : Label
+        A label.
+    src : list of dict
+        The source space over which the label is defined.
+
+    Returns
+    -------
+    flip : array
+        Sign flip vector (contains 1 or -1)
+    """
+    if len(src) != 2:
+        raise ValueError('Only source spaces with 2 hemispheres are accepted')
+
+    lh_vertno = src[0]['vertno']
+    rh_vertno = src[1]['vertno']
+
+    # get source orientations
+    if label.hemi == 'lh':
+        vertno_sel = np.intersect1d(lh_vertno, label.vertices)
+        if len(vertno_sel) == 0:
+            return np.array([], int)
+        ori = src[0]['nn'][vertno_sel]
+    elif label.hemi == 'rh':
+        vertno_sel = np.intersect1d(rh_vertno, label.vertices)
+        if len(vertno_sel) == 0:
+            return np.array([], int)
+        ori = src[1]['nn'][vertno_sel]
+    else:
+        raise ValueError("Unknown hemisphere type %r" % (label.hemi,))
+
+    _, _, Vh = linalg.svd(ori, full_matrices=False)
+
+    # Comparing to the direction of the first right singular vector (the
+    # rows of Vh are the right singular vectors)
+    flip = np.sign(np.dot(ori, Vh[0]))
+    return flip
+
+
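+# Editor's note: a minimal usage sketch (illustration only; 'label', 'src'
+# and 'tcs' are assumed to exist). The sign-flip vector lets per-vertex time
+# courses be averaged within a label without orientation-induced
+# cancellation; 'tcs' is an (n_label_vertices, n_times) array whose rows
+# match the label's vertices in the source space.
+def _example_sign_flip_mean(label, src, tcs):
+    flip = label_sign_flip(label, src)
+    return np.dot(flip, tcs) / len(flip)  # sign-corrected mean time course
+
+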
+def stc_to_label(stc, src=None, smooth=True, connected=False,
+                 subjects_dir=None):
+    """Compute a label from the non-zero sources in an stc object.
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The source estimates.
+    src : SourceSpaces | str | None
+        The source space over which the source estimates are defined.
+        If it's a string it should be the subject name (e.g. fsaverage).
+        Can be None if stc.subject is not None.
+    smooth : bool
+        Fill in vertices on the cortical surface that are not in the source
+        space based on the closest source space vertex (requires
+        src to be a SourceSpace).
+    connected : bool
+        If True a list of connected labels will be returned in each
+        hemisphere. The labels are ordered in decreasing order of the
+        maximum value in the stc.
+    subjects_dir : str | None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    labels : list of Labels | list of list of Labels
+        The generated labels. If connected is False, it returns
+        a list of Labels (one per hemisphere). If no Label is available
+        in a hemisphere, None is returned. If connected is True,
+        it returns for each hemisphere a list of connected labels,
+        ordered in decreasing order of the maximum value in the stc.
+        If no Label is available in a hemisphere, an empty list is returned.
+    """
+    if not isinstance(smooth, bool):
+        raise ValueError('smooth should be True or False. Got %s.' % smooth)
+
+    src = stc.subject if src is None else src
+    if src is None:
+        raise ValueError('src cannot be None if stc.subject is None')
+    if isinstance(src, string_types):
+        subject = src
+    else:
+        subject = stc.subject
+
+    if not isinstance(stc, SourceEstimate):
+        raise ValueError('stc must be a surface SourceEstimate')
+
+    if isinstance(src, string_types):
+        if connected:
+            raise ValueError('The option to return only connected labels is '
+                             'only available if source spaces are provided.')
+        if smooth:
+            msg = ("stc_to_label with smooth=True requires src to be an "
+                   "instance of SourceSpace")
+            raise ValueError(msg)
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+        surf_path_from = op.join(subjects_dir, src, 'surf')
+        rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
+        rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
+        rr = [rr_lh, rr_rh]
+        tris = [tris_lh, tris_rh]
+    else:
+        if not isinstance(src, SourceSpaces):
+            raise TypeError('src must be a string or a set of source spaces')
+        if len(src) != 2:
+            raise ValueError('source space should contain the 2 hemispheres')
+        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
+        tris = [src[0]['tris'], src[1]['tris']]
+        src_conn = spatial_src_connectivity(src).tocsr()
+
+    labels = []
+    cnt = 0
+    cnt_full = 0
+    for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
+            zip(['lh', 'rh'], stc.vertices, tris, rr)):
+        this_data = stc.data[cnt:cnt + len(this_vertno)]
+        e = mesh_edges(this_tris)
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+
+        if connected:  # we know src *must* be a SourceSpaces now
+            vertno = np.where(src[hemi_idx]['inuse'])[0]
+            if len(np.setdiff1d(this_vertno, vertno)) > 0:
+                raise RuntimeError('stc contains vertices not present '
+                                   'in source space, did you morph?')
+            tmp = np.zeros((len(vertno), this_data.shape[1]))
+            this_vertno_idx = np.searchsorted(vertno, this_vertno)
+            tmp[this_vertno_idx] = this_data
+            this_data = tmp
+            offset = cnt_full + len(this_data)
+            this_src_conn = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
+            this_data_abs_max = np.abs(this_data).max(axis=1)
+            clusters, _ = _find_clusters(this_data_abs_max, 0.,
+                                         connectivity=this_src_conn)
+            cnt_full += len(this_data)
+            # Then order clusters in descending order based on maximum value
+            clusters_max = np.argsort([np.max(this_data_abs_max[c])
+                                       for c in clusters])[::-1]
+            clusters = [clusters[k] for k in clusters_max]
+            clusters = [vertno[c] for c in clusters]
+        else:
+            clusters = [this_vertno[np.any(this_data, axis=1)]]
+
+        cnt += len(this_vertno)
+
+        clusters = [c for c in clusters if len(c) > 0]
+
+        if len(clusters) == 0:
+            if not connected:
+                this_labels = None
+            else:
+                this_labels = []
+        else:
+            this_labels = []
+            colors = _n_colors(len(clusters))
+            for c, color in zip(clusters, colors):
+                idx_use = c
+                label = Label(idx_use, this_rr[idx_use], None, hemi,
+                              'Label from stc', subject=subject,
+                              color=color)
+                if smooth:
+                    label = label.fill(src)
+
+                this_labels.append(label)
+
+            if not connected:
+                this_labels = this_labels[0]
+
+        labels.append(this_labels)
+
+    return labels
+
+
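+# Editor's note: a minimal usage sketch (illustration only; 'stc' and 'src'
+# are assumed to be a thresholded SourceEstimate and the matching
+# SourceSpaces). With connected=True, one list of connected labels per
+# hemisphere is returned, ordered by peak amplitude.
+def _example_stc_to_label(stc, src):
+    labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
+                                        connected=True)
+    return labels_lh, labels_rh
+
+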
+def _verts_within_dist(graph, sources, max_dist):
+    """Find all vertices wihin a maximum geodesic distance from source
+
+    Parameters
+    ----------
+    graph : scipy.sparse.csr_matrix
+        Sparse matrix with distances between adjacent vertices.
+    sources : list of int
+        Source vertices.
+    max_dist : float
+        Maximum geodesic distance.
+
+    Returns
+    -------
+    verts : array
+        Vertices within max_dist.
+    dist : array
+        Distances from source vertex.
+    """
+    dist_map = {}
+    verts_added_last = []
+    for source in sources:
+        dist_map[source] = 0
+        verts_added_last.append(source)
+
+    # add neighbors until no more neighbors within max_dist can be found
+    while len(verts_added_last) > 0:
+        verts_added = []
+        for i in verts_added_last:
+            v_dist = dist_map[i]
+            row = graph[i, :]
+            neighbor_vert = row.indices
+            neighbor_dist = row.data
+            for j, d in zip(neighbor_vert, neighbor_dist):
+                n_dist = v_dist + d
+                if j in dist_map:
+                    if n_dist < dist_map[j]:
+                        dist_map[j] = n_dist
+                else:
+                    if n_dist <= max_dist:
+                        dist_map[j] = n_dist
+                        # we found a new vertex within max_dist
+                        verts_added.append(j)
+        verts_added_last = verts_added
+
+    verts = np.sort(np.array(list(dist_map.keys()), dtype=int))
+    dist = np.array([dist_map[v] for v in verts])
+
+    return verts, dist
+
+
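+# Editor's note: a minimal sketch (illustration only, not part of the
+# library API) of the Dijkstra-like expansion above. On a three-vertex
+# chain with 1 mm edges, growing 1.5 mm from vertex 0 reaches vertices
+# 0 and 1 but not vertex 2.
+def _example_verts_within_dist():
+    graph = sparse.csr_matrix(np.array([[0., 1., 0.],
+                                        [1., 0., 1.],
+                                        [0., 1., 0.]]))
+    return _verts_within_dist(graph, [0], 1.5)  # (array([0, 1]), distances)
+
+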
+def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
+    """Helper for parallelization of grow_labels
+    """
+    labels = []
+    for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
+        label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
+
+        # create a label
+        if len(seed) == 1:
+            seed_repr = str(seed)
+        else:
+            seed_repr = ','.join(map(str, seed))
+        comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
+                                                               extent)
+        label = Label(vertices=label_verts,
+                      pos=vert[hemi][label_verts],
+                      values=label_dist,
+                      hemi=hemi,
+                      comment=comment,
+                      name=str(name),
+                      subject=subject)
+        labels.append(label)
+    return labels
+
+
+def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
+                overlap=True, names=None, surface='white'):
+    """Generate circular labels in source space with region growing
+
+    This function generates a number of labels in source space by growing
+    regions starting from the vertices defined in "seeds". For each seed, a
+    label is generated containing all vertices within a maximum geodesic
+    distance on the white matter surface from the seed.
+
+    Note: "extents" and "hemis" can either be arrays with the same length as
+          seeds, which allows using a different extent and hemisphere for each
+          label, or integers, in which case the same extent and hemisphere is
+          used for each label.
+
+    Parameters
+    ----------
+    subject : string
+        Name of the subject as in SUBJECTS_DIR.
+    seeds : int | list
+        Seed, or list of seeds. Each seed can be either a vertex number or
+        a list of vertex numbers.
+    extents : array | float
+        Extents (radius in mm) of the labels.
+    hemis : array | int
+        Hemispheres to use for the labels (0: left, 1: right).
+    subjects_dir : string
+        Path to SUBJECTS_DIR if not set in the environment.
+    n_jobs : int
+        Number of jobs to run in parallel. Likely only useful if tens
+        or hundreds of labels are being expanded simultaneously. Does not
+        apply with ``overlap=False``.
+    overlap : bool
+        Produce overlapping labels. If True (default), the resulting labels
+        can be overlapping. If False, each label will be grown one step at a
+        time, and occupied territory will not be invaded.
+    names : None | list of str
+        Assign names to the new labels (list needs to have the same length as
+        seeds).
+    surface : string
+        The surface used to grow the labels, defaults to the white surface.
+
+    Returns
+    -------
+    labels : list of Label
+        The labels' ``comment`` attribute contains information on the seed
+        vertex and extent; the ``values`` attribute contains distance from
+        the seed in millimeters.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    n_jobs = check_n_jobs(n_jobs)
+
+    # make sure the inputs are arrays
+    if np.isscalar(seeds):
+        seeds = [seeds]
+    seeds = np.atleast_1d([np.atleast_1d(seed) for seed in seeds])
+    extents = np.atleast_1d(extents)
+    hemis = np.atleast_1d(hemis)
+    n_seeds = len(seeds)
+
+    if len(extents) != 1 and len(extents) != n_seeds:
+        raise ValueError('The extents parameter has to be of length 1 or '
+                         'len(seeds)')
+
+    if len(hemis) != 1 and len(hemis) != n_seeds:
+        raise ValueError('The hemis parameter has to be of length 1 or '
+                         'len(seeds)')
+
+    # make the arrays the same length as seeds
+    if len(extents) == 1:
+        extents = np.tile(extents, n_seeds)
+
+    if len(hemis) == 1:
+        hemis = np.tile(hemis, n_seeds)
+
+    hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
+
+    # names
+    if names is None:
+        names = ["Label_%i-%s" % items for items in enumerate(hemis)]
+    else:
+        if np.isscalar(names):
+            names = [names]
+        if len(names) != n_seeds:
+            raise ValueError('The names parameter has to be None or have '
+                             'length len(seeds)')
+        for i, hemi in enumerate(hemis):
+            if not names[i].endswith(hemi):
+                names[i] = '-'.join((names[i], hemi))
+    names = np.array(names)
+
+    # load the surfaces and create the distance graphs
+    tris, vert, dist = {}, {}, {}
+    for hemi in set(hemis):
+        surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
+                             surface)
+        vert[hemi], tris[hemi] = read_surface(surf_fname)
+        dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
+
+    if overlap:
+        # create the patches
+        parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
+        seeds = np.array_split(seeds, n_jobs)
+        extents = np.array_split(extents, n_jobs)
+        hemis = np.array_split(hemis, n_jobs)
+        names = np.array_split(names, n_jobs)
+        labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
+                              for s, e, h, n
+                              in zip(seeds, extents, hemis, names)), [])
+    else:
+        # special procedure for non-overlapping labels
+        labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
+                                             vert, dist, names)
+
+    # add a unique color to each label
+    colors = _n_colors(len(labels))
+    for label, color in zip(labels, colors):
+        label.color = color
+
+    return labels
+
+
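+# Editor's note: a minimal usage sketch (illustration only; the subject name
+# and seed vertices are hypothetical). Two non-overlapping 10 mm labels are
+# grown on the left-hemisphere white surface; with overlap=False each vertex
+# is assigned to the closer seed.
+def _example_grow_labels():
+    return grow_labels('sample', seeds=[1440, 8405], extents=10., hemis=0,
+                       overlap=False, names=['seed_a', 'seed_b'])
+
+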
+def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
+                                graphs, names_):
+    """Grow labels while ensuring that they don't overlap
+    """
+    labels = []
+    for hemi in set(hemis):
+        hemi_index = (hemis == hemi)
+        seeds = seeds_[hemi_index]
+        extents = extents_[hemi_index]
+        names = names_[hemi_index]
+        graph = graphs[hemi]  # distance graph
+        n_vertices = len(vertices_[hemi])
+        n_labels = len(seeds)
+
+        # prepare parcellation
+        parc = np.empty(n_vertices, dtype='int32')
+        parc[:] = -1
+
+        # initialize active sources
+        sources = {}  # vert -> (label, dist_from_seed)
+        edge = []  # queue of vertices to process
+        for label, seed in enumerate(seeds):
+            if np.any(parc[seed] >= 0):
+                raise ValueError("Overlapping seeds")
+            parc[seed] = label
+            for s in np.atleast_1d(seed):
+                sources[s] = (label, 0.)
+                edge.append(s)
+
+        # grow from sources
+        while edge:
+            vert_from = edge.pop(0)
+            label, old_dist = sources[vert_from]
+
+            # add neighbors within allowable distance
+            row = graph[vert_from, :]
+            for vert_to, dist in zip(row.indices, row.data):
+                new_dist = old_dist + dist
+
+                # abort if outside of extent
+                if new_dist > extents[label]:
+                    continue
+
+                vert_to_label = parc[vert_to]
+                if vert_to_label >= 0:
+                    _, vert_to_dist = sources[vert_to]
+                    # abort if the vertex is occupied by a closer seed
+                    if new_dist > vert_to_dist:
+                        continue
+                    elif vert_to in edge:
+                        edge.remove(vert_to)
+
+                # assign label value
+                parc[vert_to] = label
+                sources[vert_to] = (label, new_dist)
+                edge.append(vert_to)
+
+        # convert parc to labels
+        for i in xrange(n_labels):
+            vertices = np.nonzero(parc == i)[0]
+            name = str(names[i])
+            label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
+            labels.append(label_)
+
+    return labels
+
+
+def _read_annot(fname):
+    """Read a Freesurfer annotation from a .annot file.
+
+    Note : Copied from PySurfer
+
+    Parameters
+    ----------
+    fname : str
+        Path to annotation file
+
+    Returns
+    -------
+    annot : numpy array, shape=(n_verts,)
+        Annotation id at each vertex
+    ctab : numpy array, shape=(n_entries, 5)
+        RGBA + label id colortable array
+    names : list of str
+        List of region names as stored in the annot file.
+    """
+    if not op.isfile(fname):
+        dir_name = op.split(fname)[0]
+        if not op.isdir(dir_name):
+            raise IOError('Directory for annotation does not exist: %s'
+                          % fname)
+        cands = os.listdir(dir_name)
+        cands = [c for c in cands if '.annot' in c]
+        if len(cands) == 0:
+            raise IOError('No such file %s, no candidate parcellations '
+                          'found in directory' % fname)
+        else:
+            raise IOError('No such file %s, candidate parcellations in '
+                          'that directory: %s' % (fname, ', '.join(cands)))
+    with open(fname, "rb") as fid:
+        n_verts = np.fromfile(fid, '>i4', 1)[0]
+        data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
+        annot = data[data[:, 0], 1]
+        ctab_exists = np.fromfile(fid, '>i4', 1)[0]
+        if not ctab_exists:
+            raise Exception('Color table not found in annotation file')
+        n_entries = np.fromfile(fid, '>i4', 1)[0]
+        if n_entries > 0:
+            length = np.fromfile(fid, '>i4', 1)[0]
+            orig_tab = np.fromfile(fid, '>c', length)
+            orig_tab = orig_tab[:-1]
+
+            names = list()
+            ctab = np.zeros((n_entries, 5), int)
+            for i in range(n_entries):
+                name_length = np.fromfile(fid, '>i4', 1)[0]
+                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
+                names.append(name)
+                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
+                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
+                              ctab[i, 2] * (2 ** 16) +
+                              ctab[i, 3] * (2 ** 24))
+        else:
+            ctab_version = -n_entries
+            if ctab_version != 2:
+                raise Exception('Color table version not supported')
+            n_entries = np.fromfile(fid, '>i4', 1)[0]
+            ctab = np.zeros((n_entries, 5), int)
+            length = np.fromfile(fid, '>i4', 1)[0]
+            np.fromfile(fid, "|S%d" % length, 1)  # Orig table path
+            entries_to_read = np.fromfile(fid, '>i4', 1)[0]
+            names = list()
+            for i in range(entries_to_read):
+                np.fromfile(fid, '>i4', 1)  # Structure
+                name_length = np.fromfile(fid, '>i4', 1)[0]
+                name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
+                names.append(name)
+                ctab[i, :4] = np.fromfile(fid, '>i4', 4)
+                ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
+                              ctab[i, 2] * (2 ** 16))
+
+        # convert to more common alpha value
+        ctab[:, 3] = 255 - ctab[:, 3]
+
+    return annot, ctab, names
+
+
+def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
+    """Helper function to get the .annot filenames and hemispheres"""
+    if annot_fname is not None:
+        # we use the .annot file specified by the user
+        hemis = [op.basename(annot_fname)[:2]]
+        if hemis[0] not in ['lh', 'rh']:
+            raise ValueError('Could not determine hemisphere from filename, '
+                             'filename has to start with "lh" or "rh".')
+        annot_fname = [annot_fname]
+    else:
+        # construct .annot file names for requested subject, parc, hemi
+        if hemi not in ['lh', 'rh', 'both']:
+            raise ValueError('hemi has to be "lh", "rh", or "both"')
+        if hemi == 'both':
+            hemis = ['lh', 'rh']
+        else:
+            hemis = [hemi]
+
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+        dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
+        annot_fname = [dst % hemi_ for hemi_ in hemis]
+
+    return annot_fname, hemis
+
+
+@verbose
+def read_labels_from_annot(subject, parc='aparc', hemi='both',
+                           surf_name='white', annot_fname=None, regexp=None,
+                           subjects_dir=None, verbose=None):
+    """Read labels from a FreeSurfer annotation file
+
+    Note: Only cortical labels will be returned.
+
+    Parameters
+    ----------
+    subject : str
+        The subject for which to read the parcellation.
+    parc : str
+        The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
+    hemi : str
+        The hemisphere to read the parcellation for, can be 'lh', 'rh',
+        or 'both'.
+    surf_name : str
+        Surface used to obtain vertex locations, e.g., 'white', 'pial'
+    annot_fname : str or None
+        Filename of the .annot file. If not None, only this file is read
+        and 'parc' and 'hemi' are ignored.
+    regexp : str
+        Regular expression or substring to select particular labels from the
+        parcellation. E.g. 'superior' will return all labels in which this
+        substring is contained.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    labels : list of Label
+        The labels, sorted by label name (ascending).
+    """
+    logger.info('Reading labels from parcellation...')
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    # get the .annot filenames and hemispheres
+    annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
+                                          subjects_dir)
+
+    if regexp is not None:
+        # allow for convenient substring match
+        if regexp.replace('_', '').isalnum():
+            r_ = re.compile('.*%s.*' % regexp)
+        else:
+            r_ = re.compile(regexp)
+
+    # now we are ready to create the labels
+    n_read = 0
+    labels = list()
+    for fname, hemi in zip(annot_fname, hemis):
+        # read annotation
+        annot, ctab, label_names = _read_annot(fname)
+        label_rgbas = ctab[:, :4]
+        label_ids = ctab[:, -1]
+
+        # load the vertex positions from surface
+        fname_surf = op.join(subjects_dir, subject, 'surf',
+                             '%s.%s' % (hemi, surf_name))
+        vert_pos, _ = read_surface(fname_surf)
+        vert_pos /= 1e3  # the positions in labels are in meters
+        for label_id, label_name, label_rgba in\
+                zip(label_ids, label_names, label_rgbas):
+            vertices = np.where(annot == label_id)[0]
+            if len(vertices) == 0:
+                # label is not part of cortical surface
+                continue
+            name = label_name.decode() + '-' + hemi
+            if (regexp is not None) and not r_.match(name):
+                continue
+            pos = vert_pos[vertices, :]
+            values = np.zeros(len(vertices))
+            label_rgba = tuple(label_rgba / 255.)
+            label = Label(vertices, pos, values, hemi, name=name,
+                          subject=subject, color=label_rgba)
+            labels.append(label)
+
+        n_read = len(labels) - n_read
+        logger.info('   read %d labels from %s' % (n_read, fname))
+
+    # sort the labels by label name
+    labels = sorted(labels, key=lambda l: l.name)
+
+    if len(labels) == 0:
+        msg = 'No labels found.'
+        if regexp is not None:
+            msg += ' Maybe the regular expression %r did not match?' % regexp
+        raise RuntimeError(msg)
+
+    logger.info('[done]')
+    return labels
+
+
+def _write_annot(fname, annot, ctab, names):
+    """Write a Freesurfer annotation to a .annot file.
+
+    Parameters
+    ----------
+    fname : str
+        Path to annotation file
+    annot : numpy array, shape=(n_verts)
+        Annotation id at each vertex. Note: IDs must be computed from
+        RGBA colors, otherwise the mapping will be invalid.
+    ctab : numpy array, shape=(n_entries, 4)
+        RGBA colortable array.
+    names : list of str
+        List of region names to be stored in the annot file
+    """
+
+    with open(fname, 'wb') as fid:
+        n_verts = len(annot)
+        np.array(n_verts, dtype='>i4').tofile(fid)
+
+        data = np.zeros((n_verts, 2), dtype='>i4')
+        data[:, 0] = np.arange(n_verts)
+        data[:, 1] = annot
+        data.ravel().tofile(fid)
+
+        # indicate that color table exists
+        np.array(1, dtype='>i4').tofile(fid)
+
+        # color table version 2
+        np.array(-2, dtype='>i4').tofile(fid)
+
+        # write color table
+        n_entries = len(ctab)
+        np.array(n_entries, dtype='>i4').tofile(fid)
+
+        # write dummy color table name
+        table_name = 'MNE-Python Colortable'
+        np.array(len(table_name), dtype='>i4').tofile(fid)
+        np.fromstring(table_name, dtype=np.uint8).tofile(fid)
+
+        # number of entries to write
+        np.array(n_entries, dtype='>i4').tofile(fid)
+
+        # write entries
+        for ii, (name, color) in enumerate(zip(names, ctab)):
+            np.array(ii, dtype='>i4').tofile(fid)
+            np.array(len(name), dtype='>i4').tofile(fid)
+            np.fromstring(name, dtype=np.uint8).tofile(fid)
+            np.array(color[:4], dtype='>i4').tofile(fid)
+
+
+@verbose
+def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
+                          subjects_dir=None, annot_fname=None,
+                          colormap='hsv', hemi='both', verbose=None):
+    """Create a FreeSurfer annotation from a list of labels
+
+    Parameters
+    ----------
+    labels : list of instances of mne.Label
+        The labels to create a parcellation from.
+    subject : str | None
+        The subject for which to write the parcellation.
+    parc : str | None
+        The parcellation name to use.
+    overwrite : bool
+        Overwrite files if they already exist.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    annot_fname : str | None
+        Filename of the .annot file. If not None, only this file is written
+        and 'parc' and 'subject' are ignored.
+    colormap : str
+        Colormap to use to generate label colors for labels that do not
+        have a color specified.
+    hemi : 'both' | 'lh' | 'rh'
+        The hemisphere(s) for which to write \*.annot files (only applies if
+        annot_fname is not specified; default is 'both').
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Vertices that are not covered by any of the labels are assigned to a label
+    named "unknown".
+    """
+    logger.info('Writing labels to parcellation..')
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    # get the .annot filenames and hemispheres
+    annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
+                                          subjects_dir)
+
+    if not overwrite:
+        for fname in annot_fname:
+            if op.exists(fname):
+                raise ValueError('File %s exists. Use "overwrite=True" to '
+                                 'overwrite it' % fname)
+
+    # prepare container for data to save:
+    to_save = []
+    # keep track of issues found in the labels
+    duplicate_colors = []
+    invalid_colors = []
+    overlap = []
+    no_color = (-1, -1, -1, -1)
+    no_color_rgb = (-1, -1, -1)
+    for hemi, fname in zip(hemis, annot_fname):
+        hemi_labels = [label for label in labels if label.hemi == hemi]
+        n_hemi_labels = len(hemi_labels)
+
+        if n_hemi_labels == 0:
+            ctab = np.empty((0, 4), dtype=np.int32)
+            ctab_rgb = ctab[:, :3]
+        else:
+            hemi_labels.sort(key=lambda label: label.name)
+
+            # convert colors to 0-255 RGBA tuples
+            hemi_colors = [no_color if label.color is None else
+                           tuple(int(round(255 * i)) for i in label.color)
+                           for label in hemi_labels]
+            ctab = np.array(hemi_colors, dtype=np.int32)
+            ctab_rgb = ctab[:, :3]
+
+            # make color dict (for annot ID, only R, G and B count)
+            labels_by_color = defaultdict(list)
+            for label, color in zip(hemi_labels, ctab_rgb):
+                labels_by_color[tuple(color)].append(label.name)
+
+            # check label colors
+            for color, names in labels_by_color.items():
+                if color == no_color_rgb:
+                    continue
+
+                if color == (0, 0, 0):
+                    # we cannot have an all-zero color, otherwise e.g.
+                    # tksurfer refuses to read the parcellation
+                    msg = ('At least one label contains a color with "r=0, '
+                           'g=0, b=0" values. Some FreeSurfer tools may fail '
+                           'to read the parcellation')
+                    logger.warning(msg)
+
+                if any(i > 255 for i in color):
+                    msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
+                    invalid_colors.append(msg)
+
+                if len(names) > 1:
+                    msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
+                    duplicate_colors.append(msg)
+
+            # replace None values (labels with unspecified color)
+            if labels_by_color[no_color_rgb]:
+                default_colors = _n_colors(n_hemi_labels, bytes_=True,
+                                           cmap=colormap)
+                # keep track of colors known to be in hemi_colors :
+                safe_color_i = 0
+                for i in xrange(n_hemi_labels):
+                    if ctab[i, 0] == -1:
+                        color = default_colors[i]
+                        # make sure to add no duplicate color
+                        while np.any(np.all(color[:3] == ctab_rgb, 1)):
+                            color = default_colors[safe_color_i]
+                            safe_color_i += 1
+                        # assign the color
+                        ctab[i] = color
+
+        # find number of vertices in surface
+        if subject is not None and subjects_dir is not None:
+            fpath = os.path.join(subjects_dir, subject, 'surf',
+                                 '%s.white' % hemi)
+            points, _ = read_surface(fpath)
+            n_vertices = len(points)
+        else:
+            if len(hemi_labels) > 0:
+                max_vert = max(np.max(label.vertices) for label in hemi_labels)
+                n_vertices = max_vert + 1
+            else:
+                n_vertices = 1
+            msg = ('    Number of vertices in the surface could not be '
+                   'verified because the surface file could not be found; '
+                   'specify subject and subjects_dir parameters.')
+            logger.warning(msg)
+
+        # Create annot and color table array to write
+        annot = np.empty(n_vertices, dtype=np.int)
+        annot[:] = -1
+        # create the annotation ids from the colors
+        annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
+        annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
+        for label, annot_id in zip(hemi_labels, annot_ids):
+            # make sure the label is not overwriting another label
+            if np.any(annot[label.vertices] != -1):
+                other_ids = set(annot[label.vertices])
+                other_ids.discard(-1)
+                other_indices = (annot_ids.index(i) for i in other_ids)
+                other_names = (hemi_labels[i].name for i in other_indices)
+                other_repr = ', '.join(other_names)
+                msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
+                overlap.append(msg)
+
+            annot[label.vertices] = annot_id
+
+        hemi_names = [label.name for label in hemi_labels]
+
+        if None in hemi_names:
+            msg = ("Found %i labels with no name. Writing annotation file"
+                   "requires all labels named" % (hemi_names.count(None)))
+            # raise the error immediately rather than crash with an
+            # uninformative error later (e.g. cannot join NoneType)
+            raise ValueError(msg)
+
+        # Assign unlabeled vertices to an "unknown" label
+        unlabeled = (annot == -1)
+        if np.any(unlabeled):
+            msg = ("Assigning %i unlabeled vertices to "
+                   "'unknown-%s'" % (unlabeled.sum(), hemi))
+            logger.info(msg)
+
+            # find an unused color (try shades of gray first)
+            for i in range(1, 257):
+                if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
+                    break
+            if i < 256:
+                color = (i, i, i, 0)
+            else:
+                err = ("Need one free shade of gray for 'unknown' label. "
+                       "Please modify your label colors, or assign the "
+                       "unlabeled vertices to another label.")
+                raise ValueError(err)
+
+            # find the id
+            annot_id = np.sum(annot_id_coding * color[:3])
+
+            # update data to write
+            annot[unlabeled] = annot_id
+            ctab = np.vstack((ctab, color))
+            hemi_names.append("unknown")
+
+        # convert to FreeSurfer alpha values
+        ctab[:, 3] = 255 - ctab[:, 3]
+
+        # strip the hemisphere suffix (e.g. '-lh') from label names
+        hemi_names = [name[:-3] if name.endswith(hemi) else name
+                      for name in hemi_names]
+
+        to_save.append((fname, annot, ctab, hemi_names))
+
+    issues = []
+    if duplicate_colors:
+        msg = ("Some labels have the same color values (all labels in one "
+               "hemisphere must have a unique color):")
+        duplicate_colors.insert(0, msg)
+        issues.append(os.linesep.join(duplicate_colors))
+    if invalid_colors:
+        msg = ("Some labels have invalid color values (all colors should be "
+               "RGBA tuples with values between 0 and 1)")
+        invalid_colors.insert(0, msg)
+        issues.append(os.linesep.join(invalid_colors))
+    if overlap:
+        msg = ("Some labels occupy vertices that are also occupied by one or "
+               "more other labels. Each vertex can only be occupied by a "
+               "single label in *.annot files.")
+        overlap.insert(0, msg)
+        issues.append(os.linesep.join(overlap))
+
+    if issues:
+        raise ValueError('\n\n'.join(issues))
+
+    # write it
+    for fname, annot, ctab, hemi_names in to_save:
+        logger.info('   writing %d labels to %s' % (len(hemi_names), fname))
+        _write_annot(fname, annot, ctab, hemi_names)
+
+    logger.info('[done]')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/__init__.py
new file mode 100644
index 0000000..b48f805
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/__init__.py
@@ -0,0 +1,10 @@
+"""Linear inverse solvers based on L2 Minimum Norm Estimates (MNE)"""
+
+from .inverse import (InverseOperator, read_inverse_operator, apply_inverse,
+                      apply_inverse_raw, make_inverse_operator,
+                      apply_inverse_epochs, write_inverse_operator,
+                      compute_rank_inverse, prepare_inverse_operator,
+                      estimate_snr)
+from .psf_ctf import point_spread_function, cross_talk_function
+from .time_frequency import (source_band_induced_power, source_induced_power,
+                             compute_source_psd, compute_source_psd_epochs)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/inverse.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/inverse.py
new file mode 100644
index 0000000..eca2a24
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/inverse.py
@@ -0,0 +1,1576 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks@gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+from copy import deepcopy
+from math import sqrt
+import numpy as np
+from scipy import linalg
+
+from ..io.constants import FIFF
+from ..io.open import fiff_open
+from ..io.tag import find_tag
+from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
+                         write_named_matrix)
+from ..io.proj import _read_proj, make_projector, _write_proj
+from ..io.proj import _has_eeg_average_ref_proj
+from ..io.tree import dir_tree_find
+from ..io.write import (write_int, write_float_matrix, start_file,
+                        start_block, end_block, end_file, write_float,
+                        write_coord_trans, write_string)
+
+from ..io.pick import channel_type, pick_info, pick_types
+from ..cov import prepare_noise_cov, _read_cov, _write_cov, Covariance
+from ..forward import (compute_depth_prior, _read_forward_meas_info,
+                       write_forward_meas_info, is_fixed_orient,
+                       compute_orient_prior, convert_forward_solution)
+from ..source_space import (_read_source_spaces_from_tree,
+                            find_source_space_hemi, _get_vertno,
+                            _write_source_spaces_to_fid, label_src_vertno_sel)
+from ..transforms import _ensure_trans, transform_surface_to
+from ..source_estimate import _make_stc
+from ..utils import check_fname, logger, verbose
+from functools import reduce
+
+
+class InverseOperator(dict):
+    """InverseOperator class to represent info from inverse operator
+    """
+
+    def __repr__(self):
+        """Summarize inverse info instead of printing all"""
+
+        entr = '<InverseOperator'
+
+        nchan = len(pick_types(self['info'], meg=True, eeg=False))
+        entr += ' | ' + 'MEG channels: %d' % nchan
+        nchan = len(pick_types(self['info'], meg=False, eeg=True))
+        entr += ' | ' + 'EEG channels: %d' % nchan
+
+        # XXX TODO: This and the __repr__ in SourceSpaces should call a
+        # function _get_name_str() in source_space.py
+        if self['src'][0]['type'] == 'surf':
+            entr += (' | Source space: Surface with %d vertices'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'vol':
+            entr += (' | Source space: Volume with %d grid points'
+                     % self['nsource'])
+        elif self['src'][0]['type'] == 'discrete':
+            entr += (' | Source space: Discrete with %d dipoles'
+                     % self['nsource'])
+
+        source_ori = {FIFF.FIFFV_MNE_UNKNOWN_ORI: 'Unknown',
+                      FIFF.FIFFV_MNE_FIXED_ORI: 'Fixed',
+                      FIFF.FIFFV_MNE_FREE_ORI: 'Free'}
+        entr += ' | Source orientation: %s' % source_ori[self['source_ori']]
+        entr += '>'
+
+        return entr
+
+
+def _pick_channels_inverse_operator(ch_names, inv):
+    """Gives the indices of the data channel to be used knowing
+    an inverse operator
+    """
+    sel = []
+    for name in inv['noise_cov'].ch_names:
+        if name in ch_names:
+            sel.append(ch_names.index(name))
+        else:
+            raise ValueError('The inverse operator was computed with '
+                             'channel %s which is not present in '
+                             'the data. You should compute a new inverse '
+                             'operator restricted to the good data '
+                             'channels.' % name)
+    return sel
+
+
+@verbose
+def read_inverse_operator(fname, verbose=None):
+    """Read the inverse operator decomposition from a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the FIF file, which ends with -inv.fif or -inv.fif.gz.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : instance of InverseOperator
+        The inverse operator.
+
+    See Also
+    --------
+    write_inverse_operator, make_inverse_operator
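+
+    Examples
+    --------
+    A minimal usage sketch; the file name is a hypothetical placeholder:
+
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP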
+    """
+    check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
+
+    #
+    #   Open the file, create directory
+    #
+    logger.info('Reading inverse operator decomposition from %s...'
+                % fname)
+    f, tree, _ = fiff_open(fname, preload=True)
+    with f as fid:
+        #
+        #   Find all inverse operators
+        #
+        invs = dir_tree_find(tree, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+        if invs is None or len(invs) < 1:
+            raise Exception('No inverse solutions in %s' % fname)
+
+        invs = invs[0]
+        #
+        #   Parent MRI data
+        #
+        parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            raise Exception('No parent MRI information in %s' % fname)
+        parent_mri = parent_mri[0]  # take only first one
+
+        logger.info('    Reading inverse operator info...')
+        #
+        #   Methods and source orientations
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INCLUDED_METHODS)
+        if tag is None:
+            raise Exception('Modalities not found')
+
+        inv = dict()
+        inv['methods'] = int(tag.data)
+
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+        if tag is None:
+            raise Exception('Source orientation constraints not found')
+
+        inv['source_ori'] = int(tag.data)
+
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+        if tag is None:
+            raise Exception('Number of sources not found')
+
+        inv['nsource'] = int(tag.data)
+        inv['nchan'] = 0
+        #
+        #   Coordinate frame
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_COORD_FRAME)
+        if tag is None:
+            raise Exception('Coordinate frame tag not found')
+
+        inv['coord_frame'] = tag.data
+
+        #
+        #   Units
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT)
+        unit_dict = {FIFF.FIFF_UNIT_AM: 'Am',
+                     FIFF.FIFF_UNIT_AM_M2: 'Am/m^2',
+                     FIFF.FIFF_UNIT_AM_M3: 'Am/m^3'}
+        inv['units'] = unit_dict.get(int(getattr(tag, 'data', -1)), None)
+
+        #
+        #   The actual source orientation vectors
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS)
+        if tag is None:
+            raise Exception('Source orientation information not found')
+
+        inv['source_nn'] = tag.data
+        logger.info('    [done]')
+        #
+        #   The SVD decomposition...
+        #
+        logger.info('    Reading inverse operator decomposition...')
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SING)
+        if tag is None:
+            raise Exception('Singular values not found')
+
+        inv['sing'] = tag.data
+        inv['nchan'] = len(inv['sing'])
+        #
+        #   The eigenleads and eigenfields
+        #
+        inv['eigen_leads_weighted'] = False
+        eigen_leads = _read_named_matrix(
+            fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS)
+        if eigen_leads is None:
+            inv['eigen_leads_weighted'] = True
+            eigen_leads = _read_named_matrix(
+                fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED)
+        if eigen_leads is None:
+            raise ValueError('Eigen leads not found in inverse operator.')
+        #
+        #   Having the eigenleads as cols is better for the inverse calcs
+        #
+        inv['eigen_leads'] = _transpose_named_matrix(eigen_leads, copy=False)
+        inv['eigen_fields'] = _read_named_matrix(fid, invs,
+                                                 FIFF.FIFF_MNE_INVERSE_FIELDS)
+        logger.info('    [done]')
+        #
+        #   Read the covariance matrices
+        #
+        inv['noise_cov'] = Covariance(
+            **_read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV, limited=True))
+        logger.info('    Noise covariance matrix read.')
+
+        inv['source_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
+        logger.info('    Source covariance matrix read.')
+        #
+        #   Read the various priors
+        #
+        inv['orient_prior'] = _read_cov(fid, invs,
+                                        FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
+        if inv['orient_prior'] is not None:
+            logger.info('    Orientation priors read.')
+
+        inv['depth_prior'] = _read_cov(fid, invs,
+                                       FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
+        if inv['depth_prior'] is not None:
+            logger.info('    Depth priors read.')
+
+        inv['fmri_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
+        if inv['fmri_prior'] is not None:
+            logger.info('    fMRI priors read.')
+
+        #
+        #   Read the source spaces
+        #
+        inv['src'] = _read_source_spaces_from_tree(fid, tree,
+                                                   patch_stats=False)
+
+        for s in inv['src']:
+            s['id'] = find_source_space_hemi(s)
+
+        #
+        #   Get the MRI <-> head coordinate transformation
+        #
+        tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+        if tag is None:
+            raise Exception('MRI/head coordinate transformation not found')
+        mri_head_t = _ensure_trans(tag.data, 'mri', 'head')
+
+        inv['mri_head_t'] = mri_head_t
+
+        #
+        # get parent MEG info
+        #
+        inv['info'] = _read_forward_meas_info(tree, fid)
+
+        #
+        #   Transform the source spaces to the correct coordinate frame
+        #   if necessary
+        #
+        if inv['coord_frame'] not in (FIFF.FIFFV_COORD_MRI,
+                                      FIFF.FIFFV_COORD_HEAD):
+            raise Exception('Only inverse solutions computed in MRI or '
+                            'head coordinates are acceptable')
+
+        #
+        #  Number of averages is initially one
+        #
+        inv['nave'] = 1
+        #
+        #  We also need the SSP operator
+        #
+        inv['projs'] = _read_proj(fid, tree)
+
+        #
+        #  Some empty fields to be filled in later
+        #
+        inv['proj'] = []       # This is the projector to apply to the data
+        inv['whitener'] = []   # This whitens the data
+        # The diagonal matrix implementing regularization and the inverse
+        inv['reginv'] = []
+        inv['noisenorm'] = []  # These are the noise-normalization factors
+        #
+        nuse = 0
+        for k in range(len(inv['src'])):
+            try:
+                inv['src'][k] = transform_surface_to(inv['src'][k],
+                                                     inv['coord_frame'],
+                                                     mri_head_t)
+            except Exception as inst:
+                raise Exception('Could not transform source space (%s)' % inst)
+
+            nuse += inv['src'][k]['nuse']
+
+        logger.info('    Source spaces transformed to the inverse solution '
+                    'coordinate frame')
+        #
+        #   Done!
+        #
+
+    return InverseOperator(inv)
+
+
+@verbose
+def write_inverse_operator(fname, inv, verbose=None):
+    """Write an inverse operator to a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the FIF file, which ends with -inv.fif or -inv.fif.gz.
+    inv : dict
+        The inverse operator.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_inverse_operator
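+
+    Examples
+    --------
+    A minimal round trip; file names are hypothetical placeholders:
+
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+    >>> write_inverse_operator('copy-inv.fif', inv)  # doctest: +SKIP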
+    """
+    check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
+
+    #
+    #   Open the file, create directory
+    #
+    logger.info('Write inverse operator decomposition in %s...' % fname)
+
+    # Create the file and save the essentials
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    #
+    #   Parent MEG measurement info
+    #
+    write_forward_meas_info(fid, inv['info'])
+
+    #
+    #   Parent MRI data
+    #
+    start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    write_string(fid, FIFF.FIFF_MNE_FILE_NAME, inv['info']['mri_file'])
+    write_coord_trans(fid, inv['mri_head_t'])
+    end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    #
+    #   Write SSP operator
+    #
+    _write_proj(fid, inv['projs'])
+
+    #
+    #   Write the source spaces
+    #
+    if 'src' in inv:
+        _write_source_spaces_to_fid(fid, inv['src'])
+
+    start_block(fid, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+
+    logger.info('    Writing inverse operator info...')
+
+    write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, inv['methods'])
+    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, inv['coord_frame'])
+
+    udict = {'Am': FIFF.FIFF_UNIT_AM,
+             'Am/m^2': FIFF.FIFF_UNIT_AM_M2,
+             'Am/m^3': FIFF.FIFF_UNIT_AM_M3}
+    if 'units' in inv and inv['units'] is not None:
+        write_int(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT, udict[inv['units']])
+
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, inv['source_ori'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, inv['nsource'])
+    if 'nchan' in inv:
+        write_int(fid, FIFF.FIFF_NCHAN, inv['nchan'])
+    elif 'nchan' in inv['info']:
+        write_int(fid, FIFF.FIFF_NCHAN, inv['info']['nchan'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS,
+                       inv['source_nn'])
+    write_float(fid, FIFF.FIFF_MNE_INVERSE_SING, inv['sing'])
+
+    #
+    #   write the covariance matrices
+    #
+    logger.info('    Writing noise covariance matrix.')
+    _write_cov(fid, inv['noise_cov'])
+
+    logger.info('    Writing source covariance matrix.')
+    _write_cov(fid, inv['source_cov'])
+
+    #
+    #   write the various priors
+    #
+    logger.info('    Writing orientation priors.')
+    if inv['depth_prior'] is not None:
+        _write_cov(fid, inv['depth_prior'])
+    if inv['orient_prior'] is not None:
+        _write_cov(fid, inv['orient_prior'])
+    if inv['fmri_prior'] is not None:
+        _write_cov(fid, inv['fmri_prior'])
+
+    write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_FIELDS, inv['eigen_fields'])
+
+    #
+    #   The eigenleads and eigenfields
+    #
+    if inv['eigen_leads_weighted']:
+        write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED,
+                           _transpose_named_matrix(inv['eigen_leads']))
+    else:
+        write_named_matrix(fid, FIFF.FIFF_MNE_INVERSE_LEADS,
+                           _transpose_named_matrix(inv['eigen_leads']))
+
+    #
+    #   Done!
+    #
+    logger.info('    [done]')
+
+    end_block(fid, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
+    fid.close()
+
+###############################################################################
+# Compute inverse solution
+
+
+def combine_xyz(vec, square=False):
+    """Compute the three Cartesian components of a vector or matrix together
+
+    Parameters
+    ----------
+    vec : 2d array of shape [3*n x p]
+        Input [x1 y1 z1 ... x_n y_n z_n] where x1 ... z_n
+        can be vectors.
+    square : bool
+        If True, return the squared magnitudes without taking the
+        square root.
+
+    Returns
+    -------
+    comb : array
+        Output vector [sqrt(x1^2+y1^2+z1^2), ..., sqrt(x_n^2+y_n^2+z_n^2)]
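+
+    Examples
+    --------
+    A small numerical sketch for a single source location (output shown
+    for illustration):
+
+    >>> import numpy as np
+    >>> combine_xyz(np.array([[3.], [4.], [0.]]))  # doctest: +SKIP
+    array([[ 5.]])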
+    """
+    if vec.ndim != 2:
+        raise ValueError('Input must be 2D')
+    if (vec.shape[0] % 3) != 0:
+        raise ValueError('Input must have 3N rows')
+
+    n, p = vec.shape
+    if np.iscomplexobj(vec):
+        vec = np.abs(vec)
+    comb = vec[0::3] ** 2
+    comb += vec[1::3] ** 2
+    comb += vec[2::3] ** 2
+    if not square:
+        comb = np.sqrt(comb)
+    return comb
+
+
+def _check_ch_names(inv, info):
+    """Check that channels in inverse operator are measurements"""
+
+    inv_ch_names = inv['eigen_fields']['col_names']
+
+    if inv['noise_cov'].ch_names != inv_ch_names:
+        raise ValueError('Channels in inverse operator eigen fields do not '
+                         'match noise covariance channels.')
+    data_ch_names = info['ch_names']
+
+    missing_ch_names = list()
+    for ch_name in inv_ch_names:
+        if ch_name not in data_ch_names:
+            missing_ch_names.append(ch_name)
+    n_missing = len(missing_ch_names)
+    if n_missing > 0:
+        raise ValueError('%d channels in inverse operator ' % n_missing +
+                         'are not present in the data (%s)' % missing_ch_names)
+
+
+@verbose
+def prepare_inverse_operator(orig, nave, lambda2, method, verbose=None):
+    """Prepare an inverse operator for actually computing the inverse
+
+    Parameters
+    ----------
+    orig : dict
+        The inverse operator structure read from a file.
+    nave : int
+        Number of averages (scales the noise covariance).
+    lambda2 : float
+        The regularization factor. Recommended to be 1 / SNR**2.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : instance of InverseOperator
+        Prepared inverse operator.
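+
+    Examples
+    --------
+    A minimal usage sketch; the file name is a hypothetical placeholder:
+
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+    >>> inv = prepare_inverse_operator(inv, nave=1, lambda2=1. / 9.,
+    ...                                method='dSPM')  # doctest: +SKIP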
+    """
+    if nave <= 0:
+        raise ValueError('The number of averages should be positive')
+
+    logger.info('Preparing the inverse operator for use...')
+    inv = deepcopy(orig)
+    #
+    #   Scale the noise and source covariances according to nave
+    #
+    scale = float(inv['nave']) / nave
+    inv['noise_cov']['data'] = scale * inv['noise_cov']['data']
+    # deal with diagonal case
+    if inv['noise_cov']['data'].ndim == 1:
+        logger.info('    Diagonal noise covariance found')
+        inv['noise_cov']['eig'] = inv['noise_cov']['data']
+        inv['noise_cov']['eigvec'] = np.eye(len(inv['noise_cov']['data']))
+
+    inv['noise_cov']['eig'] = scale * inv['noise_cov']['eig']
+    inv['source_cov']['data'] = scale * inv['source_cov']['data']
+    #
+    if inv['eigen_leads_weighted']:
+        inv['eigen_leads']['data'] = sqrt(scale) * inv['eigen_leads']['data']
+
+    logger.info('    Scaled noise and source covariance from nave = %d to'
+                ' nave = %d' % (inv['nave'], nave))
+    inv['nave'] = nave
+    #
+    #   Create the diagonal matrix for computing the regularized inverse
+    #
+    sing = np.array(inv['sing'], dtype=np.float64)
+    inv['reginv'] = sing / (sing ** 2 + lambda2)
+    logger.info('    Created the regularized inverter')
+    #
+    #   Create the projection operator
+    #
+    inv['proj'], ncomp, _ = make_projector(inv['projs'],
+                                           inv['noise_cov']['names'])
+    if ncomp > 0:
+        logger.info('    Created an SSP operator (subspace dimension = %d)'
+                    % ncomp)
+    else:
+        logger.info('    The projection vectors do not apply to these '
+                    'channels.')
+
+    #
+    #   Create the whitener
+    #
+    if not inv['noise_cov']['diag']:
+        inv['whitener'] = np.zeros((inv['noise_cov']['dim'],
+                                    inv['noise_cov']['dim']))
+        #
+        #   Omit the zeroes due to projection
+        #
+        eig = inv['noise_cov']['eig']
+        nzero = (eig > 0)
+        inv['whitener'][nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+        #
+        #   Rows of eigvec are the eigenvectors
+        #
+        inv['whitener'] = np.dot(inv['whitener'], inv['noise_cov']['eigvec'])
+        logger.info('    Created the whitener using a full noise '
+                    'covariance matrix (%d small eigenvalues omitted)'
+                    % (inv['noise_cov']['dim'] - np.sum(nzero)))
+    else:
+        #
+        #   No need to omit the zeroes due to projection
+        #
+        inv['whitener'] = np.diag(1.0 /
+                                  np.sqrt(inv['noise_cov']['data'].ravel()))
+        logger.info('    Created the whitener using a diagonal noise '
+                    'covariance matrix (%d small eigenvalues discarded)'
+                    % ncomp)
+
+    #
+    #   Finally, compute the noise-normalization factors
+    #
+    if method in ["dSPM", 'sLORETA']:
+        if method == "dSPM":
+            logger.info('    Computing noise-normalization factors '
+                        '(dSPM)...')
+            noise_weight = inv['reginv']
+        else:
+            logger.info('    Computing noise-normalization factors '
+                        '(sLORETA)...')
+            noise_weight = (inv['reginv'] *
+                            np.sqrt((1. + inv['sing'] ** 2 / lambda2)))
+        noise_norm = np.zeros(inv['eigen_leads']['nrow'])
+        nrm2, = linalg.get_blas_funcs(('nrm2',), (noise_norm,))
+        if inv['eigen_leads_weighted']:
+            for k in range(inv['eigen_leads']['nrow']):
+                one = inv['eigen_leads']['data'][k, :] * noise_weight
+                noise_norm[k] = nrm2(one)
+        else:
+            for k in range(inv['eigen_leads']['nrow']):
+                one = (sqrt(inv['source_cov']['data'][k]) *
+                       inv['eigen_leads']['data'][k, :] * noise_weight)
+                noise_norm[k] = nrm2(one)
+
+        #
+        #   Compute the final result
+        #
+        if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            #
+            #   The three-component case is a little bit more involved
+            #   The variances at three consecutive entries must be squared and
+            #   added together
+            #
+            #   Even in this case return only one noise-normalization factor
+            #   per source location
+            #
+            noise_norm = combine_xyz(noise_norm[:, None]).ravel()
+
+        inv['noisenorm'] = 1.0 / np.abs(noise_norm)
+        logger.info('[done]')
+    else:
+        inv['noisenorm'] = []
+
+    return InverseOperator(inv)
+
+
+@verbose
+def _assemble_kernel(inv, label, method, pick_ori, verbose=None):
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    eigen_leads = inv['eigen_leads']['data']
+    source_cov = inv['source_cov']['data'][:, None]
+    if method != "MNE":
+        noise_norm = inv['noisenorm'][:, None]
+
+    src = inv['src']
+    vertno = _get_vertno(src)
+
+    if label is not None:
+        vertno, src_sel = label_src_vertno_sel(label, inv['src'])
+
+        if method != "MNE":
+            noise_norm = noise_norm[src_sel]
+
+        if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            src_sel = 3 * src_sel
+            src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
+            src_sel = src_sel.ravel()
+
+        eigen_leads = eigen_leads[src_sel]
+        source_cov = source_cov[src_sel]
+
+    if pick_ori == "normal":
+        if not inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+            raise ValueError('Picking normal orientation can only be done '
+                             'with a free orientation inverse operator.')
+
+        is_loose = 0 < inv['orient_prior']['data'][0] < 1
+        if not is_loose:
+            raise ValueError('Picking normal orientation can only be done '
+                             'when working with loose orientations.')
+
+        # keep only the normal components
+        eigen_leads = eigen_leads[2::3]
+        source_cov = source_cov[2::3]
+
+    trans = inv['reginv'][:, None] * reduce(np.dot,
+                                            [inv['eigen_fields']['data'],
+                                             inv['whitener'],
+                                             inv['proj']])
+    #
+    #   Transformation into current distributions by weighting the eigenleads
+    #   with the weights computed above
+    #
+    if inv['eigen_leads_weighted']:
+        #
+        #     R^0.5 has been already factored in
+        #
+        logger.info('(eigenleads already weighted)...')
+        K = np.dot(eigen_leads, trans)
+    else:
+        #
+        #     R^0.5 has to be factored in
+        #
+        logger.info('(eigenleads need to be weighted)...')
+        K = np.sqrt(source_cov) * np.dot(eigen_leads, trans)
+
+    if method == "MNE":
+        noise_norm = None
+
+    return K, noise_norm, vertno
+
+
+def _check_method(method):
+    if method not in ["MNE", "dSPM", "sLORETA"]:
+        raise ValueError('method parameter should be "MNE" or "dSPM" '
+                         'or "sLORETA".')
+    return method
+
+
+def _check_ori(pick_ori):
+    if pick_ori is not None and pick_ori != 'normal':
+        raise RuntimeError('pick_ori must be None or "normal", not %s'
+                           % pick_ori)
+    return pick_ori
+
+
+def _check_reference(inst):
+    """Aux funcion"""
+    if "eeg" in inst and not _has_eeg_average_ref_proj(inst.info['projs']):
+        raise ValueError('EEG average reference is mandatory for inverse '
+                         'modeling.')
+    if inst.info['custom_ref_applied']:
+        raise ValueError('Custom EEG reference is not allowed for inverse '
+                         'modeling.')
+
+
+def _subject_from_inverse(inverse_operator):
+    """Get subject id from inverse operator"""
+    return inverse_operator['src'][0].get('subject_his_id', None)
+
+
+@verbose
+def apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
+                  method="dSPM", pick_ori=None,
+                  prepared=False, label=None, verbose=None):
+    """Apply inverse operator to evoked data
+
+    Parameters
+    ----------
+    evoked : Evoked object
+        Evoked data.
+    inverse_operator : instance of InverseOperator
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The source estimates
+
+    See Also
+    --------
+    apply_inverse_raw : Apply inverse operator to raw object
+    apply_inverse_epochs : Apply inverse operator to epochs object
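+
+    Examples
+    --------
+    A minimal usage sketch; file names are hypothetical placeholders:
+
+    >>> from mne import read_evokeds
+    >>> evoked = read_evokeds('sample-ave.fif', condition=0)  # doctest: +SKIP
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+    >>> stc = apply_inverse(evoked, inv, lambda2=1. / 9.,
+    ...                     method='dSPM')  # doctest: +SKIP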
+    """
+    _check_reference(evoked)
+    method = _check_method(method)
+    pick_ori = _check_ori(pick_ori)
+    #
+    #   Set up the inverse according to the parameters
+    #
+    nave = evoked.nave
+
+    _check_ch_names(inverse_operator, evoked.info)
+
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(evoked.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
+    sol = np.dot(K, evoked.data[sel])  # apply imaging kernel
+
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
+
+    if is_free_ori:
+        logger.info('combining the current components...')
+        sol = combine_xyz(sol)
+
+    if noise_norm is not None:
+        logger.info('(dSPM)...')
+        sol *= noise_norm
+
+    tstep = 1.0 / evoked.info['sfreq']
+    tmin = float(evoked.times[0])
+    subject = _subject_from_inverse(inverse_operator)
+
+    stc = _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                    subject=subject)
+    logger.info('[done]')
+
+    return stc
+
+
+@verbose
+def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
+                      label=None, start=None, stop=None, nave=1,
+                      time_func=None, pick_ori=None, buffer_size=None,
+                      prepared=False, verbose=None):
+    """Apply inverse operator to Raw data
+
+    Parameters
+    ----------
+    raw : Raw object
+        Raw data.
+    inverse_operator : dict
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
+    start : int
+        Index of the first time sample (index, not time in seconds).
+    stop : int
+        Index of the first time sample not to include (index, not time in
+        seconds).
+    nave : int
+        Number of averages used to regularize the solution.
+        Set to 1 on raw data.
+    time_func : callable
+        Linear function applied to sensor space time series.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    buffer_size : int (or None)
+        If not None, the computation of the inverse and the combination of the
+        current components is performed in segments of length buffer_size
+        samples. While slightly slower, this is useful for long datasets as it
+        reduces the memory requirements by approx. a factor of 3 (assuming
+        buffer_size << data length).
+        Note that this setting has no effect for fixed-orientation inverse
+        operators.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The source estimates.
+
+    See Also
+    --------
+    apply_inverse_epochs : Apply inverse operator to epochs object
+    apply_inverse : Apply inverse operator to evoked object
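+
+    Examples
+    --------
+    A minimal usage sketch; file names are hypothetical placeholders:
+
+    >>> from mne.io import Raw
+    >>> raw = Raw('sample_raw.fif')  # doctest: +SKIP
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+    >>> stc = apply_inverse_raw(raw, inv, lambda2=1. / 9., method='dSPM',
+    ...                         start=0, stop=1000)  # doctest: +SKIP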
+    """
+    _check_reference(raw)
+    method = _check_method(method)
+    pick_ori = _check_ori(pick_ori)
+
+    _check_ch_names(inverse_operator, raw.info)
+
+    #
+    #   Set up the inverse according to the parameters
+    #
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(raw.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+
+    data, times = raw[sel, start:stop]
+
+    if time_func is not None:
+        data = time_func(data)
+
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
+
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
+
+    if buffer_size is not None and is_free_ori:
+        # Process the data in segments to conserve memory
+        n_seg = int(np.ceil(data.shape[1] / float(buffer_size)))
+        logger.info('computing inverse and combining the current '
+                    'components (using %d segments)...' % (n_seg))
+
+        # Allocate space for inverse solution
+        n_times = data.shape[1]
+        sol = np.empty((K.shape[0] // 3, n_times),
+                       dtype=(K[0, 0] * data[0, 0]).dtype)
+
+        for pos in range(0, n_times, buffer_size):
+            sol[:, pos:pos + buffer_size] = \
+                combine_xyz(np.dot(K, data[:, pos:pos + buffer_size]))
+
+            logger.info('segment %d / %d done..'
+                        % (pos / buffer_size + 1, n_seg))
+    else:
+        sol = np.dot(K, data)
+        if is_free_ori:
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+
+    if noise_norm is not None:
+        sol *= noise_norm
+
+    tmin = float(times[0])
+    tstep = 1.0 / raw.info['sfreq']
+    subject = _subject_from_inverse(inverse_operator)
+    stc = _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                    subject=subject)
+    logger.info('[done]')
+
+    return stc
+
+
+def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method='dSPM',
+                              label=None, nave=1, pick_ori=None,
+                              prepared=False, verbose=None):
+    """ see apply_inverse_epochs """
+    method = _check_method(method)
+    pick_ori = _check_ori(pick_ori)
+
+    _check_ch_names(inverse_operator, epochs.info)
+
+    #
+    #   Set up the inverse according to the parameters
+    #
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
+
+    tstep = 1.0 / epochs.info['sfreq']
+    tmin = epochs.times[0]
+
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
+
+    if not is_free_ori and noise_norm is not None:
+        # premultiply kernel with noise normalization
+        K *= noise_norm
+
+    subject = _subject_from_inverse(inverse_operator)
+    for k, e in enumerate(epochs):
+        logger.info('Processing epoch : %d' % (k + 1))
+        if is_free_ori:
+            # Compute solution and combine current components (non-linear)
+            sol = np.dot(K, e[sel])  # apply imaging kernel
+            logger.info('combining the current components...')
+            sol = combine_xyz(sol)
+            if noise_norm is not None:
+                sol *= noise_norm
+        else:
+            # Linear inverse: do computation here or delayed
+            if len(sel) < K.shape[0]:
+                sol = (K, e[sel])
+            else:
+                sol = np.dot(K, e[sel])
+
+        stc = _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                        subject=subject)
+
+        yield stc
+
+    logger.info('[done]')
+
+
+@verbose
+def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
+                         label=None, nave=1, pick_ori=None,
+                         return_generator=False,
+                         prepared=False, verbose=None):
+    """Apply inverse operator to Epochs
+
+    Parameters
+    ----------
+    epochs : Epochs object
+        Single trial epochs.
+    inverse_operator : dict
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
+    nave : int
+        Number of averages used to regularize the solution.
+        Set to 1 on single Epoch by default.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : list of SourceEstimate or VolSourceEstimate
+        The source estimates for all epochs.
+
+    See Also
+    --------
+    apply_inverse_raw : Apply inverse operator to raw object
+    apply_inverse : Apply inverse operator to evoked object
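+
+    Examples
+    --------
+    A minimal usage sketch; assumes ``epochs`` is an existing Epochs
+    instance and the file name is a hypothetical placeholder:
+
+    >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+    >>> stcs = apply_inverse_epochs(epochs, inv, lambda2=1. / 9.,
+    ...                             method='dSPM')  # doctest: +SKIP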
+    """
+    _check_reference(epochs)
+    stcs = _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2,
+                                     method=method, label=label, nave=nave,
+                                     pick_ori=pick_ori, verbose=verbose,
+                                     prepared=prepared)
+
+    if not return_generator:
+        # return a list
+        stcs = [stc for stc in stcs]
+
+    return stcs
+
+
+'''
+def _xyz2lf(Lf_xyz, normals):
+    """Reorient leadfield to one component matching the normal to the cortex
+
+    This function takes a leadfield matrix computed for dipole components
+    pointing in the x, y, and z directions, and outputs a new lead field
+    matrix for dipole components pointing in the normal direction of the
+    cortical surfaces and in the two tangential directions to the cortex
+    (that is on the tangent cortical space). These two tangential dipole
+    components are uniquely determined by the SVD (reduction of variance).
+
+    Parameters
+    ----------
+    Lf_xyz : array of shape [n_sensors, n_positions x 3]
+        Leadfield
+    normals : array of shape [n_positions, 3]
+        Normals to the cortex
+
+    Returns
+    -------
+    Lf_cortex : array of shape [n_sensors, n_positions x 3]
+        Lf_cortex is a leadfield matrix for dipoles in rotated orientations, so
+        that the first column is the gain vector for the cortical normal dipole
+        and the following two column vectors are the gain vectors for the
+        tangential orientations (tangent space of cortical surface).
+    """
+    n_sensors, n_dipoles = Lf_xyz.shape
+    n_positions = n_dipoles // 3
+    Lf_xyz = Lf_xyz.reshape(n_sensors, n_positions, 3)
+    n_sensors, n_positions, _ = Lf_xyz.shape
+    Lf_cortex = np.zeros_like(Lf_xyz)
+
+    for k in range(n_positions):
+        lf_normal = np.dot(Lf_xyz[:, k, :], normals[k])
+        lf_normal_n = lf_normal[:, None] / linalg.norm(lf_normal)
+        P = np.eye(n_sensors, n_sensors) - np.dot(lf_normal_n, lf_normal_n.T)
+        lf_p = np.dot(P, Lf_xyz[:, k, :])
+        U, s, Vh = linalg.svd(lf_p)
+        Lf_cortex[:, k, 0] = lf_normal
+        Lf_cortex[:, k, 1:] = np.c_[U[:, 0] * s[0], U[:, 1] * s[1]]
+
+    Lf_cortex = Lf_cortex.reshape(n_sensors, n_dipoles)
+    return Lf_cortex
+'''
+
+
+###############################################################################
+# Assemble the inverse operator
+
+@verbose
+def _prepare_forward(forward, info, noise_cov, pca=False, rank=None,
+                     verbose=None):
+    """Util function to prepare forward solution for inverse solvers
+    """
+    # fwd['sol']['row_names'] may be different order from fwd['info']['chs']
+    fwd_sol_ch_names = forward['sol']['row_names']
+    ch_names = [c['ch_name'] for c in info['chs']
+                if ((c['ch_name'] not in info['bads'] and
+                     c['ch_name'] not in noise_cov['bads']) and
+                    (c['ch_name'] in fwd_sol_ch_names and
+                     c['ch_name'] in noise_cov.ch_names))]
+
+    if not len(info['bads']) == len(noise_cov['bads']) or \
+            not all(b in noise_cov['bads'] for b in info['bads']):
+        logger.info('info["bads"] and noise_cov["bads"] do not match, '
+                    'excluding bad channels from both')
+
+    n_chan = len(ch_names)
+    logger.info("Computing inverse operator with %d channels." % n_chan)
+
+    #
+    #   Handle noise cov
+    #
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names, rank)
+
+    #   Omit the zeroes due to projection
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    n_nzero = sum(nzero)
+
+    if pca:
+        #   Rows of eigvec are the eigenvectors
+        whitener = noise_cov['eigvec'][nzero] / np.sqrt(eig[nzero])[:, None]
+        logger.info('Reducing data rank to %d' % n_nzero)
+    else:
+        whitener = np.zeros((n_chan, n_chan), dtype=np.float)
+        whitener[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+        #   Rows of eigvec are the eigenvectors
+        whitener = np.dot(whitener, noise_cov['eigvec'])
+
+    gain = forward['sol']['data']
+
+    # This actually reorders the gain matrix to conform to the info ch order
+    fwd_idx = [fwd_sol_ch_names.index(name) for name in ch_names]
+    gain = gain[fwd_idx]
+    # Any function calling this helper will be using the returned fwd_info
+    # dict, so fwd['sol']['row_names'] becomes obsolete and is NOT re-ordered
+
+    info_idx = [info['ch_names'].index(name) for name in ch_names]
+    fwd_info = pick_info(info, info_idx)
+
+    logger.info('Total rank is %d' % n_nzero)
+
+    return fwd_info, gain, noise_cov, whitener, n_nzero
+
+
+@verbose
+def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
+                          fixed=False, limit_depth_chs=True, rank=None,
+                          verbose=None):
+    """Assemble inverse operator
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info to specify the channels to include.
+        Bad channels in info['bads'] are not used.
+    forward : dict
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance matrix.
+    loose : None | float in [0, 1]
+        Value that weights the source variances of the dipole components
+        defining the tangent space of the cortical surfaces. Requires surface-
+        based, free orientation forward solutions.
+    depth : None | float in [0, 1]
+        Depth weighting coefficients. If None, no depth weighting is performed.
+    fixed : bool
+        Use fixed source orientations normal to the cortical mantle. If True,
+        the loose parameter is ignored.
+    limit_depth_chs : bool
+        If True, use only grad channels in depth weighting (equivalent to MNE
+        C code). If grad channels aren't present, only mag channels will be
+        used (if no mag, then eeg). If False, use all channels.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    inv : instance of InverseOperator
+        Inverse operator.
+
+    Notes
+    -----
+    For different sets of options (**loose**, **depth**, **fixed**) to work,
+    the forward operator must have been loaded using a certain configuration
+    (i.e., with **force_fixed** and **surf_ori** set appropriately). For
+    example, given the desired inverse type (with representative choices
+    of **loose** = 0.2 and **depth** = 0.8 shown in the table in various
+    places, as these are the defaults for those parameters):
+
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | Inverse desired                             | Forward parameters allowed                 |
+        +=====================+===========+===========+===========+=================+==============+
+        |                     | **loose** | **depth** | **fixed** | **force_fixed** | **surf_ori** |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Loose constraint, | 0.2       | 0.8       | False     | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Loose constraint  | 0.2       | None      | False     | False           | True         |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Free orientation, | None      | 0.8       | False     | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Free orientation  | None      | None      | False     | False           | True | False |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Fixed constraint, | None      | 0.8       | True      | False           | True         |
+        | | Depth weighted    |           |           |           |                 |              |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+        | | Fixed constraint  | None      | None      | True      | True            | True         |
+        +---------------------+-----------+-----------+-----------+-----------------+--------------+
+
+    Also note that, if the source space (as stored in the forward solution)
+    has patch statistics computed, these are used to improve the depth
+    weighting. Thus slightly different results are to be expected with
+    and without this information.
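+
+    A minimal usage sketch (the file names below are placeholders for your
+    own data, not files shipped with this package)::
+
+        >>> from mne import read_cov, read_evokeds, read_forward_solution
+        >>> evoked = read_evokeds('my-ave.fif', condition=0)  # doctest: +SKIP
+        >>> fwd = read_forward_solution('my-fwd.fif', surf_ori=True)  # doctest: +SKIP
+        >>> cov = read_cov('my-cov.fif')  # doctest: +SKIP
+        >>> inv = make_inverse_operator(evoked.info, fwd, cov, loose=0.2,
+        ...                             depth=0.8)  # doctest: +SKIP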
+    """  # noqa
+    is_fixed_ori = is_fixed_orient(forward)
+
+    if fixed and loose is not None:
+        warnings.warn("When invoking make_inverse_operator with fixed=True, "
+                      "the loose parameter is ignored.")
+        loose = None
+
+    if is_fixed_ori and not fixed:
+        raise ValueError('Forward operator has fixed orientation and can only '
+                         'be used to make a fixed-orientation inverse '
+                         'operator.')
+    if fixed:
+        if depth is not None:
+            if is_fixed_ori or not forward['surf_ori']:
+                raise ValueError('For a fixed orientation inverse solution '
+                                 'with depth weighting, the forward solution '
+                                 'must be free-orientation and in surface '
+                                 'orientation')
+        elif forward['surf_ori'] is False:
+            raise ValueError('For a fixed orientation inverse solution '
+                             'without depth weighting, the forward solution '
+                             'must be in surface orientation')
+
+    # depth=None can use a fixed fwd; 0 < depth <= 1 requires free orientation
+    if depth is not None:
+        if not (0 < depth <= 1):
+            raise ValueError('depth should be a scalar between 0 and 1')
+        if is_fixed_ori:
+            raise ValueError('You need a free-orientation, surface-oriented '
+                             'forward solution to do depth weighting even '
+                             'when calculating a fixed-orientation inverse.')
+        if not forward['surf_ori']:
+            forward = convert_forward_solution(forward, surf_ori=True)
+        assert forward['surf_ori']
+    if loose is not None:
+        if not (0 <= loose <= 1):
+            raise ValueError('loose value should be between 0 and 1, '
+                             'or None to not use loose orientations.')
+        if loose < 1 and not forward['surf_ori']:
+            raise ValueError('Forward operator is not oriented in surface '
+                             'coordinates. A loose inverse operator requires '
+                             'a surface-based, free orientation forward '
+                             'operator.')
+
+    #
+    # 1. Read the bad channels
+    # 2. Read the necessary data from the forward solution matrix file
+    # 3. Load the projection data
+    # 4. Load the sensor noise covariance matrix and attach it to the forward
+    #
+
+    gain_info, gain, noise_cov, whitener, n_nzero = \
+        _prepare_forward(forward, info, noise_cov, rank=rank)
+    forward['info']._check_consistency()
+
+    #
+    # 5. Compose the depth-weighting matrix
+    #
+
+    if depth is not None:
+        patch_areas = forward.get('patch_areas', None)
+        depth_prior = compute_depth_prior(gain, gain_info, is_fixed_ori,
+                                          exp=depth, patch_areas=patch_areas,
+                                          limit_depth_chs=limit_depth_chs)
+    else:
+        depth_prior = np.ones(gain.shape[1], dtype=gain.dtype)
+
+    # Deal with fixed orientation forward / inverse
+    if fixed:
+        if depth is not None:
+            # Convert the depth prior into a fixed-orientation one
+            logger.info('    Converted the free-orientation depth-weighting '
+                        'prior to a fixed-orientation one')
+        if not is_fixed_ori:
+            # Convert to the fixed orientation forward solution now
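+            # (with surf_ori the three columns per source are the two
+            # tangential components followed by the normal one, so [2::3]
+            # keeps exactly the normal-component prior of each source)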
+            depth_prior = depth_prior[2::3]
+            forward = convert_forward_solution(
+                forward, surf_ori=forward['surf_ori'], force_fixed=True)
+            is_fixed_ori = is_fixed_orient(forward)
+            gain_info, gain, noise_cov, whitener, n_nzero = \
+                _prepare_forward(forward, info, noise_cov, verbose=False)
+
+    logger.info("Computing inverse operator with %d channels."
+                % len(gain_info['ch_names']))
+
+    #
+    # 6. Compose the source covariance matrix
+    #
+
+    logger.info('Creating the source covariance matrix')
+    source_cov = depth_prior.copy()
+    depth_prior = dict(data=depth_prior, kind=FIFF.FIFFV_MNE_DEPTH_PRIOR_COV,
+                       bads=[], diag=True, names=[], eig=None,
+                       eigvec=None, dim=depth_prior.size, nfree=1,
+                       projs=[])
+
+    # apply loose orientations
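+    # (the prior variance of the two tangential components of each source
+    # is scaled by 'loose' relative to the surface-normal component)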
+    if not is_fixed_ori:
+        orient_prior = compute_orient_prior(forward, loose=loose)
+        source_cov *= orient_prior
+        orient_prior = dict(data=orient_prior,
+                            kind=FIFF.FIFFV_MNE_ORIENT_PRIOR_COV,
+                            bads=[], diag=True, names=[], eig=None,
+                            eigvec=None, dim=orient_prior.size, nfree=1,
+                            projs=[])
+    else:
+        orient_prior = None
+
+    # 7. Apply fMRI weighting (not done)
+
+    #
+    # 8. Apply the linear projection to the forward solution
+    # 9. Apply whitening to the forward computation matrix
+    #
+    logger.info('Whitening the forward solution.')
+    gain = np.dot(whitener, gain)
+
+    # 10. Exclude the source space points within the labels (not done)
+
+    #
+    # 11. Do appropriate source weighting to the forward computation matrix
+    #
+
+    # Adjusting Source Covariance matrix to make trace of G*R*G' equal
+    # to number of sensors.
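+    # Since R is diagonal, trace(G*R*G') equals the squared Frobenius norm
+    # of G*diag(sqrt(R)), which is what is computed below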
+    logger.info('Adjusting source covariance matrix.')
+    source_std = np.sqrt(source_cov)
+    gain *= source_std[None, :]
+    trace_GRGT = linalg.norm(gain, ord='fro') ** 2
+    scaling_source_cov = n_nzero / trace_GRGT
+    source_cov *= scaling_source_cov
+    gain *= sqrt(scaling_source_cov)
+
+    source_cov = dict(data=source_cov, dim=source_cov.size,
+                      kind=FIFF.FIFFV_MNE_SOURCE_COV, diag=True,
+                      names=[], projs=[], eig=None, eigvec=None,
+                      nfree=1, bads=[])
+
+    # now np.trace(np.dot(gain, gain.T)) == n_nzero
+    # logger.info(np.trace(np.dot(gain, gain.T)), n_nzero)
+
+    #
+    # 12. Decompose the combined matrix
+    #
+
+    logger.info('Computing SVD of whitened and weighted lead field '
+                'matrix.')
+    eigen_fields, sing, eigen_leads = linalg.svd(gain, full_matrices=False)
+    logger.info('    largest singular value = %g' % np.max(sing))
+    logger.info('    scaling factor to adjust the trace = %g' % trace_GRGT)
+
+    eigen_fields = dict(data=eigen_fields.T, col_names=gain_info['ch_names'],
+                        row_names=[], nrow=eigen_fields.shape[1],
+                        ncol=eigen_fields.shape[0])
+    eigen_leads = dict(data=eigen_leads.T, nrow=eigen_leads.shape[1],
+                       ncol=eigen_leads.shape[0], row_names=[],
+                       col_names=[])
+    nave = 1.0
+
+    # Handle methods
+    has_meg = False
+    has_eeg = False
+    ch_idx = [k for k, c in enumerate(info['chs'])
+              if c['ch_name'] in gain_info['ch_names']]
+    for idx in ch_idx:
+        ch_type = channel_type(info, idx)
+        if ch_type == 'eeg':
+            has_eeg = True
+        if (ch_type == 'mag') or (ch_type == 'grad'):
+            has_meg = True
+    if has_eeg and has_meg:
+        methods = FIFF.FIFFV_MNE_MEG_EEG
+    elif has_meg:
+        methods = FIFF.FIFFV_MNE_MEG
+    else:
+        methods = FIFF.FIFFV_MNE_EEG
+
+    # We set this for consistency with inverses written by the MNE C code
+    if depth is None:
+        depth_prior = None
+    inv_op = dict(eigen_fields=eigen_fields, eigen_leads=eigen_leads,
+                  sing=sing, nave=nave, depth_prior=depth_prior,
+                  source_cov=source_cov, noise_cov=noise_cov,
+                  orient_prior=orient_prior, projs=deepcopy(info['projs']),
+                  eigen_leads_weighted=False, source_ori=forward['source_ori'],
+                  mri_head_t=deepcopy(forward['mri_head_t']),
+                  methods=methods, nsource=forward['nsource'],
+                  coord_frame=forward['coord_frame'],
+                  source_nn=forward['source_nn'].copy(),
+                  src=deepcopy(forward['src']), fmri_prior=None)
+    inv_info = deepcopy(forward['info'])
+    inv_info['bads'] = [bad for bad in info['bads']
+                        if bad in inv_info['ch_names']]
+    inv_info._check_consistency()
+    inv_op['units'] = 'Am'
+    inv_op['info'] = inv_info
+
+    return InverseOperator(inv_op)
+
+
+def compute_rank_inverse(inv):
+    """Compute the rank of a linear inverse operator (MNE, dSPM, etc.)
+
+    Parameters
+    ----------
+    inv : dict
+        The inverse operator.
+
+    Returns
+    -------
+    rank : int
+        The rank of the inverse operator.
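+
+    A minimal sketch (``fname_inv`` is a placeholder for an existing
+    inverse operator file)::
+
+        >>> from mne.minimum_norm import read_inverse_operator
+        >>> inv = read_inverse_operator(fname_inv)  # doctest: +SKIP
+        >>> rank = compute_rank_inverse(inv)  # doctest: +SKIP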
+    """
+    # this code shortened from prepare_inverse_operator
+    eig = inv['noise_cov']['eig']
+    if not inv['noise_cov']['diag']:
+        rank = np.sum(eig > 0)
+    else:
+        ncomp = make_projector(inv['projs'], inv['noise_cov']['names'])[1]
+        rank = inv['noise_cov']['dim'] - ncomp
+    return rank
+
+
+# #############################################################################
+# SNR Estimation
+
+ at verbose
+def estimate_snr(evoked, inv, verbose=None):
+    """Estimate the SNR as a function of time for evoked data
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked instance.
+    inv : instance of InverseOperator
+        The inverse operator.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    snr : ndarray, shape (n_times,)
+        The SNR estimated from the whitened data.
+    snr_est : ndarray, shape (n_times,)
+        The SNR estimated using the mismatch between the unregularized
+        solution and the regularized solution.
+
+    Notes
+    -----
+    ``snr_est`` is estimated by using different amounts of inverse
+    regularization and checking the mismatch between predicted and
+    measured whitened data.
+
+    In more detail, given our whitened inverse obtained from SVD:
+
+    .. math::
+
+        \\tilde{M} = R^\\frac{1}{2}V\\Gamma U^T
+
+    The values in the diagonal matrix :math:`\\Gamma` are expressed in terms
+    of the chosen regularization :math:`\\lambda\\approx\\frac{1}{\\rm{SNR}^2}`
+    and singular values :math:`\\lambda_k` as:
+
+    .. math::
+
+        \\gamma_k = \\frac{1}{\\lambda_k}\\frac{\\lambda_k^2}{\\lambda_k^2 + \\lambda^2}
+
+    We also know that our predicted data is given by:
+
+    .. math::
+
+        \\hat{x}(t) = G\\hat{j}(t)=C^\\frac{1}{2}U\\Pi w(t)
+
+    And thus our predicted whitened data is just:
+
+    .. math::
+
+        \\hat{w}(t) = U\\Pi w(t)
+
+    Where :math:`\\Pi` is diagonal with entries:
+
+    .. math::
+
+        \\lambda_k\\gamma_k = \\frac{\\lambda_k^2}{\\lambda_k^2 + \\lambda^2}
+
+    If we use no regularization, note that :math:`\\Pi` is just the
+    identity matrix. Here we test the squared magnitude of the difference
+    between unregularized solution and regularized solutions, choosing the
+    biggest regularization that achieves a :math:`\\chi^2`-test significance
+    of 0.001.
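+
+    A minimal usage sketch (``evoked`` and ``inv`` are assumed to be a
+    matching Evoked instance and InverseOperator, loaded beforehand)::
+
+        >>> snr, snr_est = estimate_snr(evoked, inv)  # doctest: +SKIP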
+
+    .. versionadded:: 0.9.0
+    """  # noqa
+    from scipy.stats import chi2
+    _check_reference(evoked)
+    _check_ch_names(inv, evoked.info)
+    inv = prepare_inverse_operator(inv, evoked.nave, 1. / 9., 'MNE')
+    sel = _pick_channels_inverse_operator(evoked.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    data_white = np.dot(inv['whitener'], np.dot(inv['proj'], evoked.data[sel]))
+    data_white_ef = np.dot(inv['eigen_fields']['data'], data_white)
+    n_ch, n_times = data_white.shape
+
+    # Adapted from mne_analyze/regularization.c, compute_regularization
+    n_zero = (inv['noise_cov']['eig'] <= 0).sum()
+    logger.info('Effective nchan = %d - %d = %d'
+                % (n_ch, n_zero, n_ch - n_zero))
+    signal = np.sum(data_white ** 2, axis=0)  # sum of squares across channels
+    noise = n_ch - n_zero
+    snr = signal / noise
+
+    # Adapted from noise_regularization
+    lambda2_est = np.empty(n_times)
+    lambda2_est.fill(10.)
+    remaining = np.ones(n_times, bool)
+
+    # deal with low SNRs
+    bad = (snr <= 1)
+    lambda2_est[bad] = 100.
+    remaining[bad] = False
+
+    # parameters
+    lambda_mult = 0.9
+    sing2 = (inv['sing'] * inv['sing'])[:, np.newaxis]
+    val = chi2.isf(1e-3, n_ch - 1)
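+    # val is the chi^2 cutoff (p = 0.001, n_ch - 1 degrees of freedom); a
+    # time point is finished once its whitened residual power falls below
+    # it, otherwise its lambda2 keeps shrinking by lambda_mult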
+    for n_iter in range(1000):
+        # get_mne_weights (ew=error_weights)
+        # (split newaxis creation here for old numpy)
+        f = sing2 / (sing2 + lambda2_est[np.newaxis][:, remaining])
+        f[inv['sing'] == 0] = 0
+        ew = data_white_ef[:, remaining] * (1.0 - f)
+        # check condition
+        err = np.sum(ew * ew, axis=0)
+        remaining[np.where(remaining)[0][err < val]] = False
+        if not remaining.any():
+            break
+        lambda2_est[remaining] *= lambda_mult
+    else:
+        warnings.warn('SNR estimation did not converge')
+    snr_est = 1.0 / np.sqrt(lambda2_est)
+    snr = np.sqrt(snr)
+    return snr, snr_est
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/psf_ctf.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/psf_ctf.py
new file mode 100644
index 0000000..7081159
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/psf_ctf.py
@@ -0,0 +1,436 @@
+# Authors: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
+#          Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+
+import numpy as np
+from scipy import linalg
+
+from ..io.pick import pick_channels
+from ..utils import logger, verbose
+from ..forward import convert_forward_solution
+from ..evoked import EvokedArray
+from ..source_estimate import SourceEstimate
+from .inverse import _subject_from_inverse
+from . import apply_inverse
+
+
+def _prepare_info(inverse_operator):
+    """Helper to get a usable dict"""
+    # in order to convert sub-leadfield matrix to evoked data type (pretending
+    # it's an epoch, see in loop below), uses 'info' from inverse solution
+    # because this has all the correct projector information
+    info = deepcopy(inverse_operator['info'])
+    info['sfreq'] = 1000.  # necessary
+    info['projs'] = inverse_operator['projs']
+    return info
+
+
+def _pick_leadfield(leadfield, forward, ch_names):
+    """Helper to pick out correct lead field components"""
+    # NB must pick from fwd['sol']['row_names'], not ['info']['ch_names'],
+    # because ['sol']['data'] may be ordered differently from functional data
+    picks_fwd = pick_channels(forward['sol']['row_names'], ch_names)
+    return leadfield[picks_fwd]
+
+
+@verbose
+def point_spread_function(inverse_operator, forward, labels, method='dSPM',
+                          lambda2=1 / 9., pick_ori=None, mode='mean',
+                          n_svd_comp=1, verbose=None):
+    """Compute point-spread functions (PSFs) for linear estimators
+
+    Compute point-spread functions (PSF) in labels for a combination of inverse
+    operator and forward solution. PSFs are computed for test sources that are
+    perpendicular to cortical surface.
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        Inverse operator.
+    forward : dict
+        Forward solution. Note: (Bad) channels not included in forward
+        solution will not be used in PSF computation.
+    labels : list of Label
+        Labels for which PSFs shall be computed.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method for which PSFs shall be computed (for apply_inverse).
+    lambda2 : float
+        The regularization parameter (for apply_inverse).
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations (for apply_inverse).
+    mode : 'mean' | 'sum' | 'svd'
+        PSFs can be computed for different summary measures with labels:
+        'sum' or 'mean': sum or means of sub-leadfields for labels
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-leadfields for labels
+        This is better suited for situations where activation patterns are
+        assumed to be more variable.
+        "sub-leadfields" are the parts of the forward solutions that belong to
+        vertices within individual labels.
+    n_svd_comp : int
+        Number of SVD components for which PSFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-leadfields are shown in screen output.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_psf : SourceEstimate
+        The PSFs for the specified labels
+        If mode='svd': n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze)
+        The last sample is the summed PSF across all labels
+        Scaling of PSFs is arbitrary, and may differ greatly among methods
+        (especially for MNE compared to noise-normalized estimates).
+    evoked_fwd : Evoked
+        Forward solutions corresponding to PSFs in stc_psf
+        If mode='svd': n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze)
+        The last sample is the summed forward solution across all labels
+        (sum is taken across summary measures).
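+
+    A minimal usage sketch (``inv``, ``fwd`` and ``labels`` are assumed to
+    be loaded already; illustrative only)::
+
+        >>> stc_psf, evoked_fwd = point_spread_function(
+        ...     inv, fwd, labels, method='dSPM', mode='svd',
+        ...     n_svd_comp=2)  # doctest: +SKIP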
+    """
+    mode = mode.lower()
+    if mode not in ['mean', 'sum', 'svd']:
+        raise ValueError("mode must be 'svd', 'mean' or 'sum'. Got %s."
+                         % mode)
+
+    logger.info("About to process %d labels" % len(labels))
+
+    forward = convert_forward_solution(forward, force_fixed=False,
+                                       surf_ori=True)
+    info = _prepare_info(inverse_operator)
+    leadfield = _pick_leadfield(forward['sol']['data'][:, 2::3], forward,
+                                info['ch_names'])
+
+    # will contain means of subleadfields for all labels
+    label_psf_summary = []
+    # if mode='svd', this will collect all SVD singular values for labels
+    label_singvals = []
+
+    # loop over labels
+    for ll in labels:
+        logger.info(ll)
+        if ll.hemi == 'rh':
+            # for RH labels, add number of LH vertices
+            offset = forward['src'][0]['vertno'].shape[0]
+            # remember whether we are in the LH or RH
+            this_hemi = 1
+        elif ll.hemi == 'lh':
+            offset = 0
+            this_hemi = 0
+        else:
+            raise RuntimeError("Cannot determine hemisphere of label.")
+
+        # get vertices on cortical surface inside label
+        idx = np.intersect1d(ll.vertices, forward['src'][this_hemi]['vertno'])
+
+        # get vertices in source space inside label
+        fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
+
+        # get sub-leadfield matrix for label vertices
+        sub_leadfield = leadfield[:, fwd_idx + offset]
+
+        # compute summary data for labels
+        if mode == 'sum':  # sum across forward solutions in label
+            logger.info("Computing sums within labels")
+            this_label_psf_summary = sub_leadfield.sum(axis=1)[np.newaxis, :]
+        elif mode == 'mean':
+            logger.info("Computing means within labels")
+            this_label_psf_summary = sub_leadfield.mean(axis=1)[np.newaxis, :]
+        elif mode == 'svd':  # takes svd of forward solutions in label
+            logger.info("Computing SVD within labels, using %d component(s)"
+                        % n_svd_comp)
+
+            # compute SVD of sub-leadfield
+            u_svd, s_svd, _ = linalg.svd(sub_leadfield,
+                                         full_matrices=False,
+                                         compute_uv=True)
+
+            # keep singular values (might be useful to some people)
+            label_singvals.append(s_svd)
+
+            # get first n_svd_comp components, weighted with their
+            # corresponding singular values
+            logger.info("First 5 singular values: %s" % s_svd[0:5])
+            logger.info("(This tells you something about variability of "
+                        "forward solutions in sub-leadfield for label)")
+            # explained variance by chosen components within sub-leadfield
+            my_comps = s_svd[:n_svd_comp]
+            comp_var = (100. * np.sum(my_comps * my_comps) /
+                        np.sum(s_svd * s_svd))
+            logger.info("Your %d component(s) explain(s) %.1f%% "
+                        "variance in label." % (n_svd_comp, comp_var))
+            this_label_psf_summary = (u_svd[:, :n_svd_comp] *
+                                      s_svd[:n_svd_comp][np.newaxis, :])
+            # transpose required for conversion to "evoked"
+            this_label_psf_summary = this_label_psf_summary.T
+
+        # initialise or append to existing collection
+        label_psf_summary.append(this_label_psf_summary)
+
+    label_psf_summary = np.concatenate(label_psf_summary, axis=0)
+    # compute sum across forward solutions for labels, append to end
+    label_psf_summary = np.r_[label_psf_summary,
+                              label_psf_summary.sum(axis=0)[np.newaxis, :]].T
+
+    # convert sub-leadfield matrix to evoked data type (a bit of a hack)
+    evoked_fwd = EvokedArray(label_psf_summary, info=info, tmin=0.)
+
+    # compute PSFs by applying inverse operator to sub-leadfields
+    logger.info("About to apply inverse operator for method='%s' and "
+                "lambda2=%s" % (method, lambda2))
+
+    stc_psf = apply_inverse(evoked_fwd, inverse_operator, lambda2,
+                            method=method, pick_ori=pick_ori)
+
+    return stc_psf, evoked_fwd
+
+
+def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
+                                      method='dSPM', lambda2=1. / 9.,
+                                      mode='mean', n_svd_comp=1):
+    """Get inverse matrix from an inverse operator
+
+    Currently works only for fixed/loose orientation constraints
+    For loose orientation constraint, the CTFs are computed for the radial
+    component (pick_ori='normal').
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        The inverse operator.
+    forward : dict
+        The forward operator.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method (for apply_inverse).
+    labels : list of Label | None
+        Labels for which CTFs shall be computed. If None, inverse matrix for
+        all vertices will be returned.
+    lambda2 : float
+        The regularization parameter (for apply_inverse).
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations (for apply_inverse).
+        Determines whether the whole inverse matrix G will have one or
+        three rows per vertex. This will also affect summary measures for
+        labels.
+    mode : 'mean' | 'sum' | 'svd'
+        CTFs can be computed for different summary measures with labels:
+        'sum' or 'mean': sum or means of sub-inverse for labels
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-inverse for labels
+        This is better suited for situations where activation patterns are
+        assumed to be more variable.
+        "sub-inverse" is the part of the inverse matrix that belongs to
+        vertices within individual labels.
+    n_svd_comp : int
+        Number of SVD components for which CTFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-inverses are shown in screen output.
+
+    Returns
+    -------
+    invmat : ndarray
+        Inverse matrix associated with inverse operator and specified
+        parameters.
+    label_singvals : list of ndarray
+        Singular values of svd for sub-inverses.
+        Provides information about how well labels are represented by chosen
+        components. Explained variances within sub-inverses are shown in
+        screen output.
+    """
+    mode = mode.lower()
+
+    if not forward['surf_ori']:
+        raise RuntimeError('Forward has to be surface oriented and '
+                           'force_fixed=True.')
+    if not (forward['source_ori'] == 1):
+        raise RuntimeError('Forward has to be surface oriented and '
+                           'force_fixed=True.')
+
+    if labels:
+        logger.info("About to process %d labels" % len(labels))
+    else:
+        logger.info("Computing whole inverse operator.")
+
+    info = _prepare_info(inverse_operator)
+
+    # create identity matrix as input for inverse operator
+    id_mat = np.eye(len(info['ch_names']))
+
+    # convert identity matrix to evoked data type (pretending it's an epoch)
+    ev_id = EvokedArray(id_mat, info=info, tmin=0.)
+
+    # apply inverse operator to identity matrix in order to get inverse matrix
+    # free orientation constraint not possible because apply_inverse would
+    # combine components
+    invmat_mat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
+                                  method=method, pick_ori='normal')
+
+    logger.info("Dimension of inverse matrix: %s" % str(invmat_mat_op.shape))
+
+    # turn source estimate into numpy array
+    invmat_mat = invmat_mat_op.data
+    invmat_summary = []
+    # if mode='svd', label_singvals will collect all SVD singular values for
+    # labels
+    label_singvals = []
+
+    if labels:
+        for ll in labels:
+            if ll.hemi == 'rh':
+                # for RH labels, add number of LH vertices
+                offset = forward['src'][0]['vertno'].shape[0]
+                # remember whether we are in the LH or RH
+                this_hemi = 1
+            elif ll.hemi == 'lh':
+                offset = 0
+                this_hemi = 0
+            else:
+                raise RuntimeError("Cannot determine hemisphere of label.")
+
+            # get vertices on cortical surface inside label
+            idx = np.intersect1d(ll.vertices,
+                                 forward['src'][this_hemi]['vertno'])
+
+            # get vertices in source space inside label
+            fwd_idx = np.searchsorted(forward['src'][this_hemi]['vertno'], idx)
+
+            # get sub-inverse for label vertices, one row per vertex
+            invmat_lbl = invmat_mat[fwd_idx + offset, :]
+
+            # compute summary data for labels
+            if mode == 'sum':  # takes sum across estimators in label
+                logger.info("Computing sums within labels")
+                this_invmat_summary = invmat_lbl.sum(axis=0)
+                this_invmat_summary = np.vstack(this_invmat_summary).T
+            elif mode == 'mean':
+                logger.info("Computing means within labels")
+                this_invmat_summary = invmat_lbl.mean(axis=0)
+                this_invmat_summary = np.vstack(this_invmat_summary).T
+            elif mode == 'svd':  # takes svd of sub-inverse in label
+                logger.info("Computing SVD within labels, using %d "
+                            "component(s)" % n_svd_comp)
+
+                # compute SVD of sub-inverse
+                u_svd, s_svd, _ = linalg.svd(invmat_lbl.T,
+                                             full_matrices=False,
+                                             compute_uv=True)
+
+                # keep singular values (might be useful to some people)
+                label_singvals.append(s_svd)
+
+                # get first n_svd_comp components, weighted with their
+                # corresponding singular values
+                logger.info("First 5 singular values: %s" % s_svd[:5])
+                logger.info("(This tells you something about variability of "
+                            "estimators in sub-inverse for label)")
+                # explained variance by chosen components within sub-inverse
+                my_comps = s_svd[:n_svd_comp]
+                comp_var = ((100 * np.sum(my_comps * my_comps)) /
+                            np.sum(s_svd * s_svd))
+                logger.info("Your %d component(s) explain(s) %.1f%% "
+                            "variance in label." % (n_svd_comp, comp_var))
+                this_invmat_summary = (u_svd[:, :n_svd_comp].T *
+                                       s_svd[:n_svd_comp][:, np.newaxis])
+
+            invmat_summary.append(this_invmat_summary)
+
+        invmat = np.concatenate(invmat_summary, axis=0)
+    else:   # no labels provided: return whole matrix
+        invmat = invmat_mat
+
+    return invmat, label_singvals
+
+
+@verbose
+def cross_talk_function(inverse_operator, forward, labels,
+                        method='dSPM', lambda2=1 / 9., signed=False,
+                        mode='mean', n_svd_comp=1, verbose=None):
+    """Compute cross-talk functions (CTFs) for linear estimators
+
+    Compute cross-talk functions (CTF) in labels for a combination of inverse
+    operator and forward solution. CTFs are computed for test sources that are
+    perpendicular to cortical surface.
+
+    Parameters
+    ----------
+    inverse_operator : instance of InverseOperator
+        Inverse operator.
+    forward : dict
+        Forward solution. Note: (Bad) channels not included in forward
+        solution will not be used in CTF computation.
+    labels : list of Label
+        Labels for which CTFs shall be computed.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method for which CTFs shall be computed.
+    lambda2 : float
+        The regularization parameter.
+    signed : bool
+        If True, CTFs will be written as signed source estimates. If False,
+        absolute (unsigned) values will be written.
+    mode : 'mean' | 'sum' | 'svd'
+        CTFs can be computed for different summary measures with labels:
+        'sum' or 'mean': sum or means of sub-inverses for labels
+        This corresponds to situations where labels can be assumed to be
+        homogeneously activated.
+        'svd': SVD components of sub-inverses for labels
+        This is better suited for situations where activation patterns are
+        assumed to be more variable. "sub-inverse" is the part of the inverse
+        matrix that belongs to vertices within individual labels.
+    n_svd_comp : int
+        Number of SVD components for which CTFs will be computed and output
+        (irrelevant for 'sum' and 'mean'). Explained variances within
+        sub-inverses are shown in screen output.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_ctf : SourceEstimate
+        The CTFs for the specified labels.
+        If mode='svd': n_svd_comp components per label are created
+        (i.e. n_svd_comp successive time points in mne_analyze)
+        The last sample is the summed CTF across all labels.
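+
+    A minimal usage sketch (``inv``, ``fwd`` and ``labels`` are assumed to
+    be loaded already; illustrative only)::
+
+        >>> stc_ctf = cross_talk_function(inv, fwd, labels, method='dSPM',
+        ...                               mode='mean')  # doctest: +SKIP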
+    """
+    forward = convert_forward_solution(forward, force_fixed=True,
+                                       surf_ori=True)
+
+    # get the inverse matrix corresponding to inverse operator
+    out = _get_matrix_from_inverse_operator(inverse_operator, forward,
+                                            labels=labels, method=method,
+                                            lambda2=lambda2, mode=mode,
+                                            n_svd_comp=n_svd_comp)
+    invmat, label_singvals = out
+
+    # get the leadfield matrix from forward solution
+    leadfield = _pick_leadfield(forward['sol']['data'], forward,
+                                inverse_operator['info']['ch_names'])
+
+    # compute cross-talk functions (CTFs)
+    ctfs = np.dot(invmat, leadfield)
+
+    # compute sum across forward solutions for labels, append to end
+    ctfs = np.vstack((ctfs, ctfs.sum(axis=0)))
+
+    # if unsigned output requested, take absolute values
+    if not signed:
+        ctfs = np.abs(ctfs, out=ctfs)
+
+    # create source estimate object
+    vertno = [ss['vertno'] for ss in inverse_operator['src']]
+    stc_ctf = SourceEstimate(ctfs.T, vertno, tmin=0., tstep=1.)
+
+    stc_ctf.subject = _subject_from_inverse(inverse_operator)
+
+    return stc_ctf
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_inverse.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_inverse.py
new file mode 100644
index 0000000..22747ce
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_inverse.py
@@ -0,0 +1,621 @@
+from __future__ import print_function
+import os.path as op
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_allclose, assert_array_equal)
+from scipy import sparse
+from nose.tools import assert_true, assert_raises
+import copy
+import warnings
+
+from mne.datasets import testing
+from mne.label import read_label, label_sign_flip
+from mne.event import read_events
+from mne.epochs import Epochs
+from mne.source_estimate import read_source_estimate, VolSourceEstimate
+from mne import (read_cov, read_forward_solution, read_evokeds, pick_types,
+                 pick_types_forward, make_forward_solution,
+                 convert_forward_solution, Covariance)
+from mne.io import Raw
+from mne.minimum_norm.inverse import (apply_inverse, read_inverse_operator,
+                                      apply_inverse_raw, apply_inverse_epochs,
+                                      make_inverse_operator,
+                                      write_inverse_operator,
+                                      compute_rank_inverse,
+                                      prepare_inverse_operator)
+from mne.utils import _TempDir, run_tests_if_main, slow_test
+from mne.externals import six
+
+test_path = testing.data_path(download=False)
+s_path = op.join(test_path, 'MEG', 'sample')
+fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+# Four inverses:
+fname_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_inv = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+fname_inv_fixed_nodepth = op.join(s_path,
+                                  'sample_audvis_trunc-meg-eeg-oct-4-meg'
+                                  '-nodepth-fixed-inv.fif')
+fname_inv_meeg_diag = op.join(s_path,
+                              'sample_audvis_trunc-'
+                              'meg-eeg-oct-4-meg-eeg-diagnoise-inv.fif')
+
+fname_data = op.join(s_path, 'sample_audvis_trunc-ave.fif')
+fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
+fname_raw = op.join(s_path, 'sample_audvis_trunc_raw.fif')
+fname_event = op.join(s_path, 'sample_audvis_trunc_raw-eve.fif')
+fname_label = op.join(s_path, 'labels', '%s.label')
+fname_vol_inv = op.join(s_path,
+                        'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
+# trans and bem needed for channel reordering tests incl. forward computation
+fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif')
+s_path_bem = op.join(test_path, 'subjects', 'sample', 'bem')
+fname_bem = op.join(s_path_bem, 'sample-320-320-320-bem-sol.fif')
+src_fname = op.join(s_path_bem, 'sample-oct-4-src.fif')
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+
+last_keys = [None] * 10
+
+
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=True, eeg=False)
+    return fwd
+
+
+def read_forward_solution_eeg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=False, eeg=True)
+    return fwd
+
+
+def _get_evoked():
+    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
+    evoked.crop(0, 0.2)
+    return evoked
+
+
+def _compare(a, b):
+    global last_keys
+    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
+                  'command_line', 'working_dir', 'mri_file', 'mri_id']
+    try:
+        if isinstance(a, dict):
+            assert_true(isinstance(b, dict))
+            for k, v in six.iteritems(a):
+                if k not in b and k not in skip_types:
+                    raise ValueError('First one had a key the second one '
+                                     'didn\'t:\n%s not in %s' % (k, b.keys()))
+                if k not in skip_types:
+                    last_keys.pop()
+                    last_keys = [k] + last_keys
+                    _compare(v, b[k])
+            for k, v in six.iteritems(b):
+                if k not in a and k not in skip_types:
+                    raise ValueError('Second one had a key the first one '
+                                     'didn\'t:\n%s not in %s' % (k, a.keys()))
+        elif isinstance(a, list):
+            assert_true(len(a) == len(b))
+            for i, j in zip(a, b):
+                _compare(i, j)
+        elif isinstance(a, sparse.csr.csr_matrix):
+            assert_array_almost_equal(a.data, b.data)
+            assert_equal(a.indices, b.indices)
+            assert_equal(a.indptr, b.indptr)
+        elif isinstance(a, np.ndarray):
+            assert_array_almost_equal(a, b)
+        else:
+            assert_true(a == b)
+    except Exception as exptn:
+        print(last_keys)
+        raise exptn
+
+
+def _compare_inverses_approx(inv_1, inv_2, evoked, rtol, atol,
+                             check_depth=True):
+    # depth prior
+    if check_depth:
+        if inv_1['depth_prior'] is not None:
+            assert_array_almost_equal(inv_1['depth_prior']['data'],
+                                      inv_2['depth_prior']['data'], 5)
+        else:
+            assert_true(inv_2['depth_prior'] is None)
+    # orient prior
+    if inv_1['orient_prior'] is not None:
+        assert_array_almost_equal(inv_1['orient_prior']['data'],
+                                  inv_2['orient_prior']['data'])
+    else:
+        assert_true(inv_2['orient_prior'] is None)
+    # source cov
+    assert_array_almost_equal(inv_1['source_cov']['data'],
+                              inv_2['source_cov']['data'])
+
+    # These are not as close as we'd like XXX
+    assert_array_almost_equal(np.abs(inv_1['eigen_fields']['data']),
+                              np.abs(inv_2['eigen_fields']['data']), 0)
+    assert_array_almost_equal(np.abs(inv_1['eigen_leads']['data']),
+                              np.abs(inv_2['eigen_leads']['data']), 0)
+
+    stc_1 = apply_inverse(evoked, inv_1, lambda2, "dSPM")
+    stc_2 = apply_inverse(evoked, inv_2, lambda2, "dSPM")
+
+    assert_true(stc_1.subject == stc_2.subject)
+    assert_equal(stc_1.times, stc_2.times)
+    assert_allclose(stc_1.data, stc_2.data, rtol=rtol, atol=atol)
+    assert_true(inv_1['units'] == inv_2['units'])
+
+
+def _compare_io(inv_op, out_file_ext='.fif'):
+    tempdir = _TempDir()
+    if out_file_ext == '.fif':
+        out_file = op.join(tempdir, 'test-inv.fif')
+    elif out_file_ext == '.gz':
+        out_file = op.join(tempdir, 'test-inv.fif.gz')
+    else:
+        raise ValueError('IO test could not complete')
+    # Test io operations
+    inv_init = copy.deepcopy(inv_op)
+    write_inverse_operator(out_file, inv_op)
+    read_inv_op = read_inverse_operator(out_file)
+    _compare(inv_init, read_inv_op)
+    _compare(inv_init, inv_op)
+
+
+@testing.requires_testing_data
+def test_warn_inverse_operator():
+    """Test MNE inverse warning without average EEG projection
+    """
+    bad_info = copy.deepcopy(_get_evoked().info)
+    bad_info['projs'] = list()
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    noise_cov = read_cov(fname_cov)
+    with warnings.catch_warnings(record=True) as w:
+        make_inverse_operator(bad_info, fwd_op, noise_cov)
+    assert_equal(len(w), 1)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_make_inverse_operator():
+    """Test MNE inverse computation (precomputed and non-precomputed)
+    """
+    # Test old version of inverse computation starting from forward operator
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+    inverse_operator = read_inverse_operator(fname_inv)
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                      loose=0.2, depth=0.8,
+                                      limit_depth_chs=False)
+    _compare_io(my_inv_op)
+    assert_true(inverse_operator['units'] == 'Am')
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2,
+                             check_depth=False)
+    # Test MNE inverse computation starting from forward operator
+    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                      loose=0.2, depth=0.8)
+    _compare_io(my_inv_op)
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2)
+    assert_true('dev_head_t' in my_inv_op['info'])
+    assert_true('mri_head_t' in my_inv_op)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_inverse_operator_channel_ordering():
+    """Test MNE inverse computation is immune to channel reorderings
+    """
+    # These are with original ordering
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    fwd_orig = make_forward_solution(evoked.info, fname_trans, src_fname,
+                                     fname_bem, eeg=True, mindist=5.0)
+    fwd_orig = convert_forward_solution(fwd_orig, surf_ori=True)
+    inv_orig = make_inverse_operator(evoked.info, fwd_orig, noise_cov,
+                                     loose=0.2, depth=0.8,
+                                     limit_depth_chs=False)
+    stc_1 = apply_inverse(evoked, inv_orig, lambda2, "dSPM")
+
+    # Assume that a raw reordering applies to both evoked and noise_cov,
+    # so we don't need to create those from scratch. Just reorder them,
+    # then try to apply the original inverse operator
+    new_order = np.arange(len(evoked.info['ch_names']))
+    randomiser = np.random.RandomState(42)
+    randomiser.shuffle(new_order)
+    evoked.data = evoked.data[new_order]
+    evoked.info['ch_names'] = [evoked.info['ch_names'][n] for n in new_order]
+    evoked.info['chs'] = [evoked.info['chs'][n] for n in new_order]
+
+    cov_ch_reorder = [c for c in evoked.info['ch_names']
+                      if (c in noise_cov.ch_names)]
+
+    new_order_cov = [noise_cov.ch_names.index(name) for name in cov_ch_reorder]
+    noise_cov['data'] = noise_cov.data[np.ix_(new_order_cov, new_order_cov)]
+    noise_cov['names'] = [noise_cov['names'][idx] for idx in new_order_cov]
+
+    fwd_reorder = make_forward_solution(evoked.info, fname_trans, src_fname,
+                                        fname_bem, eeg=True, mindist=5.0)
+    fwd_reorder = convert_forward_solution(fwd_reorder, surf_ori=True)
+    inv_reorder = make_inverse_operator(evoked.info, fwd_reorder, noise_cov,
+                                        loose=0.2, depth=0.8,
+                                        limit_depth_chs=False)
+
+    stc_2 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
+
+    assert_equal(stc_1.subject, stc_2.subject)
+    assert_array_equal(stc_1.times, stc_2.times)
+    assert_allclose(stc_1.data, stc_2.data, rtol=1e-5, atol=1e-5)
+    assert_true(inv_orig['units'] == inv_reorder['units'])
+
+    # Reload with original ordering & apply reordered inverse
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    stc_3 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
+    assert_allclose(stc_1.data, stc_3.data, rtol=1e-5, atol=1e-5)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_apply_inverse_operator():
+    """Test MNE inverse application
+    """
+    inverse_operator = read_inverse_operator(fname_full)
+    evoked = _get_evoked()
+
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator) == 302)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "MNE")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 10e-9)
+    assert_true(stc.data.mean() > 1e-11)
+
+    # test if using prepared and not prepared inverse operator give the same
+    # result
+    inv_op = prepare_inverse_operator(inverse_operator, nave=evoked.nave,
+                                      lambda2=lambda2, method="MNE")
+    stc2 = apply_inverse(evoked, inv_op, lambda2, "MNE")
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.times, stc2.times)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "sLORETA")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 10.0)
+    assert_true(stc.data.mean() > 0.1)
+
+    stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM")
+    assert_true(stc.subject == 'sample')
+    assert_true(stc.data.min() > 0)
+    assert_true(stc.data.max() < 35)
+    assert_true(stc.data.mean() > 0.1)
+
+    # test without using a label (so delayed computation is used)
+    label = read_label(fname_label % 'Aud-lh')
+    stc = apply_inverse(evoked, inv_op, lambda2, "MNE")
+    stc_label = apply_inverse(evoked, inv_op, lambda2, "MNE",
+                              label=label)
+    assert_equal(stc_label.subject, 'sample')
+    label_stc = stc.in_label(label)
+    assert_true(label_stc.subject == 'sample')
+    assert_array_almost_equal(stc_label.data, label_stc.data)
+
+    # Test we get errors when using custom ref or no average proj is present
+    evoked.info['custom_ref_applied'] = True
+    assert_raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")
+    evoked.info['custom_ref_applied'] = False
+    evoked.info['projs'] = []  # remove EEG proj
+    assert_raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")
+
+
+@testing.requires_testing_data
+def test_make_inverse_operator_fixed():
+    """Test MNE inverse computation (fixed orientation)
+    """
+    fwd_1 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=False)
+    fwd_2 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=True)
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    # can't make depth-weighted fixed inv without surf ori fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_1,
+                  noise_cov, depth=0.8, loose=None, fixed=True)
+    # can't make fixed inv with depth weighting without free ori fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
+                  noise_cov, depth=0.8, loose=None, fixed=True)
+
+    # now compare to C solution
+    # note that the forward solution must not be surface-oriented
+    # to get equivalency (surf_ori=True changes the normals)
+    inv_op = make_inverse_operator(evoked.info, fwd_2, noise_cov, depth=None,
+                                   loose=None, fixed=True)
+    inverse_operator_nodepth = read_inverse_operator(fname_inv_fixed_nodepth)
+    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 0, 1e-2)
+    # Inverse has 306 channels - 6 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator_nodepth) == 302)
+
+
+@testing.requires_testing_data
+def test_make_inverse_operator_free():
+    """Test MNE inverse computation (free orientation)
+    """
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    fwd_1 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=False)
+    fwd_2 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=True)
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    # can't make free inv with fixed fwd
+    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
+                  noise_cov, depth=None)
+
+    # for free ori inv, loose=None and loose=1 should be equivalent
+    inv_1 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=None)
+    inv_2 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=1)
+    _compare_inverses_approx(inv_1, inv_2, evoked, 0, 1e-2)
+
+    # for depth=None, surf_ori of the fwd should not matter
+    inv_3 = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=None,
+                                  loose=None)
+    inv_4 = make_inverse_operator(evoked.info, fwd_1, noise_cov, depth=None,
+                                  loose=None)
+    _compare_inverses_approx(inv_3, inv_4, evoked, 0, 1e-2)
+
+
+@testing.requires_testing_data
+def test_make_inverse_operator_diag():
+    """Test MNE inverse computation with diagonal noise cov
+    """
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov.as_diag(),
+                                   loose=0.2, depth=0.8)
+    _compare_io(inv_op)
+    inverse_operator_diag = read_inverse_operator(fname_inv_meeg_diag)
+    # This one's only good to zero decimal places, roundoff error (?)
+    _compare_inverses_approx(inverse_operator_diag, inv_op, evoked, 0, 1e0)
+    # Inverse has 366 channels - 6 proj = 360
+    assert_true(compute_rank_inverse(inverse_operator_diag) == 360)
+
+
+@testing.requires_testing_data
+def test_inverse_operator_noise_cov_rank():
+    """Test MNE inverse operator with a specified noise cov rank
+    """
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+    inv = make_inverse_operator(evoked.info, fwd_op, noise_cov, rank=64)
+    assert_true(compute_rank_inverse(inv) == 64)
+
+    fwd_op = read_forward_solution_eeg(fname_fwd, surf_ori=True)
+    inv = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                rank=dict(eeg=20))
+    assert_true(compute_rank_inverse(inv) == 20)
+
+
+@testing.requires_testing_data
+def test_inverse_operator_volume():
+    """Test MNE inverse computation on volume source space
+    """
+    tempdir = _TempDir()
+    evoked = _get_evoked()
+    inverse_operator_vol = read_inverse_operator(fname_vol_inv)
+    assert_true(repr(inverse_operator_vol))
+    stc = apply_inverse(evoked, inverse_operator_vol, lambda2, "dSPM")
+    assert_true(isinstance(stc, VolSourceEstimate))
+    # volume inverses don't have associated subject IDs
+    assert_true(stc.subject is None)
+    stc.save(op.join(tempdir, 'tmp-vl.stc'))
+    stc2 = read_source_estimate(op.join(tempdir, 'tmp-vl.stc'))
+    assert_true(np.all(stc.data > 0))
+    assert_true(np.all(stc.data < 35))
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.times, stc2.times)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_io_inverse_operator():
+    """Test IO of inverse_operator
+    """
+    tempdir = _TempDir()
+    inverse_operator = read_inverse_operator(fname_inv)
+    x = repr(inverse_operator)
+    assert_true(x)
+    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
+    # just do one example for .gz, as it should generalize
+    _compare_io(inverse_operator, '.gz')
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_inverse_operator(inv_badname, inverse_operator)
+        read_inverse_operator(inv_badname)
+    assert_true(len(w) == 2)
+
+    # make sure we can write and read
+    inv_fname = op.join(tempdir, 'test-inv.fif')
+    args = (10, 1. / 9., 'dSPM')
+    inv_prep = prepare_inverse_operator(inverse_operator, *args)
+    write_inverse_operator(inv_fname, inv_prep)
+    inv_read = read_inverse_operator(inv_fname)
+    _compare(inverse_operator, inv_read)
+    inv_read_prep = prepare_inverse_operator(inv_read, *args)
+    _compare(inv_prep, inv_read_prep)
+    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
+    _compare(inv_prep, inv_prep_prep)
+
+
+@testing.requires_testing_data
+def test_apply_mne_inverse_raw():
+    """Test MNE with precomputed inverse operator on Raw
+    """
+    start = 3
+    stop = 10
+    raw = Raw(fname_raw)
+    label_lh = read_label(fname_label % 'Aud-lh')
+    _, times = raw[0, start:stop]
+    inverse_operator = read_inverse_operator(fname_full)
+    inverse_operator = prepare_inverse_operator(inverse_operator, nave=1,
+                                                lambda2=lambda2, method="dSPM")
+    for pick_ori in [None, "normal"]:
+        stc = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
+                                label=label_lh, start=start, stop=stop, nave=1,
+                                pick_ori=pick_ori, buffer_size=None,
+                                prepared=True)
+
+        stc2 = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
+                                 label=label_lh, start=start, stop=stop,
+                                 nave=1, pick_ori=pick_ori,
+                                 buffer_size=3, prepared=True)
+
+        if pick_ori is None:
+            assert_true(np.all(stc.data > 0))
+            assert_true(np.all(stc2.data > 0))
+
+        assert_true(stc.subject == 'sample')
+        assert_true(stc2.subject == 'sample')
+        assert_array_almost_equal(stc.times, times)
+        assert_array_almost_equal(stc2.times, times)
+        assert_array_almost_equal(stc.data, stc2.data)
+
+
+@testing.requires_testing_data
+def test_apply_mne_inverse_fixed_raw():
+    """Test MNE with fixed-orientation inverse operator on Raw
+    """
+    raw = Raw(fname_raw)
+    start = 3
+    stop = 10
+    _, times = raw[0, start:stop]
+    label_lh = read_label(fname_label % 'Aud-lh')
+
+    # create a fixed-orientation inverse operator
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
+    noise_cov = read_cov(fname_cov)
+    inv_op = make_inverse_operator(raw.info, fwd, noise_cov,
+                                   loose=None, depth=0.8, fixed=True)
+
+    inv_op2 = prepare_inverse_operator(inv_op, nave=1,
+                                       lambda2=lambda2, method="dSPM")
+    stc = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
+                            label=label_lh, start=start, stop=stop, nave=1,
+                            pick_ori=None, buffer_size=None, prepared=True)
+
+    stc2 = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
+                             label=label_lh, start=start, stop=stop, nave=1,
+                             pick_ori=None, buffer_size=3, prepared=True)
+
+    stc3 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+                             label=label_lh, start=start, stop=stop, nave=1,
+                             pick_ori=None, buffer_size=None)
+
+    assert_true(stc.subject == 'sample')
+    assert_true(stc2.subject == 'sample')
+    assert_array_almost_equal(stc.times, times)
+    assert_array_almost_equal(stc2.times, times)
+    assert_array_almost_equal(stc3.times, times)
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.data, stc3.data)
+
+
+@testing.requires_testing_data
+def test_apply_mne_inverse_epochs():
+    """Test MNE with precomputed inverse operator on Epochs
+    """
+    inverse_operator = read_inverse_operator(fname_full)
+    label_lh = read_label(fname_label % 'Aud-lh')
+    label_rh = read_label(fname_label % 'Aud-rh')
+    event_id, tmin, tmax = 1, -0.2, 0.5
+    raw = Raw(fname_raw)
+
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=True, ecg=True,
+                       eog=True, include=['STI 014'], exclude='bads')
+    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+    flat = dict(grad=1e-15, mag=1e-15)
+
+    events = read_events(fname_event)[:15]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, flat=flat)
+    stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                label=label_lh, pick_ori="normal")
+    inverse_operator = prepare_inverse_operator(inverse_operator, nave=1,
+                                                lambda2=lambda2, method="dSPM")
+    stcs2 = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                 label=label_lh, pick_ori="normal",
+                                 prepared=True)
+    # test if using prepared and not prepared inverse operator give the same
+    # result
+    assert_array_almost_equal(stcs[0].data, stcs2[0].data)
+    assert_array_almost_equal(stcs[0].times, stcs2[0].times)
+
+    assert_true(len(stcs) == 2)
+    assert_true(3 < stcs[0].data.max() < 10)
+    assert_true(stcs[0].subject == 'sample')
+
+    data = sum(stc.data for stc in stcs) / len(stcs)
+    flip = label_sign_flip(label_lh, inverse_operator['src'])
+
+    label_mean = np.mean(data, axis=0)
+    label_mean_flip = np.mean(flip[:, np.newaxis] * data, axis=0)
+
+    assert_true(label_mean.max() < label_mean_flip.max())
+
+    # test extracting a BiHemiLabel
+
+    stcs_rh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                   label=label_rh, pick_ori="normal",
+                                   prepared=True)
+    stcs_bh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                   label=label_lh + label_rh,
+                                   pick_ori="normal",
+                                   prepared=True)
+
+    n_lh = len(stcs[0].data)
+    assert_array_almost_equal(stcs[0].data, stcs_bh[0].data[:n_lh])
+    assert_array_almost_equal(stcs_rh[0].data, stcs_bh[0].data[n_lh:])
+
+    # test without using a label (so delayed computation is used)
+    stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                pick_ori="normal", prepared=True)
+    assert_true(stcs[0].subject == 'sample')
+    label_stc = stcs[0].in_label(label_rh)
+    assert_true(label_stc.subject == 'sample')
+    assert_array_almost_equal(stcs_rh[0].data, label_stc.data)
+
+
+@testing.requires_testing_data
+def test_make_inverse_operator_bads():
+    """Test MNE inverse computation given a mismatch of bad channels
+    """
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    # test bads
+    bad = evoked.info['bads'].pop()
+    inv_ = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=None)
+    good_names = set(noise_cov['names']) & set(evoked.ch_names)
+    bad_names = set(noise_cov['bads']) & set(evoked.info['bads'])
+    evoked.info['bads'].append(bad)
+
+    assert_true(len(set(inv_['info']['ch_names']) - good_names) == 0)
+    assert_true(len(set(inv_['info']['bads']) - bad_names) == 0)
+
+
+run_tests_if_main()
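
For reference, a minimal sketch of the workflow these tests exercise,
using hypothetical file names (any evoked, covariance and forward files
of your own would do):

    from mne import read_cov, read_evokeds, read_forward_solution
    from mne.minimum_norm import make_inverse_operator, apply_inverse

    evoked = read_evokeds('sample_audvis-ave.fif', condition=0,
                          baseline=(None, 0))
    noise_cov = read_cov('sample_audvis-cov.fif')
    fwd = read_forward_solution('sample_audvis-meg-oct-6-fwd.fif',
                                surf_ori=True)
    inv = make_inverse_operator(evoked.info, fwd, noise_cov,
                                loose=0.2, depth=0.8)
    stc = apply_inverse(evoked, inv, lambda2=1. / 9., method='dSPM')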
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_psf_ctf.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_psf_ctf.py
new file mode 100644
index 0000000..78702e2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_psf_ctf.py
@@ -0,0 +1,81 @@
+
+import os.path as op
+import mne
+from mne.datasets import testing
+from mne import read_forward_solution
+from mne.minimum_norm import (read_inverse_operator,
+                              point_spread_function, cross_talk_function)
+from mne.utils import slow_test, run_tests_if_main
+
+from nose.tools import assert_true
+
+data_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+
+fname_inv_meg = op.join(data_path,
+                        'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+fname_inv_meeg = op.join(data_path, 'sample_audvis_trunc-meg-eeg-oct-4-'
+                         'meg-eeg-diagnoise-inv.fif')
+fname_fwd = op.join(data_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+
+fname_label = [op.join(data_path, 'labels', 'Aud-rh.label'),
+               op.join(data_path, 'labels', 'Aud-lh.label')]
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+
+
+@slow_test
+@testing.requires_testing_data
+def test_psf_ctf():
+    """Test computation of PSFs and CTFs for linear estimators
+    """
+    forward = read_forward_solution(fname_fwd)
+    labels = [mne.read_label(ss) for ss in fname_label]
+
+    method = 'MNE'
+    n_svd_comp = 2
+
+    # make sure it works for both types of inverses
+    for fname_inv in (fname_inv_meg, fname_inv_meeg):
+        inverse_operator = read_inverse_operator(fname_inv)
+        # Test PSFs (then CTFs)
+        for mode in ('sum', 'svd'):
+            stc_psf, psf_ev = point_spread_function(
+                inverse_operator, forward, method=method, labels=labels,
+                lambda2=lambda2, pick_ori='normal', mode=mode,
+                n_svd_comp=n_svd_comp)
+
+            n_vert, n_samples = stc_psf.shape
+            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                             inverse_operator['src'][0]['vertno'].shape[0])
+            if mode == 'svd':
+                should_n_samples = len(labels) * n_svd_comp + 1
+            else:
+                should_n_samples = len(labels) + 1
+
+            assert_true(n_vert == should_n_vert)
+            assert_true(n_samples == should_n_samples)
+
+            n_chan, n_samples = psf_ev.data.shape
+            assert_true(n_chan == forward['nchan'])
+
+        # Test CTFs
+        for mode in ('sum', 'svd'):
+            stc_ctf = cross_talk_function(
+                inverse_operator, forward, labels, method=method,
+                lambda2=lambda2, signed=False, mode=mode,
+                n_svd_comp=n_svd_comp)
+
+            n_vert, n_samples = stc_ctf.shape
+            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                             inverse_operator['src'][0]['vertno'].shape[0])
+            if mode == 'svd':
+                should_n_samples = len(labels) * n_svd_comp + 1
+            else:
+                should_n_samples = len(labels) + 1
+
+            assert_true(n_vert == should_n_vert)
+            assert_true(n_samples == should_n_samples)
+
+
+run_tests_if_main()
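
For reference, a condensed sketch of the PSF/CTF API exercised above,
with hypothetical file names:

    import mne
    from mne.minimum_norm import (read_inverse_operator,
                                  point_spread_function, cross_talk_function)

    fwd = mne.read_forward_solution('sample-fwd.fif')
    inv = read_inverse_operator('sample-inv.fif')
    labels = [mne.read_label('Aud-lh.label'), mne.read_label('Aud-rh.label')]
    # point spread: how each label's activity leaks into other sources
    stc_psf, evoked_psf = point_spread_function(
        inv, fwd, method='MNE', labels=labels, lambda2=1. / 9.,
        pick_ori='normal', mode='svd', n_svd_comp=2)
    # cross-talk: how other sources leak into each label's estimate
    stc_ctf = cross_talk_function(
        inv, fwd, labels, method='MNE', lambda2=1. / 9., signed=False,
        mode='svd', n_svd_comp=2)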
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_snr.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_snr.py
new file mode 100644
index 0000000..ebbd776
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_snr.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+import numpy as np
+from numpy.testing import assert_allclose
+
+from mne import read_evokeds
+from mne.datasets import testing
+from mne.minimum_norm import read_inverse_operator, estimate_snr
+
+from mne.utils import _TempDir, requires_mne, run_subprocess
+
+s_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fname_inv = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_evoked = op.join(s_path, 'sample_audvis-ave.fif')
+
+
+@testing.requires_testing_data
+@requires_mne
+def test_snr():
+    """Test SNR calculation"""
+    tempdir = _TempDir()
+    inv = read_inverse_operator(fname_inv)
+    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
+    snr = estimate_snr(evoked, inv)[0]
+    orig_dir = os.getcwd()
+    os.chdir(tempdir)
+    try:
+        cmd = ['mne_compute_mne', '--inv', fname_inv, '--meas', fname_evoked,
+               '--snronly', '--bmin', '-200', '--bmax', '0']
+        run_subprocess(cmd)
+    except Exception:
+        pass  # this returns 1 for some reason
+    finally:
+        os.chdir(orig_dir)
+    snr_c = np.loadtxt(op.join(tempdir, 'SNR'))[:, 1]
+    assert_allclose(snr, snr_c, atol=1e-2, rtol=1e-2)
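
For reference, the Python side of the comparison amounts to this sketch
(hypothetical file names; as in the test, only the first return value of
estimate_snr is used):

    from mne import read_evokeds
    from mne.minimum_norm import read_inverse_operator, estimate_snr

    evoked = read_evokeds('sample_audvis-ave.fif', baseline=(None, 0))[0]
    inv = read_inverse_operator('sample-inv.fif')
    snr = estimate_snr(evoked, inv)[0]  # one SNR value per time sample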
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_time_frequency.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_time_frequency.py
new file mode 100644
index 0000000..c20e0d3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/tests/test_time_frequency.py
@@ -0,0 +1,200 @@
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+import warnings
+
+from mne.datasets import testing
+from mne import io, find_events, Epochs, pick_types
+from mne.utils import run_tests_if_main
+from mne.label import read_label
+from mne.minimum_norm.inverse import (read_inverse_operator,
+                                      apply_inverse_epochs,
+                                      prepare_inverse_operator)
+from mne.minimum_norm.time_frequency import (source_band_induced_power,
+                                             source_induced_power,
+                                             compute_source_psd,
+                                             compute_source_psd_epochs)
+
+
+from mne.time_frequency import multitaper_psd
+
+data_path = testing.data_path(download=False)
+fname_inv = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_data = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis_trunc_raw.fif')
+fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
+warnings.simplefilter('always')
+
+
+@testing.requires_testing_data
+def test_tfr_with_inverse_operator():
+    """Test time freq with MNE inverse computation"""
+
+    tmin, tmax, event_id = -0.2, 0.5, 1
+
+    # Setup for reading the raw data
+    raw = io.Raw(fname_data)
+    events = find_events(raw, stim_channel='STI 014')
+    inverse_operator = read_inverse_operator(fname_inv)
+    inv = prepare_inverse_operator(inverse_operator, nave=1,
+                                   lambda2=1. / 9., method="dSPM")
+
+    raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = pick_types(raw.info, meg=True, eeg=False, eog=True,
+                       stim=False, exclude='bads')
+
+    # Load condition 1
+    event_id = 1
+    events3 = events[:3]  # take 3 events to keep the computation time low
+    epochs = Epochs(raw, events3, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+    # Compute a source estimate per frequency band
+    bands = dict(alpha=[10, 10])
+    label = read_label(fname_label)
+
+    stcs = source_band_induced_power(epochs, inv, bands,
+                                     n_cycles=2, use_fft=False, pca=True,
+                                     label=label, prepared=True)
+
+    stc = stcs['alpha']
+    assert_true(len(stcs) == len(list(bands.keys())))
+    assert_true(np.all(stc.data > 0))
+    assert_array_almost_equal(stc.times, epochs.times)
+
+    stcs_no_pca = source_band_induced_power(epochs, inv, bands,
+                                            n_cycles=2, use_fft=False,
+                                            pca=False, label=label,
+                                            prepared=True)
+
+    assert_array_almost_equal(stcs['alpha'].data, stcs_no_pca['alpha'].data)
+
+    # Compute a source estimate per frequency band
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+    frequencies = np.arange(7, 30, 2)  # define frequencies of interest
+    power, phase_lock = source_induced_power(epochs, inv,
+                                             frequencies, label,
+                                             baseline=(-0.1, 0),
+                                             baseline_mode='percent',
+                                             n_cycles=2, n_jobs=1,
+                                             prepared=True)
+    assert_true(np.all(phase_lock > 0))
+    assert_true(np.all(phase_lock <= 1))
+    assert_true(np.max(power) > 10)
+
+
+@testing.requires_testing_data
+def test_source_psd():
+    """Test source PSD computation in label"""
+    raw = io.Raw(fname_data)
+    inverse_operator = read_inverse_operator(fname_inv)
+    label = read_label(fname_label)
+    tmin, tmax = 0, 20  # seconds
+    fmin, fmax = 55, 65  # Hz
+    n_fft = 2048
+    stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9.,
+                             method="dSPM", tmin=tmin, tmax=tmax,
+                             fmin=fmin, fmax=fmax, pick_ori="normal",
+                             n_fft=n_fft, label=label, overlap=0.1)
+    assert_true(stc.times[0] >= fmin * 1e-3)
+    assert_true(stc.times[-1] <= fmax * 1e-3)
+    # Time max at line frequency (60 Hz in US)
+    assert_true(59e-3 <= stc.times[np.argmax(np.sum(stc.data, axis=0))] <=
+                61e-3)
+
+
+@testing.requires_testing_data
+def test_source_psd_epochs():
+    """Test multi-taper source PSD computation in label from epochs"""
+
+    raw = io.Raw(fname_data)
+    inverse_operator = read_inverse_operator(fname_inv)
+    label = read_label(fname_label)
+
+    event_id, tmin, tmax = 1, -0.2, 0.5
+    lambda2, method = 1. / 9., 'dSPM'
+    bandwidth = 8.
+    fmin, fmax = 0, 100
+
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
+                       ecg=True, eog=True, include=['STI 014'],
+                       exclude='bads')
+    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+
+    events = find_events(raw, stim_channel='STI 014')
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject)
+
+    # only look at one epoch
+    epochs.drop_bad_epochs()
+    one_epochs = epochs[:1]
+
+    inv = prepare_inverse_operator(inverse_operator, nave=1,
+                                   lambda2=1. / 9., method="dSPM")
+    # return list
+    stc_psd = compute_source_psd_epochs(one_epochs, inv,
+                                        lambda2=lambda2, method=method,
+                                        pick_ori="normal", label=label,
+                                        bandwidth=bandwidth,
+                                        fmin=fmin, fmax=fmax,
+                                        prepared=True)[0]
+
+    # return generator
+    stcs = compute_source_psd_epochs(one_epochs, inv,
+                                     lambda2=lambda2, method=method,
+                                     pick_ori="normal", label=label,
+                                     bandwidth=bandwidth,
+                                     fmin=fmin, fmax=fmax,
+                                     return_generator=True,
+                                     prepared=True)
+
+    for stc in stcs:
+        stc_psd_gen = stc
+
+    assert_array_almost_equal(stc_psd.data, stc_psd_gen.data)
+
+    # compare with direct computation
+    stc = apply_inverse_epochs(one_epochs, inv,
+                               lambda2=lambda2, method=method,
+                               pick_ori="normal", label=label,
+                               prepared=True)[0]
+
+    sfreq = epochs.info['sfreq']
+    psd, freqs = multitaper_psd(stc.data, sfreq=sfreq, bandwidth=bandwidth,
+                                fmin=fmin, fmax=fmax)
+
+    assert_array_almost_equal(psd, stc_psd.data)
+    assert_array_almost_equal(freqs, stc_psd.times)
+
+    # Check corner cases caused by tiny bandwidth
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        compute_source_psd_epochs(one_epochs, inv,
+                                  lambda2=lambda2, method=method,
+                                  pick_ori="normal", label=label,
+                                  bandwidth=0.01, low_bias=True,
+                                  fmin=fmin, fmax=fmax,
+                                  return_generator=False,
+                                  prepared=True)
+        compute_source_psd_epochs(one_epochs, inv,
+                                  lambda2=lambda2, method=method,
+                                  pick_ori="normal", label=label,
+                                  bandwidth=0.01, low_bias=False,
+                                  fmin=fmin, fmax=fmax,
+                                  return_generator=False,
+                                  prepared=True)
+    assert_true(len(w) >= 2)
+    assert_true(any('not properly use' in str(ww.message) for ww in w))
+    assert_true(any('Bandwidth too small' in str(ww.message) for ww in w))
+
+
+run_tests_if_main()
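
For reference, the PSD consistency check in test_source_psd_epochs above
boils down to this sketch (`epochs`, `inv` and `label` as in the test):

    stc_psd = compute_source_psd_epochs(epochs[:1], inv, lambda2=1. / 9.,
                                        method='dSPM', pick_ori='normal',
                                        label=label, bandwidth=8.,
                                        fmin=0., fmax=100.)[0]
    stc = apply_inverse_epochs(epochs[:1], inv, lambda2=1. / 9.,
                               method='dSPM', pick_ori='normal',
                               label=label)[0]
    psd, freqs = multitaper_psd(stc.data, sfreq=epochs.info['sfreq'],
                                bandwidth=8., fmin=0., fmax=100.)
    # psd should match stc_psd.data, and freqs should match stc_psd.times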
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/time_frequency.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/time_frequency.py
new file mode 100644
index 0000000..81e037b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/minimum_norm/time_frequency.py
@@ -0,0 +1,688 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from warnings import warn
+
+import numpy as np
+from scipy import linalg, fftpack
+import warnings
+
+from ..io.constants import FIFF
+from ..source_estimate import _make_stc
+from ..time_frequency.tfr import cwt, morlet
+from ..time_frequency.multitaper import (dpss_windows, _psd_from_mt,
+                                         _psd_from_mt_adaptive, _mt_spectra)
+from ..baseline import rescale
+from .inverse import (combine_xyz, prepare_inverse_operator, _assemble_kernel,
+                      _pick_channels_inverse_operator, _check_method,
+                      _check_ori, _subject_from_inverse)
+from ..parallel import parallel_func
+from ..utils import logger, verbose
+from ..externals import six
+
+
+def _prepare_source_params(inst, inverse_operator, label=None,
+                           lambda2=1.0 / 9.0, method="dSPM", nave=1,
+                           decim=1, pca=True, pick_ori="normal",
+                           prepared=False, verbose=None):
+    """Prepare inverse operator and params for spectral / TFR analysis"""
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(inst.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
+
+    if pca:
+        U, s, Vh = linalg.svd(K, full_matrices=False)
+        rank = np.sum(s > 1e-8 * s[0])
+        K = s[:rank] * U[:, :rank]
+        Vh = Vh[:rank]
+        logger.info('Reducing data rank to %d' % rank)
+    else:
+        Vh = None
+    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    return K, sel, Vh, vertno, is_free_ori, noise_norm
+
+
+@verbose
+def source_band_induced_power(epochs, inverse_operator, bands, label=None,
+                              lambda2=1.0 / 9.0, method="dSPM", nave=1,
+                              n_cycles=5, df=1, use_fft=False, decim=1,
+                              baseline=None, baseline_mode='logratio',
+                              pca=True, n_jobs=1, prepared=False,
+                              verbose=None):
+    """Compute source space induced power in given frequency bands
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    inverse_operator : instance of inverse operator
+        The inverse operator.
+    bands : dict
+        Example : bands = dict(alpha=[8, 9]).
+    label : Label
+        Restricts the source estimates to a given label.
+    lambda2 : float
+        The regularization parameter of the minimum norm.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    df : float
+        Delta frequency within bands.
+    use_fft : bool
+        Do convolutions in time or frequency domain with FFT.
+    decim : int
+        Temporal decimation factor.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction. If None, do not
+        apply it. If baseline is (a, b), the interval is between "a (s)" and
+        "b (s)". If a is None, the beginning of the data is used, and if b
+        is None, b is set to the end of the interval. If baseline equals
+        (None, None), the whole time interval is used.
+    baseline_mode : None | 'logratio' | 'zscore'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or zscore (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+    pca : bool
+        If True, the true dimension of the data is estimated before running
+        the time-frequency transforms. This reduces computation time, e.g.,
+        with a dataset that was maxfiltered (true dim is 64).
+    n_jobs : int
+        Number of jobs to run in parallel.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stcs : dict with a SourceEstimate (or VolSourceEstimate) for each band
+        The estimated source space induced power estimates.
+    """
+    method = _check_method(method)
+
+    frequencies = np.concatenate([np.arange(band[0], band[1] + df / 2.0, df)
+                                 for _, band in six.iteritems(bands)])
+
+    powers, _, vertno = _source_induced_power(
+        epochs, inverse_operator, frequencies, label=label, lambda2=lambda2,
+        method=method, nave=nave, n_cycles=n_cycles, decim=decim,
+        use_fft=use_fft, pca=pca, n_jobs=n_jobs, with_plv=False,
+        prepared=prepared)
+
+    Fs = epochs.info['sfreq']  # sampling in Hz
+    stcs = dict()
+
+    subject = _subject_from_inverse(inverse_operator)
+    for name, band in six.iteritems(bands):
+        idx = [k for k, f in enumerate(frequencies) if band[0] <= f <= band[1]]
+
+        # average power in band + mean over epochs
+        power = np.mean(powers[:, idx, :], axis=1)
+
+        # Run baseline correction
+        power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
+                        copy=False)
+
+        tmin = epochs.times[0]
+        tstep = float(decim) / Fs
+        stc = _make_stc(power, vertices=vertno, tmin=tmin, tstep=tstep,
+                        subject=subject)
+        stcs[name] = stc
+
+        logger.info('[done]')
+
+    return stcs
+
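+# Hypothetical usage sketch (illustrative only; `epochs` and `inv` are
+# assumed to be an Epochs instance and an inverse operator):
+#
+#     bands = dict(alpha=[9, 11], beta=[18, 22])
+#     stcs = source_band_induced_power(epochs, inv, bands, n_cycles=2,
+#                                      baseline=(None, 0),
+#                                      baseline_mode='logratio')
+#     stcs['alpha'].save('induced_alpha')  # one SourceEstimate per band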
+
+def _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori):
+    """Aux function to prepare TFR source localization"""
+    n_times = data[:, :, ::decim].shape[2]
+    n_freqs = len(Ws)
+    n_sources = K.shape[0]
+    is_free_ori = False
+    if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None):
+        is_free_ori = True
+        n_sources //= 3
+
+    shape = (n_sources, n_freqs, n_times)
+    return shape, is_free_ori
+
+
+@verbose
+def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh,
+                     with_power, with_plv, pick_ori, decim, verbose=None):
+    """Aux function for induced power and PLV"""
+    shape, is_free_ori = _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori)
+    n_sources, n_freqs = shape[:2]  # shape is (n_sources, n_freqs, n_times)
+    power = np.zeros(shape, dtype=np.float)  # power or raw TFR
+    # phase lock
+    plv = np.zeros(shape, dtype=np.complex) if with_plv else None
+
+    for epoch in data:
+        epoch = epoch[sel]  # keep only selected channels
+
+        if Vh is not None:
+            epoch = np.dot(Vh, epoch)  # reducing data rank
+
+        power_e, plv_e = _single_epoch_tfr(
+            data=epoch, is_free_ori=is_free_ori, K=K, Ws=Ws, use_fft=use_fft,
+            decim=decim, shape=shape, with_plv=with_plv, with_power=with_power)
+
+        power += power_e
+        if with_plv:
+            plv += plv_e
+
+    return power, plv
+
+
+def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape,
+                      with_plv, with_power):
+    """Compute single trial TFRs, either ITC, power or raw TFR"""
+    tfr_e = np.zeros(shape, dtype=np.float)  # power or raw TFR
+    # phase lock
+    plv_e = np.zeros(shape, dtype=np.complex) if with_plv else None
+    n_sources, _, n_times = shape
+    for f, w in enumerate(Ws):
+        tfr_ = cwt(data, [w], use_fft=use_fft, decim=decim)
+        tfr_ = np.asfortranarray(tfr_.reshape(len(data), -1))
+
+        # phase lock and power at freq f
+        if with_plv:
+            plv_f = np.zeros((n_sources, n_times), dtype=np.complex)
+
+        tfr_f = np.zeros((n_sources, n_times), dtype=np.float)
+
+        for k, t in enumerate([np.real(tfr_), np.imag(tfr_)]):
+            sol = np.dot(K, t)
+
+            sol_pick_normal = sol
+            if is_free_ori:
+                sol_pick_normal = sol[2::3]
+
+            if with_plv:
+                if k == 0:  # real
+                    plv_f += sol_pick_normal
+                else:  # imag
+                    plv_f += 1j * sol_pick_normal
+
+            if is_free_ori:
+                logger.debug('combining the current components...')
+                sol = combine_xyz(sol, square=with_power)
+            elif with_power:
+                sol *= sol
+            tfr_f += sol
+            del sol
+
+        tfr_e[:, f, :] += tfr_f
+        del tfr_f
+
+        if with_plv:
+            plv_f /= np.abs(plv_f)
+            plv_e[:, f, :] += plv_f
+            del plv_f
+
+    return tfr_e, plv_e
+
+
+@verbose
+def _source_induced_power(epochs, inverse_operator, frequencies, label=None,
+                          lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
+                          decim=1, use_fft=False, pca=True, pick_ori="normal",
+                          n_jobs=1, with_plv=True, zero_mean=False,
+                          prepared=False, verbose=None):
+    """Aux function for source induced power"""
+    epochs_data = epochs.get_data()
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=epochs, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
+
+    inv = inverse_operator
+    parallel, my_compute_source_tfrs, n_jobs = parallel_func(
+        _compute_pow_plv, n_jobs)
+    Fs = epochs.info['sfreq']  # sampling in Hz
+
+    logger.info('Computing source power ...')
+
+    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    n_jobs = min(n_jobs, len(epochs_data))
+    out = parallel(my_compute_source_tfrs(data=data, K=K, sel=sel, Ws=Ws,
+                                          source_ori=inv['source_ori'],
+                                          use_fft=use_fft, Vh=Vh,
+                                          with_plv=with_plv, with_power=True,
+                                          pick_ori=pick_ori, decim=decim)
+                   for data in np.array_split(epochs_data, n_jobs))
+    power = sum(o[0] for o in out)
+    power /= len(epochs_data)  # average power over epochs
+
+    if with_plv:
+        plv = sum(o[1] for o in out)
+        plv = np.abs(plv)
+        plv /= len(epochs_data)  # average plv over epochs
+    else:
+        plv = None
+
+    if method != "MNE":
+        power *= noise_norm.ravel()[:, None, None] ** 2
+
+    return power, plv, vertno
+
+
+@verbose
+def source_induced_power(epochs, inverse_operator, frequencies, label=None,
+                         lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
+                         decim=1, use_fft=False, pick_ori=None,
+                         baseline=None, baseline_mode='logratio', pca=True,
+                         n_jobs=1, zero_mean=False, prepared=False,
+                         verbose=None):
+    """Compute induced power and phase lock
+
+    Computation can optionally be restricted to a label.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    inverse_operator : instance of InverseOperator
+        The inverse operator.
+    frequencies : array
+        Array of frequencies of interest.
+    label : Label
+        Restricts the source estimates to a given label.
+    lambda2 : float
+        The regularization parameter of the minimum norm.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    decim : int
+        Temporal decimation factor.
+    use_fft : bool
+        Do convolutions in time or frequency domain with FFT.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction. If None, do not
+        apply it. If baseline is (a, b), the interval is between "a (s)" and
+        "b (s)". If a is None, the beginning of the data is used, and if b
+        is None, b is set to the end of the interval. If baseline equals
+        (None, None), the whole time interval is used.
+    baseline_mode : None | 'logratio' | 'zscore'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or zscore (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+    pca : bool
+        If True, the true dimension of the data is estimated before running
+        the time-frequency transforms. This reduces computation time, e.g.,
+        with a dataset that was maxfiltered (true dim is 64).
+    n_jobs : int
+        Number of jobs to run in parallel.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    method = _check_method(method)
+    pick_ori = _check_ori(pick_ori)
+
+    power, plv, vertno = _source_induced_power(
+        epochs, inverse_operator, frequencies, label=label, lambda2=lambda2,
+        method=method, nave=nave, n_cycles=n_cycles, decim=decim,
+        use_fft=use_fft, pick_ori=pick_ori, pca=pca, n_jobs=n_jobs,
+        zero_mean=zero_mean, prepared=prepared)
+
+    # Run baseline correction
+    if baseline is not None:
+        power = rescale(power, epochs.times[::decim], baseline, baseline_mode,
+                        copy=False)
+
+    return power, plv
+
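+# Hypothetical usage sketch (illustrative only; `epochs`, `inv` and `label`
+# are assumed to exist):
+#
+#     import numpy as np
+#     freqs = np.arange(7., 30., 2.)  # frequencies of interest in Hz
+#     power, plv = source_induced_power(
+#         epochs, inv, freqs, label=label, baseline=(-0.1, 0),
+#         baseline_mode='percent', n_cycles=2)
+#     # power and plv have shape (n_sources, n_freqs, n_times)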
+
+@verbose
+def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
+                       tmin=None, tmax=None, fmin=0., fmax=200.,
+                       n_fft=2048, overlap=0.5, pick_ori=None, label=None,
+                       nave=1, pca=True, prepared=False, verbose=None):
+    """Compute source power spectrum density (PSD)
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    inverse_operator : instance of InverseOperator
+        The inverse operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    tmin : float | None
+        The beginning of the time interval of interest (in seconds). If
+        None, start from the beginning of the file.
+    tmax : float | None
+        The end of the time interval of interest (in seconds). If None,
+        stop at the end of the file.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    n_fft : int
+        Window size for the FFT. Should be a power of 2.
+    overlap : float
+        The overlap fraction between windows. Should be between 0 and 1.
+        0 means no overlap.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    label : Label
+        Restricts the source estimates to a given label.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    pca : bool
+        If True, the true dimension of the data is estimated before running
+        the time-frequency transforms. This reduces computation time, e.g.,
+        with a dataset that was maxfiltered (true dim is 64).
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The PSD (in dB) of each of the sources.
+    """
+    from scipy.signal import hanning
+    pick_ori = _check_ori(pick_ori)
+
+    logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
+
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=raw, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
+
+    start, stop = 0, raw.last_samp + 1 - raw.first_samp
+    if tmin is not None:
+        start = raw.time_as_index(tmin)[0]
+    if tmax is not None:
+        stop = raw.time_as_index(tmax)[0] + 1
+    n_fft = int(n_fft)
+    Fs = raw.info['sfreq']
+    window = hanning(n_fft)
+    freqs = fftpack.fftfreq(n_fft, 1. / Fs)
+    freqs_mask = (freqs >= 0) & (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freqs_mask]
+    fstep = np.mean(np.diff(freqs))
+    psd = np.zeros((K.shape[0], np.sum(freqs_mask)))
+    n_windows = 0
+
+    for this_start in np.arange(start, stop, int(n_fft * (1. - overlap))):
+        data, _ = raw[sel, this_start:this_start + n_fft]
+        if data.shape[1] < n_fft:
+            logger.info("Skipping last buffer")
+            break
+
+        if Vh is not None:
+            data = np.dot(Vh, data)  # reducing data rank
+
+        data *= window[None, :]
+
+        data_fft = fftpack.fft(data)[:, freqs_mask]
+        sol = np.dot(K, data_fft)
+
+        if is_free_ori and pick_ori is None:
+            sol = combine_xyz(sol, square=True)
+        else:
+            sol = (sol * sol.conj()).real
+
+        if method != "MNE":
+            sol *= noise_norm ** 2
+
+        psd += sol
+        n_windows += 1
+
+    psd /= n_windows
+
+    psd = 10 * np.log10(psd)
+
+    subject = _subject_from_inverse(inverse_operator)
+    stc = _make_stc(psd, vertices=vertno, tmin=fmin * 1e-3,
+                    tstep=fstep * 1e-3, subject=subject)
+    return stc
+
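+# Hypothetical usage sketch (illustrative only; `raw`, `inv` and `label`
+# are assumed to exist):
+#
+#     stc = compute_source_psd(raw, inv, lambda2=1. / 9., method='dSPM',
+#                              tmin=0., tmax=20., fmin=55., fmax=65.,
+#                              n_fft=2048, overlap=0.1, label=label)
+#     # stc.data holds the PSD in dB; stc.times holds frequency / 1000.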
+
+@verbose
+def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
+                               method="dSPM", fmin=0., fmax=200.,
+                               pick_ori=None, label=None, nave=1,
+                               pca=True, inv_split=None, bandwidth=4.,
+                               adaptive=False, low_bias=True, n_jobs=1,
+                               prepared=False, verbose=None):
+    """ Generator for compute_source_psd_epochs """
+
+    logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
+
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=epochs, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
+
+    # split the inverse operator
+    if inv_split is not None:
+        K_split = np.array_split(K, inv_split)
+    else:
+        K_split = [K]
+
+    # compute DPSS windows
+    n_times = len(epochs.times)
+    sfreq = epochs.info['sfreq']
+
+    # compute standardized half-bandwidth
+    half_nbw = float(bandwidth) * n_times / (2 * sfreq)
+    if half_nbw < 0.5:
+        warnings.warn('Bandwidth too small, using minimum (normalized 0.5)')
+        half_nbw = 0.5
+    n_tapers_max = int(2 * half_nbw)
+
+    dpss, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                 low_bias=low_bias)
+    n_tapers = len(dpss)
+
+    logger.info('Using %d tapers with bandwidth %0.1f Hz'
+                % (n_tapers, bandwidth))
+
+    if adaptive and len(eigvals) < 3:
+        warn('Not adaptively combining the spectral estimators '
+             'due to a low number of tapers.')
+        adaptive = False
+
+    if adaptive:
+        parallel, my_psd_from_mt_adaptive, n_jobs = \
+            parallel_func(_psd_from_mt_adaptive, n_jobs)
+    else:
+        weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+
+    subject = _subject_from_inverse(inverse_operator)
+    for k, e in enumerate(epochs):
+        logger.info("Processing epoch : %d" % (k + 1))
+        data = e[sel]
+
+        if Vh is not None:
+            data = np.dot(Vh, data)  # reducing data rank
+
+        # compute tapered spectra in sensor space
+        x_mt, freqs = _mt_spectra(data, dpss, sfreq)
+
+        if k == 0:
+            freq_mask = (freqs >= fmin) & (freqs <= fmax)
+            fstep = np.mean(np.diff(freqs))
+
+        # allocate space for output
+        psd = np.empty((K.shape[0], np.sum(freq_mask)))
+
+        # Optionally, we split the inverse operator into parts to save memory.
+        # Without splitting the tapered spectra in source space have size
+        # (n_vertices x n_tapers x n_times / 2)
+        pos = 0
+        for K_part in K_split:
+            # allocate space for tapered spectra in source space
+            x_mt_src = np.empty((K_part.shape[0], x_mt.shape[1],
+                                x_mt.shape[2]), dtype=x_mt.dtype)
+
+            # apply inverse to each taper
+            for i in range(n_tapers):
+                x_mt_src[:, i, :] = np.dot(K_part, x_mt[:, i, :])
+
+            # compute the psd
+            if adaptive:
+                out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
+                               for x in np.array_split(x_mt_src,
+                                                       min(n_jobs,
+                                                           len(x_mt_src))))
+                this_psd = np.concatenate(out)
+            else:
+                x_mt_src = x_mt_src[:, :, freq_mask]
+                this_psd = _psd_from_mt(x_mt_src, weights)
+
+            psd[pos:pos + K_part.shape[0], :] = this_psd
+            pos += K_part.shape[0]
+
+        # combine orientations
+        if is_free_ori and pick_ori is None:
+            psd = combine_xyz(psd, square=False)
+
+        if method != "MNE":
+            psd *= noise_norm ** 2
+
+        stc = _make_stc(psd, tmin=fmin, tstep=fstep, vertices=vertno,
+                        subject=subject)
+
+        # we return a generator object for "stream processing"
+        yield stc
+
+
+@verbose
+def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
+                              method="dSPM", fmin=0., fmax=200.,
+                              pick_ori=None, label=None, nave=1,
+                              pca=True, inv_split=None, bandwidth=4.,
+                              adaptive=False, low_bias=True,
+                              return_generator=False, n_jobs=1,
+                              prepared=False, verbose=None):
+    """Compute source power spectrum density (PSD) from Epochs using
+       multi-taper method
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The raw data.
+    inverse_operator : instance of InverseOperator
+        The inverse operator.
+    lambda2 : float
+        The regularization parameter.
+    method : "MNE" | "dSPM" | "sLORETA"
+        Use minimum norm, dSPM or sLORETA.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    pick_ori : None | "normal"
+        If "normal", rather than pooling the orientations by taking the norm,
+        only the radial component is kept. This is only implemented
+        when working with loose orientations.
+    label : Label
+        Restricts the source estimates to a given label.
+    nave : int
+        The number of averages used to scale the noise covariance matrix.
+    pca : bool
+        If True, the true dimension of the data is estimated before running
+        the time-frequency transforms. This reduces computation time, e.g.,
+        with a dataset that was maxfiltered (true dim is 64).
+    inv_split : int or None
+        Split inverse operator into inv_split parts in order to save memory.
+    bandwidth : float
+        The bandwidth of the multi taper windowing function in Hz.
+    adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD
+        (slow, use n_jobs >> 1 to speed up computation).
+    low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    n_jobs : int
+        Number of parallel jobs to use (only used if adaptive=True).
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stcs : list (or generator object) of SourceEstimate | VolSourceEstimate
+        The source space PSDs for each epoch.
+    """
+
+    # use an auxiliary function so we can either return a generator or a list
+    stcs_gen = _compute_source_psd_epochs(epochs, inverse_operator,
+                                          lambda2=lambda2, method=method,
+                                          fmin=fmin, fmax=fmax,
+                                          pick_ori=pick_ori, label=label,
+                                          nave=nave, pca=pca,
+                                          inv_split=inv_split,
+                                          bandwidth=bandwidth,
+                                          adaptive=adaptive,
+                                          low_bias=low_bias, n_jobs=n_jobs,
+                                          prepared=prepared)
+
+    if return_generator:
+        # return generator object
+        return stcs_gen
+    else:
+        # return a list
+        return list(stcs_gen)
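
For reference, a sketch of streaming consumption with the generator variant
(`epochs`, `inv` and `label` assumed to exist):

    stcs = compute_source_psd_epochs(epochs, inv, lambda2=1. / 9.,
                                     method='dSPM', label=label,
                                     bandwidth=4., return_generator=True)
    for stc in stcs:
        # one PSD SourceEstimate per epoch; frequencies live in stc.times
        peak_freq = stc.times[stc.data.mean(axis=0).argmax()]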
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/misc.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/misc.py
new file mode 100644
index 0000000..ab5f1bd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/misc.py
@@ -0,0 +1,108 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Scott Burns <sburns at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+
+def parse_config(fname):
+    """Parse a config file (like .ave and .cov files)
+
+    Parameters
+    ----------
+    fname : string
+        config file name
+
+    Returns
+    -------
+    conditions : list of dict
+        Each condition is indexed by the event type.
+        A condition contains as keys::
+
+            tmin, tmax, name, grad_reject, mag_reject,
+            eeg_reject, eog_reject
+
+    """
+    reject_params = read_reject_parameters(fname)
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    cat_ind = [i for i, x in enumerate(lines) if "category {" in x]
+    event_dict = dict()
+    for ind in cat_ind:
+        for k in range(ind + 1, ind + 7):
+            words = lines[k].split()
+            if len(words) >= 2:
+                key = words[0]
+                if key == 'event':
+                    event = int(words[1])
+                    break
+        else:
+            raise ValueError('Could not find event id.')
+        event_dict[event] = dict(**reject_params)
+        for k in range(ind + 1, ind + 7):
+            words = lines[k].split()
+            if len(words) >= 2:
+                key = words[0]
+                if key == 'name':
+                    name = ' '.join(words[1:])
+                    if name[0] == '"':
+                        name = name[1:]
+                    if name[-1] == '"':
+                        name = name[:-1]
+                    event_dict[event]['name'] = name
+                if key in ['tmin', 'tmax', 'basemin', 'basemax']:
+                    event_dict[event][key] = float(words[1])
+    return event_dict
+
+
+def read_reject_parameters(fname):
+    """Read rejection parameters from .cov or .ave config file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to read.
+    """
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    reject_names = ['gradReject', 'magReject', 'eegReject', 'eogReject',
+                    'ecgReject']
+    reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
+    reject = dict()
+    for line in lines:
+        words = line.split()
+        if len(words) > 1 and words[0] in reject_names:
+            reject[reject_pynames[reject_names.index(words[0])]] = \
+                float(words[1])
+
+    return reject
+
+
+def read_flat_parameters(fname):
+    """Read flat channel rejection parameters from .cov or .ave config file"""
+
+    try:
+        with open(fname, 'r') as f:
+            lines = f.readlines()
+    except Exception:
+        raise ValueError("Error while reading %s" % fname)
+
+    reject_names = ['gradFlat', 'magFlat', 'eegFlat', 'eogFlat', 'ecgFlat']
+    reject_pynames = ['grad', 'mag', 'eeg', 'eog', 'ecg']
+    flat = dict()
+    for line in lines:
+        words = line.split()
+        if len(words) > 1 and words[0] in reject_names:
+            flat[reject_pynames[reject_names.index(words[0])]] = \
+                float(words[1])
+
+    return flat
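
For reference, a sketch of reading rejection parameters from an MNE config
file (the file name is hypothetical):

    from mne.misc import parse_config, read_reject_parameters

    conditions = parse_config('audvis.ave')  # dict indexed by event id
    reject = read_reject_parameters('audvis.ave')
    # e.g. reject == dict(grad=4000e-13, mag=4e-12, eog=150e-6)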
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/parallel.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/parallel.py
new file mode 100644
index 0000000..8c93acc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/parallel.py
@@ -0,0 +1,148 @@
+"""Parallel util function
+"""
+
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+from .externals.six import string_types
+import inspect
+import logging
+import os
+
+from . import get_config
+from .utils import logger, verbose
+
+if 'MNE_FORCE_SERIAL' in os.environ:
+    _force_serial = True
+else:
+    _force_serial = None
+
+
+@verbose
+def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
+    """Return parallel instance with delayed function
+
+    Util function to use joblib only if available
+
+    Parameters
+    ----------
+    func : callable
+        A function.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        INFO or DEBUG will print parallel status, others will not.
+    max_nbytes : int, str, or None
+        Threshold on the minimum size of arrays passed to the workers that
+        triggers automated memory mapping. Can be an int in Bytes,
+        or a human-readable string, e.g., '1M' for 1 megabyte.
+        Use None to disable memmapping of large arrays. Use 'auto' to
+        use the value set using mne.set_memmap_min_size.
+
+    Returns
+    -------
+    parallel : instance of joblib.Parallel or list
+        The parallel object.
+    my_func : callable
+        func if not parallel or delayed(func).
+    n_jobs : int
+        Number of jobs >= 0.
+    """
+    # for a single job, we don't need joblib
+    if n_jobs == 1:
+        my_func = func
+        parallel = list
+        return parallel, my_func, n_jobs
+
+    try:
+        from joblib import Parallel, delayed
+    except ImportError:
+        try:
+            from sklearn.externals.joblib import Parallel, delayed
+        except ImportError:
+            logger.warning('joblib not installed. Cannot run in parallel.')
+            n_jobs = 1
+            my_func = func
+            parallel = list
+            return parallel, my_func, n_jobs
+
+    # check if joblib is recent enough to support memmapping
+    p_args = inspect.getargspec(Parallel.__init__).args
+    joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
+
+    cache_dir = get_config('MNE_CACHE_DIR', None)
+    if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':
+        max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
+
+    if max_nbytes is not None:
+        if not joblib_mmap and cache_dir is not None:
+            logger.warning('"MNE_CACHE_DIR" is set but a newer version of '
+                           'joblib is needed to use the memmapping pool.')
+        if joblib_mmap and cache_dir is None:
+            logger.info('joblib supports memmapping pool but "MNE_CACHE_DIR" '
+                        'is not set in MNE-Python config. To enable it, use, '
+                        'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
+                        'store temporary files under /dev/shm and can result '
+                        'in large memory savings.')
+
+    # create keyword arguments for Parallel
+    kwargs = {'verbose': 5 if logger.level <= logging.INFO else 0}
+
+    if joblib_mmap:
+        if cache_dir is None:
+            max_nbytes = None  # disable memmapping
+        kwargs['temp_folder'] = cache_dir
+        kwargs['max_nbytes'] = max_nbytes
+
+    n_jobs = check_n_jobs(n_jobs)
+    parallel = Parallel(n_jobs, **kwargs)
+    my_func = delayed(func)
+    return parallel, my_func, n_jobs
+
+
+def check_n_jobs(n_jobs, allow_cuda=False):
+    """Check n_jobs in particular for negative values
+
+    Parameters
+    ----------
+    n_jobs : int
+        The number of jobs.
+    allow_cuda : bool
+        Allow n_jobs to be 'cuda'. Default: False.
+
+    Returns
+    -------
+    n_jobs : int
+        The checked number of jobs. Always positive (or 'cuda' if
+        applicable).
+    """
+    if not isinstance(n_jobs, int):
+        if not allow_cuda:
+            raise ValueError('n_jobs must be an integer')
+        elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':
+            raise ValueError('n_jobs must be an integer, or "cuda"')
+        # else, we have n_jobs='cuda' and this is okay, so do nothing
+    elif _force_serial:
+        n_jobs = 1
+        logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
+                    'serial mode.')
+    elif n_jobs <= 0:
+        try:
+            import multiprocessing
+            n_cores = multiprocessing.cpu_count()
+            n_jobs = min(n_cores + n_jobs + 1, n_cores)
+            if n_jobs <= 0:
+                raise ValueError('If n_jobs has a negative value, it must not '
+                                 'be less than the number of CPUs present. '
+                                 'You\'ve got %s CPUs' % n_cores)
+        except ImportError:
+            # only warn if they tried to use something other than 1 job
+            if n_jobs != 1:
+                logger.warning('multiprocessing not installed. Cannot run in '
+                               'parallel.')
+                n_jobs = 1
+
+    return n_jobs
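+
+# Illustrative usage sketch (not part of the original module), assuming it
+# is importable as mne.parallel; the function and data are hypothetical.
+#
+#     from mne.parallel import parallel_func
+#     parallel, p_fun, n_jobs = parallel_func(sum, n_jobs=2)
+#     out = parallel(p_fun(chunk) for chunk in ([1, 2], [3, 4]))
+#     # out == [3, 7]; with n_jobs=1 the same code runs serially via list()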
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/__init__.py
new file mode 100644
index 0000000..e1f6420
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/__init__.py
@@ -0,0 +1,19 @@
+"""Preprocessing with artifact detection, SSP, and ICA"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+from .maxfilter import apply_maxfilter
+from .ssp import compute_proj_ecg, compute_proj_eog
+from .eog import find_eog_events, create_eog_epochs
+from .ecg import find_ecg_events, create_ecg_epochs
+from .ica import (ICA, ica_find_eog_events, ica_find_ecg_events,
+                  get_score_funcs, read_ica, run_ica)
+from .bads import find_outliers
+from .stim import fix_stim_artifact
+from .maxwell import _maxwell_filter
+from .xdawn import Xdawn
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/bads.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/bads.py
new file mode 100644
index 0000000..c2f6827
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/bads.py
@@ -0,0 +1,40 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+# License: BSD (3-clause)
+
+
+import numpy as np
+
+
+def find_outliers(X, threshold=3.0, max_iter=2):
+    """Find outliers based on iterated Z-scoring
+
+    This procedure compares the absolute z-score against the threshold.
+    After excluding local outliers, the comparison is repeated until no
+    local outlier is present anymore or ``max_iter`` is reached.
+
+    Parameters
+    ----------
+    X : np.ndarray of float, shape (n_elements,)
+        The scores for which to find outliers.
+    threshold : float
+        The value above which a feature is classified as an outlier.
+    max_iter : int
+        The maximum number of iterations.
+
+    Returns
+    -------
+    bad_idx : np.ndarray of int, shape (n_outliers,)
+        The outlier indices.
+    """
+    from scipy.stats import zscore
+    my_mask = np.zeros(len(X), dtype=np.bool)
+    for _ in range(max_iter):
+        X = np.ma.masked_array(X, my_mask)
+        this_z = np.abs(zscore(X))
+        local_bad = this_z > threshold
+        my_mask = np.max([my_mask, local_bad], 0)
+        if not np.any(local_bad):
+            break
+
+    bad_idx = np.where(my_mask)[0]
+    return bad_idx
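+
+# Illustrative usage sketch (not part of the original file); the scores are
+# hypothetical. With a low threshold the extreme value at index 3 is flagged
+# on the first pass; the remaining values pass the repeated z-scoring.
+#
+#     scores = np.array([1., 2., 1.5, 40., 2.1])
+#     find_outliers(scores, threshold=1.5)  # -> array([3])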
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ctps_.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ctps_.py
new file mode 100644
index 0000000..606a9de
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ctps_.py
@@ -0,0 +1,169 @@
+# Authors: Juergen Dammers <j.dammers at fz-juelich.de>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: Simplified BSD
+import math
+
+import numpy as np
+
+
+def _compute_normalized_phase(data):
+    """Compute normalized phase angles
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_epochs, n_sources, n_times)
+        The data to compute the phase angles for.
+
+    Returns
+    -------
+    phase_angles : ndarray, shape (n_epochs, n_sources, n_times)
+        The normalized phase angles.
+    """
+    from scipy.signal import hilbert
+    return (np.angle(hilbert(data)) + np.pi) / (2 * np.pi)
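+
+# Sanity-check sketch (illustrative, not part of the original file): the
+# normalized phase angles land in [0, 1].
+#
+#     data = np.random.randn(2, 3, 100)  # epochs x sources x times
+#     ph = _compute_normalized_phase(data)
+#     assert ph.min() >= 0. and ph.max() <= 1.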
+
+
+def ctps(data, is_raw=True):
+    """Compute cross-trial-phase-statistics [1]
+
+    Note: it is assumed that the sources are already
+    appropriately filtered.
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_epochs, n_channels, n_times)
+        Any kind of data of dimensions trials, traces, features.
+    is_raw : bool
+        If True, it is assumed that the data have not been transformed to
+        Hilbert space and that the phase angles have not been normalized.
+        Defaults to True.
+
+    Returns
+    -------
+    ks_dynamics : ndarray, shape (n_sources, n_times)
+        The Kuiper statistics.
+    pk_dynamics : ndarray, shape (n_sources, n_times)
+        The normalized Kuiper index for ICA sources and
+        time slices.
+    phase_angles : ndarray, shape (n_epochs, n_sources, n_times) | None
+        The phase values for epochs, sources and time slices. If ``is_raw``
+        is False, None is returned.
+
+    References
+    ----------
+    [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
+        M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
+        and phase statistics for complete artifact removal in independent
+        components of neuromagnetic recordings. Biomedical
+        Engineering, IEEE Transactions on 55 (10), 2353-2362.
+    """
+    if data.ndim != 3:
+        raise ValueError('Data must have 3 dimensions, not %i.' % data.ndim)
+
+    if is_raw:
+        phase_angles = _compute_normalized_phase(data)
+    else:
+        phase_angles = data  # phase angles can be computed externally
+
+    # initialize array for results
+    ks_dynamics = np.zeros_like(phase_angles[0])
+    pk_dynamics = np.zeros_like(phase_angles[0])
+
+    # calculate Kuiper's statistic for each source
+    for ii, source in enumerate(np.transpose(phase_angles, [1, 0, 2])):
+        ks, pk = kuiper(source)
+        pk_dynamics[ii, :] = pk
+        ks_dynamics[ii, :] = ks
+
+    return ks_dynamics, pk_dynamics, phase_angles if is_raw else None
+
+
+def kuiper(data, dtype=np.float64):
+    """ Kuiper's test of uniform distribution
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_sources,) | (n_sources, n_times)
+        Empirical distribution.
+    dtype : str | obj
+        The data type to be used.
+
+    Returns
+    -------
+    ks : ndarray
+        Kuiper's statistic.
+    pk : ndarray
+        Normalized probability of Kuiper's statistic [0, 1].
+    """
+    # np.sort implicitly converts non-array input and returns a copy;
+    # note: the data array is sorted along its first (trial) axis
+    data = np.sort(data, axis=0).astype(dtype)
+    shape = data.shape
+    n_dim = len(shape)
+    n_trials = shape[0]
+
+    # create uniform cdf
+    j1 = (np.arange(n_trials, dtype=dtype) + 1.) / float(n_trials)
+    j2 = np.arange(n_trials, dtype=dtype) / float(n_trials)
+    if n_dim > 1:  # 2D case: add axis to broadcast across time slices
+        j1 = j1[:, np.newaxis]
+        j2 = j2[:, np.newaxis]
+    d1 = (j1 - data).max(axis=0)
+    d2 = (data - j2).max(axis=0)
+    n_eff = n_trials
+
+    d = d1 + d2  # Kuiper's statistic [n_time_slices]
+
+    return d, _prob_kuiper(d, n_eff, dtype=dtype)
+
+
+def _prob_kuiper(d, n_eff, dtype='f8'):
+    """ Test for statistical significance against uniform distribution.
+
+    Parameters
+    ----------
+    d : float
+        The Kuiper distance value.
+    n_eff : int
+        The effective number of elements.
+    dtype : str | obj
+        The data type to be used. Defaults to double precision floats.
+
+    Returns
+    -------
+    pk_norm : float
+        The normalized Kuiper value, such that 0 <= ``pk_norm`` <= 1.
+
+    References
+    ----------
+    [1] Stephens MA 1970. Journal of the Royal Statistical Society, ser. B,
+    vol 32, pp 115-122.
+
+    [2] Kuiper NH 1962. Proceedings of the Koninklijke Nederlandse Akademie
+    van Wetenschappen, ser. A, vol 63, pp 38-47.
+    """
+    n_time_slices = np.size(d)  # single value or vector
+    n_points = 100
+
+    en = math.sqrt(n_eff)
+    k_lambda = (en + 0.155 + 0.24 / en) * d  # see [1]
+    l2 = k_lambda ** 2.0
+    j2 = (np.arange(n_points) + 1) ** 2
+    j2 = j2.repeat(n_time_slices).reshape(n_points, n_time_slices)
+    fact = 4. * j2 * l2 - 1.
+    expo = np.exp(-2. * j2 * l2)
+    term = 2. * fact * expo
+    pk = term.sum(axis=0, dtype=dtype)
+
+    # Normalized pK to range [0,1]
+    pk_norm = np.zeros(n_time_slices)  # init pk_norm
+    pk_norm[pk > 0] = -np.log(pk[pk > 0]) / (2. * n_eff)
+    pk_norm[pk <= 0] = 1
+
+    # check for no difference to uniform cdf
+    pk_norm = np.where(k_lambda < 0.4, 0.0, pk_norm)
+
+    # check for round off errors
+    pk_norm = np.where(pk_norm > 1.0, 1.0, pk_norm)
+
+    return pk_norm
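+
+# Illustrative end-to-end sketch (not part of the original file): for phases
+# derived from random data the normalized Kuiper index should stay near
+# zero, whereas phase-locked trials drive it toward one.
+#
+#     rng = np.random.RandomState(0)
+#     data = rng.randn(30, 2, 500)     # epochs x sources x times
+#     ks, pk, phases = ctps(data)      # pk_dynamics stays small here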
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ecg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ecg.py
new file mode 100644
index 0000000..1976318
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ecg.py
@@ -0,0 +1,317 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+from ..externals.six import string_types
+import numpy as np
+
+from .. import pick_types, pick_channels
+from ..utils import logger, verbose, sum_squared
+from ..filter import band_pass_filter
+from ..epochs import Epochs, _BaseEpochs
+from ..io.base import _BaseRaw
+from ..evoked import Evoked
+
+
+def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
+                 l_freq=5, h_freq=35, tstart=0, filter_length='10s'):
+    """Detect QRS component in ECG channels.
+
+    The QRS complex is the main wave of the heartbeat.
+
+    Parameters
+    ----------
+    sfreq : float
+        Sampling rate in Hz.
+    ecg : array
+        ECG signal.
+    thresh_value : float | str
+        QRS detection threshold. Can also be "auto" for automatic
+        selection of the threshold.
+    levels : float
+        Number of standard deviations from the mean to include for detection.
+    n_thresh : int
+        Maximum number of crossings.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    tstart : float
+        Start detection after tstart seconds.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+
+    Returns
+    -------
+    events : array
+        Indices of ECG peaks.
+    """
+    win_size = int(round((60.0 * sfreq) / 120.0))
+
+    filtecg = band_pass_filter(ecg, sfreq, l_freq, h_freq,
+                               filter_length=filter_length)
+
+    ecg_abs = np.abs(filtecg)
+    init = int(sfreq)
+
+    n_samples_start = int(sfreq * tstart)
+    ecg_abs = ecg_abs[n_samples_start:]
+
+    n_points = len(ecg_abs)
+
+    maxpt = np.empty(3)
+    maxpt[0] = np.max(ecg_abs[:init])
+    maxpt[1] = np.max(ecg_abs[init:init * 2])
+    maxpt[2] = np.max(ecg_abs[init * 2:init * 3])
+
+    init_max = np.mean(maxpt)
+
+    if thresh_value == 'auto':
+        thresh_runs = np.arange(0.3, 1.1, 0.05)
+    elif isinstance(thresh_value, string_types):
+        raise ValueError('threshold value must be "auto" or a float')
+    else:
+        thresh_runs = [thresh_value]
+
+    # Try a few thresholds (or just one)
+    clean_events = list()
+    for thresh_value in thresh_runs:
+        thresh1 = init_max * thresh_value
+        numcross = list()
+        time = list()
+        rms = list()
+        ii = 0
+        while ii < (n_points - win_size):
+            window = ecg_abs[ii:ii + win_size]
+            if window[0] > thresh1:
+                max_time = np.argmax(window)
+                time.append(ii + max_time)
+                nx = np.sum(np.diff((window > thresh1).astype(int)))
+                numcross.append(nx)
+                rms.append(np.sqrt(sum_squared(window) / window.size))
+                ii += win_size
+            else:
+                ii += 1
+
+        if len(rms) == 0:
+            rms.append(0.0)
+            time.append(0.0)
+        time = np.array(time)
+        rms_mean = np.mean(rms)
+        rms_std = np.std(rms)
+        rms_thresh = rms_mean + (rms_std * levels)
+        b = np.where(rms < rms_thresh)[0]
+        a = np.array(numcross)[b]
+        ce = time[b[a < n_thresh]]
+
+        ce += n_samples_start
+        clean_events.append(ce)
+
+    # pick the best threshold; first get effective heart rates
+    rates = np.array([60. * len(cev) / (len(ecg) / float(sfreq))
+                      for cev in clean_events])
+
+    # now find heart rates that seem reasonable (infant through adult athlete)
+    idx = np.where(np.logical_and(rates <= 160., rates >= 40.))[0]
+    if len(idx) > 0:
+        ideal_rate = np.median(rates[idx])  # get close to the median
+    else:
+        ideal_rate = 80.  # get close to a reasonable default
+    idx = np.argmin(np.abs(rates - ideal_rate))
+    clean_events = clean_events[idx]
+    return clean_events
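+
+# Usage sketch (illustrative): detect peaks on a synthetic, ECG-like trace.
+# The signal below is hypothetical; real usage passes a measured ECG channel.
+#
+#     sfreq = 1000.
+#     t = np.arange(int(10 * sfreq)) / sfreq
+#     ecg = np.sin(2 * np.pi * 1.2 * t) ** 20  # sharp periodic peaks
+#     peaks = qrs_detector(sfreq, ecg, thresh_value='auto')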
+
+
+@verbose
+def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
+                    l_freq=5, h_freq=35, qrs_threshold='auto',
+                    filter_length='10s', verbose=None):
+    """Find ECG peaks
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    event_id : int
+        The index to assign to found events.
+    ch_name : None | str
+        The name of the channel to use for ECG peak detection.
+        If None (default), a synthetic ECG channel is created from
+        the cross-channel average. A synthetic channel can only be created
+        from 'meg' channels.
+    tstart : float
+        Start detection after tstart seconds. Useful when beginning
+        of run is noisy.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    qrs_threshold : float | str
+        Between 0 and 1. QRS detection threshold. Can also be "auto" to
+        automatically choose the threshold that generates a reasonable
+        number of heartbeats (40-160 beats / min).
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_events : array
+        Events.
+    ch_ecg : int | None
+        Index of the channel used, or None if a synthetic channel was used.
+    average_pulse : float
+        Estimated average pulse.
+    """
+    idx_ecg = _get_ecg_channel_index(ch_name, raw)
+    if idx_ecg is not None:
+        logger.info('Using channel %s to identify heart beats.'
+                    % raw.ch_names[idx_ecg])
+        ecg, times = raw[idx_ecg, :]
+    else:
+        ecg, times = _make_ecg(raw, None, None, verbose)
+
+    # detecting QRS and generating event file
+    ecg_events = qrs_detector(raw.info['sfreq'], ecg.ravel(), tstart=tstart,
+                              thresh_value=qrs_threshold, l_freq=l_freq,
+                              h_freq=h_freq, filter_length=filter_length)
+
+    n_events = len(ecg_events)
+    average_pulse = n_events * 60.0 / (times[-1] - times[0])
+    logger.info("Number of ECG events detected : %d (average pulse %d / "
+                "min.)" % (n_events, average_pulse))
+
+    ecg_events = np.array([ecg_events + raw.first_samp,
+                           np.zeros(n_events, int),
+                           event_id * np.ones(n_events, int)]).T
+    return ecg_events, idx_ecg, average_pulse
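+
+# Usage sketch (illustrative; ``raw`` is assumed to be an mne.io.Raw):
+#
+#     events, ch_ecg, pulse = find_ecg_events(raw)
+#     # events[:, 0] holds absolute sample indices of the detected R-peaks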
+
+
+def _get_ecg_channel_index(ch_name, inst):
+    """Geting ECG channel index. If no channel found returns None."""
+    if ch_name is None:
+        ecg_idx = pick_types(inst.info, meg=False, eeg=False, stim=False,
+                             eog=False, ecg=True, emg=False, ref_meg=False,
+                             exclude='bads')
+    else:
+        if ch_name not in inst.ch_names:
+            raise ValueError('%s not in channel list (%s)' %
+                             (ch_name, inst.ch_names))
+        ecg_idx = pick_channels(inst.ch_names, include=[ch_name])
+
+    if len(ecg_idx) == 0:
+        return None
+        # raise RuntimeError('No ECG channel found. Please specify ch_name '
+        #                    'parameter e.g. MEG 1531')
+
+    if len(ecg_idx) > 1:
+        warnings.warn('More than one ECG channel found. Using only %s.'
+                      % inst.ch_names[ecg_idx[0]])
+
+    return ecg_idx[0]
+
+
+@verbose
+def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
+                      tmin=-0.5, tmax=0.5, l_freq=8, h_freq=16, reject=None,
+                      flat=None, baseline=None, preload=True, verbose=None):
+    """Conveniently generate epochs around ECG artifact events
+
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    ch_name : None | str
+        The name of the channel to use for ECG peak detection.
+        If None (default), a synthetic ECG channel is created from
+        the cross-channel average. A synthetic channel can only be created
+        from 'meg' channels.
+    event_id : int
+        The index to assign to found events.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels are used).
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None, do not apply it. If baseline is (a, b),
+        the interval is between "a (s)" and "b (s)".
+        If a is None, the beginning of the data is used,
+        and if b is None, b is set to the end of the interval.
+        If baseline is equal to (None, None), the whole time
+        interval is used.
+    preload : bool
+        Preload epochs or not.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_epochs : instance of Epochs
+        Data epoched around ECG R-peaks.
+    """
+
+    events, _, _ = find_ecg_events(raw, ch_name=ch_name, event_id=event_id,
+                                   l_freq=l_freq, h_freq=h_freq,
+                                   verbose=verbose)
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=False)
+
+    # create epochs around ECG events and baseline (important)
+    ecg_epochs = Epochs(raw, events=events, event_id=event_id,
+                        tmin=tmin, tmax=tmax, proj=False,
+                        picks=picks, reject=reject, baseline=baseline,
+                        verbose=verbose, preload=preload)
+    return ecg_epochs
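+
+# Usage sketch (illustrative; ``raw`` is assumed to be a preloaded Raw):
+#
+#     ecg_epochs = create_ecg_epochs(raw, tmin=-0.5, tmax=0.5)
+#     ecg_evoked = ecg_epochs.average()  # heartbeat-locked average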
+
+
+@verbose
+def _make_ecg(inst, start, stop, verbose=None):
+    """Create ECG signal from cross channel average
+    """
+    if not any(c in inst for c in ['mag', 'grad']):
+        raise ValueError('Unable to generate artifical ECG channel')
+    for ch in ['mag', 'grad']:
+        if ch in inst:
+            break
+    logger.info('Reconstructing ECG signal from {0}'
+                .format({'mag': 'Magnetometers',
+                         'grad': 'Gradiometers'}[ch]))
+    picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
+    if isinstance(inst, _BaseRaw):
+        ecg, times = inst[picks, start:stop]
+    elif isinstance(inst, _BaseEpochs):
+        ecg = np.hstack(inst.crop(start, stop, copy=True).get_data())
+        times = inst.times
+    elif isinstance(inst, Evoked):
+        ecg = inst.data
+        times = inst.times
+    return ecg.mean(0), times
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/eog.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/eog.py
new file mode 100644
index 0000000..ece895c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/eog.py
@@ -0,0 +1,208 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .peak_finder import peak_finder
+from .. import pick_types, pick_channels
+from ..utils import logger, verbose
+from ..filter import band_pass_filter
+from ..epochs import Epochs
+
+
+@verbose
+def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
+                    filter_length='10s', ch_name=None, tstart=0,
+                    verbose=None):
+    """Locate EOG artifacts
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    event_id : int
+        The index to assign to found events.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    ch_name : str | None
+        If not None, use the specified channel(s) for EOG.
+    tstart : float
+        Start detection after tstart seconds.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eog_events : array
+        Events.
+    """
+
+    # Getting EOG Channel
+    eog_inds = _get_eog_channel_index(ch_name, raw)
+    logger.info('EOG channel index for this subject is: %s' % eog_inds)
+
+    eog, _ = raw[eog_inds, :]
+
+    eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
+                                  h_freq=h_freq,
+                                  sampling_rate=raw.info['sfreq'],
+                                  first_samp=raw.first_samp,
+                                  filter_length=filter_length,
+                                  tstart=tstart)
+
+    return eog_events
+
+
+def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
+                     filter_length='10s', tstart=0.):
+    """Helper function"""
+
+    logger.info('Filtering the data to remove DC offset to help '
+                'distinguish blinks from saccades')
+
+    # filter out the DC offset so blinks can be told apart from saccades
+    fmax = np.minimum(45, sampling_rate / 2.0 - 0.75)  # protect Nyquist
+    filteog = np.array([band_pass_filter(x, sampling_rate, 2, fmax,
+                                         filter_length=filter_length)
+                        for x in eog])
+    temp = np.sqrt(np.sum(filteog ** 2, axis=1))
+
+    indexmax = np.argmax(temp)
+
+    # easier to detect peaks with filtering.
+    filteog = band_pass_filter(eog[indexmax], sampling_rate, l_freq, h_freq,
+                               filter_length=filter_length)
+
+    # detecting eog blinks and generating event file
+
+    logger.info('Now detecting blinks and generating corresponding events')
+
+    temp = filteog - np.mean(filteog)
+    n_samples_start = int(sampling_rate * tstart)
+    if np.abs(np.max(temp)) > np.abs(np.min(temp)):
+        eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1)
+    else:
+        eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1)
+
+    eog_events += n_samples_start
+    n_events = len(eog_events)
+    logger.info("Number of EOG events detected : %d" % n_events)
+    eog_events = np.array([eog_events + first_samp,
+                           np.zeros(n_events, int),
+                           event_id * np.ones(n_events, int)]).T
+
+    return eog_events
+
+
+def _get_eog_channel_index(ch_name, inst):
+    if isinstance(ch_name, str):
+        # Check if multiple EOG Channels
+        if ',' in ch_name:
+            ch_name = ch_name.split(',')
+        else:
+            ch_name = [ch_name]
+
+        eog_inds = pick_channels(inst.ch_names, include=ch_name)
+
+        if len(eog_inds) == 0:
+            raise ValueError('%s not in channel list' % ch_name)
+        else:
+            logger.info('Using channel %s as EOG channel%s' % (
+                        " and ".join(ch_name),
+                        '' if len(eog_inds) < 2 else 's'))
+    elif ch_name is None:
+
+        eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
+                              eog=True, ecg=False, emg=False, ref_meg=False,
+                              exclude='bads')
+
+        if len(eog_inds) == 0:
+            logger.info('No EOG channels found')
+            logger.info('Trying with EEG 061 and EEG 062')
+            eog_inds = pick_channels(inst.ch_names,
+                                     include=['EEG 061', 'EEG 062'])
+            if len(eog_inds) != 2:
+                raise RuntimeError('EEG 061 or EEG 062 channel not found!')
+
+    else:
+        raise ValueError('ch_name must be None or a string, '
+                         'got %r.' % (ch_name,))
+    return eog_inds
+
+
+@verbose
+def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
+                      tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
+                      reject=None, flat=None, baseline=None,
+                      preload=True, verbose=None):
+    """Conveniently generate epochs around EOG artifact events
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    ch_name : str
+        The name of the channel to use for EOG peak detection.
+        The argument is mandatory if the dataset contains no EOG channels.
+    event_id : int
+        The index to assign to found events.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels
+        are used).
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    baseline : tuple or list of length 2, or None
+        The time interval to apply rescaling / baseline correction.
+        If None, do not apply it. If baseline is (a, b),
+        the interval is between "a (s)" and "b (s)".
+        If a is None, the beginning of the data is used,
+        and if b is None, b is set to the end of the interval.
+        If baseline is equal to (None, None), the whole time
+        interval is used.
+    preload : bool
+        Preload epochs or not.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eog_epochs : instance of Epochs
+        Data epoched around EOG events.
+    """
+    events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
+                             l_freq=l_freq, h_freq=h_freq)
+
+    # create epochs around EOG events
+    eog_epochs = Epochs(raw, events=events, event_id=event_id,
+                        tmin=tmin, tmax=tmax, proj=False, reject=reject,
+                        flat=flat, picks=picks, baseline=baseline,
+                        preload=preload)
+    return eog_epochs
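+
+# Usage sketch (illustrative; ``raw`` is assumed to contain an EOG channel):
+#
+#     eog_epochs = create_eog_epochs(raw)
+#     eog_evoked = eog_epochs.average()  # blink-locked average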
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ica.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ica.py
new file mode 100644
index 0000000..0d7f42b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ica.py
@@ -0,0 +1,2453 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Juergen Dammers <j.dammers at fz-juelich.de>
+#
+# License: BSD (3-clause)
+
+from inspect import getargspec, isfunction
+from collections import namedtuple
+from copy import deepcopy
+
+import os
+import json
+
+import numpy as np
+from scipy import linalg
+
+from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
+                  create_ecg_epochs)
+from .eog import _find_eog_events, _get_eog_channel_index
+from .infomax_ import infomax
+
+from ..cov import compute_whitener
+from .. import Covariance, Evoked
+from ..io.pick import (pick_types, pick_channels, pick_info)
+from ..io.write import (write_double_matrix, write_string,
+                        write_name_list, write_int, start_block,
+                        end_block)
+from ..io.tree import dir_tree_find
+from ..io.open import fiff_open
+from ..io.tag import read_tag
+from ..io.meas_info import write_meas_info, read_meas_info
+from ..io.constants import Bunch, FIFF
+from ..io.base import _BaseRaw
+from ..epochs import _BaseEpochs
+from ..viz import (plot_ica_components, plot_ica_scores,
+                   plot_ica_sources, plot_ica_overlay)
+from ..viz.utils import (_prepare_trellis, tight_layout,
+                         _setup_vmin_vmax)
+from ..viz.topomap import (_prepare_topo_plot, _check_outlines,
+                           plot_topomap)
+
+from ..channels.channels import _contains_ch_type, ContainsMixin
+from ..io.write import start_file, end_file, write_id
+from ..utils import (check_version, logger, check_fname, verbose,
+                     _reject_data_segments, check_random_state,
+                     _get_fast_dot, compute_corr)
+from ..filter import band_pass_filter
+from .bads import find_outliers
+from .ctps_ import ctps
+from ..externals.six import string_types, text_type
+
+
+def _make_xy_sfunc(func, ndim_output=False):
+    """Aux function"""
+    if ndim_output:
+        def sfunc(x, y):
+            return np.array([func(a, y.ravel()) for a in x])[:, 0]
+    else:
+        def sfunc(x, y):
+            return np.array([func(a, y.ravel()) for a in x])
+    sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
+    sfunc.__doc__ = func.__doc__
+    return sfunc
+
+
+# makes score funcs attr accessible for users
+def get_score_funcs():
+    """Helper to get the score functions"""
+    from scipy import stats
+    from scipy.spatial import distance
+    score_funcs = Bunch()
+    xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
+                         if isfunction(f) and not n.startswith('_')]
+    xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
+                          if isfunction(f) and not n.startswith('_')]
+    score_funcs.update(dict((n, _make_xy_sfunc(f))
+                            for n, f in xy_arg_dist_funcs
+                            if getargspec(f).args == ['u', 'v']))
+    score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
+                            for n, f in xy_arg_stats_funcs
+                            if getargspec(f).args == ['x', 'y']))
+    return score_funcs
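+
+# Usage sketch (illustrative): the returned Bunch maps names to functions of
+# signature f(x, y); the exact contents depend on the installed scipy.
+#
+#     score_funcs = get_score_funcs()
+#     'correlation' in score_funcs  # a scipy.spatial.distance function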
+
+
+__all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
+           'get_score_funcs', 'read_ica', 'run_ica']
+
+
+class ICA(ContainsMixin):
+
+    """M/EEG signal decomposition using Independent Component Analysis (ICA)
+
+    This object can be used to estimate ICA components and then
+    remove some from Raw or Epochs for data exploration or artifact
+    correction.
+
+    Caveat! If supplying a noise covariance, keep track of the projections
+    available in the cov or in the raw object. For example, if you are
+    interested in EOG or ECG artifacts, EOG and ECG projections should be
+    temporarily removed before fitting the ICA. You can say::
+
+        >> projs, raw.info['projs'] = raw.info['projs'], []
+        >> ica.fit(raw)
+        >> raw.info['projs'] = projs
+
+    Parameters
+    ----------
+    n_components : int | float | None
+        The number of components used for ICA decomposition. If int, it must
+        be smaller than max_pca_components. If None, all PCA components will
+        be used. If float between 0 and 1, components will be selected by the
+        cumulative percentage of explained variance.
+    max_pca_components : int | None
+        The number of components used for PCA decomposition. If None, no
+        dimension reduction will be applied and max_pca_components will equal
+        the number of channels supplied on decomposing data. Defaults to None.
+    n_pca_components : int | float
+        The number of PCA components used after ICA recomposition. The ensuing
+        attribute allows one to balance noise reduction against potential loss
+        of features due to dimensionality reduction. If greater than
+        ``self.n_components_``, the next ``n_pca_components`` minus
+        ``n_components_`` PCA components will be added before restoring the
+        sensor space data. The attribute gets updated each time the
+        corresponding parameter in .pick_sources_raw or .pick_sources_epochs
+        is changed. If float, the number of components selected matches the
+        number of components with a cumulative explained variance below
+        ``n_pca_components``.
+    noise_cov : None | instance of mne.cov.Covariance
+        Noise covariance used for whitening. If None, channels are just
+        z-scored.
+    random_state : None | int | instance of np.random.RandomState
+        np.random.RandomState to initialize the FastICA estimation.
+        As the estimation is non-deterministic it can be useful to
+        fix the seed to have reproducible results. Defaults to None.
+    method : {'fastica', 'infomax', 'extended-infomax'}
+        The ICA method to use. Defaults to 'fastica'.
+    fit_params : dict | None
+        Additional parameters passed to the ICA estimator chosen by `method`.
+    max_iter : int, optional
+        Maximum number of iterations during fit.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    current_fit : str
+        Flag informing about which data type (raw or epochs) was used for
+        the fit.
+    ch_names : list-like
+        Channel names resulting from initial picking.
+    ``n_components_`` : int
+        If fit, the actual number of components used for ICA decomposition.
+    n_pca_components : int
+        See above.
+    max_pca_components : int
+        The number of components used for PCA dimensionality reduction.
+    verbose : bool, str, int, or None
+        See above.
+    ``pca_components_`` : ndarray
+        If fit, the PCA components.
+    ``pca_mean_`` : ndarray
+        If fit, the mean vector used to center the data before doing the PCA.
+    ``pca_explained_variance_`` : ndarray
+        If fit, the variance explained by each PCA component.
+    ``mixing_matrix_`` : ndarray
+        If fit, the mixing matrix to restore observed data, else None.
+    ``unmixing_matrix_`` : ndarray
+        If fit, the matrix to unmix observed data, else None.
+    exclude : list
+        List of source indices to exclude, i.e. artifact components
+        identified throughout the ICA solution. Indices added to this list
+        will be dispatched to the .pick_sources methods. Source indices
+        passed to the .pick_sources method via the 'exclude' argument are
+        added to the .exclude attribute. When saving the ICA, the indices
+        are saved as well, so artifact components, once identified, don't
+        have to be added again. To clear this 'artifact memory', say:
+        ica.exclude = []
+    info : None | instance of mne.io.meas_info.Info
+        The measurement info copied from the object fitted.
+    ``n_samples_`` : int
+        The number of samples used during fit.
+    ``labels_`` : dict
+        A dictionary of independent component indices, grouped by types of
+        independent components. This attribute is set by some of the artifact
+        detection functions.
+    """
+    @verbose
+    def __init__(self, n_components=None, max_pca_components=None,
+                 n_pca_components=None, noise_cov=None, random_state=None,
+                 method='fastica', fit_params=None, max_iter=200,
+                 verbose=None):
+        methods = ('fastica', 'infomax', 'extended-infomax')
+        if method not in methods:
+            raise ValueError('`method` must be "%s". You passed: "%s"' %
+                             ('" or "'.join(methods), method))
+        if not check_version('sklearn', '0.12'):
+            raise RuntimeError('the scikit-learn package (version >= 0.12) '
+                               'is required for ICA')
+
+        self.noise_cov = noise_cov
+
+        if max_pca_components is not None and \
+                n_components > max_pca_components:
+            raise ValueError('n_components must be smaller than '
+                             'max_pca_components')
+
+        if isinstance(n_components, float) \
+                and not 0 < n_components <= 1:
+            raise ValueError('Selecting ICA components by explained variance '
+                             'needs values between 0.0 and 1.0')
+
+        self.current_fit = 'unfitted'
+        self.verbose = verbose
+        self.n_components = n_components
+        self.max_pca_components = max_pca_components
+        self.n_pca_components = n_pca_components
+        self.ch_names = None
+        self.random_state = random_state
+
+        if fit_params is None:
+            fit_params = {}
+        if method == 'fastica':
+            update = {'algorithm': 'parallel', 'fun': 'logcosh',
+                      'fun_args': None}
+            fit_params.update(dict((k, v) for k, v in update.items() if k
+                              not in fit_params))
+        elif method == 'infomax':
+            fit_params.update({'extended': False})
+        elif method == 'extended-infomax':
+            fit_params.update({'extended': True})
+        if 'max_iter' not in fit_params:
+            fit_params['max_iter'] = max_iter
+        self.max_iter = max_iter
+        self.fit_params = fit_params
+
+        self.exclude = []
+        self.info = None
+        self.method = method
+
+    def __repr__(self):
+        """ICA fit information"""
+        if self.current_fit == 'unfitted':
+            s = 'no'
+        elif self.current_fit == 'raw':
+            s = 'raw data'
+        else:
+            s = 'epochs'
+        s += ' decomposition, '
+        s += 'fit (%s): %s samples, ' % (self.method,
+                                         str(getattr(self, 'n_samples_', '')))
+        s += ('%s components' % str(self.n_components_) if
+              hasattr(self, 'n_components_') else
+              'no dimension reduction')
+        if self.info is not None:
+            ch_fit = ['"%s"' % c for c in ['mag', 'grad', 'eeg'] if c in self]
+            s += ', channels used: {0}'.format('; '.join(ch_fit))
+        if self.exclude:
+            s += ', %i sources marked for exclusion' % len(self.exclude)
+
+        return '<ICA  |  %s>' % s
+
+    @verbose
+    def fit(self, inst, picks=None, start=None, stop=None, decim=None,
+            reject=None, flat=None, tstep=2.0, verbose=None):
+        """Run the ICA decomposition on raw data
+
+        Caveat! If supplying a noise covariance, keep track of the projections
+        available in the cov, the raw or the epochs object. For example,
+        if you are interested in EOG or ECG artifacts, EOG and ECG projections
+        should be temporarily removed before fitting the ICA.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Raw measurements to be decomposed.
+        picks : array-like of int
+            Channels to be included. This selection remains throughout the
+            initialized ICA solution. If None, only good data channels are
+            used.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        decim : int | None
+            Increment for selecting each nth time slice. If None, all samples
+            within ``start`` and ``stop`` are used.
+        reject : dict | None
+            Rejection parameters based on peak-to-peak amplitude.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+            If reject is None then no rejection is done. Example::
+
+                reject = dict(grad=4000e-13, # T / m (gradiometers)
+                              mag=4e-12, # T (magnetometers)
+                              eeg=40e-6, # uV (EEG channels)
+                              eog=250e-6 # uV (EOG channels)
+                              )
+
+            It only applies if `inst` is of type Raw.
+        flat : dict | None
+            Rejection parameters based on flatness of signal.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+            are floats that set the minimum acceptable peak-to-peak amplitude.
+            If flat is None then no rejection is done.
+            It only applies if `inst` is of type Raw.
+        tstep : float
+            Length of data chunks for artifact rejection in seconds.
+            It only applies if `inst` is of type Raw.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        self : instance of ICA
+            Returns the modified instance.
+        """
+        if isinstance(inst, _BaseRaw):
+            self._fit_raw(inst, picks, start, stop, decim, reject, flat,
+                          tstep, verbose)
+        elif isinstance(inst, _BaseEpochs):
+            self._fit_epochs(inst, picks, decim, verbose)
+        else:
+            raise ValueError('Data input must be of Raw or Epochs type')
+        return self
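+
+    # Usage sketch (illustrative, not part of the original class): a typical
+    # fit call with hypothetical rejection thresholds.
+    #
+    #     ica = ICA(n_components=0.95, random_state=0)
+    #     ica.fit(raw, decim=3, reject=dict(mag=5e-12, grad=4000e-13))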
+
+    def _reset(self):
+        """Aux method"""
+        del self._pre_whitener
+        del self.unmixing_matrix_
+        del self.mixing_matrix_
+        del self.n_components_
+        del self.n_samples_
+        del self.pca_components_
+        del self.pca_explained_variance_
+        del self.pca_mean_
+        if hasattr(self, 'drop_inds_'):
+            del self.drop_inds_
+
+    def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
+                 verbose):
+        """Aux method
+        """
+        if self.current_fit != 'unfitted':
+            self._reset()
+
+        if picks is None:  # just use good data channels
+            picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
+                               ecg=False, misc=False, stim=False,
+                               ref_meg=False, exclude='bads')
+        logger.info('Fitting ICA to data using %i channels. \n'
+                    'Please be patient, this may take some time' % len(picks))
+
+        if self.max_pca_components is None:
+            self.max_pca_components = len(picks)
+            logger.info('Inferring max_pca_components from picks.')
+
+        self.info = pick_info(raw.info, picks)
+        if self.info['comps']:
+            self.info['comps'] = []
+        self.ch_names = self.info['ch_names']
+        start, stop = _check_start_stop(raw, start, stop)
+
+        data = raw[picks, start:stop][0]
+        if decim is not None:
+            data = data[:, ::decim].copy()
+
+        if (reject is not None) or (flat is not None):
+            data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
+                                                          decim, self.info,
+                                                          tstep)
+
+        self.n_samples_ = data.shape[1]
+
+        data, self._pre_whitener = self._pre_whiten(data,
+                                                    raw.info, picks)
+
+        self._fit(data, self.max_pca_components, 'raw')
+
+        return self
+
+    def _fit_epochs(self, epochs, picks, decim, verbose):
+        """Aux method
+        """
+        if self.current_fit != 'unfitted':
+            self._reset()
+
+        if picks is None:
+            picks = pick_types(epochs.info, meg=True, eeg=True, eog=False,
+                               ecg=False, misc=False, stim=False,
+                               ref_meg=False, exclude='bads')
+        logger.info('Fitting ICA to data using %i channels. \n'
+                    'Please be patient, this may take some time' % len(picks))
+
+        # keep info consistent with the channels actually used for fitting
+        self.info = pick_info(epochs.info, picks)
+        if self.info['comps']:
+            self.info['comps'] = []
+        self.ch_names = self.info['ch_names']
+
+        if self.max_pca_components is None:
+            self.max_pca_components = len(picks)
+            logger.info('Inferring max_pca_components from picks.')
+
+        data = epochs.get_data()[:, picks]
+        if decim is not None:
+            data = data[:, :, ::decim].copy()
+
+        self.n_samples_ = np.prod(data[:, 0, :].shape)
+
+        data, self._pre_whitener = \
+            self._pre_whiten(np.hstack(data), epochs.info, picks)
+
+        self._fit(data, self.max_pca_components, 'epochs')
+
+        return self
+
+    def _pre_whiten(self, data, info, picks):
+        """Aux function"""
+        fast_dot = _get_fast_dot()
+        has_pre_whitener = hasattr(self, '_pre_whitener')
+        if not has_pre_whitener and self.noise_cov is None:
+            # use standardization as whitener
+            # Scale (z-score) the data by channel type
+            info = pick_info(info, picks)
+            pre_whitener = np.empty([len(data), 1])
+            for ch_type in ['mag', 'grad', 'eeg']:
+                if _contains_ch_type(info, ch_type):
+                    if ch_type == 'eeg':
+                        this_picks = pick_types(info, meg=False, eeg=True)
+                    else:
+                        this_picks = pick_types(info, meg=ch_type, eeg=False)
+                    pre_whitener[this_picks] = np.std(data[this_picks])
+            data /= pre_whitener
+        elif not has_pre_whitener and self.noise_cov is not None:
+            pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)
+            assert data.shape[0] == pre_whitener.shape[1]
+            data = fast_dot(pre_whitener, data)
+        elif has_pre_whitener and self.noise_cov is None:
+            data /= self._pre_whitener
+            pre_whitener = self._pre_whitener
+        else:
+            data = fast_dot(self._pre_whitener, data)
+            pre_whitener = self._pre_whitener
+
+        return data, pre_whitener
+
+    def _fit(self, data, max_pca_components, fit_type):
+        """Aux function """
+        from sklearn.decomposition import RandomizedPCA
+
+        random_state = check_random_state(self.random_state)
+
+        # XXX fix copy==True later. Bug in sklearn, see PR #2273
+        pca = RandomizedPCA(n_components=max_pca_components, whiten=True,
+                            copy=True, random_state=random_state)
+
+        if isinstance(self.n_components, float):
+            # compute full feature variance before doing PCA
+            full_var = np.var(data, axis=1).sum()
+
+        data = pca.fit_transform(data.T)
+
+        if isinstance(self.n_components, float):
+            # compute explained variance manually, cf. sklearn bug
+            # fixed in #2664
+            explained_variance_ratio_ = pca.explained_variance_ / full_var
+            n_components_ = np.sum(explained_variance_ratio_.cumsum() <=
+                                   self.n_components)
+            if n_components_ < 1:
+                raise RuntimeError('One PCA component captures most of the '
+                                   'explained variance; your threshold '
+                                   'results in 0 components. You should '
+                                   'select a higher value.')
+            logger.info('Selection by explained variance: %i components' %
+                        n_components_)
+            sel = slice(n_components_)
+        else:
+            if self.n_components is not None:  # normal n case
+                sel = slice(self.n_components)
+                logger.info('Selection by number: %i components' %
+                            self.n_components)
+            else:  # None case
+                logger.info('Using all PCA components: %i'
+                            % len(pca.components_))
+                sel = slice(len(pca.components_))
+
+        # the things to store for PCA
+        self.pca_mean_ = pca.mean_
+        self.pca_components_ = pca.components_
+        # unwhiten pca components and put scaling in unmixing matrix later.
+        self.pca_explained_variance_ = exp_var = pca.explained_variance_
+        self.pca_components_ *= np.sqrt(exp_var[:, None])
+        del pca
+        # update number of components
+        self.n_components_ = sel.stop
+        if self.n_pca_components is not None:
+            if self.n_pca_components > len(self.pca_components_):
+                self.n_pca_components = len(self.pca_components_)
+
+        # Take care of ICA
+        if self.method == 'fastica':
+            from sklearn.decomposition import FastICA  # to avoid strong dep.
+            ica = FastICA(whiten=False,
+                          random_state=random_state, **self.fit_params)
+            ica.fit(data[:, sel])
+            # get unmixing and add scaling
+            self.unmixing_matrix_ = getattr(ica, 'components_', None)
+            if self.unmixing_matrix_ is None:  # old sklearn attribute name
+                self.unmixing_matrix_ = ica.unmixing_matrix_
+        elif self.method in ('infomax', 'extended-infomax'):
+            self.unmixing_matrix_ = infomax(data[:, sel],
+                                            random_state=random_state,
+                                            **self.fit_params)
+        self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :]
+        self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
+        self.current_fit = fit_type
+
+    def _transform(self, data):
+        """Compute sources from data (operates inplace)"""
+        fast_dot = _get_fast_dot()
+        if self.pca_mean_ is not None:
+            data -= self.pca_mean_[:, None]
+
+        # Apply first PCA
+        pca_data = fast_dot(self.pca_components_[:self.n_components_], data)
+        # Apply unmixing to low dimension PCA
+        sources = fast_dot(self.unmixing_matrix_, pca_data)
+        return sources
+
+    def _transform_raw(self, raw, start, stop):
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please fit ICA.')
+        start, stop = _check_start_stop(raw, start, stop)
+
+        picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Raw compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        data, _ = self._pre_whiten(raw[picks, start:stop][0], raw.info, picks)
+        return self._transform(data)
+
+    def _transform_epochs(self, epochs, concatenate):
+        """Aux method
+        """
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please fit ICA')
+
+        picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        data = np.hstack(epochs.get_data()[:, picks])
+        data, _ = self._pre_whiten(data, epochs.info, picks)
+        sources = self._transform(data)
+
+        if not concatenate:
+            # Put the data back in 3D
+            sources = np.array(np.split(sources, len(epochs.events), 1))
+
+        return sources
+
+    def _transform_evoked(self, evoked):
+        """Aux method
+        """
+        if not hasattr(self, 'mixing_matrix_'):
+            raise RuntimeError('No fit available. Please first fit ICA')
+
+        picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
+
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide Evoked compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        data, _ = self._pre_whiten(evoked.data[picks], evoked.info, picks)
+        sources = self._transform(data)
+
+        return sources
+
+    def get_sources(self, inst, add_channels=None, start=None, stop=None):
+        """Estimate sources given the unmixing matrix
+
+        This method will return the sources in the container format passed.
+        Typical use cases:
+
+        1. pass Raw object to use `raw.plot` for ICA sources
+        2. pass Epochs object to compute trial-based statistics in ICA space
+        3. pass Evoked object to investigate time-locking in ICA space
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from and to represent sources in.
+        add_channels : None | list of str
+            Additional channels to be added. Useful to e.g. compare sources
+            with some reference. Defaults to None.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, the entire data will be used.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, the entire data will be used.
+
+        Returns
+        -------
+        sources : instance of Raw, Epochs or Evoked
+            The ICA sources time series.
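+
+        Example (a sketch; assumes ``ica`` has been fitted on a preloaded
+        ``raw`` instance)::
+
+            sources = ica.get_sources(raw)
+            sources.plot()  # browse the source time courses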
+        """
+        if isinstance(inst, _BaseRaw):
+            sources = self._sources_as_raw(inst, add_channels, start, stop)
+        elif isinstance(inst, _BaseEpochs):
+            sources = self._sources_as_epochs(inst, add_channels, False)
+        elif isinstance(inst, Evoked):
+            sources = self._sources_as_evoked(inst, add_channels)
+        else:
+            raise ValueError('Data input must be of Raw, Epochs or Evoked '
+                             'type')
+
+        return sources
+
+    def _sources_as_raw(self, raw, add_channels, start, stop):
+        """Aux method
+        """
+        # merge copied instance and picked data with sources
+        sources = self._transform_raw(raw, start=start, stop=stop)
+        if raw.preload:  # get data and temporarily delete
+            data = raw._data
+            del raw._data
+
+        out = raw.copy()  # copy and reappend
+        if raw.preload:
+            raw._data = data
+
+        # populate copied raw.
+        start, stop = _check_start_stop(raw, start, stop)
+        if add_channels is not None:
+            raw_picked = raw.pick_channels(add_channels, copy=True)
+            data_, times_ = raw_picked[:, start:stop]
+            data_ = np.r_[sources, data_]
+        else:
+            data_ = sources
+            _, times_ = raw[0, start:stop]
+        out._data = data_
+        out._times = times_
+        out._filenames = list()
+        out.preload = True
+
+        # update first and last samples
+        out._first_samps = np.array([raw.first_samp +
+                                     (start if start else 0)])
+        out._last_samps = np.array([out.first_samp + stop
+                                    if stop else raw.last_samp])
+
+        out._projector = None
+        self._export_info(out.info, raw, add_channels)
+        out._update_times()
+
+        return out
+
+    def _sources_as_epochs(self, epochs, add_channels, concatenate):
+        """Aux method"""
+        out = epochs.copy()
+        sources = self._transform_epochs(epochs, concatenate)
+        if add_channels is not None:
+            picks = [epochs.ch_names.index(k) for k in add_channels]
+        else:
+            picks = []
+        out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
+                                   axis=1) if len(picks) > 0 else sources
+
+        self._export_info(out.info, epochs, add_channels)
+        out.preload = True
+        out._raw = None
+        out._projector = None
+
+        return out
+
+    def _sources_as_evoked(self, evoked, add_channels):
+        """Aux method
+        """
+        if add_channels is not None:
+            picks = [evoked.ch_names.index(k) for k in add_channels]
+        else:
+            picks = []
+
+        sources = self._transform_evoked(evoked)
+        if len(picks) > 0:
+            data = np.r_[sources, evoked.data[picks]]
+        else:
+            data = sources
+        out = evoked.copy()
+        out.data = data
+        self._export_info(out.info, evoked, add_channels)
+
+        return out
+
+    def _export_info(self, info, container, add_channels):
+        """Aux method
+        """
+        # set channel names and info
+        ch_names = info['ch_names'] = []
+        ch_info = info['chs'] = []
+        for ii in range(self.n_components_):
+            this_source = 'ICA %03d' % (ii + 1)
+            ch_names.append(this_source)
+            ch_info.append(dict(ch_name=this_source, cal=1,
+                                logno=ii + 1, coil_type=FIFF.FIFFV_COIL_NONE,
+                                kind=FIFF.FIFFV_MISC_CH,
+                                coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
+                                loc=np.array([0., 0., 0., 1.] * 3, dtype='f4'),
+                                unit=FIFF.FIFF_UNIT_NONE,
+                                range=1.0, scanno=ii + 1, unit_mul=0))
+
+        if add_channels is not None:
+            # re-append additionally picked ch_names
+            ch_names += add_channels
+            # re-append additionally picked ch_info
+            ch_info += [k for k in container.info['chs'] if k['ch_name'] in
+                        add_channels]
+        # update number of channels
+        info['nchan'] = self.n_components_
+        if add_channels is not None:
+            info['nchan'] += len(add_channels)
+        info['bads'] = [ch_names[k] for k in self.exclude]
+        info['projs'] = []  # make sure projections are removed.
+
+    @verbose
+    def score_sources(self, inst, target=None, score_func='pearsonr',
+                      start=None, stop=None, l_freq=None, h_freq=None,
+                      verbose=None):
+        """Assign score to components based on statistic or metric
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            The object to reconstruct the sources from.
+        target : array-like | ch_name | None
+            Signal to which the sources shall be compared. It has to be of
+            the same shape as the sources. If some string is supplied, a
+            routine will try to find a matching channel. If None, a score
+            function expecting only one input-array argument must be used,
+            for instance, scipy.stats.skew (default).
+        score_func : callable | str
+            Callable taking as arguments either two input arrays
+            (e.g. Pearson correlation) or one input array (e.g. skewness)
+            and returning a float. For convenience, the most common
+            score_funcs are available via string labels: currently, all
+            distance metrics from scipy.spatial and all functions from
+            scipy.stats taking compatible input arguments are supported.
+            These functions have been modified to support iteration over
+            the rows of a 2D array.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        scores : ndarray
+            Scores for each source as returned from score_func.
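+
+        Example (a sketch; assumes ``ica`` was fitted on ``raw`` and that
+        ``raw`` contains a channel named 'EOG 061')::
+
+            # correlate each source with the EOG channel
+            scores = ica.score_sources(raw, target='EOG 061',
+                                       score_func='pearsonr')
+            # np is numpy; rank sources by absolute correlation
+            order = np.abs(scores).argsort()[::-1]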
+        """
+        if isinstance(inst, _BaseRaw):
+            sources = self._transform_raw(inst, start, stop)
+        elif isinstance(inst, _BaseEpochs):
+            sources = self._transform_epochs(inst, concatenate=True)
+        elif isinstance(inst, Evoked):
+            sources = self._transform_evoked(inst)
+        else:
+            raise ValueError('Input must be of Raw, Epochs or Evoked type')
+
+        if target is not None:  # we can have univariate metrics without target
+            target = self._check_target(target, inst, start, stop)
+
+            if sources.shape[-1] != target.shape[-1]:
+                raise ValueError('Sources and target do not have the same '
+                                 'number of time slices.')
+            # auto target selection
+            if verbose is None:
+                verbose = self.verbose
+            if isinstance(inst, (_BaseRaw, _BaseEpochs)):
+                sources, target = _band_pass_filter(self, sources, target,
+                                                    l_freq, h_freq, verbose)
+
+        scores = _find_sources(sources, target, score_func)
+
+        return scores
+
+    def _check_target(self, target, inst, start, stop):
+        """Aux Method"""
+        if isinstance(inst, _BaseRaw):
+            start, stop = _check_start_stop(inst, start, stop)
+            if hasattr(target, 'ndim'):
+                if target.ndim < 2:
+                    target = target.reshape(1, target.shape[-1])
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target, _ = inst[pick, start:stop]
+
+        elif isinstance(inst, _BaseEpochs):
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target = inst.get_data()[:, pick]
+
+            if hasattr(target, 'ndim'):
+                if target.ndim == 3 and min(target.shape) == 1:
+                    target = target.ravel()
+
+        elif isinstance(inst, Evoked):
+            if isinstance(target, string_types):
+                pick = _get_target_ch(inst, target)
+                target = inst.data[pick]
+
+        return target
+
+    @verbose
+    def find_bads_ecg(self, inst, ch_name=None, threshold=None,
+                      start=None, stop=None, l_freq=8, h_freq=16,
+                      method='ctps', verbose=None):
+        """Detect ECG related components using correlation
+
+        Note. If no ECG channel is available, routine attempts to create
+        an artificial ECG based on cross-channel averaging.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from.
+        ch_name : str
+            The name of the channel to use for ECG peak detection.
+            The argument is mandatory if the dataset contains no ECG
+            channels.
+        threshold : float
+            The value above which a feature is classified as outlier. If
+            method is 'ctps', defaults to 0.25, else defaults to 3.0.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        method : {'ctps', 'correlation'}
+            The method used for detection. If 'ctps', cross-trial phase
+            statistics [1] are used to detect ECG related components.
+            Thresholding is then based on the significance value of a Kuiper
+            statistic.
+            If 'correlation', detection is based on Pearson correlation
+            between the filtered data and the filtered ECG channel.
+            Thresholding is based on iterative z-scoring. The above
+            threshold components will be masked and the z-score will
+            be recomputed until no supra-threshold component remains.
+            Defaults to 'ctps'.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        ecg_idx : list of int
+            The indices of ECG related components.
+        scores : np.ndarray of float, shape (``n_components_``)
+            The correlation scores.
+
+        See also
+        --------
+        find_bads_eog
+
+        References
+        ----------
+        [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
+            M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
+            and phase statistics for complete artifact removal in independent
+            components of neuromagnetic recordings. Biomedical
+            Engineering, IEEE Transactions on 55 (10), 2353-2362.
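+
+        Example (a sketch; assumes ``ica`` was fitted on a preloaded
+        ``raw`` that contains an ECG channel)::
+
+            ecg_inds, scores = ica.find_bads_ecg(raw, method='ctps')
+            ica.exclude += ecg_inds  # mark components for removal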
+        """
+        if verbose is None:
+            verbose = self.verbose
+
+        idx_ecg = _get_ecg_channel_index(ch_name, inst)
+
+        if idx_ecg is None:
+            ecg, times = _make_ecg(inst, start, stop, verbose)
+            ch_name = 'ECG'
+        else:
+            ecg = inst.ch_names[idx_ecg]
+
+        # some magic we need inevitably ...
+        if inst.ch_names != self.ch_names:
+            extra_picks = pick_types(inst.info, meg=False, ecg=True)
+            ch_names_to_pick = (self.ch_names +
+                                [inst.ch_names[k] for k in extra_picks])
+            inst = inst.pick_channels(ch_names_to_pick, copy=True)
+
+        if method == 'ctps':
+            if threshold is None:
+                threshold = 0.25
+            if isinstance(inst, _BaseRaw):
+                sources = self.get_sources(create_ecg_epochs(inst)).get_data()
+            elif isinstance(inst, _BaseEpochs):
+                sources = self.get_sources(inst).get_data()
+            else:
+                raise ValueError('With `ctps` only Raw and Epochs input is '
+                                 'supported')
+            _, p_vals, _ = ctps(sources)
+            scores = p_vals.max(-1)
+            ecg_idx = np.where(scores >= threshold)[0]
+        elif method == 'correlation':
+            if threshold is None:
+                threshold = 3.0
+            scores = self.score_sources(inst, target=ecg,
+                                        score_func='pearsonr',
+                                        start=start, stop=stop,
+                                        l_freq=l_freq, h_freq=h_freq,
+                                        verbose=verbose)
+            ecg_idx = find_outliers(scores, threshold=threshold)
+        else:
+            raise ValueError('Method "%s" not supported.' % method)
+        # sort indices by scores
+        ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
+        if not hasattr(self, 'labels_'):
+            self.labels_ = dict()
+        self.labels_['ecg'] = list(ecg_idx)
+        return self.labels_['ecg'], scores
+
+    @verbose
+    def find_bads_eog(self, inst, ch_name=None, threshold=3.0,
+                      start=None, stop=None, l_freq=1, h_freq=10,
+                      verbose=None):
+        """Detect EOG related components using correlation
+
+        Detection is based on Pearson correlation between the
+        filtered data and the filtered EOG channel.
+        Thresholding is based on adaptive z-scoring. The above threshold
+        components will be masked and the z-score will be recomputed
+        until no supra-threshold component remains.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            Object to compute sources from.
+        ch_name : str
+            The name of the channel to use for EOG peak detection.
+            The argument is mandatory if the dataset contains no EOG
+            channels.
+        threshold : int | float
+            The value above which a feature is classified as outlier.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        l_freq : float
+            Low pass frequency.
+        h_freq : float
+            High pass frequency.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Returns
+        -------
+        eog_idx : list of int
+            The indices of EOG related components, sorted by score.
+        scores : np.ndarray of float, shape (``n_components_``) | list of array
+            The correlation scores.
+
+        See Also
+        --------
+        find_bads_ecg
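+
+        Example (a sketch; assumes ``ica`` was fitted on a preloaded
+        ``raw`` that contains an EOG channel)::
+
+            eog_inds, scores = ica.find_bads_eog(raw)
+            ica.exclude += eog_inds  # mark components for removal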
+        """
+        if verbose is None:
+            verbose = self.verbose
+
+        eog_inds = _get_eog_channel_index(ch_name, inst)
+        if len(eog_inds) > 2:
+            eog_inds = eog_inds[:1]
+            logger.info('Using EOG channel %s' % inst.ch_names[eog_inds[0]])
+        scores, eog_idx = [], []
+        eog_chs = [inst.ch_names[k] for k in eog_inds]
+
+        # some magic we need inevitably ...
+        # get targets before equalizing
+        targets = [self._check_target(k, inst, start, stop) for k in eog_chs]
+
+        if inst.ch_names != self.ch_names:
+            inst = inst.pick_channels(self.ch_names, copy=True)
+
+        for eog_ch, target in zip(eog_chs, targets):
+            scores += [self.score_sources(inst, target=target,
+                                          score_func='pearsonr',
+                                          start=start, stop=stop,
+                                          l_freq=l_freq, h_freq=h_freq,
+                                          verbose=verbose)]
+            eog_idx += [find_outliers(scores[-1], threshold=threshold)]
+
+        # remove duplicates but keep order by score, even across multiple
+        # EOG channels
+        scores_ = np.concatenate([scores[ii][inds]
+                                  for ii, inds in enumerate(eog_idx)])
+        eog_idx_ = np.concatenate(eog_idx)[np.abs(scores_).argsort()[::-1]]
+
+        eog_idx_unique = list(np.unique(eog_idx_))
+        eog_idx = []
+        for i in eog_idx_:
+            if i in eog_idx_unique:
+                eog_idx.append(i)
+                eog_idx_unique.remove(i)
+        if len(scores) == 1:
+            scores = scores[0]
+
+        if not hasattr(self, 'labels_'):
+            self.labels_ = dict()
+        self.labels_['eog'] = list(eog_idx)
+        return self.labels_['eog'], scores
+
+    def apply(self, inst, include=None, exclude=None,
+              n_pca_components=None, start=None, stop=None,
+              copy=False):
+        """Remove selected components from the signal.
+
+        Given the unmixing matrix, transform data,
+        zero out components, and inverse transform the data.
+        This procedure will reconstruct M/EEG signals from which
+        the dynamics described by the excluded components is subtracted.
+
+        Parameters
+        ----------
+        inst : instance of Raw, Epochs or Evoked
+            The data to be processed.
+        include : array_like of int
+            The indices referring to columns in the unmixing matrix. The
+            components to be kept.
+        exclude : array_like of int
+            The indices referring to columns in the unmixing matrix. The
+            components to be zeroed out.
+        n_pca_components : int | float | None
+            The number of PCA components to be kept, either absolute (int)
+            or percentage of the explained variance (float). If None (default),
+            all PCA components will be used.
+        start : int | float | None
+            First sample to include. If float, data will be interpreted as
+            time in seconds. If None, data will be used from the first sample.
+        stop : int | float | None
+            Last sample to not include. If float, data will be interpreted as
+            time in seconds. If None, data will be used to the last sample.
+        copy : bool
+            Whether to return a copy or whether to apply the solution in place.
+            Defaults to False.
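+
+        Example (a sketch; assumes ``ica.exclude`` already lists the
+        artifact components and that ``raw`` is preloaded)::
+
+            raw_clean = ica.apply(raw, copy=True)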
+        """
+        if isinstance(inst, _BaseRaw):
+            out = self._apply_raw(raw=inst, include=include,
+                                  exclude=exclude,
+                                  n_pca_components=n_pca_components,
+                                  start=start, stop=stop, copy=copy)
+        elif isinstance(inst, _BaseEpochs):
+            out = self._apply_epochs(epochs=inst, include=include,
+                                     exclude=exclude,
+                                     n_pca_components=n_pca_components,
+                                     copy=copy)
+        elif isinstance(inst, Evoked):
+            out = self._apply_evoked(evoked=inst, include=include,
+                                     exclude=exclude,
+                                     n_pca_components=n_pca_components,
+                                     copy=copy)
+        else:
+            raise ValueError('Data input must be of Raw, Epochs or Evoked '
+                             'type')
+        return out
+
+    def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop,
+                   copy=True):
+        """Aux method"""
+        if not raw.preload:
+            raise ValueError('Raw data must be preloaded to apply ICA')
+
+        if exclude is None:
+            exclude = list(set(self.exclude))
+        else:
+            exclude = list(set(self.exclude + exclude))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        start, stop = _check_start_stop(raw, start, stop)
+
+        picks = pick_types(raw.info, meg=False, include=self.ch_names,
+                           exclude='bads', ref_meg=False)
+
+        data = raw[picks, start:stop][0]
+        data, _ = self._pre_whiten(data, raw.info, picks)
+
+        data = self._pick_sources(data, include, exclude)
+
+        if copy is True:
+            raw = raw.copy()
+
+        raw[picks, start:stop] = data
+        return raw
+
+    def _apply_epochs(self, epochs, include, exclude,
+                      n_pca_components, copy):
+
+        if not epochs.preload:
+            raise ValueError('Epochs must be preloaded to apply ICA')
+
+        picks = pick_types(epochs.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'ica.ch_names' % (len(self.ch_names),
+                                                 len(picks)))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        data = np.hstack(epochs.get_data()[:, picks])
+        data, _ = self._pre_whiten(data, epochs.info, picks)
+        data = self._pick_sources(data, include=include, exclude=exclude)
+
+        if copy is True:
+            epochs = epochs.copy()
+
+        # restore epochs, channels, tsl order
+        epochs._data[:, picks] = np.array(np.split(data,
+                                          len(epochs.events), 1))
+        epochs.preload = True
+
+        return epochs
+
+    def _apply_evoked(self, evoked, include, exclude,
+                      n_pca_components, copy):
+
+        picks = pick_types(evoked.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where evoked come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked does not match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide an Evoked object that\'s compatible '
+                               'with ica.ch_names' % (len(self.ch_names),
+                                                      len(picks)))
+
+        if n_pca_components is not None:
+            self.n_pca_components = n_pca_components
+
+        data = evoked.data[picks]
+        data, _ = self._pre_whiten(data, evoked.info, picks)
+        data = self._pick_sources(data, include=include,
+                                  exclude=exclude)
+
+        if copy is True:
+            evoked = evoked.copy()
+
+        # restore evoked
+        evoked.data[picks] = data
+
+        return evoked
+
+    def _pick_sources(self, data, include, exclude):
+        """Aux function"""
+        fast_dot = _get_fast_dot()
+        if exclude is None:
+            exclude = self.exclude
+        else:
+            exclude = list(set(self.exclude + list(exclude)))
+
+        _n_pca_comp = self._check_n_pca_components(self.n_pca_components)
+
+        if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
+            raise ValueError('n_pca_components must be >= '
+                             'n_components and <= max_pca_components.')
+
+        n_components = self.n_components_
+        logger.info('Transforming to ICA space (%i components)' % n_components)
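+        # Pipeline: demean -> project to PCA space -> unmix to ICA space,
+        # zero out the selected sources, then mix back and inverse-project,
+        # finally restoring the mean and the pre-whitening scale.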
+
+        # Apply first PCA
+        if self.pca_mean_ is not None:
+            data -= self.pca_mean_[:, None]
+
+        pca_data = fast_dot(self.pca_components_, data)
+        # Apply unmixing to low dimension PCA
+        sources = fast_dot(self.unmixing_matrix_, pca_data[:n_components])
+
+        if include not in (None, []):
+            mask = np.ones(len(sources), dtype=np.bool)
+            mask[np.unique(include)] = False
+            sources[mask] = 0.
+            logger.info('Zeroing out %i ICA components' % mask.sum())
+        elif exclude not in (None, []):
+            exclude_ = np.unique(exclude)
+            sources[exclude_] = 0.
+            logger.info('Zeroing out %i ICA components' % len(exclude_))
+        logger.info('Inverse transforming to PCA space')
+        pca_data[:n_components] = fast_dot(self.mixing_matrix_, sources)
+        data = fast_dot(self.pca_components_[:n_components].T,
+                        pca_data[:n_components])
+        logger.info('Reconstructing sensor space signals from %i PCA '
+                    'components' % max(_n_pca_comp, n_components))
+        if _n_pca_comp > n_components:
+            data += fast_dot(self.pca_components_[n_components:_n_pca_comp].T,
+                             pca_data[n_components:_n_pca_comp])
+
+        if self.pca_mean_ is not None:
+            data += self.pca_mean_[:, None]
+
+        # restore scaling
+        if self.noise_cov is None:  # revert standardization
+            data *= self._pre_whitener
+        else:
+            data = fast_dot(linalg.pinv(self._pre_whitener), data)
+
+        return data
+
+    @verbose
+    def save(self, fname):
+        """Store ICA solution into a fiff file.
+
+        Parameters
+        ----------
+        fname : str
+            The absolute path of the file name to save the ICA solution into.
+            The file name should end with -ica.fif or -ica.fif.gz.
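+
+        Example (a sketch; the file name is only illustrative)::
+
+            ica.save('/tmp/sample-ica.fif')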
+        """
+        if self.current_fit == 'unfitted':
+            raise RuntimeError('No fit available. Please first fit ICA')
+
+        check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
+
+        logger.info('Writing ica solution to %s...' % fname)
+        fid = start_file(fname)
+
+        try:
+            _write_ica(fid, self)
+        except Exception:
+            os.remove(fname)
+            raise
+        end_file(fid)
+
+        return self
+
+    def copy(self):
+        """Copy the ICA object
+
+        Returns
+        -------
+        ica : instance of ICA
+            The copied object.
+        """
+        return deepcopy(self)
+
+    def plot_components(self, picks=None, ch_type=None, res=64, layout=None,
+                        vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                        colorbar=False, title=None, show=True, outlines='head',
+                        contours=6, image_interp='bilinear', head_pos=None):
+        """Project unmixing matrix on interpolated sensor topography.
+
+        Parameters
+        ----------
+        picks : int | array-like | None
+            The indices of the sources to be plotted.
+            If None all are plotted in batches of 20.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout is
+            inferred from the data.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        title : str | None
+            Title to use.
+        show : bool
+            Call pyplot.show() at the end.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw. If 0, no contours will
+            be drawn.
+        image_interp : str
+            The image interpolation to be used. All matplotlib options are
+            accepted.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.pyplot.Figure
+            The figure object.
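+
+        Example (a sketch; assumes a fitted ``ica`` on MEG data with at
+        least ten components)::
+
+            ica.plot_components(picks=range(10), ch_type='mag')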
+        """
+        return plot_ica_components(self, picks=picks,
+                                   ch_type=ch_type,
+                                   res=res, layout=layout, vmin=vmin,
+                                   vmax=vmax, cmap=cmap,
+                                   sensors=sensors, colorbar=colorbar,
+                                   title=title, show=show,
+                                   outlines=outlines, contours=contours,
+                                   image_interp=image_interp,
+                                   head_pos=head_pos)
+
+    def plot_sources(self, inst, picks=None, exclude=None, start=None,
+                     stop=None, title=None, show=True, block=False):
+        """Plot estimated latent sources given the unmixing matrix.
+
+        Typical usecases:
+
+        1. plot evolution of latent sources over time (Raw input)
+        2. plot latent source around event related time windows (Epochs input)
+        3. plot time-locking in ICA space (Evoked input)
+
+
+        Parameters
+        ----------
+        inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
+            The object to plot the sources from.
+        picks : ndarray | None.
+            The components to be displayed. If None, plot will show the
+            sources in the order as fitted.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
+        start : int
+            X-axis start index. If None from the beginning.
+        stop : int
+            X-axis stop index. If None to the end.
+        title : str | None
+            The figure title. If None a default is provided.
+        show : bool
+            If True, all open plots will be shown.
+        block : bool
+            Whether to halt program execution until the figure is closed.
+            Useful for interactive selection of components in raw and epoch
+            plotter. For evoked, this parameter has no effect. Defaults to
+            False.
+
+        Returns
+        -------
+        fig : instance of pyplot.Figure
+            The figure.
+
+        Notes
+        -----
+        For raw and epoch instances, it is possible to select components for
+        exclusion by clicking on the line. The selected components are added to
+        ``ica.exclude`` on close. The independent components can be viewed as
+        topographies by clicking on the component name on the left of the
+        main axes. The topography view tries to infer the correct electrode
+        layout from the data. This should work at least for Neuromag data.
+
+        .. versionadded:: 0.10.0
+        """
+
+        return plot_ica_sources(self, inst=inst, picks=picks, exclude=exclude,
+                                title=title, start=start, stop=stop, show=show,
+                                block=block)
+
+    def plot_scores(self, scores, exclude=None, axhline=None,
+                    title='ICA component scores', figsize=(12, 6),
+                    show=True):
+        """Plot scores related to detected components.
+
+        Use this function to assess how well your score describes outlier
+        sources and how well you were detecting them.
+
+        Parameters
+        ----------
+        scores : array_like of float, shape (n ica components,) | list of array
+            Scores based on arbitrary metric to characterize ICA components.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
+        axhline : float
+            Draw horizontal line to e.g. visualize rejection threshold.
+        title : str
+            The figure title.
+        figsize : tuple of int
+            The figure size. Defaults to (12, 6).
+        show : bool
+            If True, all open plots will be shown.
+
+        Returns
+        -------
+        fig : instance of matplotlib.pyplot.Figure
+            The figure object.
+        """
+        return plot_ica_scores(ica=self, scores=scores, exclude=exclude,
+                               axhline=axhline, title=title,
+                               figsize=figsize, show=show)
+
+    def plot_overlay(self, inst, exclude=None, picks=None, start=None,
+                     stop=None, title=None, show=True):
+        """Overlay of raw and cleaned signals given the unmixing matrix.
+
+        This method helps visualizing signal quality and artifact rejection.
+
+        Parameters
+        ----------
+        inst : instance of mne.io.Raw or mne.Evoked
+            The signals to be compared given the ICA solution. If Raw input,
+            the raw data are displayed before and after cleaning. In a second
+            panel the cross channel average will be displayed. Since dipolar
+            sources will be canceled out, this display is sensitive to
+            artifacts. If evoked input, butterfly plots for clean and raw
+            signals will be superimposed.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
+        picks : array-like of int | None (default)
+            Indices of channels to include (if None, all channels
+            are used that were included on fitting).
+        start : int
+            X-axis start index. If None from the beginning.
+        stop : int
+            X-axis stop index. If None to the end.
+        title : str
+            The figure title.
+        show : bool
+            If True, all open plots will be shown.
+
+        Returns
+        -------
+        fig : instance of pyplot.Figure
+            The figure.
+        """
+        return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
+                                start=start, stop=stop, title=title, show=show)
+
+    def detect_artifacts(self, raw, start_find=None, stop_find=None,
+                         ecg_ch=None, ecg_score_func='pearsonr',
+                         ecg_criterion=0.1, eog_ch=None,
+                         eog_score_func='pearsonr',
+                         eog_criterion=0.1, skew_criterion=-1,
+                         kurt_criterion=-1, var_criterion=0,
+                         add_nodes=None):
+        """Run ICA artifacts detection workflow.
+
+        Note. This is still experimental and will most likely change over
+        the next releases. For maximum control use the workflow exposed in
+        the examples.
+
+        Hints and caveats:
+
+        - It is highly recommended to bandpass filter ECG and EOG
+          data and pass them instead of the channel names as ecg_ch and
+          eog_ch arguments.
+        - Please check your results. Detection by kurtosis and variance
+          may be powerful but misclassification of brain signals as
+          noise cannot be precluded.
+        - Consider using shorter times for start_find and stop_find than
+          for start and stop. It can save you much time.
+
+        Example invocation (taking advantage of the defaults)::
+
+            ica.detect_artifacts(ecg_ch='MEG 1531', eog_ch='EOG 061')
+
+        Parameters
+        ----------
+        raw : instance of Raw
+            Raw object to draw sources from.
+        start_find : int | float | None
+            First sample to include for artifact search. If float, data will be
+            interpreted as time in seconds. If None, data will be used from the
+            first sample.
+        stop_find : int | float | None
+            Last sample to not include for artifact search. If float, data will
+            be interpreted as time in seconds. If None, data will be used to
+            the last sample.
+        ecg_ch : str | ndarray | None
+            The `target` argument passed to ica.score_sources. Either the
+            name of the ECG channel or the ECG time series. If None, this step
+            will be skipped.
+        ecg_score_func : str | callable
+            The `score_func` argument passed to ica.score_sources. Either
+            the name of a function supported by ICA or a custom function.
+        ecg_criterion : float | int | list-like | slice
+            The indices of the sorted ecg scores. If float, sources with
+            scores smaller than the criterion will be dropped. Else, the scores
+            sorted in descending order will be indexed accordingly.
+            E.g. range(2) would return the two sources with the highest score.
+            If None, this step will be skipped.
+        eog_ch : list | str | ndarray | None
+            The `target` argument or the list of target arguments subsequently
+            passed to ica.score_sources. Either the name of the vertical EOG
+            channel or the corresponding EOG time series. If None, this step
+            will be skipped.
+        eog_score_func : str | callable
+            The `score_func` argument passed to ica.score_sources. Either
+            the name of a function supported by ICA or a custom function.
+        eog_criterion : float | int | list-like | slice
+            The indices of the sorted eog scores. If float, sources with
+            scores smaller than the criterion will be dropped. Else, the scores
+            sorted in descending order will be indexed accordingly.
+            E.g. range(2) would return the two sources with the highest score.
+            If None, this step will be skipped.
+        skew_criterion : float | int | list-like | slice
+            The indices of the sorted skewness scores. If float, sources with
+            scores smaller than the criterion will be dropped. Else, the scores
+            sorted in descending order will be indexed accordingly.
+            E.g. range(2) would return the two sources with the highest score.
+            If None, this step will be skipped.
+        kurt_criterion : float | int | list-like | slice
+            The indices of the sorted kurtosis scores. If float, sources with
+            scores smaller than the criterion will be dropped. Else, the scores
+            sorted in descending order will be indexed accordingly.
+            E.g. range(2) would return the two sources with the highest score.
+            If None, this step will be skipped.
+        var_criterion : float | int | list-like | slice
+            The indices of the sorted variance scores. If float, sources with
+            scores smaller than the criterion will be dropped. Else, the scores
+            sorted in descending order will be indexed accordingly.
+            E.g. range(2) would return the two sources with the highest score.
+            If None, this step will be skipped.
+        add_nodes : list of ica_nodes
+            Additional list of tuples carrying the following parameters:
+            (name : str, target : str | array, score_func : callable,
+            criterion : float | int | list-like | slice). This parameter is a
+            generalization of the artifact specific parameters above and has
+            the same structure. Example:
+            add_nodes=[('ECG phase lock', 'ECG 01',
+                        my_phase_lock_function, 0.5)]
+
+        Returns
+        -------
+        self : instance of ICA
+            The ica object with the detected artifact indices marked for
+            exclusion.
+        """
+        logger.info('    Searching for artifacts...')
+        _detect_artifacts(self, raw=raw, start_find=start_find,
+                          stop_find=stop_find, ecg_ch=ecg_ch,
+                          ecg_score_func=ecg_score_func,
+                          ecg_criterion=ecg_criterion,
+                          eog_ch=eog_ch, eog_score_func=eog_score_func,
+                          eog_criterion=eog_criterion,
+                          skew_criterion=skew_criterion,
+                          kurt_criterion=kurt_criterion,
+                          var_criterion=var_criterion,
+                          add_nodes=add_nodes)
+
+        return self
+
+    @verbose
+    def _check_n_pca_components(self, _n_pca_comp, verbose=None):
+        """Aux function"""
+        if isinstance(_n_pca_comp, float):
+            _n_pca_comp = ((self.pca_explained_variance_ /
+                           self.pca_explained_variance_.sum()).cumsum() <=
+                           _n_pca_comp).sum()
+            logger.info('Selected %i PCA components by explained '
+                        'variance' % _n_pca_comp)
+        elif _n_pca_comp is None:
+            _n_pca_comp = self.max_pca_components
+        elif _n_pca_comp < self.n_components_:
+            _n_pca_comp = self.n_components_
+
+        return _n_pca_comp
+
+
+def _check_start_stop(raw, start, stop):
+    """Aux function"""
+    return [c if (isinstance(c, int) or c is None) else
+            raw.time_as_index(c)[0] for c in (start, stop)]
+
+
+ at verbose
+def ica_find_ecg_events(raw, ecg_source, event_id=999,
+                        tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
+                        verbose=None):
+    """Find ECG peaks from one selected ICA source
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw object to draw sources from.
+    ecg_source : ndarray
+        ICA source resembling ECG to find peaks from.
+    event_id : int
+        The index to assign to found events.
+    tstart : float
+        Start detection after tstart seconds. Useful when beginning
+        of run is noisy.
+    l_freq : float
+        Low pass frequency.
+    h_freq : float
+        High pass frequency.
+    qrs_threshold : float | str
+        Between 0 and 1. qrs detection threshold. Can also be "auto" to
+        automatically choose the threshold that generates a reasonable
+        number of heartbeats (40-160 beats / min).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ecg_events : array
+        Events.
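+
+    Example (a sketch; assumes component 0 of a fitted ``ica`` resembles
+    the ECG)::
+
+        ecg_source = ica.get_sources(raw)[0][0]  # data of the first source
+        ecg_events = ica_find_ecg_events(raw, ecg_source)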
+    """
+    logger.info('Using ICA source to identify heart beats')
+
+    # detecting QRS and generating event file
+    ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
+                              tstart=tstart, thresh_value=qrs_threshold,
+                              l_freq=l_freq, h_freq=h_freq)
+
+    n_events = len(ecg_events)
+
+    ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
+                       event_id * np.ones(n_events)]
+
+    return ecg_events
+
+
+ at verbose
+def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
+                        h_freq=10, verbose=None):
+    """Locate EOG artifacts from one selected ICA source
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    eog_source : ndarray
+        ICA source resembling EOG to find peaks from.
+    event_id : int
+        The index to assign to found events.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eog_events : array
+        Events
+    """
+    eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,
+                                  l_freq=l_freq, h_freq=h_freq,
+                                  sampling_rate=raw.info['sfreq'],
+                                  first_samp=raw.first_samp)
+    return eog_events
+
+
+def _get_target_ch(container, target):
+    """Aux function"""
+
+    # auto target selection
+    picks = pick_channels(container.ch_names, include=[target])
+    ref_picks = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
+    if len(ref_picks) > 0:
+        picks = list(set(picks) - set(ref_picks))
+
+    if len(picks) == 0:
+        raise ValueError('%s not in channel list (%s)' %
+                         (target, container.ch_names))
+    return picks
+
+
+def _find_sources(sources, target, score_func):
+    """Aux function"""
+    if isinstance(score_func, string_types):
+        score_func = get_score_funcs().get(score_func, score_func)
+
+    if not callable(score_func):
+        raise ValueError('%s is not a valid score_func.' % score_func)
+
+    scores = (score_func(sources, target) if target is not None
+              else score_func(sources, 1))
+
+    return scores
+
+
+def _serialize(dict_, outer_sep=';', inner_sep=':'):
+    """Aux function"""
+    s = []
+    for k, v in dict_.items():
+        if callable(v):
+            v = v.__name__
+        elif isinstance(v, int):
+            v = int(v)
+        for cls in (np.random.RandomState, Covariance):
+            if isinstance(v, cls):
+                v = cls.__name__
+
+        s.append(k + inner_sep + json.dumps(v))
+
+    return outer_sep.join(s)
+
+
+def _deserialize(str_, outer_sep=';', inner_sep=':'):
+    """Aux Function"""
+    out = {}
+    for mapping in str_.split(outer_sep):
+        k, v = mapping.split(inner_sep)
+        vv = json.loads(v)
+        out[k] = vv if not isinstance(vv, text_type) else str(vv)
+
+    return out
+
+
+def _write_ica(fid, ica):
+    """Write an ICA object
+
+    Parameters
+    ----------
+    fid : file
+        The file descriptor.
+    ica : instance of ICA
+        The ICA object to write.
+    """
+    ica_init = dict(noise_cov=ica.noise_cov,
+                    n_components=ica.n_components,
+                    n_pca_components=ica.n_pca_components,
+                    max_pca_components=ica.max_pca_components,
+                    current_fit=ica.current_fit)
+
+    if ica.info is not None:
+        start_block(fid, FIFF.FIFFB_MEAS)
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+        if ica.info['meas_id'] is not None:
+            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])
+
+        # Write measurement info
+        write_meas_info(fid, ica.info)
+        end_block(fid, FIFF.FIFFB_MEAS)
+
+    start_block(fid, FIFF.FIFFB_ICA)
+
+    #   ICA interface params
+    write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
+                 _serialize(ica_init))
+
+    #   Channel names
+    if ica.ch_names is not None:
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
+
+    # samples on fit
+    ica_misc = {'n_samples_': getattr(ica, 'n_samples_', None)}
+
+    #   ICA misc params
+    write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
+                 _serialize(ica_misc))
+
+    #   Whitener
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica._pre_whitener)
+
+    #   PCA components_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
+                        ica.pca_components_)
+
+    #   PCA mean_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
+
+    #   PCA explained_variance_
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
+                        ica.pca_explained_variance_)
+
+    #   ICA unmixing
+    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
+
+    #   Write bad components
+    write_int(fid, FIFF.FIFF_MNE_ICA_BADS, ica.exclude)
+
+    # Done!
+    end_block(fid, FIFF.FIFFB_ICA)
+
+
+ at verbose
+def read_ica(fname):
+    """Restore ICA solution from fif file.
+
+    Parameters
+    ----------
+    fname : str
+        Absolute path to fif file containing ICA matrices.
+        The file name should end with -ica.fif or -ica.fif.gz.
+
+    Returns
+    -------
+    ica : instance of ICA
+        The ICA estimator.
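+
+    Example (a sketch; the file name is only illustrative)::
+
+        ica = read_ica('/tmp/sample-ica.fif')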
+    """
+    check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz'))
+
+    logger.info('Reading %s ...' % fname)
+    fid, tree, _ = fiff_open(fname)
+
+    try:
+        info, meas = read_meas_info(fid, tree)
+    except ValueError:
+        logger.info('Could not find the measurement info. \n'
+                    'Functionality requiring the info won\'t be'
+                    ' available.')
+        info = None
+    else:
+        info['filename'] = fname
+
+    ica_data = dir_tree_find(tree, FIFF.FIFFB_ICA)
+    if len(ica_data) == 0:
+        fid.close()
+        raise ValueError('Could not find ICA data')
+
+    my_ica_data = ica_data[0]
+    for d in my_ica_data['directory']:
+        kind = d.kind
+        pos = d.pos
+        if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
+            tag = read_tag(fid, pos)
+            ica_init = tag.data
+        elif kind == FIFF.FIFF_MNE_ROW_NAMES:
+            tag = read_tag(fid, pos)
+            ch_names = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
+            tag = read_tag(fid, pos)
+            pre_whitener = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
+            tag = read_tag(fid, pos)
+            pca_components = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
+            tag = read_tag(fid, pos)
+            pca_explained_variance = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
+            tag = read_tag(fid, pos)
+            pca_mean = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
+            tag = read_tag(fid, pos)
+            unmixing_matrix = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_BADS:
+            tag = read_tag(fid, pos)
+            exclude = tag.data
+        elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
+            tag = read_tag(fid, pos)
+            ica_misc = tag.data
+
+    fid.close()
+
+    ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
+    current_fit = ica_init.pop('current_fit')
+    if ica_init['noise_cov'] == Covariance.__name__:
+        logger.info('Reading whitener drawn from noise covariance ...')
+
+    logger.info('Now restoring ICA solution ...')
+
+    # make sure dtypes are np.float64 to satisfy fast_dot
+    def f(x):
+        return x.astype(np.float64)
+
+    ica_init = dict((k, v) for k, v in ica_init.items()
+                    if k in getargspec(ICA.__init__).args)
+    ica = ICA(**ica_init)
+    ica.current_fit = current_fit
+    ica.ch_names = ch_names.split(':')
+    ica._pre_whitener = f(pre_whitener)
+    ica.pca_mean_ = f(pca_mean)
+    ica.pca_components_ = f(pca_components)
+    ica.n_components_ = unmixing_matrix.shape[0]
+    ica.pca_explained_variance_ = f(pca_explained_variance)
+    ica.unmixing_matrix_ = f(unmixing_matrix)
+    ica.mixing_matrix_ = linalg.pinv(ica.unmixing_matrix_)
+    ica.exclude = [] if exclude is None else list(exclude)
+    ica.info = info
+    if 'n_samples_' in ica_misc:
+        ica.n_samples_ = ica_misc['n_samples_']
+
+    logger.info('Ready.')
+
+    return ica
+
+
+_ica_node = namedtuple('Node', 'name target score_func criterion')
+
+
+def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
+                      ecg_criterion, eog_ch, eog_score_func, eog_criterion,
+                      skew_criterion, kurt_criterion, var_criterion,
+                      add_nodes):
+    """Aux Function"""
+    from scipy import stats
+
+    nodes = []
+    if ecg_ch is not None:
+        nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]
+
+    if eog_ch not in [None, []]:
+        if not isinstance(eog_ch, list):
+            eog_ch = [eog_ch]
+        for idx, ch in enumerate(eog_ch):
+            nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
+                      eog_criterion)]
+
+    if skew_criterion is not None:
+        nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]
+
+    if kurt_criterion is not None:
+        nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]
+
+    if var_criterion is not None:
+        nodes += [_ica_node('variance', None, np.var, var_criterion)]
+
+    if add_nodes is not None:
+        nodes.extend(add_nodes)
+
+    for node in nodes:
+        scores = ica.score_sources(raw, start=start_find, stop=stop_find,
+                                   target=node.target,
+                                   score_func=node.score_func)
+        if isinstance(node.criterion, float):
+            found = list(np.where(np.abs(scores) > node.criterion)[0])
+        else:
+            found = list(np.atleast_1d(abs(scores).argsort()[node.criterion]))
+
+        case = (len(found), 's' if len(found) > 1 else '', node.name)
+        logger.info('    found %s artifact%s by %s' % case)
+        ica.exclude += found
+
+    logger.info('Artifact indices found:\n    ' + str(ica.exclude).strip('[]'))
+    if len(set(ica.exclude)) != len(ica.exclude):
+        logger.info('    Removing duplicate indices...')
+        ica.exclude = list(set(ica.exclude))
+
+    logger.info('Ready.')
+
+
+ at verbose
+def run_ica(raw, n_components, max_pca_components=100,
+            n_pca_components=64, noise_cov=None, random_state=None,
+            picks=None, start=None, stop=None, start_find=None,
+            stop_find=None, ecg_ch=None, ecg_score_func='pearsonr',
+            ecg_criterion=0.1, eog_ch=None, eog_score_func='pearsonr',
+            eog_criterion=0.1, skew_criterion=-1, kurt_criterion=-1,
+            var_criterion=0, add_nodes=None, verbose=None):
+    """Run ICA decomposition on raw data and identify artifact sources
+
+    This function implements an automated artifact removal workflow.
+
+    Hints and caveats:
+
+        - It is highly recommended to band-pass filter ECG and EOG
+          data and pass the filtered time series, rather than the channel
+          names, as the ecg_ch and eog_ch arguments (a sketch follows the
+          example below).
+        - Please check your results. Detection by kurtosis and variance
+          can be powerful, but misclassification of brain signals as
+          noise cannot be precluded. If you are not sure, set those
+          criteria to None.
+        - Consider using shorter times for start_find and stop_find than
+          for start and stop. This can save a lot of computation time.
+
+    Example invocation (taking advantage of defaults)::
+
+        ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
+                      ecg_ch='MEG 1531', eog_ch='EOG 061')
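+
+    A sketch of passing a band-pass filtered ECG trace instead of a channel
+    name (``ecg_idx`` is a hypothetical channel index)::
+
+        from mne.filter import band_pass_filter
+        ecg = band_pass_filter(raw[ecg_idx, :][0], raw.info['sfreq'], 8, 16)
+        ica = run_ica(raw, n_components=.9, ecg_ch=ecg)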
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data to decompose.
+    n_components : int | float | None
+        The number of components used for ICA decomposition. If int, it must
+        be smaller than max_pca_components. If None, all PCA components will
+        be used. If float between 0 and 1, components will be selected by the
+        cumulative percentage of explained variance.
+    max_pca_components : int | None
+        The number of components used for PCA decomposition. If None, no
+        dimension reduction will be applied and max_pca_components will equal
+        the number of channels supplied on decomposing data.
+    n_pca_components : int | float
+        The number of PCA components used after ICA recomposition. The
+        ensuing attribute allows one to balance noise reduction against
+        potential loss of features due to dimensionality reduction. If
+        greater than ``self.n_components_``, the next ``n_pca_components``
+        minus ``n_components_`` PCA components will be added before restoring
+        the sensor space data. The attribute is updated each time the
+        corresponding parameter in .pick_sources_raw or .pick_sources_epochs
+        is changed.
+    noise_cov : None | instance of mne.cov.Covariance
+        Noise covariance used for whitening. If None, channels are just
+        z-scored.
+    random_state : None | int | instance of np.random.RandomState
+        np.random.RandomState to initialize the FastICA estimation.
+        As the estimation is non-deterministic it can be useful to
+        fix the seed to have reproducible results.
+    picks : array-like of int
+        Channels to be included. This selection remains throughout the
+        initialized ICA solution. If None, only good data channels are used.
+    start : int | float | None
+        First sample to include for decomposition. If float, it will be
+        interpreted as time in seconds. If None, data will be used from the
+        first sample.
+    stop : int | float | None
+        End sample (exclusive) for decomposition. If float, it will be
+        interpreted as time in seconds. If None, data will be used up to the
+        last sample.
+    start_find : int | float | None
+        First sample to include for the artifact search. If float, it will
+        be interpreted as time in seconds. If None, data will be used from
+        the first sample.
+    stop_find : int | float | None
+        End sample (exclusive) for the artifact search. If float, it will be
+        interpreted as time in seconds. If None, data will be used up to the
+        last sample.
+    ecg_ch : str | ndarray | None
+        The ``target`` argument passed to ica.find_sources_raw. Either the
+        name of the ECG channel or the ECG time series. If None, this step
+        will be skipped.
+    ecg_score_func : str | callable
+        The ``score_func`` argument passed to ica.find_sources_raw. Either
+        the name of a function supported by ICA or a custom function.
+    ecg_criterion : float | int | list-like | slice
+        The criterion applied to the sorted ECG scores. If float, sources
+        with absolute scores greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in descending order will be
+        indexed accordingly. E.g. range(2) would return the two sources with
+        the highest score. If None, this step will be skipped.
+    eog_ch : list | str | ndarray | None
+        The ``target`` argument or the list of target arguments subsequently
+        passed to ica.find_sources_raw. Either the name of the vertical EOG
+        channel or the corresponding EOG time series. If None, this step
+        will be skipped.
+    eog_score_func : str | callable
+        The ``score_func`` argument passed to ica.find_sources_raw. Either
+        the name of a function supported by ICA or a custom function.
+    eog_criterion : float | int | list-like | slice
+        The criterion applied to the sorted EOG scores. If float, sources
+        with absolute scores greater than the criterion will be marked for
+        exclusion. Else, the scores sorted in descending order will be
+        indexed accordingly. E.g. range(2) would return the two sources with
+        the highest score. If None, this step will be skipped.
+    skew_criterion : float | int | list-like | slice
+        The criterion applied to the sorted skewness scores. If float,
+        sources with absolute scores greater than the criterion will be
+        marked for exclusion. Else, the scores sorted in descending order
+        will be indexed accordingly. E.g. range(2) would return the two
+        sources with the highest score. If None, this step will be skipped.
+    kurt_criterion : float | int | list-like | slice
+        The criterion applied to the sorted kurtosis scores. If float,
+        sources with absolute scores greater than the criterion will be
+        marked for exclusion. Else, the scores sorted in descending order
+        will be indexed accordingly. E.g. range(2) would return the two
+        sources with the highest score. If None, this step will be skipped.
+    var_criterion : float | int | list-like | slice
+        The criterion applied to the sorted variance scores. If float,
+        sources with absolute scores greater than the criterion will be
+        marked for exclusion. Else, the scores sorted in descending order
+        will be indexed accordingly. E.g. range(2) would return the two
+        sources with the highest score. If None, this step will be skipped.
+    add_nodes : list of ica_nodes
+        Additional list of tuples carrying the following parameters:
+        (name : str, target : str | array, score_func : callable,
+        criterion : float | int | list-like | slice). This parameter is a
+        generalization of the artifact-specific parameters above and has
+        the same structure. Example::
+
+            add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function,
+                       0.5)
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    ica : instance of ICA
+        The ICA object with detected artifact sources marked for exclusion.
+    """
+    ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
+              n_pca_components=n_pca_components, noise_cov=noise_cov,
+              random_state=random_state, verbose=verbose)
+
+    ica.fit(raw, start=start, stop=stop, picks=picks)
+    logger.info('%s' % ica)
+    logger.info('    Now searching for artifacts...')
+
+    _detect_artifacts(ica=ica, raw=raw, start_find=start_find,
+                      stop_find=stop_find, ecg_ch=ecg_ch,
+                      ecg_score_func=ecg_score_func,
+                      ecg_criterion=ecg_criterion, eog_ch=eog_ch,
+                      eog_score_func=eog_score_func,
+                      eog_criterion=eog_criterion,
+                      skew_criterion=skew_criterion,
+                      kurt_criterion=kurt_criterion,
+                      var_criterion=var_criterion,
+                      add_nodes=add_nodes)
+    return ica
+
+
+@verbose
+def _band_pass_filter(ica, sources, target, l_freq, h_freq, verbose=None):
+    if l_freq is not None and h_freq is not None:
+        logger.info('... filtering ICA sources')
+        # use FFT here; a steeper filter is better for this purpose
+        sources = band_pass_filter(sources, ica.info['sfreq'],
+                                   l_freq, h_freq, method='fft',
+                                   verbose=verbose)
+        logger.info('... filtering target')
+        target = band_pass_filter(target, ica.info['sfreq'],
+                                  l_freq, h_freq, method='fft',
+                                  verbose=verbose)
+    elif l_freq is not None or h_freq is not None:
+        raise ValueError('Both l_freq and h_freq must be specified '
+                         '(or neither)')
+
+    return sources, target
+
+
+# #############################################################################
+# CORRMAP
+
+def _get_ica_map(ica, components=None):
+    """Get ICA topomap for components"""
+    fast_dot = _get_fast_dot()
+    if components is None:
+        components = list(range(ica.n_components_))
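+    # each row of ``maps`` is one component's sensor-space topography: the
+    # selected mixing-matrix columns projected through the PCA components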
+    maps = fast_dot(ica.mixing_matrix_[:, components].T,
+                    ica.pca_components_[:ica.n_components_])
+    return maps
+
+
+def _find_max_corrs(all_maps, target, threshold):
+    """Compute correlations between template and target components"""
+    all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
+    abs_corrs = [np.abs(a) for a in all_corrs]
+    corr_polarities = [np.sign(a) for a in all_corrs]
+
+    if threshold <= 1:
+        max_corrs = [list(np.nonzero(s_corr > threshold)[0])
+                     for s_corr in abs_corrs]
+    else:
+        max_corrs = [list(find_outliers(s_corr, threshold=threshold))
+                     for s_corr in abs_corrs]
+
+    am = [l[i] for l, i_s in zip(abs_corrs, max_corrs)
+          for i in i_s]
+    median_corr_with_target = np.median(am) if len(am) > 0 else 0
+
+    polarities = [l[i] for l, i_s in zip(corr_polarities, max_corrs)
+                  for i in i_s]
+
+    maxmaps = [l[i] for l, i_s in zip(all_maps, max_corrs)
+               for i in i_s]
+
+    if len(maxmaps) == 0:
+        return [], 0, 0, []
+    newtarget = np.zeros(maxmaps[0].size)
+    std_of_maps = np.std(np.asarray(maxmaps))
+    mean_of_maps = np.mean(np.asarray(maxmaps))
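+    # build the new template: scale, de-mean, and polarity-align the selected
+    # maps, then average them (rescaled by the pooled std below)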
+    for maxmap, polarity in zip(maxmaps, polarities):
+        newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
+
+    newtarget /= len(maxmaps)
+    newtarget *= std_of_maps
+
+    sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
+
+    return newtarget, median_corr_with_target, sim_i_o, max_corrs
+
+
+def _plot_corrmap(data, subjs, indices, ch_type, ica, label, show, outlines,
+                  layout, cmap, contours):
+    """Customized ica.plot_components for corrmap"""
+    import matplotlib.pyplot as plt
+
+    title = 'Detected components'
+    if label is not None:
+        title += ' of type ' + label
+
+    picks = list(range(len(data)))
+
+    p = 20
+    if len(picks) > p:  # plot components by sets of 20
+        n_components = len(picks)
+        figs = [_plot_corrmap(data[k:k + p], subjs[k:k + p],
+                indices[k:k + p], ch_type, ica, label, show,
+                outlines=outlines, layout=layout, cmap=cmap,
+                contours=contours)
+                for k in range(0, n_components, p)]
+        return figs
+    elif np.isscalar(picks):
+        picks = [picks]
+
+    data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(
+        ica, ch_type, layout)
+    pos, outlines = _check_outlines(pos, outlines)
+
+    data = np.atleast_2d(data)
+    data = data[:, data_picks]
+
+    # prepare data for iteration
+    fig, axes = _prepare_trellis(len(picks), max_col=5)
+    fig.suptitle(title)
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+    for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
+        ttl = 'Subj. {0}, IC {1}'.format(subject, idx)
+        ax.set_title(ttl, fontsize=12)
+        data_ = _merge_grad_data(data_) if merge_grads else data_
+        vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
+        plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
+                     res=64, axis=ax, cmap=cmap, outlines=outlines,
+                     image_mask=None, contours=contours, show=False,
+                     image_interp='bilinear')[0]
+        ax.set_yticks([])
+        ax.set_xticks([])
+        ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.8)
+    fig.canvas.draw()
+    if show is True:
+        plt.show()
+    return fig
+
+
+ at verbose
+def corrmap(icas, template, threshold="auto", label=None,
+            ch_type="eeg", plot=True, show=True, verbose=None, outlines='head',
+            layout=None, sensors=True, contours=6, cmap='RdBu_r'):
+    """Find similar Independent Components across subjects by map similarity.
+
+    Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
+    match to a supplied template. Typically, feed it a list of fitted ICAs and
+    a template IC, for example, the blink for the first subject, to identify
+    specific ICs across subjects.
+
+    The specific procedure consists of two iterations. In the first step, the
+    maps best correlating with the template are identified. In the second
+    step, the analysis is repeated with the mean of the maps identified in
+    the first step.
+
+    Outputs a list of fitted ICAs with the indices of the marked ICs in a
+    specified field.
+
+    The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html
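+
+    A minimal usage sketch (the fitted ICA objects and the template index
+    are hypothetical)::
+
+        template_fig, labelled_ics = corrmap(icas, template=(0, 3),
+                                             label='blink')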
+
+    Parameters
+    ----------
+    icas : list of mne.preprocessing.ICA
+        A list of fitted ICA objects.
+    template : tuple
+        A tuple with two elements (int, int) representing the list indices of
+        the set from which the template should be chosen, and the template.
+        E.g., if template=(1, 0), the first IC of the 2nd ICA object is used.
+    threshold : "auto" | list of float | float
+        Correlation threshold for identifying ICs
+        If "auto", search for the best map by trying all correlations between
+        0.6 and 0.95. In the original proposal, lower values are considered,
+        but this is not yet implemented.
+        If list of floats, search for the best map in the specified range of
+        correlation strengths. As correlation values, must be between 0 and 1
+        If float > 0, select ICs correlating better than this.
+        If float > 1, use find_outliers to identify ICs within subjects (not in
+        original Corrmap)
+        Defaults to "auto".
+    label : None | str
+        If not None, categorised ICs are stored in a dictionary "labels_" under
+        the given name. Preexisting entries will be appended to
+        (excluding repeats), not overwritten. If None, a dry run is performed.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. Defaults to 'eeg'.
+    plot : bool
+        Should constructed template and selected maps be plotted? Defaults
+        to True.
+    show : bool
+        Show figures if True.
+    layout : None | Layout | list of Layout
+        Layout instance specifying sensor positions (does not need to be
+        specified for Neuromag data). Or a list of Layout if projections
+        are from different sensor types.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn. If
+        dict, each key refers to a tuple of x and y positions. The values in
+        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Defaults to 'head'. If dict, the 'autoshrink' (bool) field will
+        trigger automated shrinking of the positions due to points outside the
+        outline. Moreover, a matplotlib patch object can be passed for
+        advanced masking options, either directly or as a function that returns
+        patches (required for multi-axis plots).
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    template_fig : fig
+        Figure showing the mean template.
+    labelled_ics : fig
+        Figure showing the labelled ICs in all ICA decompositions.
+    """
+    if not isinstance(plot, bool):
+        raise ValueError("`plot` must be of type `bool`")
+
+    if threshold == 'auto':
+        threshold = np.arange(60, 95, dtype=np.float64) / 100.
+
+    all_maps = [_get_ica_map(ica) for ica in icas]
+
+    target = all_maps[template[0]][template[1]]
+
+    if plot is True:
+        ttl = 'Template from subj. {0}'.format(str(template[0]))
+        template_fig = icas[template[0]].plot_components(
+            picks=template[1], ch_type=ch_type, title=ttl, outlines=outlines,
+            cmap=cmap, contours=contours, layout=layout, show=show)
+        template_fig.subplots_adjust(top=0.8)
+        template_fig.canvas.draw()
+
+    # first run: use user-selected map
+    if isinstance(threshold, (int, float)):
+        if len(all_maps) == 0 or len(target) == 0:
+            logger.info('No component detected using find_outliers.'
+                        ' Consider using threshold="auto"')
+            return icas
+        nt, mt, s, mx = _find_max_corrs(all_maps, target, threshold)
+    elif len(threshold) > 1:
+        paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
+        # find iteration with highest avg correlation with target
+        nt, mt, s, mx = paths[np.argmax([path[2] for path in paths])]
+
+    # second run: use output from first run
+    if isinstance(threshold, (int, float)):
+        if len(all_maps) == 0 or len(nt) == 0:
+            if threshold > 1:
+                logger.info('No component detected using find_outliers. '
+                            'Consider using threshold="auto"')
+            return icas
+        nt, mt, s, mx = _find_max_corrs(all_maps, nt, threshold)
+    elif len(threshold) > 1:
+        paths = [_find_max_corrs(all_maps, nt, t) for t in threshold]
+        # find iteration with highest avg correlation with target
+        nt, mt, s, mx = paths[np.argmax([path[1] for path in paths])]
+
+    allmaps, indices, subjs, nones = [list() for _ in range(4)]
+    logger.info('Median correlation with constructed map: %0.3f' % mt)
+    if plot is True:
+        logger.info('Displaying selected ICs per subject.')
+
+    for ii, (ica, max_corr) in enumerate(zip(icas, mx)):
+        if (label is not None) and (not hasattr(ica, 'labels_')):
+            ica.labels_ = dict()
+        if len(max_corr) > 0:
+            if isinstance(max_corr[0], np.ndarray):
+                max_corr = max_corr[0]
+            if label is not None:
+                ica.labels_[label] = list(set(list(max_corr) +
+                                          ica.labels_.get(label, list())))
+            if plot is True:
+                allmaps.extend(_get_ica_map(ica, components=max_corr))
+                subjs.extend([ii] * len(max_corr))
+                indices.extend(max_corr)
+        else:
+            if (label is not None) and (label not in ica.labels_):
+                ica.labels_[label] = list()
+            nones.append(ii)
+
+    if len(nones) == 0:
+        logger.info('At least 1 IC detected for each subject.')
+    else:
+        logger.info('No maps selected for subject(s) ' +
+                    ', '.join([str(x) for x in nones]) +
+                    ', consider a more liberal threshold.')
+
+    if plot is True:
+        labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
+                                     label, outlines=outlines, cmap=cmap,
+                                     contours=contours, layout=layout,
+                                     show=show)
+        return template_fig, labelled_ics
+    else:
+        return None
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/infomax_.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/infomax_.py
new file mode 100644
index 0000000..053efde
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/infomax_.py
@@ -0,0 +1,315 @@
+# Authors: Lukas Breuer <l.breuer@fz-juelich.de>
+#          Juergen Dammers <j.dammers@fz-juelich.de>
+#          Denis A. Engemann <denis.engemann@gmail.com>
+#
+# License: BSD (3-clause)
+
+import math
+
+import numpy as np
+
+from ..utils import logger, verbose, check_random_state, random_permutation
+
+
+@verbose
+def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
+            anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
+            kurt_size=6000, ext_blocks=1, max_iter=200,
+            random_state=None, blowup=1e4, blowup_fac=0.5, n_small_angle=20,
+            use_bias=True, verbose=None):
+    """Run the (extended) Infomax ICA decomposition on raw data
+
+    based on the publications of Bell & Sejnowski 1995 (Infomax)
+    and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
+
+    Parameters
+    ----------
+    data : np.ndarray, shape (n_samples, n_features)
+        The data to unmix.
+    weights : np.ndarray, shape (n_features, n_features)
+        The initial unmixing matrix. Defaults to None. If None, the
+        identity matrix is used.
+    l_rate : float
+        This quantity indicates the relative size of the change in weights.
+        Note: smaller learning rates will slow down the procedure.
+        Defaults to ``0.01 / log(n_features ** 2)``.
+    block : int
+        The block size of randomly chosen data segments.
+        Defaults to ``floor(sqrt(n_samples / 3.0))``.
+    w_change : float
+        The change at which to stop iteration. Defaults to 1e-12.
+    anneal_deg : float
+        The angle (in degrees) at which the learning rate will be reduced.
+        Defaults to 60.0.
+    anneal_step : float
+        The factor by which the learning rate will be reduced once
+        ``anneal_deg`` is exceeded: ``l_rate *= anneal_step``.
+        Defaults to 0.9.
+    extended : bool
+        Whether to use the extended Infomax algorithm or not. Defaults to
+        False.
+    n_subgauss : int
+        The number of subgaussian components. Only considered for extended
+        Infomax.
+    kurt_size : int
+        The window size for kurtosis estimation. Only considered for extended
+        Infomax.
+    ext_blocks : int
+        Only considered for extended Infomax.
+        If positive, it denotes the number of blocks after which to recompute
+        the kurtosis, which is used to estimate the signs of the sources.
+        In this case the number of sub-gaussian sources is automatically
+        determined.
+        If negative, the number of sub-gaussian sources to be used is fixed
+        and equal to n_subgauss. In this case the kurtosis is not estimated.
+    max_iter : int
+        The maximum number of iterations. Defaults to 200.
+    random_state : int | np.random.RandomState
+        If random_state is an int, use random_state as seed of the random
+        number generator.
+        If random_state is already a np.random.RandomState instance, use
+        random_state as random number generator.
+    blowup : float
+        The maximum difference allowed between two successive estimations of
+        the unmixing matrix. Defaults to 1e4.
+    blowup_fac : float
+        The factor by which the learning rate will be reduced if the
+        difference between two successive estimations of the unmixing matrix
+        exceeds ``blowup``: ``l_rate *= blowup_fac``. Defaults to 0.5.
+    n_small_angle : int | None
+        The maximum number of allowed steps in which the angle between two
+        successive estimations of the unmixing matrix is less than
+        ``anneal_deg``. If None, this parameter is not taken into account to
+        stop the iterations. Defaults to 20.
+    use_bias : bool
+        This quantity indicates if the bias should be computed.
+        Defaults to True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
+        The linear unmixing operator.
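+
+    A minimal usage sketch (random data, for illustration only)::
+
+        import numpy as np
+        rng = np.random.RandomState(42)
+        data = rng.randn(1000, 8)  # (n_samples, n_features)
+        unmixing = infomax(data, extended=True)
+        sources = np.dot(unmixing, data.T)  # (n_features, n_samples)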
+    """
+    from scipy.stats import kurtosis
+    rng = check_random_state(random_state)
+
+    # define some default parameters
+    max_weight = 1e8
+    restart_fac = 0.9
+    min_l_rate = 1e-10
+    degconst = 180.0 / np.pi
+
+    # for extended Infomax
+    extmomentum = 0.5
+    signsbias = 0.02
+    signcount_threshold = 25
+    signcount_step = 2
+
+    # check data shape
+    n_samples, n_features = data.shape
+    n_features_square = n_features ** 2
+
+    # check input parameters
+    # heuristic default - may need adjustment for
+    # large or tiny data sets
+    if l_rate is None:
+        l_rate = 0.01 / math.log(n_features ** 2.0)
+
+    if block is None:
+        block = int(math.floor(math.sqrt(n_samples / 3.0)))
+
+    logger.info('computing%sInfomax ICA' %
+                (' Extended ' if extended is True else ' '))
+
+    # collect parameters
+    nblock = n_samples // block
+    lastt = (nblock - 1) * block + 1
+
+    # initialize training
+    if weights is None:
+        # initialize weights as identity matrix
+        weights = np.identity(n_features, dtype=np.float64)
+
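+    # block-scaled identity used by the natural-gradient weight updates below
+    # (Bell & Sejnowski 1995; Lee, Girolami & Sejnowski 1999 for the extended
+    # rule)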
+    BI = block * np.identity(n_features, dtype=np.float64)
+    bias = np.zeros((n_features, 1), dtype=np.float64)
+    onesrow = np.ones((1, block), dtype=np.float64)
+    startweights = weights.copy()
+    oldweights = startweights.copy()
+    step = 0
+    count_small_angle = 0
+    wts_blowup = False
+    blockno = 0
+    signcount = 0
+    initial_ext_blocks = ext_blocks   # save the initial value in case of reset
+
+    # for extended Infomax
+    if extended is True:
+        signs = np.ones(n_features)
+
+        for k in range(n_subgauss):
+            signs[k] = -1
+
+        kurt_size = min(kurt_size, n_samples)
+        old_kurt = np.zeros(n_features, dtype=np.float64)
+        oldsigns = np.zeros(n_features)
+
+    # training loop
+    olddelta, oldchange = 1., 0.
+    while step < max_iter:
+
+        # shuffle data at each step
+        permute = random_permutation(n_samples, rng)
+
+        # ICA training block
+        # loop across block samples
+        for t in range(0, lastt, block):
+            u = np.dot(data[permute[t:t + block], :], weights)
+            u += np.dot(bias, onesrow).T
+
+            if extended is True:
+                # extended ICA update
+                y = np.tanh(u)
+                weights += l_rate * np.dot(weights,
+                                           BI -
+                                           signs[None, :] * np.dot(u.T, y) -
+                                           np.dot(u.T, u))
+                if use_bias:
+                    bias += l_rate * np.reshape(np.sum(y, axis=0,
+                                                dtype=np.float64) * -2.0,
+                                                (n_features, 1))
+
+            else:
+                # logistic ICA weights update
+                y = 1.0 / (1.0 + np.exp(-u))
+                weights += l_rate * np.dot(weights,
+                                           BI + np.dot(u.T, (1.0 - 2.0 * y)))
+
+                if use_bias:
+                    bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
+                                                dtype=np.float64),
+                                                (n_features, 1))
+
+            # check change limit
+            max_weight_val = np.max(np.abs(weights))
+            if max_weight_val > max_weight:
+                wts_blowup = True
+
+            blockno += 1
+            if wts_blowup:
+                break
+
+            # ICA kurtosis estimation
+            if extended is True:
+
+                if ext_blocks > 0 and blockno % ext_blocks == 0:
+
+                    if kurt_size < n_samples:
+                        rp = np.floor(rng.uniform(0, 1, kurt_size) *
+                                      (n_samples - 1))
+                        tpartact = np.dot(data[rp.astype(int), :], weights).T
+                    else:
+                        tpartact = np.dot(data, weights).T
+
+                    # estimate kurtosis
+                    kurt = kurtosis(tpartact, axis=1, fisher=True)
+
+                    if extmomentum != 0:
+                        kurt = (extmomentum * old_kurt +
+                                (1.0 - extmomentum) * kurt)
+                        old_kurt = kurt
+
+                    # estimate weighted signs
+                    signs = np.sign(kurt + signsbias)
+
+                    ndiff = (signs - oldsigns != 0).sum()
+                    if ndiff == 0:
+                        signcount += 1
+                    else:
+                        signcount = 0
+                    oldsigns = signs
+
+                    if signcount >= signcount_threshold:
+                        ext_blocks = np.fix(ext_blocks * signcount_step)
+                        signcount = 0
+
+        # we continue here after the for loop over the ICA training blocks;
+        # proceed only if the weights stayed within bounds:
+        if not wts_blowup:
+            oldwtchange = weights - oldweights
+            step += 1
+            angledelta = 0.0
+            delta = oldwtchange.reshape(1, n_features_square)
+            change = np.sum(delta * delta, dtype=np.float64)
+            if step > 2:
+                angledelta = math.acos(np.sum(delta * olddelta) /
+                                       math.sqrt(change * oldchange))
+                angledelta *= degconst
+
+            if verbose:
+                logger.info(
+                    'step %d - lrate %5f, wchange %8.8f, angledelta %4.1f deg'
+                    % (step, l_rate, change, angledelta))
+
+            # anneal learning rate
+            oldweights = weights.copy()
+            if angledelta > anneal_deg:
+                l_rate *= anneal_step    # anneal learning rate
+                # store the current delta and change to compute the next
+                # angledelta
+                olddelta = delta
+                oldchange = change
+                count_small_angle = 0  # reset count when angle delta is large
+            else:
+                if step == 1:  # on first step only
+                    olddelta = delta  # initialize
+                    oldchange = change
+
+                if n_small_angle is not None:
+                    count_small_angle += 1
+                    if count_small_angle > n_small_angle:
+                        max_iter = step
+
+            # apply stopping rule
+            if step > 2 and change < w_change:
+                step = max_iter
+            elif change > blowup:
+                l_rate *= blowup_fac
+
+        # restart if weights blow up
+        # (for lowering l_rate)
+        else:
+            step = 0  # start again
+            wts_blowup = False  # re-initialize variables
+            blockno = 1
+            l_rate *= restart_fac  # with lower learning rate
+            weights = startweights.copy()
+            oldweights = startweights.copy()
+            olddelta = np.zeros((1, n_features_square), dtype=np.float64)
+            bias = np.zeros((n_features, 1), dtype=np.float64)
+
+            ext_blocks = initial_ext_blocks
+
+            # for extended Infomax
+            if extended:
+                signs = np.ones(n_features)
+                for k in range(n_subgauss):
+                    signs[k] = -1
+                oldsigns = np.zeros(n_features)
+
+            if l_rate > min_l_rate:
+                if verbose:
+                    logger.info('... lowering learning rate to %g'
+                                '\n... re-starting...' % l_rate)
+            else:
+                raise ValueError('Error in Infomax ICA: unmixing matrix '
+                                 'might not be invertible!')
+
+    # prepare return values
+    return weights.T
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxfilter.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxfilter.py
new file mode 100644
index 0000000..2d76a44
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxfilter.py
@@ -0,0 +1,227 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from ..externals.six import string_types
+import os
+from warnings import warn
+
+
+from ..bem import fit_sphere_to_headshape
+from ..io import Raw
+from ..utils import logger, verbose
+from ..externals.six.moves import map
+
+
+def _mxwarn(msg):
+    warn('Possible MaxFilter bug: %s, more info: '
+         'http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs' % msg)
+
+
+@verbose
+def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
+                    bad=None, autobad='off', skip=None, force=False,
+                    st=False, st_buflen=16.0, st_corr=0.96, mv_trans=None,
+                    mv_comp=False, mv_headpos=False, mv_hp=None,
+                    mv_hpistep=None, mv_hpisubt=None, mv_hpicons=True,
+                    linefreq=None, cal=None, ctc=None, mx_args='',
+                    overwrite=True, verbose=None):
+    """Apply NeuroMag MaxFilter to raw data.
+
+    Needs a MaxFilter license; the ``maxfilter`` command must be in the PATH.
+
+    Parameters
+    ----------
+    in_fname : string
+        Input file name
+
+    out_fname : string
+        Output file name
+
+    origin : array-like or string
+        Head origin in mm. If None it will be estimated from headshape points.
+
+    frame : string ('device' or 'head')
+        Coordinate frame for head center
+
+    bad : string, list (or None)
+        List of static bad channels. Can be a list with channel names, or a
+        string with channels (names or logical channel numbers)
+
+    autobad : string ('on', 'off', 'n')
+        Sets automated bad channel detection on or off
+
+    skip : string or a list of float-tuples (or None)
+        Skips raw data sequences, time intervals pairs in sec,
+        e.g.: 0 30 120 150
+
+    force : bool
+        Ignore program warnings
+
+    st : bool
+        Apply the time-domain MaxST extension
+
+    st_buflen : float
+        MaxSt buffer length in sec (disabled if st is False)
+
+    st_corr : float
+        MaxSt subspace correlation limit (disabled if st is False)
+
+    mv_trans : string (filename or 'default') (or None)
+        Transforms the data into the coil definitions of in_fname, or into the
+        default frame (None: don't use option)
+
+    mv_comp : bool (or 'inter')
+        Estimates and compensates head movements in continuous raw data
+
+    mv_headpos : bool
+        Estimates and stores head position parameters, but does not compensate
+        movements (disabled if mv_comp is False)
+
+    mv_hp : string (or None)
+        Stores head position data in an ascii file
+        (disabled if mv_comp is False)
+
+    mv_hpistep : float (or None)
+        Sets head position update interval in ms (disabled if mv_comp is False)
+
+    mv_hpisubt : string ('amp', 'base', 'off') (or None)
+        Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off
+        (disabled if mv_comp is False)
+
+    mv_hpicons : bool
+        Check initial consistency isotrak vs hpifit
+        (disabled if mv_comp is False)
+
+    linefreq : int (50, 60) (or None)
+        Sets the basic line interference frequency (50 or 60 Hz)
+        (None: do not use line filter)
+
+    cal : string
+        Path to calibration file
+
+    ctc : string
+        Path to Cross-talk compensation file
+
+    mx_args : string
+        Additional command line arguments to pass to MaxFilter
+
+    overwrite : bool
+        Overwrite output file if it already exists
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+
+    Returns
+    -------
+    origin : string
+        Head origin in the selected coordinate frame.
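+
+    A minimal invocation sketch (file names are hypothetical)::
+
+        origin = apply_maxfilter('raw.fif', 'raw_sss.fif', st=True,
+                                 st_buflen=16.0, st_corr=0.96)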
+    """
+
+    # check for possible maxfilter bugs
+    if mv_trans is not None and mv_comp:
+        _mxwarn("Don't use '-trans' with head-movement compensation "
+                "'-movecomp'")
+
+    if autobad != 'off' and (mv_headpos or mv_comp):
+        _mxwarn("Don't use '-autobad' with head-position estimation "
+                "'-headpos' or movement compensation '-movecomp'")
+
+    if st and autobad != 'off':
+        _mxwarn("Don't use '-autobad' with '-st' option")
+
+    # determine the head origin if necessary
+    if origin is None:
+        logger.info('Estimating head origin from headshape points..')
+        raw = Raw(in_fname)
+        r, o_head, o_dev = fit_sphere_to_headshape(raw.info)
+        raw.close()
+        logger.info('[done]')
+        if frame == 'head':
+            origin = o_head
+        elif frame == 'device':
+            origin = o_dev
+        else:
+            raise RuntimeError('invalid frame for origin')
+
+    if not isinstance(origin, string_types):
+        origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])
+
+    # format command
+    cmd = ('maxfilter -f %s -o %s -frame %s -origin %s '
+           % (in_fname, out_fname, frame, origin))
+
+    if bad is not None:
+        # format the channels
+        if not isinstance(bad, list):
+            bad = bad.split()
+        bad = map(str, bad)
+        bad_logic = [ch[3:] if ch.startswith('MEG') else ch for ch in bad]
+        bad_str = ' '.join(bad_logic)
+
+        cmd += '-bad %s ' % bad_str
+
+    cmd += '-autobad %s ' % autobad
+
+    if skip is not None:
+        if isinstance(skip, list):
+            skip = ' '.join(['%0.3f %0.3f' % (s[0], s[1]) for s in skip])
+        cmd += '-skip %s ' % skip
+
+    if force:
+        cmd += '-force '
+
+    if st:
+        cmd += '-st '
+        cmd += ' %d ' % st_buflen
+        cmd += '-corr %0.4f ' % st_corr
+
+    if mv_trans is not None:
+        cmd += '-trans %s ' % mv_trans
+
+    if mv_comp:
+        cmd += '-movecomp '
+        if mv_comp == 'inter':
+            cmd += ' inter '
+
+        if mv_headpos:
+            cmd += '-headpos '
+
+        if mv_hp is not None:
+            cmd += '-hp %s ' % mv_hp
+
+        if mv_hpisubt is not None:
+            cmd += '-hpisubt %s ' % mv_hpisubt
+
+        if mv_hpicons:
+            cmd += '-hpicons '
+
+    if linefreq is not None:
+        cmd += '-linefreq %d ' % linefreq
+
+    if cal is not None:
+        cmd += '-cal %s ' % cal
+
+    if ctc is not None:
+        cmd += '-ctc %s ' % ctc
+
+    cmd += mx_args
+
+    if overwrite and os.path.exists(out_fname):
+        os.remove(out_fname)
+
+    logger.info('Running MaxFilter: %s ' % cmd)
+    if os.getenv('_MNE_MAXFILTER_TEST', '') != 'true':  # fake maxfilter
+        status = os.system(cmd)
+    else:
+        print(cmd)  # we can check the output
+        status = 0
+    if status != 0:
+        raise RuntimeError('MaxFilter returned non-zero exit status %d'
+                           % status)
+    logger.info('[done]')
+
+    return origin
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxwell.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxwell.py
new file mode 100644
index 0000000..51d3a4d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/maxwell.py
@@ -0,0 +1,644 @@
+# Authors: Mark Wronkiewicz <wronk.mark@gmail.com>
+#          Eric Larson <larson.eric.d@gmail.com>
+#          Jussi Nurminen <jnu@iki.fi>
+
+
+# License: BSD (3-clause)
+
+from __future__ import division
+import numpy as np
+from scipy import linalg
+from math import factorial
+import inspect
+
+from .. import pick_types
+from ..forward._compute_forward import _concatenate_coils
+from ..forward._make_forward import _prep_meg_channels
+from ..io.write import _generate_meas_id, _date_now
+from ..utils import verbose, logger
+
+
+@verbose
+def _maxwell_filter(raw, origin=(0, 0, 40), int_order=8, ext_order=3,
+                    st_dur=None, st_corr=0.98, verbose=None):
+    """Apply Maxwell filter to data using spherical harmonics.
+
+    Parameters
+    ----------
+    raw : instance of mne.io.Raw
+        Data to be filtered
+    origin : array-like, shape (3,)
+        Origin of internal and external multipolar moment space in head coords
+        and in millimeters
+    int_order : int
+        Order of internal component of spherical expansion
+    ext_order : int
+        Order of external component of spherical expansion
+    st_dur : float | None
+        If not None, apply spatiotemporal SSS with specified buffer duration
+        (in seconds). Elekta's default is 10.0 seconds in MaxFilter v2.2.
+        Spatiotemporal SSS acts implicitly as a high-pass filter where the
+        cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
+        buffers are generally better as long as your system can handle the
+        higher memory usage. To ensure that each window is processed
+        identically, choose a buffer length that divides evenly into your data.
+        Any data at the trailing edge that doesn't fit evenly into a whole
+        buffer window will be lumped into the previous buffer.
+    st_corr : float
+        Correlation limit between inner and outer subspaces used to reject
+        overlapping inner/outer signals during spatiotemporal SSS.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+
+    Returns
+    -------
+    raw_sss : instance of mne.io.Raw
+        The raw data with Maxwell filtering applied
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    Equation numbers refer to Taulu and Kajola, 2005 [1]_ unless otherwise
+    noted.
+
+    Some of this code was adapted and relicensed (with BSD form) with
+    permission from Jussi Nurminen.
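+
+    A minimal usage sketch (``raw`` is any preloaded ``mne.io.Raw``; this
+    function is private in this version)::
+
+        raw_sss = _maxwell_filter(raw, origin=(0., 0., 40.), int_order=8,
+                                  ext_order=3, st_dur=10., st_corr=0.98)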
+
+    References
+    ----------
+    .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
+           multichannel data: The signal space separation method,"
+           Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
+
+           http://lib.tkk.fi/Diss/2008/isbn9789512295654/article2.pdf
+
+    .. [2] Taulu S. and Simola J. "Spatiotemporal signal space separation
+           method for rejecting nearby interference in MEG measurements,"
+           Physics in Medicine and Biology, vol. 51, pp. 1759-1768, 2006.
+
+           http://lib.tkk.fi/Diss/2008/isbn9789512295654/article3.pdf
+    """
+
+    # There are an absurd number of different possible notations for spherical
+    # coordinates, which confounds the notation for spherical harmonics.  Here,
+    # we purposefully stay away from shorthand notation in both and use
+    # explicit terms (like 'azimuth' and 'polar') to avoid confusion.
+    # See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
+    # Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
+
+    if raw.proj:
+        raise RuntimeError('Projectors cannot be applied to raw data.')
+    if len(raw.info.get('comps', [])) > 0:
+        raise RuntimeError('Maxwell filter cannot handle compensated '
+                           'channels.')
+    st_corr = float(st_corr)
+    if st_corr <= 0. or st_corr > 1.:
+        raise ValueError('Need 0 < st_corr <= 1., got %s' % st_corr)
+    logger.info('Bad channels being reconstructed: ' + str(raw.info['bads']))
+
+    logger.info('Preparing coil definitions')
+    all_coils, _, _, meg_info = _prep_meg_channels(raw.info, accurate=True,
+                                                   elekta_defs=True,
+                                                   verbose=False)
+    raw_sss = raw.copy().load_data()
+    del raw
+    times = raw_sss.times
+
+    # Get indices of channels to use in multipolar moment calculation
+    good_chs = pick_types(raw_sss.info, meg=True, exclude='bads')
+    # Get indices of MEG channels
+    meg_picks = pick_types(raw_sss.info, meg=True, exclude=[])
+    meg_coils, _, _, meg_info = _prep_meg_channels(raw_sss.info, accurate=True,
+                                                   elekta_defs=True)
+
+    # Magnetometers (with coil_class == 1.0) must be scaled by 100 to improve
+    # numerical stability as they have different scales than gradiometers
+    coil_scale = np.ones((len(meg_coils), 1))
+    coil_scale[np.array([coil['coil_class'] == 1.0
+                         for coil in meg_coils])] = 100.
+
+    # Compute multipolar moment bases
+    origin = np.array(origin) / 1000.  # Convert scale from mm to m
+    # Compute in/out bases and create copies containing only good chs
+    S_in, S_out = _sss_basis(origin, meg_coils, int_order, ext_order)
+    n_in = S_in.shape[1]
+
+    S_in_good, S_out_good = S_in[good_chs, :], S_out[good_chs, :]
+    S_in_good_norm = np.sqrt(np.sum(S_in_good * S_in_good, axis=0))[:,
+                                                                    np.newaxis]
+    S_out_good_norm = \
+        np.sqrt(np.sum(S_out_good * S_out_good, axis=0))[:, np.newaxis]
+    # Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
+    S_tot_good = np.c_[S_in_good, S_out_good]
+    S_tot_good /= np.sqrt(np.sum(S_tot_good * S_tot_good, axis=0))[np.newaxis,
+                                                                   :]
+    pS_tot_good = linalg.pinv(S_tot_good, cond=1e-15)
+
+    # Compute multipolar moments of (magnetometer scaled) data (Eq. 37)
+    # XXX eventually we can refactor this to work in chunks
+    data = raw_sss[good_chs][0]
+    mm = np.dot(pS_tot_good, data * coil_scale[good_chs])
+    # Reconstruct data from internal space (Eq. 38)
+    raw_sss._data[meg_picks] = np.dot(S_in, mm[:n_in] / S_in_good_norm)
+    raw_sss._data[meg_picks] /= coil_scale
+
+    # Reset 'bads' for any MEG channels since they've been reconstructed
+    bad_inds = [raw_sss.info['ch_names'].index(ch)
+                for ch in raw_sss.info['bads']]
+    raw_sss.info['bads'] = [raw_sss.info['ch_names'][bi] for bi in bad_inds
+                            if bi not in meg_picks]
+
+    # Reconstruct raw file object with spatiotemporal processed data
+    if st_dur is not None:
+        if st_dur > times[-1]:
+            raise ValueError('st_dur (%0.1fs) longer than length of signal in '
+                             'raw (%0.1fs).' % (st_dur, times[-1]))
+        logger.info('Processing data using tSSS with st_dur=%s' % st_dur)
+
+        # Generate time points to break up data in to windows
+        lims = raw_sss.time_as_index(np.arange(times[0], times[-1], st_dur))
+        len_last_buf = raw_sss.times[-1] - raw_sss.index_as_time(lims[-1])[0]
+        if len_last_buf == st_dur:
+            lims = np.concatenate([lims, [len(raw_sss.times)]])
+        else:
+            # len_last_buf < st_dur so fold it into the previous buffer
+            lims[-1] = len(raw_sss.times)
+            logger.info('Spatiotemporal window did not fit evenly into raw '
+                        'object. The final %0.2f seconds were lumped onto '
+                        'the previous window.' % len_last_buf)
+
+        # Loop through buffer windows of data
+        for win in zip(lims[:-1], lims[1:]):
+            # Reconstruct data from external space and compute residual
+            resid = data[:, win[0]:win[1]]
+            resid -= raw_sss._data[meg_picks, win[0]:win[1]]
+            resid -= np.dot(S_out, mm[n_in:, win[0]:win[1]] /
+                            S_out_good_norm) / coil_scale
+            _check_finite(resid)
+
+            # Compute SSP-like projector. Set overlap limit to 0.02
+            this_data = raw_sss._data[meg_picks, win[0]:win[1]]
+            _check_finite(this_data)
+            V = _overlap_projector(this_data, resid, st_corr)
+
+            # Apply projector according to Eq. 12 in [2]_
+            logger.info('    Projecting out %s tSSS components for %s-%s'
+                        % (V.shape[1], win[0] / raw_sss.info['sfreq'],
+                           win[1] / raw_sss.info['sfreq']))
+            this_data -= np.dot(np.dot(this_data, V), V.T)
+            raw_sss._data[meg_picks, win[0]:win[1]] = this_data
+
+    # Update info
+    raw_sss = _update_sss_info(raw_sss, origin, int_order, ext_order,
+                               len(good_chs))
+
+    return raw_sss
+
+
+def _check_finite(data):
+    """Helper to ensure data is finite"""
+    if not np.isfinite(data).all():
+        raise RuntimeError('data contains non-finite numbers')
+
+
+def _sph_harm(order, degree, az, pol):
+    """Evaluate point in specified multipolar moment. [1]_ Equation 4.
+
+    When using, pay close attention to inputs. Spherical harmonic notation for
+    order/degree, and theta/phi are both reversed in original SSS work compared
+    to many other sources. See mathworld.wolfram.com/SphericalHarmonic.html for
+    more discussion.
+
+    Note that scipy has ``scipy.special.sph_harm``, but that function is
+    too slow on old versions (< 0.15) and has a weird bug on newer versions.
+    At some point we should track it down and open a bug report...
+
+    Parameters
+    ----------
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    az : float
+        Azimuthal (longitudinal) spherical coordinate [0, 2*pi]. 0 is aligned
+        with x-axis.
+    pol : float
+        Polar (or colatitudinal) spherical coordinate [0, pi]. 0 is aligned
+        with z-axis.
+
+    Returns
+    -------
+    base : complex float
+        The spherical harmonic value at the specified azimuth and polar angles
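+
+    For example, ``_sph_harm(0, 0, az, pol)`` reduces to the constant
+    ``1 / sqrt(4 * pi)`` (about 0.282) for any valid angles.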
+    """
+    from scipy.special import lpmv
+
+    # Error checks
+    if np.abs(order) > degree:
+        raise ValueError('Absolute value of expansion coefficient must be <= '
+                         'degree')
+    # Ensure that polar and azimuth angles are arrays
+    az = np.asarray(az)
+    pol = np.asarray(pol)
+    if (az < -2 * np.pi).any() or (az > 2 * np.pi).any():
+        raise ValueError('Azimuth coords must lie in [-2*pi, 2*pi]')
+    if (pol < 0).any() or (pol > np.pi).any():
+        raise ValueError('Polar coords must lie in [0, pi]')
+
+    base = np.sqrt((2 * degree + 1) / (4 * np.pi) * factorial(degree - order) /
+                   factorial(degree + order)) * \
+        lpmv(order, degree, np.cos(pol)) * np.exp(1j * order * az)
+    return base
+
+
+def _sss_basis(origin, coils, int_order, ext_order):
+    """Compute SSS basis for given conditions.
+
+    Parameters
+    ----------
+    origin : ndarray, shape (3,)
+        Origin of the multipolar moment space in millimeters
+    coils : list
+        List of MEG coils. Each should contain coil information dict. All
+        position info must be in the same coordinate frame as 'origin'
+    int_order : int
+        Order of the internal multipolar moment space
+    ext_order : int
+        Order of the external multipolar moment space
+
+    Returns
+    -------
+    bases : tuple, len (2)
+        Internal and external basis sets as ndarrays with shape
+        (n_coils, n_mult_moments)
+    """
+    r_int_pts, ncoils, wcoils, counts = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(counts)), counts)
+    n_sens = len(counts)
+    n_bases = get_num_moments(int_order, ext_order)
+    # int_lens = np.insert(np.cumsum(counts), obj=0, values=0)
+
+    S_in = np.empty((n_sens, (int_order + 1) ** 2 - 1))
+    S_out = np.empty((n_sens, (ext_order + 1) ** 2 - 1))
+    S_in.fill(np.nan)
+    S_out.fill(np.nan)
+
+    # Set all magnetometers (with 'coil_class' == 1.0) to be scaled by 100
+    coil_scale = np.ones((len(coils)))
+    coil_scale[np.array([coil['coil_class'] == 1.0 for coil in coils])] = 100.
+
+    if n_bases > n_sens:
+        raise ValueError('Number of requested bases (%s) exceeds number of '
+                         'sensors (%s)' % (str(n_bases), str(n_sens)))
+
+    # Compute position vector between origin and coil integration pts
+    cvec_cart = r_int_pts - origin[np.newaxis, :]
+    # Convert points to spherical coordinates
+    cvec_sph = _cart_to_sph(cvec_cart)
+
+    # Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
+    for spc, g_func, exp_order in zip([S_in, S_out],
+                                      [_grad_in_components,
+                                       _grad_out_components],
+                                      [int_order, ext_order]):
+        for deg in range(1, exp_order + 1):
+            for order in range(-deg, deg + 1):
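+                # columns are indexed as deg ** 2 + deg + order - 1
+                # (degree-major ordering; the degree-0 term is excluded)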
+
+                # Compute gradient for all integration points
+                grads = -1 * g_func(deg, order, cvec_sph[:, 0], cvec_sph[:, 1],
+                                    cvec_sph[:, 2])
+
+                # Gradients dotted with integration point normals and weighted
+                all_grads = wcoils * np.einsum('ij,ij->i', grads, ncoils)
+
+                # For order and degree, sum over each sensor's integration pts
+                # for pt_i in range(0, len(int_lens) - 1):
+                #    int_pts_sum = \
+                #        np.sum(all_grads[int_lens[pt_i]:int_lens[pt_i + 1]])
+                #    spc[pt_i, deg ** 2 + deg + order - 1] = int_pts_sum
+                spc[:, deg ** 2 + deg + order - 1] = \
+                    np.bincount(bins, weights=all_grads, minlength=len(counts))
+
+        # Scale magnetometers
+        spc *= coil_scale[:, np.newaxis]
+
+    return S_in, S_out
+
+
+def _alegendre_deriv(degree, order, val):
+    """Compute the derivative of the associated Legendre polynomial at a value.
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    val : float
+        Value to evaluate the derivative at
+
+    Returns
+    -------
+    dPlm : float
+        Associated Legendre function derivative
+    """
+    from scipy.special import lpmv
+
+    C = 1
+    if order < 0:
+        order = abs(order)
+        C = (-1) ** order * factorial(degree - order) / factorial(degree +
+                                                                  order)
+    return C * (order * val * lpmv(order, degree, val) + (degree + order) *
+                (degree - order + 1) * np.sqrt(1 - val ** 2) *
+                lpmv(order - 1, degree, val)) / (1 - val ** 2)
+
+
+def _grad_in_components(degree, order, rad, az, pol):
+    """Compute gradient of internal component of V(r) spherical expansion.
+
+    Internal component has form: Ylm(pol, az) / (rad ** (degree + 1))
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    rad : ndarray, shape (n_samples,)
+        Array of radii
+    az : ndarray, shape (n_samples,)
+        Array of azimuthal (longitudinal) spherical coordinates [0, 2*pi]. 0 is
+        aligned with x-axis.
+    pol : ndarray, shape (n_samples,)
+        Array of polar (or colatitudinal) spherical coordinates [0, pi]. 0 is
+        aligned with z-axis.
+
+    Returns
+    -------
+    grads : ndarray, shape (n_samples, 3)
+        Gradient of the spherical harmonic at each point, expressed in
+        rectangular (Cartesian) coordinates
+    """
+    # Compute gradients for all spherical coordinates (Eq. 6)
+    g_rad = (-(degree + 1) / rad ** (degree + 2) *
+             _sph_harm(order, degree, az, pol))
+
+    g_az = (1 / (rad ** (degree + 2) * np.sin(pol)) * 1j * order *
+            _sph_harm(order, degree, az, pol))
+
+    g_pol = (1 / rad ** (degree + 2) *
+             np.sqrt((2 * degree + 1) * factorial(degree - order) /
+                     (4 * np.pi * factorial(degree + order))) *
+             np.sin(-pol) * _alegendre_deriv(degree, order, np.cos(pol)) *
+             np.exp(1j * order * az))
+
+    # Get real component of vectors, convert to cartesian coords, and return
+    real_grads = _get_real_grad(np.c_[g_rad, g_az, g_pol], order)
+    return _sph_to_cart_partials(np.c_[rad, az, pol], real_grads)
+
+
+def _grad_out_components(degree, order, rad, az, pol):
+    """Compute gradient of external component of V(r) spherical expansion.
+
+    External component has the form: Ylm(az, pol) * (rad ** degree)
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    rad : ndarray, shape (n_samples,)
+        Array of radii
+    az : ndarray, shape (n_samples,)
+        Array of azimuthal (longitudinal) spherical coordinates [0, 2*pi]. 0 is
+        aligned with x-axis.
+    pol : ndarray, shape (n_samples,)
+        Array of polar (or colatitudinal) spherical coordinates [0, pi]. 0 is
+        aligned with z-axis.
+
+    Returns
+    -------
+    grads : ndarray, shape (n_samples, 3)
+        Gradient of the spherical harmonic at each point, expressed in
+        rectangular (Cartesian) coordinates
+    """
+    # Compute gradients for all spherical coordinates (Eq. 7)
+    g_rad = degree * rad ** (degree - 1) * _sph_harm(order, degree, az, pol)
+
+    g_az = (rad ** (degree - 1) / np.sin(pol) * 1j * order *
+            _sph_harm(order, degree, az, pol))
+
+    g_pol = (rad ** (degree - 1) *
+             np.sqrt((2 * degree + 1) * factorial(degree - order) /
+                     (4 * np.pi * factorial(degree + order))) *
+             np.sin(-pol) * _alegendre_deriv(degree, order, np.cos(pol)) *
+             np.exp(1j * order * az))
+
+    # Get real component of vectors, convert to cartesian coords, and return
+    real_grads = _get_real_grad(np.c_[g_rad, g_az, g_pol], order)
+    return _sph_to_cart_partials(np.c_[rad, az, pol], real_grads)
+
+
+def _get_real_grad(grad_vec_raw, order):
+    """Helper function to convert gradient vector to to real basis functions.
+
+    Parameters
+    ----------
+    grad_vec_raw : ndarray, shape (n_gradients, 3)
+        Gradient array with columns for radius, azimuth, polar points
+    order : int
+        Order (usually 'm') of multipolar moment.
+
+    Returns
+    -------
+    grad_vec : ndarray, shape (n_gradients, 3)
+        Gradient vectors with only the real component
+    """
+
+    if order > 0:
+        grad_vec = np.sqrt(2) * np.real(grad_vec_raw)
+    elif order < 0:
+        grad_vec = np.sqrt(2) * np.imag(grad_vec_raw)
+    else:
+        grad_vec = grad_vec_raw
+
+    return np.real(grad_vec)
+
+
+def get_num_moments(int_order, ext_order):
+    """Compute total number of multipolar moments. Equivalent to [1]_ Eq. 32.
+
+    Parameters
+    ----------
+    int_order : int
+        Internal expansion order
+    ext_order : int
+        External expansion order
+
+    Returns
+    -------
+    M : int
+        Total number of multipolar moments
+    """
+
+    # TODO: Eventually, reuse code in field_interpolation
+
+    return int_order ** 2 + 2 * int_order + ext_order ** 2 + 2 * ext_order
+
+
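+# Worked example (values follow directly from the formula above): with the
+# common choices int_order=8 and ext_order=3, there are 8 ** 2 + 2 * 8 = 80
+# internal and 3 ** 2 + 2 * 3 = 15 external moments, so
+# get_num_moments(8, 3) == 95.
+
+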
+def _sph_to_cart_partials(sph_pts, sph_grads):
+    """Convert spherical partial derivatives to cartesian coords.
+
+    Note: Because we are dealing with partial derivatives, this is not a
+    static transformation; the transformation matrix itself depends on the
+    azimuth and polar coordinates.
+
+    See the 'Spherical coordinate system' section here:
+    wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
+
+    Parameters
+    ----------
+    sph_pts : ndarray, shape (n_points, 3)
+        Array containing spherical coordinates points (rad, azimuth, polar)
+    sph_grads : ndarray, shape (n_points, 3)
+        Array containing partial derivatives at each spherical coordinate
+
+    Returns
+    -------
+    cart_grads : ndarray, shape (n_points, 3)
+        Array containing partial derivatives in Cartesian coordinates (x, y, z)
+    """
+
+    c_as, s_as = np.cos(sph_pts[:, 1]), np.sin(sph_pts[:, 1])
+    c_ps, s_ps = np.cos(sph_pts[:, 2]), np.sin(sph_pts[:, 2])
+    trans = np.array([[c_as * s_ps, -s_as, c_as * c_ps],
+                      [s_as * s_ps, c_as, c_ps * s_as],
+                      [c_ps, np.zeros_like(c_as), -s_ps]])
+    cart_grads = np.einsum('ijk,kj->ki', trans, sph_grads)
+    return cart_grads
+
+
+def _cart_to_sph(cart_pts):
+    """Convert Cartesian coordinates to spherical coordinates.
+
+    Parameters
+    ----------
+    cart_pts : ndarray, shape (n_points, 3)
+        Array containing points in Cartesian coordinates (x, y, z)
+
+    Returns
+    -------
+    sph_pts : ndarray, shape (n_points, 3)
+        Array containing points in spherical coordinates (rad, azimuth, polar)
+    """
+
+    rad = np.sqrt(np.sum(cart_pts * cart_pts, axis=1))
+    az = np.arctan2(cart_pts[:, 1], cart_pts[:, 0])
+    pol = np.arccos(cart_pts[:, 2] / rad)
+
+    return np.c_[rad, az, pol]
+
+
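+# Quick sanity sketch for the conventions used above (hypothetical inputs):
+# azimuth is measured from +x in the x-y plane and polar from the +z axis.
+def _example_cart_to_sph():
+    """Check _cart_to_sph on two axis-aligned points."""
+    sph = _cart_to_sph(np.array([[1., 0., 0.],
+                                 [0., 0., 2.]]))
+    expected = np.array([[1., 0., np.pi / 2.],  # on +x: pol = pi / 2
+                         [2., 0., 0.]])         # on +z: pol = 0
+    np.testing.assert_allclose(sph, expected, atol=1e-12)
+
+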
+def _update_sss_info(raw, origin, int_order, ext_order, nsens):
+    """Helper function to update info after Maxwell filtering.
+
+    Parameters
+    ----------
+    raw : instance of mne.io.Raw
+        Data to be filtered
+    origin : array-like, shape (3,)
+        Origin of internal and external multipolar moment space in head coords
+        and in millimeters
+    int_order : int
+        Order of internal component of spherical expansion
+    ext_order : int
+        Order of external component of spherical expansion
+    nsens : int
+        Number of sensors
+
+    Returns
+    -------
+    raw : mne.io.Raw
+        raw file object with raw.info modified
+    """
+    from .. import __version__
+    # TODO: Continue to fill out bookkeeping info as additional features
+    # are added (fine calibration, cross-talk calibration, etc.)
+    int_moments = get_num_moments(int_order, 0)
+    ext_moments = get_num_moments(0, ext_order)
+
+    raw.info['maxshield'] = False
+    sss_info_dict = dict(in_order=int_order, out_order=ext_order,
+                         nsens=nsens, origin=origin.astype('float32'),
+                         n_int_moments=int_moments,
+                         frame=raw.info['dev_head_t']['to'],
+                         components=np.ones(int_moments +
+                                            ext_moments).astype('int32'))
+
+    max_info_dict = dict(max_st={}, sss_cal={}, sss_ctc={},
+                         sss_info=sss_info_dict)
+
+    block_id = _generate_meas_id()
+    proc_block = dict(max_info=max_info_dict, block_id=block_id,
+                      creator='mne-python v%s' % __version__,
+                      date=_date_now(), experimentor='')
+
+    # Insert information in raw.info['proc_history']
+    raw.info['proc_history'] = [proc_block] + raw.info.get('proc_history', [])
+    return raw
+
+
+check_disable = dict()  # not available on really old versions of SciPy
+if 'check_finite' in inspect.getargspec(linalg.svd)[0]:
+    check_disable['check_finite'] = False
+
+
+def _orth_overwrite(A):
+    """Helper to create a slightly more efficient 'orth'"""
+    # adapted from scipy/linalg/decomp_svd.py
+    u, s = linalg.svd(A, overwrite_a=True, full_matrices=False,
+                      **check_disable)[:2]
+    M, N = A.shape
+    eps = np.finfo(float).eps
+    tol = max(M, N) * np.amax(s) * eps
+    num = np.sum(s > tol, dtype=int)
+    return u[:, :num]
+
+
+def _overlap_projector(data_int, data_res, corr):
+    """Calculate projector for removal of subspace intersection in tSSS"""
+    # corr necessary to deal with noise when finding identical signal
+    # directions in the subspace. See the end of the Results section in [2]_
+
+    # Note that the procedure here is an updated version of [2]_ (and used in
+    # Elekta's tSSS) that uses residuals instead of internal/external spaces
+    # directly. This provides more degrees of freedom when analyzing for
+    # intersections between internal and external spaces.
+
+    # Normalize data, then compute orth to get temporal bases. Matrices
+    # must have shape (n_samps x effective_rank) when passed into svd
+    # computation
+    Q_int = linalg.qr(_orth_overwrite((data_int / np.linalg.norm(data_int)).T),
+                      overwrite_a=True, mode='economic', **check_disable)[0].T
+    Q_res = linalg.qr(_orth_overwrite((data_res / np.linalg.norm(data_res)).T),
+                      overwrite_a=True, mode='economic', **check_disable)[0]
+    assert data_int.shape[1] > 0
+    C_mat = np.dot(Q_int, Q_res)
+    del Q_int
+
+    # Compute angles between subspace and which bases to keep
+    S_intersect, Vh_intersect = linalg.svd(C_mat, overwrite_a=True,
+                                           full_matrices=False,
+                                           **check_disable)[1:]
+    del C_mat
+    intersect_mask = (S_intersect >= corr)
+    del S_intersect
+
+    # Compute projection operator as (I-LL_T) Eq. 12 in [2]_
+    # V_principal should be shape (n_time_pts x n_retained_inds)
+    Vh_intersect = Vh_intersect[intersect_mask].T
+    V_principal = np.dot(Q_res, Vh_intersect)
+    return V_principal
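+
+
+# A sketch of how the temporal projector returned above might be applied
+# (names and shapes here are illustrative, not the library's API): with data
+# shaped (n_channels, n_times) and V_principal shaped (n_times, n_retained),
+# the intersecting temporal subspace is removed by subtracting the
+# projection onto its orthonormal basis.
+def _example_apply_overlap_projector(data, V_principal):
+    """Remove the projection of data onto the tSSS intersection basis."""
+    return data - np.dot(np.dot(data, V_principal), V_principal.T)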
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/peak_finder.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/peak_finder.py
new file mode 100644
index 0000000..a2e78fb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/peak_finder.py
@@ -0,0 +1,168 @@
+import numpy as np
+from math import ceil
+
+from ..utils import logger, verbose
+
+
+@verbose
+def peak_finder(x0, thresh=None, extrema=1, verbose=None):
+    """Noise tolerant fast peak finding algorithm
+
+    Parameters
+    ----------
+    x0 : 1d array
+        A real vector from which the maxima will be found (required).
+    thresh : float
+        The amount above surrounding data for a peak to be
+        identified (default = (max(x0)-min(x0))/4). Larger values mean
+        the algorithm is more selective in finding peaks.
+    extrema : {-1, 1}
+        1 if maxima are desired, -1 if minima are desired
+        (default = maxima, 1).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    peak_loc : array
+        The indices of the identified peaks in x0
+    peak_mag : array
+        The magnitude of the identified peaks
+
+    Note
+    ----
+    If repeated values are found, the first occurrence is identified as the
+    peak. Converted from the original Matlab code by
+    Nathanael C. Yoder (ncyoder at purdue.edu).
+
+    Example
+    -------
+    >>> import numpy as np
+    >>> t = np.arange(0, 10.0001, 0.0001)
+    >>> x = (12 * np.sin(10 * 2 * np.pi * t) -
+    ...      3 * np.sin(.1 * 2 * np.pi * t) + np.random.randn(t.size))
+    >>> x[1249:1255] = x.max()
+    >>> peak_locs, peak_mags = peak_finder(x)  # doctest: +SKIP
+    """
+
+    x0 = np.asanyarray(x0)
+
+    if x0.ndim >= 2:
+        raise ValueError('The input data must be a 1D vector')
+
+    s = x0.size
+
+    if thresh is None:
+        thresh = (np.max(x0) - np.min(x0)) / 4
+
+    assert extrema in [-1, 1]
+
+    if extrema == -1:
+        x0 = extrema * x0  # Make it so we are finding maxima regardless
+
+    dx0 = np.diff(x0)  # Find derivative
+    # This is so we find the first of repeated values
+    dx0[dx0 == 0] = -np.finfo(float).eps
+    # Find where the derivative changes sign
+    ind = np.where(dx0[:-1:] * dx0[1::] < 0)[0] + 1
+
+    # Include endpoints in potential peaks and valleys
+    x = np.concatenate((x0[:1], x0[ind], x0[-1:]))
+    ind = np.concatenate(([0], ind, [s - 1]))
+
+    #  x only has the peaks, valleys, and endpoints
+    length = x.size
+    min_mag = np.min(x)
+
+    if length > 2:  # Function with peaks and valleys
+
+        # Set initial parameters for loop
+        temp_mag = min_mag
+        found_peak = False
+        left_min = min_mag
+
+        # Deal with the first point a little differently since we tacked it
+        # on. Calculate the sign of the derivative: since we tacked the
+        # first point on, it does not necessarily alternate like the rest.
+        signDx = np.sign(np.diff(x[:3]))
+        if signDx[0] <= 0:  # The first point is larger or equal to the second
+            ii = -1
+            if signDx[0] == signDx[1]:  # Want alternating signs
+                x = np.concatenate((x[:1], x[2:]))
+                ind = np.concatenate((ind[:1], ind[2:]))
+                length -= 1
+
+        else:  # First point is smaller than the second
+            ii = 0
+            if signDx[0] == signDx[1]:  # Want alternating signs
+                x = x[1:]
+                ind = ind[1:]
+                length -= 1
+
+        # Preallocate max number of maxima
+        maxPeaks = int(ceil(length / 2.0))
+        peak_loc = np.zeros(maxPeaks, dtype=np.int)
+        peak_mag = np.zeros(maxPeaks)
+        c_ind = 0
+        # Loop through extrema which should be peaks and then valleys
+        while ii < (length - 1):
+            ii += 1  # This is a peak
+            # Reset peak finding if we had a peak and the next peak is bigger
+            # than the last or the left min was small enough to reset.
+            if found_peak and ((x[ii] > peak_mag[c_ind - 1]) or
+                               (left_min < peak_mag[c_ind - 1] - thresh)):
+                temp_mag = min_mag
+                found_peak = False
+
+            # Make sure we don't iterate past the length of our vector
+            if ii == length - 1:
+                break  # We assign the last point differently out of the loop
+
+            # Found new peak that was larger than temp_mag and at least
+            # thresh larger than the minimum to its left.
+            if (x[ii] > temp_mag) and (x[ii] > left_min + thresh):
+                temp_loc = ii
+                temp_mag = x[ii]
+
+            ii += 1  # Move onto the valley
+            # Come down at least thresh from peak
+            if not found_peak and (temp_mag > (thresh + x[ii])):
+                found_peak = True  # We have found a peak
+                left_min = x[ii]
+                peak_loc[c_ind] = temp_loc  # Add peak to index
+                peak_mag[c_ind] = temp_mag
+                c_ind += 1
+            elif x[ii] < left_min:  # New left minima
+                left_min = x[ii]
+
+        # Check end point
+        if (x[-1] > temp_mag) and (x[-1] > (left_min + thresh)):
+            peak_loc[c_ind] = length - 1
+            peak_mag[c_ind] = x[-1]
+            c_ind += 1
+        elif not found_peak and temp_mag > min_mag:
+            # Check if we still need to add the last point
+            peak_loc[c_ind] = temp_loc
+            peak_mag[c_ind] = temp_mag
+            c_ind += 1
+
+        # Create output
+        peak_inds = ind[peak_loc[:c_ind]]
+        peak_mags = peak_mag[:c_ind]
+    else:  # This is a monotone function where an endpoint is the only peak
+        x_ind = np.argmax(x)
+        peak_mags = x[x_ind:x_ind + 1]
+        if peak_mags[0] > (min_mag + thresh):
+            peak_inds = ind[x_ind:x_ind + 1]
+        else:
+            peak_mags = np.array([])
+            peak_inds = np.array([], dtype=int)
+
+    # Change the sign of the magnitudes back if we were finding minima
+    if extrema < 0:
+        peak_mags *= -1.0
+
+    # Report if no significant peaks were found
+    if len(peak_inds) == 0:
+        logger.info('No significant peaks found')
+
+    return peak_inds, peak_mags
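+
+
+# A short usage sketch (synthetic signal; the threshold is illustrative):
+def _example_peak_finder():
+    """Find the maxima of a 5 Hz sinusoid sampled over one second."""
+    t = np.linspace(0., 1., 1000)
+    x = np.sin(2 * np.pi * 5 * t)
+    peak_locs, peak_mags = peak_finder(x, thresh=0.5)
+    # Roughly five maxima, each with magnitude close to 1
+    return peak_locs, peak_mags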
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ssp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ssp.py
new file mode 100644
index 0000000..63fac16
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/ssp.py
@@ -0,0 +1,396 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+from warnings import warn
+import numpy as np
+
+from .. import Epochs, compute_proj_evoked, compute_proj_epochs
+from ..utils import logger, verbose
+from .. import pick_types
+from ..io import make_eeg_average_ref_proj
+from .ecg import find_ecg_events
+from .eog import find_eog_events
+
+
+def _safe_del_key(dict_, key):
+    """ Aux function
+
+    Use this function when preparing rejection parameters
+    instead of directly deleting keys.
+    """
+    if key in dict_:
+        del dict_[key]
+
+
+@verbose
+def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
+                      n_grad, n_mag, n_eeg, l_freq, h_freq,
+                      average, filter_length, n_jobs, ch_name,
+                      reject, flat, bads, avg_ref, no_proj, event_id,
+                      exg_l_freq, exg_h_freq, tstart, qrs_threshold,
+                      filter_method, iir_params=None, verbose=None):
+    """Compute SSP/PCA projections for ECG or EOG artifacts
+
+    .. note:: raw data must be preloaded.
+
+    Parameters
+    ----------
+    mode : string ('ECG', or 'EOG')
+        What type of events to detect.
+    raw : mne.io.Raw
+        Raw input file.
+    raw_event : mne.io.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    ch_name : string (or None)
+        Channel to use for ECG event detection.
+    reject : dict | None
+        Epoch rejection configuration (see Epochs).
+    flat : dict | None
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    exg_l_freq : float
+        Low pass frequency applied for filtering EXG channel.
+    exg_h_freq : float
+        High pass frequency applied for filtering EXG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    qrs_threshold : float | str
+        Between 0 and 1. qrs detection threshold. Can also be "auto" to
+        automatically choose the threshold that generates a reasonable
+        number of heartbeats (40-160 beats / min). Only for ECG.
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    events : ndarray
+        Detected events.
+    """
+    if not raw.preload:
+        raise ValueError('raw needs to be preloaded, '
+                         'use preload=True in constructor')
+
+    if no_proj:
+        projs = []
+    else:
+        projs = cp.deepcopy(raw.info['projs'])
+        logger.info('Including %d SSP projectors from raw file'
+                    % len(projs))
+
+    if avg_ref:
+        eeg_proj = make_eeg_average_ref_proj(raw.info)
+        projs.append(eeg_proj)
+
+    if raw_event is None:
+        raw_event = raw
+
+    if mode == 'ECG':
+        logger.info('Running ECG SSP computation')
+        events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
+                                       event_id=event_id, l_freq=exg_l_freq,
+                                       h_freq=exg_h_freq, tstart=tstart,
+                                       qrs_threshold=qrs_threshold,
+                                       filter_length=filter_length)
+    elif mode == 'EOG':
+        logger.info('Running EOG SSP computation')
+        events = find_eog_events(raw_event, event_id=event_id,
+                                 l_freq=exg_l_freq, h_freq=exg_h_freq,
+                                 filter_length=filter_length, ch_name=ch_name,
+                                 tstart=tstart)
+    else:
+        raise ValueError("mode must be 'ECG' or 'EOG'")
+
+    # Check to make sure we actually got at least one usable event
+    if events.shape[0] < 1:
+        warn('No %s events found, returning None for projs' % mode)
+        return None, events
+
+    logger.info('Computing projector')
+    my_info = cp.deepcopy(raw.info)
+    my_info['bads'] += bads
+
+    # Handle rejection parameters
+    if reject is not None:  # make sure they didn't pass None
+        if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(reject, 'grad')
+        if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(reject, 'mag')
+        if len(pick_types(my_info, meg=False, eeg=True, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(reject, 'eeg')
+        if len(pick_types(my_info, meg=False, eeg=False, eog=True,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(reject, 'eog')
+    if flat is not None:  # make sure they didn't pass None
+        if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(flat, 'grad')
+        if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(flat, 'mag')
+        if len(pick_types(my_info, meg=False, eeg=True, eog=False,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(flat, 'eeg')
+        if len(pick_types(my_info, meg=False, eeg=False, eog=True,
+                          ref_meg=False, exclude='bads')) == 0:
+            _safe_del_key(flat, 'eog')
+
+    # exclude bad channels from projection
+    picks = pick_types(my_info, meg=True, eeg=True, eog=True, ref_meg=False,
+                       exclude='bads')
+    raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
+               n_jobs=n_jobs, method=filter_method, iir_params=iir_params)
+
+    epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
+                    picks=picks, reject=reject, flat=flat, proj=True)
+
+    epochs.drop_bad_epochs()
+    if epochs.events.shape[0] < 1:
+        warn('No good epochs found, returning None for projs')
+        return None, events
+
+    if average:
+        evoked = epochs.average()
+        ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
+                                       n_eeg=n_eeg)
+    else:
+        ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
+                                       n_eeg=n_eeg, n_jobs=n_jobs)
+
+    for p in ev_projs:
+        p['desc'] = mode + "-" + p['desc']
+
+    projs.extend(ev_projs)
+
+    logger.info('Done.')
+
+    return projs, events
+
+
+@verbose
+def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
+                     n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
+                     average=False, filter_length='10s', n_jobs=1,
+                     ch_name=None, reject=dict(grad=2000e-13, mag=3000e-15,
+                                               eeg=50e-6, eog=250e-6),
+                     flat=None, bads=[], avg_ref=False,
+                     no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
+                     tstart=0., qrs_threshold='auto', filter_method='fft',
+                     iir_params=None, copy=True, verbose=None):
+    """Compute SSP/PCA projections for ECG artifacts
+
+    .. note:: raw data must be preloaded.
+
+    Parameters
+    ----------
+    raw : mne.io.Raw
+        Raw input file.
+    raw_event : mne.io.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    ch_name : string (or None)
+        Channel to use for ECG detection (Required if no ECG found).
+    reject : dict | None
+        Epoch rejection configuration (see Epochs).
+    flat : dict | None
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    ecg_l_freq : float
+        Low pass frequency applied for filtering ECG channel.
+    ecg_h_freq : float
+        High pass frequency applied for filtering ECG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    qrs_threshold : float | str
+        Between 0 and 1. qrs detection threshold. Can also be "auto" to
+        automatically choose the threshold that generates a reasonable
+        number of heartbeats (40-160 beats / min).
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    copy : bool
+        If False, filtering raw data is done in place. Defaults to True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    ecg_events : ndarray
+        Detected ECG events.
+    """
+    if copy is True:
+        raw = raw.copy()
+
+    projs, ecg_events = _compute_exg_proj('ECG', raw, raw_event, tmin, tmax,
+                                          n_grad, n_mag, n_eeg, l_freq, h_freq,
+                                          average, filter_length, n_jobs,
+                                          ch_name, reject, flat, bads, avg_ref,
+                                          no_proj, event_id, ecg_l_freq,
+                                          ecg_h_freq, tstart, qrs_threshold,
+                                          filter_method, iir_params)
+
+    return projs, ecg_events
+
+
+@verbose
+def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
+                     n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
+                     average=False, filter_length='10s', n_jobs=1,
+                     reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
+                                 eog=np.inf), flat=None, bads=[],
+                     avg_ref=False, no_proj=False, event_id=998, eog_l_freq=1,
+                     eog_h_freq=10, tstart=0., filter_method='fft',
+                     iir_params=None, ch_name=None, copy=True, verbose=None):
+    """Compute SSP/PCA projections for EOG artifacts
+
+    .. note:: raw data must be preloaded.
+
+    Parameters
+    ----------
+    raw : mne.io.Raw
+        Raw input file.
+    raw_event : mne.io.Raw or None
+        Raw file to use for event detection (if None, raw is used).
+    tmin : float
+        Time before event in seconds.
+    tmax : float
+        Time after event in seconds.
+    n_grad : int
+        Number of SSP vectors for gradiometers.
+    n_mag : int
+        Number of SSP vectors for magnetometers.
+    n_eeg : int
+        Number of SSP vectors for EEG.
+    l_freq : float | None
+        Filter low cut-off frequency in Hz.
+    h_freq : float | None
+        Filter high cut-off frequency in Hz.
+    average : bool
+        Compute SSP after averaging.
+    filter_length : str | int | None
+        Number of taps to use for filtering.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    reject : dict | None
+        Epoch rejection configuration (see Epochs).
+    flat : dict | None
+        Epoch flat configuration (see Epochs).
+    bads : list
+        List with (additional) bad channels.
+    avg_ref : bool
+        Add EEG average reference proj.
+    no_proj : bool
+        Exclude the SSP projectors currently in the fiff file.
+    event_id : int
+        ID to use for events.
+    eog_l_freq : float
+        Low pass frequency applied for filtering EOG channel.
+    eog_h_freq : float
+        High pass frequency applied for filtering EOG channel.
+    tstart : float
+        Start artifact detection after tstart seconds.
+    filter_method : str
+        Method for filtering ('iir' or 'fft').
+    iir_params : dict | None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    ch_name : str | None
+        If not None, specify EOG channel name.
+    copy : bool
+        If False, filtering raw data is done in place. Defaults to True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    proj : list
+        Computed SSP projectors.
+    eog_events : ndarray
+        Detected EOG events.
+    """
+    if copy is True:
+        raw = raw.copy()
+    projs, eog_events = _compute_exg_proj('EOG', raw, raw_event, tmin, tmax,
+                                          n_grad, n_mag, n_eeg, l_freq, h_freq,
+                                          average, filter_length, n_jobs,
+                                          ch_name, reject, flat, bads, avg_ref,
+                                          no_proj, event_id, eog_l_freq,
+                                          eog_h_freq, tstart,
+                                          qrs_threshold='auto',
+                                          filter_method=filter_method,
+                                          iir_params=iir_params)
+
+    return projs, eog_events
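+
+
+# Typical usage sketch (the file name is a placeholder): compute ECG and EOG
+# projectors from a preloaded raw file and attach them to it.
+def _example_compute_exg_projs():
+    """Compute and apply ECG/EOG SSP projectors (illustrative only)."""
+    from ..io import Raw
+    raw = Raw('sample_raw.fif', preload=True)  # hypothetical file
+    ecg_projs, _ = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0)
+    eog_projs, _ = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=0)
+    raw.add_proj(ecg_projs + eog_projs)
+    return raw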
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/stim.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/stim.py
new file mode 100644
index 0000000..06fd200
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/stim.py
@@ -0,0 +1,130 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from ..evoked import Evoked
+from ..epochs import Epochs
+from ..io import Raw
+from ..event import find_events
+
+from ..io.pick import pick_channels
+
+
+def _get_window(start, end):
+    """Return window which has length as much as parameter start - end"""
+    from scipy.signal import hann
+    window = 1 - np.r_[hann(4)[:2],
+                       np.ones(np.abs(end - start) - 4),
+                       hann(4)[-2:]].T
+    return window
+
+
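+# The taper above is one at the edges and zero inside, so multiplying it
+# into the data suppresses the artifact span while blending smoothly at the
+# boundaries. A quick look at its shape (illustrative length):
+def _example_stim_window():
+    """Show the (1 - hann) taper for an 8-sample span."""
+    window = _get_window(0, 8)
+    # window == [1.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.25, 1.0]
+    return window
+
+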
+def _check_preload(inst):
+    """Check if inst.preload is False. If it is False, raising error"""
+    if inst.preload is False:
+        raise RuntimeError('Modifying data of Instance is only supported '
+                           'when preloading is used. Use preload=True '
+                           '(or string) in the constructor.')
+
+
+def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
+    """Modify original data by using parameter data"""
+    from scipy.interpolate import interp1d
+    if mode == 'linear':
+        x = np.array([first_samp, last_samp])
+        f = interp1d(x, data[:, (first_samp, last_samp)])
+        xnew = np.arange(first_samp, last_samp)
+        interp_data = f(xnew)
+        data[picks, first_samp:last_samp] = interp_data
+    if mode == 'window':
+        data[picks, first_samp:last_samp] = \
+            data[picks, first_samp:last_samp] * window[np.newaxis, :]
+
+
+def fix_stim_artifact(inst, events=None, event_id=None, tmin=0.,
+                      tmax=0.01, mode='linear', stim_channel=None, copy=False):
+    """Eliminate stimulation's artifacts from instance
+
+    Parameters
+    ----------
+    inst : instance of Raw or Epochs or Evoked
+        The data.
+    events : array, shape (n_events, 3)
+        The list of events. Required only when inst is Raw.
+    event_id : int
+        The id of the events generating the stimulation artifacts.
+        If None, read all events. Required only when inst is Raw.
+    tmin : float
+        Start time of the interpolation window in seconds.
+    tmax : float
+        End time of the interpolation window in seconds.
+    mode : 'linear' | 'window'
+        Way to fill the artifacted time interval.
+        'linear' does linear interpolation
+        'window' applies a (1 - hanning) window.
+    stim_channel : str | None
+        Stim channel to use.
+    copy : bool
+        If True, data will be copied. Else data may be modified in place.
+
+    Returns
+    -------
+    inst : instance of Raw or Evoked or Epochs
+        Instance with modified data
+    """
+    if mode not in ('linear', 'window'):
+        raise ValueError("mode has to be 'linear' or 'window' (got %s)" % mode)
+
+    if copy:
+        inst = inst.copy()
+    s_start = int(np.ceil(inst.info['sfreq'] * tmin))
+    s_end = int(np.ceil(inst.info['sfreq'] * tmax))
+    if (mode == "window") and (s_end - s_start) < 4:
+        raise ValueError('Time range is too short. Use a larger interval '
+                         'or set mode to "linear".')
+    window = None
+    if mode == 'window':
+        window = _get_window(s_start, s_end)
+    ch_names = inst.info['ch_names']
+    picks = pick_channels(ch_names, ch_names)
+
+    if isinstance(inst, Raw):
+        _check_preload(inst)
+        if events is None:
+            events = find_events(inst, stim_channel=stim_channel)
+        if len(events) == 0:
+            raise ValueError('No events are found')
+        if event_id is None:
+            events_sel = np.arange(len(events))
+        else:
+            events_sel = (events[:, 2] == event_id)
+        event_start = events[events_sel, 0]
+        data = inst._data
+        for event_idx in event_start:
+            first_samp = int(event_idx) - inst.first_samp + s_start
+            last_samp = int(event_idx) - inst.first_samp + s_end
+            _fix_artifact(data, window, picks, first_samp, last_samp, mode)
+
+    elif isinstance(inst, Epochs):
+        _check_preload(inst)
+        if inst.reject is not None:
+            raise RuntimeError('Reject is already applied. Use reject=None '
+                               'in the constructor.')
+        e_start = int(np.ceil(inst.info['sfreq'] * inst.tmin))
+        first_samp = s_start - e_start
+        last_samp = s_end - e_start
+        data = inst._data
+        for epoch in data:
+            _fix_artifact(epoch, window, picks, first_samp, last_samp, mode)
+
+    elif isinstance(inst, Evoked):
+        first_samp = s_start - inst.first
+        last_samp = s_end - inst.first
+        data = inst.data
+        _fix_artifact(data, window, picks, first_samp, last_samp, mode)
+
+    else:
+        raise TypeError('Not a Raw or Epochs or Evoked (got %s).' % type(inst))
+
+    return inst
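+
+
+# Usage sketch (the stim channel name and event id are placeholders):
+def _example_fix_stim_artifact(raw):
+    """Interpolate over a 10 ms window after each stimulus onset."""
+    events = find_events(raw, stim_channel='STI 014')  # hypothetical channel
+    return fix_stim_artifact(raw, events=events, event_id=1,
+                             tmin=0., tmax=0.01, mode='linear')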
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ctps.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ctps.py
new file mode 100644
index 0000000..c562775
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ctps.py
@@ -0,0 +1,84 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD 3 clause
+
+import numpy as np
+from mne.time_frequency import morlet
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_array_equal
+from mne.preprocessing.ctps_ import (ctps, _prob_kuiper,
+                                     _compute_normalized_phase)
+
+###############################################################################
+# Generate testing signal
+
+tmin = -0.3
+sfreq = 1000.  # Hz
+tstep = 1. / sfreq
+n_samples = 600
+times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
+
+# Generate time series from Morlet wavelet
+single_trial = np.zeros((1, len(times)))
+Ws = morlet(sfreq, [3], n_cycles=[1])
+
+single_trial[0][:len(Ws[0])] = np.real(Ws[0])
+roll_to = 300 - 265  # shift data to center of time window
+single_trial = np.roll(single_trial, roll_to)
+rng = np.random.RandomState(42)
+
+
+def get_data(n_trials, j_extent):
+    """Generate ground truth and testing data"""
+    ground_truth = np.tile(single_trial, n_trials)
+    my_shape = n_trials, 1, 600
+    random_data = rng.random_sample(my_shape)
+    rand_ints = rng.random_integers(-j_extent, j_extent, n_trials)
+    jittered_data = np.array([np.roll(single_trial, i) for i in rand_ints])
+    data = np.concatenate([ground_truth.reshape(my_shape),
+                           jittered_data.reshape(my_shape),
+                           random_data.reshape(my_shape)], 1)
+
+    assert_true(data.shape == (n_trials, 3, 600))
+    return data
+
+# vary extent of jittering --> creates phaselocks at the borders if
+# 2 * extent != n_samples
+iter_test_ctps = enumerate(zip([400, 400], [150, 300], [0.6, 0.2]))
+
+
+def test_ctps():
+    """ Test basic ctps functionality
+    """
+    for ii, (n_trials, j_extent, pk_max) in iter_test_ctps:
+        data = get_data(n_trials, j_extent)
+        ks_dyn, pk_dyn, phase_trial = ctps(data)
+        data2 = _compute_normalized_phase(data)
+        ks_dyn2, pk_dyn2, phase_trial2 = ctps(data2, is_raw=False)
+        for a, b in zip([ks_dyn, pk_dyn, phase_trial],
+                        [ks_dyn2, pk_dyn2, data2]):
+            assert_array_equal(a, b)
+            assert_true(a.min() >= 0)
+            assert_true(a.max() <= 1)
+            assert_true(b.min() >= 0)
+            assert_true(b.max() <= 1)
+
+        # test for normalization
+        assert_true((pk_dyn.min() > 0.0) or (pk_dyn.max() < 1.0))
+        # test shapes
+        assert_true(phase_trial.shape == data.shape)
+        assert_true(pk_dyn.shape == data.shape[1:])
+        # test ground_truth + random + jittered case
+        assert_true(pk_dyn[0].max() == 1.0)
+        assert_true(len(np.unique(pk_dyn[0])) == 1.0)
+        assert_true(pk_dyn[1].max() < pk_max)
+        assert_true(pk_dyn[2].max() > 0.3)
+        if ii < 1:
+            assert_raises(ValueError, ctps,
+                          data[:, :, :, None])
+
+    assert_true(_prob_kuiper(1.0, 400) == 1.0)
+    # test vectorization
+    assert_array_equal(_prob_kuiper(np.array([1.0, 1.0]), 400),
+                       np.array([_prob_kuiper(1.0, 400)] * 2))
+    assert_true(_prob_kuiper(0.1, 400) < 0.1)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ecg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ecg.py
new file mode 100644
index 0000000..e034227
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ecg.py
@@ -0,0 +1,24 @@
+import os.path as op
+
+from nose.tools import assert_true, assert_equal
+
+from mne.io import Raw
+from mne.preprocessing.ecg import find_ecg_events, create_ecg_epochs
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+proj_fname = op.join(data_path, 'test-proj.fif')
+
+
+def test_find_ecg():
+    """Test find ECG peaks"""
+    raw = Raw(raw_fname)
+    events, ch_ECG, average_pulse = find_ecg_events(raw, event_id=999,
+                                                    ch_name='MEG 1531')
+    n_events = len(events)
+    _, times = raw[0, :]
+    assert_true(55 < average_pulse < 60)
+
+    ecg_epochs = create_ecg_epochs(raw, ch_name='MEG 1531')
+    assert_equal(len(ecg_epochs.events), n_events)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eeglab_infomax.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eeglab_infomax.py
new file mode 100644
index 0000000..99ef5af
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eeglab_infomax.py
@@ -0,0 +1,204 @@
+import numpy as np
+
+from scipy.linalg import svd
+
+from mne.io import Raw
+from mne import pick_types
+
+import scipy.io as sio
+from scipy.linalg import pinv
+from mne.preprocessing.infomax_ import infomax
+from numpy.testing import assert_almost_equal
+from mne.utils import random_permutation
+from mne.datasets import testing
+import os.path as op
+
+base_dir = op.join(op.dirname(__file__), 'data')
+
+
+def generate_data_for_comparing_against_eeglab_infomax(ch_type, random_state):
+
+    data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+    raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+
+    raw = Raw(raw_fname, preload=True)
+
+    if ch_type == 'eeg':
+        picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    else:
+        picks = pick_types(raw.info, meg=ch_type,
+                           eeg=False, exclude='bads')
+
+    # select a small number of channels for the test
+    number_of_channels_to_use = 5
+    idx_perm = random_permutation(picks.shape[0], random_state)
+    picks = picks[idx_perm[:number_of_channels_to_use]]
+
+    raw.filter(1, 45, n_jobs=2)
+    X = raw[picks, :][0][:, ::20]
+
+    # Subtract the mean
+    mean_X = X.mean(axis=1)
+    X -= mean_X[:, None]
+
+    # pre_whitening: z-score
+    X /= np.std(X)
+
+    T = X.shape[1]
+    cov_X = np.dot(X, X.T) / T
+
+    # Let's whiten the data
+    U, D, _ = svd(cov_X)
+    W = np.dot(U, U.T / np.sqrt(D)[:, None])
+    Y = np.dot(W, X)
+
+    return Y
+
+
+ at testing.requires_testing_data
+def test_mne_python_vs_eeglab():
+    """ Test eeglab vs mne_python infomax code.
+    """
+    random_state = 42
+
+    methods = ['infomax', 'infomax', 'extended_infomax', 'extended_infomax']
+    list_ch_types = ['eeg', 'mag', 'eeg', 'mag']
+
+    for method, ch_type in zip(methods, list_ch_types):
+
+        if method == 'infomax':
+            if ch_type == 'eeg':
+                eeglab_results_file = 'eeglab_infomax_results_eeg_data.mat'
+            elif ch_type == 'mag':
+                eeglab_results_file = 'eeglab_infomax_results_meg_data.mat'
+
+        elif method == 'extended_infomax':
+
+            if ch_type == 'eeg':
+                eeglab_results_file = ('eeglab_extended_infomax_results_eeg_'
+                                       'data.mat')
+            elif ch_type == 'mag':
+                eeglab_results_file = ('eeglab_extended_infomax_results_meg_'
+                                       'data.mat')
+
+        Y = generate_data_for_comparing_against_eeglab_infomax(ch_type,
+                                                               random_state)
+        N = Y.shape[0]
+        T = Y.shape[1]
+
+        # For comparison against eeglab, make sure the following
+        # parameters have the same value in mne_python and eeglab:
+        #
+        # - starting point
+        # - random state
+        # - learning rate
+        # - block size
+        # - blowup parameter
+        # - blowup_fac parameter
+        # - tolerance for stopping the algorithm
+        # - number of iterations
+        # - anneal_step parameter
+        #
+        # Notes:
+        # * By default, eeglab whitens the data using the "sphering
+        #   transform" instead of pca. The mne_python infomax code does not
+        #   whiten the data. To make sure both mne_python and eeglab start
+        #   from the same point (i.e., the same matrix), we need to whiten
+        #   the data outside and pass the whitened data to mne_python and
+        #   eeglab. Finally, we need to tell eeglab that the input data is
+        #   already whitened; this can be done by calling eeglab with the
+        #   following syntax:
+        #
+        #   % Run infomax
+        #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
+        #       runica( Y, 'sphering', 'none');
+        #
+        #   % Run extended infomax
+        #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y]  = ...
+        #       runica( Y, 'sphering', 'none', 'extended', 1);
+        #
+        #   By calling eeglab using the former code, we are using its default
+        #   parameters, which are specified below in the section
+        #   "EEGLAB default parameters".
+        #
+        # * eeglab does not expose a parameter for fixing the random state.
+        #   Therefore, to accomplish this, we need to edit the runica.m
+        #   file located at /path_to_eeglab/functions/sigprocfunc/runica.m
+        #
+        #   i) Comment the line related with the random number generator
+        #      (line 812).
+        #   ii) Then, add the following line just below line 812:
+        #       rng(42); %use 42 as random seed.
+        #
+        # * eeglab does not have the parameter "n_small_angle",
+        #   so we need to disable it for making a fair comparison.
+        #
+        # * Finally, we need to take the unmixing matrix estimated by the
+        #   mne_python infomax implementation and order the components
+        #   in the same way that eeglab does. This is done below in the section
+        #   "Order the components in the same way that eeglab does".
+
+        ###############################################################
+        # EEGLAB default parameters
+        ###############################################################
+        l_rate_eeglab = 0.00065 / np.log(N)
+        block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))
+        blowup_eeglab = 1e9
+        blowup_fac_eeglab = 0.8
+        max_iter_eeglab = 512
+
+        if method == 'infomax':
+            anneal_step_eeglab = 0.9
+            use_extended = False
+
+        elif method == 'extended_infomax':
+            anneal_step_eeglab = 0.98
+            use_extended = True
+
+        if N > 32:
+            w_change_eeglab = 1e-7
+        else:
+            w_change_eeglab = 1e-6
+        ###############################################################
+
+        # Call the mne_python infomax version using the following syntax
+        # to obtain the same result as the eeglab version
+        unmixing = infomax(Y.T, extended=use_extended,
+                           random_state=random_state,
+                           max_iter=max_iter_eeglab,
+                           l_rate=l_rate_eeglab,
+                           block=block_eeglab,
+                           w_change=w_change_eeglab,
+                           blowup=blowup_eeglab,
+                           blowup_fac=blowup_fac_eeglab,
+                           n_small_angle=None,
+                           anneal_step=anneal_step_eeglab
+                           )
+
+        #######################################################################
+        # Order the components in the same way that eeglab does
+        #######################################################################
+
+        sources = np.dot(unmixing, Y)
+        mixing = pinv(unmixing)
+
+        mvar = np.sum(mixing ** 2, axis=0) * \
+            np.sum(sources ** 2, axis=1) / (N * T - 1)
+        windex = np.argsort(mvar)[::-1]
+
+        unmixing_ordered = unmixing[windex, :]
+        #######################################################################
+
+        #######################################################################
+        # Load the eeglab results, then compare the unmixing matrices estimated
+        # by mne_python and eeglab. To make the comparison use the
+        # \ell_inf norm:
+        # ||unmixing_mne_python - unmixing_eeglab||_inf
+        #######################################################################
+
+        eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))
+        unmixing_eeglab = eeglab_data['unmixing_eeglab']
+
+        maximum_difference = np.max(np.abs(unmixing_ordered - unmixing_eeglab))
+
+        assert_almost_equal(maximum_difference, 1e-12, decimal=10)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eog.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eog.py
new file mode 100644
index 0000000..97220dd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_eog.py
@@ -0,0 +1,18 @@
+import os.path as op
+from nose.tools import assert_true
+
+from mne.io import Raw
+from mne.preprocessing.eog import find_eog_events
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+proj_fname = op.join(data_path, 'test-proj.fif')
+
+
+def test_find_eog():
+    """Test find EOG peaks"""
+    raw = Raw(raw_fname)
+    events = find_eog_events(raw)
+    n_events = len(events)
+    assert_true(n_events == 4)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ica.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ica.py
new file mode 100644
index 0000000..c5862ce
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ica.py
@@ -0,0 +1,592 @@
+from __future__ import print_function
+
+# Author: Denis Engemann <denis.engemann at gmail.com>
+#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import warnings
+
+from nose.tools import assert_true, assert_raises, assert_equal
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
+from scipy import stats
+from itertools import product
+
+from mne import io, Epochs, read_events, pick_types
+from mne.cov import read_cov
+from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
+                               read_ica, run_ica)
+from mne.preprocessing.ica import get_score_funcs, corrmap
+from mne.io.meas_info import Info
+from mne.utils import (set_log_file, _TempDir, requires_sklearn, slow_test,
+                       run_tests_if_main)
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt  # noqa
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
+test_cov_name = op.join(data_dir, 'test-cov.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.2
+# if stop is too small pca may fail in some cases, but we're okay on this file
+start, stop = 0, 6
+score_funcs_unsuited = ['pointbiserialr', 'ansari']
+try:
+    from sklearn.utils.validation import NonBLASDotWarning
+    warnings.simplefilter('error', NonBLASDotWarning)
+except:
+    pass
+
+
+ at requires_sklearn
+def test_ica_full_data_recovery():
+    """Test recovery of full data when no source is rejected"""
+    # Most basic recovery
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    evoked = epochs.average()
+    n_channels = 5
+    data = raw._data[:n_channels].copy()
+    data_epochs = epochs.get_data()
+    data_evoked = evoked.data
+    for method in ['fastica']:
+        stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
+        for n_components, n_pca_components, ok in stuff:
+            ica = ICA(n_components=n_components,
+                      max_pca_components=n_pca_components,
+                      n_pca_components=n_pca_components,
+                      method=method, max_iter=1)
+            with warnings.catch_warnings(record=True):
+                ica.fit(raw, picks=list(range(n_channels)))
+            raw2 = ica.apply(raw, exclude=[], copy=True)
+            if ok:
+                assert_allclose(data[:n_channels], raw2._data[:n_channels],
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
+                assert_true(np.max(diff) > 1e-14)
+
+            ica = ICA(n_components=n_components,
+                      max_pca_components=n_pca_components,
+                      n_pca_components=n_pca_components)
+            with warnings.catch_warnings(record=True):
+                ica.fit(epochs, picks=list(range(n_channels)))
+            epochs2 = ica.apply(epochs, exclude=[], copy=True)
+            data2 = epochs2.get_data()[:, :n_channels]
+            if ok:
+                assert_allclose(data_epochs[:, :n_channels], data2,
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(data_epochs[:, :n_channels] - data2)
+                assert_true(np.max(diff) > 1e-14)
+
+            evoked2 = ica.apply(evoked, exclude=[], copy=True)
+            data2 = evoked2.data[:n_channels]
+            if ok:
+                assert_allclose(data_evoked[:n_channels], data2,
+                                rtol=1e-10, atol=1e-15)
+            else:
+                diff = np.abs(evoked.data[:n_channels] - data2)
+                assert_true(np.max(diff) > 1e-14)
+    assert_raises(ValueError, ICA, method='pizza-decomposision')
+
+
+ at requires_sklearn
+def test_ica_rank_reduction():
+    """Test recovery of full data when no source is rejected"""
+    # Most basic recovery
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
+    n_components = 5
+    max_pca_components = len(picks)
+    for n_pca_components in [6, 10]:
+        with warnings.catch_warnings(record=True):  # non-convergence
+            warnings.simplefilter('always')
+            ica = ICA(n_components=n_components,
+                      max_pca_components=max_pca_components,
+                      n_pca_components=n_pca_components,
+                      method='fastica', max_iter=1).fit(raw, picks=picks)
+
+        rank_before = raw.estimate_rank(picks=picks)
+        assert_equal(rank_before, len(picks))
+        raw_clean = ica.apply(raw, copy=True)
+        rank_after = raw_clean.estimate_rank(picks=picks)
+        # the interaction between ICA rejection and PCA components is
+        # difficult to predict; rank_after often seems to be 1 higher than
+        # n_pca_components
+        assert_true(n_components < n_pca_components <= rank_after <=
+                    rank_before)
+
+
+@requires_sklearn
+def test_ica_reset():
+    """Test ICA resetting"""
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
+
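+    # attributes that only exist after fitting and that _reset() must
+    # remove again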
+    run_time_attrs = (
+        '_pre_whitener',
+        'unmixing_matrix_',
+        'mixing_matrix_',
+        'n_components_',
+        'n_samples_',
+        'pca_components_',
+        'pca_explained_variance_',
+        'pca_mean_'
+    )
+    with warnings.catch_warnings(record=True):
+        ica = ICA(
+            n_components=3, max_pca_components=3, n_pca_components=3,
+            method='fastica', max_iter=1).fit(raw, picks=picks)
+
+    assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
+    ica._reset()
+    assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
+
+
+@requires_sklearn
+def test_ica_core():
+    """Test ICA on raw and epochs"""
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    # XXX. The None cases helped reveal bugs but are time-consuming.
+    test_cov = read_cov(test_cov_name)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    noise_cov = [None, test_cov]
+    # removed None cases to speed up...
+    n_components = [2, 1.0]  # add cases here for future debugging
+    max_pca_components = [3]
+    picks_ = [picks]
+    methods = ['fastica']
+    iter_ica_params = product(noise_cov, n_components, max_pca_components,
+                              picks_, methods)
+
+    # test init catchers
+    assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
+    assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
+
+    # test essential core functionality
+    for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
+        # Test ICA raw
+        ica = ICA(noise_cov=n_cov, n_components=n_comp,
+                  max_pca_components=max_n, n_pca_components=max_n,
+                  random_state=0, method=method, max_iter=1)
+        assert_raises(ValueError, ica.__contains__, 'mag')
+
+        print(ica)  # to test repr
+
+        # test fit checker
+        assert_raises(RuntimeError, ica.get_sources, raw)
+        assert_raises(RuntimeError, ica.get_sources, epochs)
+
+        # test decomposition
+        with warnings.catch_warnings(record=True):
+            ica.fit(raw, picks=pcks, start=start, stop=stop)
+            repr(ica)  # to test repr
+        assert_true('mag' in ica)  # should now work without error
+
+        # test re-fit
+        unmixing1 = ica.unmixing_matrix_
+        with warnings.catch_warnings(record=True):
+            ica.fit(raw, picks=pcks, start=start, stop=stop)
+        assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
+
+        sources = ica.get_sources(raw)[:, :][0]
+        assert_true(sources.shape[0] == ica.n_components_)
+
+        # test preload filter
+        raw3 = raw.copy()
+        raw3.preload = False
+        assert_raises(ValueError, ica.apply, raw3,
+                      include=[1, 2])
+
+        #######################################################################
+        # test epochs decomposition
+        ica = ICA(noise_cov=n_cov, n_components=n_comp,
+                  max_pca_components=max_n, n_pca_components=max_n,
+                  random_state=0)
+        with warnings.catch_warnings(record=True):
+            ica.fit(epochs, picks=picks)
+        data = epochs.get_data()[:, 0, :]
+        n_samples = np.prod(data.shape)
+        assert_equal(ica.n_samples_, n_samples)
+        print(ica)  # to test repr
+
+        sources = ica.get_sources(epochs).get_data()
+        assert_true(sources.shape[1] == ica.n_components_)
+
+        assert_raises(ValueError, ica.score_sources, epochs,
+                      target=np.arange(1))
+
+        # test preload filter
+        epochs3 = epochs.copy()
+        epochs3.preload = False
+        assert_raises(ValueError, ica.apply, epochs3,
+                      include=[1, 2])
+
+    # test for bug with whitener updating
+    _pre_whitener = ica._pre_whitener.copy()
+    epochs._data[:, 0, 10:15] *= 1e12
+    ica.apply(epochs, copy=True)
+    assert_array_equal(_pre_whitener, ica._pre_whitener)
+
+    # test explained-variance threshold leading to an empty selection
+    ica.n_components = 0.1
+    assert_raises(RuntimeError, ica.fit, epochs)
+
+    offender = 1, 2, 3,
+    assert_raises(ValueError, ica.get_sources, offender)
+    assert_raises(ValueError, ica.fit, offender)
+    assert_raises(ValueError, ica.apply, offender)
+
+
+@slow_test
+@requires_sklearn
+def test_ica_additional():
+    """Test additional ICA functionality"""
+    tempdir = _TempDir()
+    stop2 = 500
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    test_cov = read_cov(test_cov_name)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    # test if n_components=None works
+    with warnings.catch_warnings(record=True):
+        ica = ICA(n_components=None,
+                  max_pca_components=None,
+                  n_pca_components=None, random_state=0)
+        ica.fit(epochs, picks=picks, decim=3)
+    # for testing eog functionality
+    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                        eog=True, exclude='bads')
+    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
+                        baseline=(None, 0), preload=True)
+
+    test_cov2 = test_cov.copy()
+    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
+              n_pca_components=4)
+    assert_true(ica.info is None)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks[:5])
+    assert_true(isinstance(ica.info, Info))
+    assert_true(ica.n_components_ < 5)
+
+    ica = ICA(n_components=3, max_pca_components=4,
+              n_pca_components=4)
+    assert_raises(RuntimeError, ica.save, '')
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)
+
+    # test corrmap
+    ica2 = ica.copy()
+    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
+            ch_type="mag")
+    corrmap([ica, ica2], (0, 0), threshold=2, plot=False)
+    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
+    assert_true(0 in ica.labels_["blinks"])
+    plt.close('all')
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
+        ica.save(ica_badname)
+        read_ica(ica_badname)
+    assert_true(len(w) == 2)
+
+    # test decim
+    ica = ICA(n_components=3, max_pca_components=4,
+              n_pca_components=4)
+    raw_ = raw.copy()
+    for _ in range(3):
+        raw_.append(raw_)
+    n_samples = raw_._data.shape[1]
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw_, picks=None, decim=3)
+    # decimation must not modify the fitted instance's data in place
+    assert_equal(raw_._data.shape[1], n_samples)
+
+    # test explained variance as n_components
+    ica = ICA(n_components=1.0, max_pca_components=4,
+              n_pca_components=4)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks=None, decim=3)
+    assert_true(ica.n_components_ == 4)
+
+    # epochs extraction from raw fit
+    assert_raises(RuntimeError, ica.get_sources, epochs)
+    # test reading and writing
+    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
+    for cov in (None, test_cov):
+        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
+                  n_pca_components=4)
+        with warnings.catch_warnings(record=True):  # ICA does not converge
+            ica.fit(raw, picks=picks, start=start, stop=stop2)
+        sources = ica.get_sources(epochs).get_data()
+        assert_true(ica.mixing_matrix_.shape == (2, 2))
+        assert_true(ica.unmixing_matrix_.shape == (2, 2))
+        assert_true(ica.pca_components_.shape == (4, len(picks)))
+        assert_true(sources.shape[1] == ica.n_components_)
+
+        for exclude in [[], [0]]:
+            ica.exclude = [0]
+            ica.save(test_ica_fname)
+            ica_read = read_ica(test_ica_fname)
+            assert_true(ica.exclude == ica_read.exclude)
+
+            ica.exclude = []
+            ica.apply(raw, exclude=[1])
+            assert_true(ica.exclude == [])
+
+            ica.exclude = [0, 1]
+            ica.apply(raw, exclude=[1])
+            assert_true(ica.exclude == [0, 1])
+
+            ica_raw = ica.get_sources(raw)
+            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
+                                        ica_raw.info['bads']])
+
+        # test filtering
+        d1 = ica_raw._data[0].copy()
+        with warnings.catch_warnings(record=True):  # dB warning
+            ica_raw.filter(4, 20)
+        assert_true((d1 != ica_raw._data[0]).any())
+        d1 = ica_raw._data[0].copy()
+        with warnings.catch_warnings(record=True):  # dB warning
+            ica_raw.notch_filter([10])
+        assert_true((d1 != ica_raw._data[0]).any())
+
+        ica.n_pca_components = 2
+        ica.save(test_ica_fname)
+        ica_read = read_ica(test_ica_fname)
+        assert_true(ica.n_pca_components == ica_read.n_pca_components)
+
+        # check type consistency
+        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
+                 'pca_explained_variance_ _pre_whitener')
+
+        def f(x, y):
+            return getattr(x, y).dtype
+
+        for attr in attrs.split():
+            assert_equal(f(ica_read, attr), f(ica, attr))
+
+        ica.n_pca_components = 4
+        ica_read.n_pca_components = 4
+
+        ica.exclude = []
+        ica.save(test_ica_fname)
+        ica_read = read_ica(test_ica_fname)
+        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
+                     'pca_mean_', 'pca_explained_variance_',
+                     '_pre_whitener']:
+            assert_array_almost_equal(getattr(ica, attr),
+                                      getattr(ica_read, attr))
+
+        assert_true(ica.ch_names == ica_read.ch_names)
+        assert_true(isinstance(ica_read.info, Info))
+
+        sources = ica.get_sources(raw)[:, :][0]
+        sources2 = ica_read.get_sources(raw)[:, :][0]
+        assert_array_almost_equal(sources, sources2)
+
+        _raw1 = ica.apply(raw, exclude=[1])
+        _raw2 = ica_read.apply(raw, exclude=[1])
+        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
+
+    os.remove(test_ica_fname)
+    # check score funcs
+    for name, func in get_score_funcs().items():
+        if name in score_funcs_unsuited:
+            continue
+        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
+                                   start=0, stop=10)
+        assert_true(ica.n_components_ == len(scores))
+
+    # check univariate stats
+    scores = ica.score_sources(raw, score_func=stats.skew)
+    # check exception handling
+    assert_raises(ValueError, ica.score_sources, raw,
+                  target=np.arange(1))
+
+    params = []
+    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
+    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
+    for idx, ch_name in product(*params):
+        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
+                             eog_ch=ch_name, skew_criterion=idx,
+                             var_criterion=idx, kurt_criterion=idx)
+    with warnings.catch_warnings(record=True):
+        idx, scores = ica.find_bads_ecg(raw, method='ctps')
+        assert_equal(len(scores), ica.n_components_)
+        idx, scores = ica.find_bads_ecg(raw, method='correlation')
+        assert_equal(len(scores), ica.n_components_)
+        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
+        assert_equal(len(scores), ica.n_components_)
+        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
+                      method='ctps')
+        assert_raises(ValueError, ica.find_bads_ecg, raw,
+                      method='crazy-coupling')
+
+        idx, scores = ica.find_bads_eog(raw)
+        assert_equal(len(scores), ica.n_components_)
+        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
+        idx, scores = ica.find_bads_eog(raw)
+        assert_true(isinstance(scores, list))
+        assert_equal(len(scores[0]), ica.n_components_)
+
+    # check score funcs
+    for name, func in get_score_funcs().items():
+        if name in score_funcs_unsuited:
+            continue
+        scores = ica.score_sources(epochs_eog, target='EOG 061',
+                                   score_func=func)
+        assert_true(ica.n_components_ == len(scores))
+
+    # check univariate stats
+    scores = ica.score_sources(epochs, score_func=stats.skew)
+
+    # check exception handling
+    assert_raises(ValueError, ica.score_sources, epochs,
+                  target=np.arange(1))
+
+    # ecg functionality
+    ecg_scores = ica.score_sources(raw, target='MEG 1531',
+                                   score_func='pearsonr')
+
+    with warnings.catch_warnings(record=True):  # filter attenuation warning
+        ecg_events = ica_find_ecg_events(raw,
+                                         sources[np.abs(ecg_scores).argmax()])
+
+    assert_true(ecg_events.ndim == 2)
+
+    # eog functionality
+    eog_scores = ica.score_sources(raw, target='EOG 061',
+                                   score_func='pearsonr')
+    with warnings.catch_warnings(record=True):  # filter attenuation warning
+        eog_events = ica_find_eog_events(raw,
+                                         sources[np.abs(eog_scores).argmax()])
+
+    assert_true(eog_events.ndim == 2)
+
+    # Test ica fiff export
+    ica_raw = ica.get_sources(raw, start=0, stop=100)
+    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
+    assert_true(len(ica_raw._filenames) == 0)  # API consistency
+    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
+    assert_true(ica.n_components_ == len(ica_chans))
+    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
+    ica.n_components = np.int32(ica.n_components)
+    ica_raw.save(test_ica_fname, overwrite=True)
+    ica_raw2 = io.Raw(test_ica_fname, preload=True)
+    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
+    ica_raw2.close()
+    os.remove(test_ica_fname)
+
+    # Test ica epochs export
+    ica_epochs = ica.get_sources(epochs)
+    assert_true(ica_epochs.events.shape == epochs.events.shape)
+    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
+    assert_true(ica.n_components_ == len(ica_chans))
+    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
+    assert_true(ica_epochs._raw is None)
+    assert_true(ica_epochs.preload is True)
+
+    # test float n pca components
+    ica.pca_explained_variance_ = np.array([0.2] * 5)
+    ica.n_components_ = 0
+    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
+        ncomps_ = ica._check_n_pca_components(ncomps)
+        assert_true(ncomps_ == expected)
+
+
+@requires_sklearn
+def test_run_ica():
+    """Test run_ica function"""
+    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    params = []
+    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
+    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
+    for idx, ch_name in product(*params):
+        warnings.simplefilter('always')
+        with warnings.catch_warnings(record=True):
+            run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
+                    stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
+                    skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
+
+
+@requires_sklearn
+def test_ica_reject_buffer():
+    """Test ICA data raw buffer rejection"""
+    tempdir = _TempDir()
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
+    raw._data[2, 1000:1005] = 5e-12
+    drop_log = op.join(op.dirname(tempdir), 'ica_drop.log')
+    set_log_file(drop_log, overwrite=True)
+    with warnings.catch_warnings(record=True):
+        ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
+                tstep=0.01, verbose=True)
+    assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
+    with open(drop_log) as fid:
+        log = [l for l in fid if 'detected' in l]
+    assert_equal(len(log), 1)
+
+
+@requires_sklearn
+def test_ica_twice():
+    """Test running ICA twice"""
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg='grad', exclude='bads')
+    n_components = 0.9
+    max_pca_components = None
+    n_pca_components = 1.1
+    with warnings.catch_warnings(record=True):
+        ica1 = ICA(n_components=n_components,
+                   max_pca_components=max_pca_components,
+                   n_pca_components=n_pca_components, random_state=0)
+
+        ica1.fit(raw, picks=picks, decim=3)
+        raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
+        ica2 = ICA(n_components=n_components,
+                   max_pca_components=max_pca_components,
+                   n_pca_components=1.0, random_state=0)
+        ica2.fit(raw_new, picks=picks, decim=3)
+        assert_equal(ica1.n_components_, ica2.n_components_)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_infomax.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_infomax.py
new file mode 100644
index 0000000..d8d9a72
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_infomax.py
@@ -0,0 +1,179 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+"""
+Test the infomax algorithm.
+Parts of this code are taken from scikit-learn.
+"""
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+
+from scipy import stats
+from scipy import linalg
+
+from mne.preprocessing.infomax_ import infomax
+from mne.utils import requires_sklearn, run_tests_if_main
+
+
+def center_and_norm(x, axis=-1):
+    """ Centers and norms x **in place**
+
+    Parameters
+    -----------
+    x: ndarray
+        Array with an axis of observations (statistical units) measured on
+        random variables.
+    axis: int, optional
+        Axis along which the mean and variance are calculated.
+    """
+    x = np.rollaxis(x, axis)
+    x -= x.mean(axis=0)
+    x /= x.std(axis=0)
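+
+# A usage sketch (hypothetical input): center_and_norm(np.random.randn(2, 50))
+# leaves each row with zero mean and unit variance, modifying it in place.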
+
+
+@requires_sklearn
+def test_infomax_blowup():
+    """ Test the infomax algorithm blowup condition
+    """
+    from sklearn.decomposition import RandomizedPCA
+    # scipy.stats uses the global RNG:
+    np.random.seed(0)
+    n_samples = 100
+    # Generate two sources:
+    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
+    s2 = stats.t.rvs(1, size=n_samples)
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing angle
+    phi = 0.6
+    mixing = np.array([[np.cos(phi),  np.sin(phi)],
+                       [np.sin(phi), -np.cos(phi)]])
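+    # this mixing matrix is orthogonal (a reflection by angle phi), so the
+    # mixture is invertible and well-conditioned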
+    m = np.dot(mixing, s)
+
+    center_and_norm(m)
+
+    X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
+    k_ = infomax(X, extended=True, l_rate=0.1)
+    s_ = np.dot(k_, X.T)
+
+    center_and_norm(s_)
+    s1_, s2_ = s_
+    # Check to see if the sources have been estimated
+    # in the wrong order
+    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+        s2_, s1_ = s_
+    s1_ *= np.sign(np.dot(s1_, s1))
+    s2_ *= np.sign(np.dot(s2_, s2))
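+    # (ICA recovers sources only up to permutation and sign, hence the
+    # reordering and sign-flipping above)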
+
+    # Check that we have estimated the original sources
+    assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+    assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+
+
+@requires_sklearn
+def test_infomax_simple():
+    """ Test the infomax algorithm on very simple data.
+    """
+    from sklearn.decomposition import RandomizedPCA
+    rng = np.random.RandomState(0)
+    # scipy.stats uses the global RNG:
+    np.random.seed(0)
+    n_samples = 500
+    # Generate two sources:
+    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
+    s2 = stats.t.rvs(1, size=n_samples)
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing angle
+    phi = 0.6
+    mixing = np.array([[np.cos(phi),  np.sin(phi)],
+                       [np.sin(phi), -np.cos(phi)]])
+    for add_noise in (False, True):
+        m = np.dot(mixing, s)
+        if add_noise:
+            m += 0.1 * rng.randn(2, n_samples)
+        center_and_norm(m)
+
+        algos = [True, False]
+        for algo in algos:
+            X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
+            k_ = infomax(X, extended=algo)
+            s_ = np.dot(k_, X.T)
+
+            center_and_norm(s_)
+            s1_, s2_ = s_
+            # Check to see if the sources have been estimated
+            # in the wrong order
+            if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+                s2_, s1_ = s_
+            s1_ *= np.sign(np.dot(s1_, s1))
+            s2_ *= np.sign(np.dot(s2_, s2))
+
+            # Check that we have estimated the original sources
+            if not add_noise:
+                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+            else:
+                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
+                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
+
+
+@requires_sklearn
+def test_non_square_infomax():
+    """ Test non-square infomax
+    """
+    from sklearn.decomposition import RandomizedPCA
+
+    rng = np.random.RandomState(0)
+
+    n_samples = 200
+    # Generate two sources:
+    t = np.linspace(0, 100, n_samples)
+    s1 = np.sin(t)
+    s2 = np.ceil(np.sin(np.pi * t))
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing matrix
+    n_observed = 6
+    mixing = rng.randn(n_observed, 2)
+    for add_noise in (False, True):
+        m = np.dot(mixing, s)
+
+        if add_noise:
+            m += 0.1 * rng.randn(n_observed, n_samples)
+
+        center_and_norm(m)
+        pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
+        m = m.T
+        m = pca.fit_transform(m)
+        # we need extended since input signals are sub-gaussian
+        unmixing_ = infomax(m, random_state=rng, extended=True)
+        s_ = np.dot(unmixing_, m.T)
+        # Check that the mixing model described in the docstring holds:
+        mixing_ = linalg.pinv(unmixing_.T)
+
+        assert_almost_equal(m, s_.T.dot(mixing_))
+
+        center_and_norm(s_)
+        s1_, s2_ = s_
+        # Check to see if the sources have been estimated
+        # in the wrong order
+        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+            s2_, s1_ = s_
+        s1_ *= np.sign(np.dot(s1_, s1))
+        s2_ *= np.sign(np.dot(s2_, s2))
+
+        # Check that we have estimated the original sources
+        if not add_noise:
+            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_maxwell.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_maxwell.py
new file mode 100644
index 0000000..f2320dc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_maxwell.py
@@ -0,0 +1,256 @@
+# Author: Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose,
+                           assert_array_almost_equal)
+from nose.tools import assert_true, assert_raises
+
+from mne import compute_raw_covariance, pick_types
+from mne.cov import _estimate_rank_meeg_cov
+from mne.datasets import testing
+from mne.forward._make_forward import _prep_meg_channels
+from mne.io import Raw, proc_history
+from mne.preprocessing.maxwell import (_maxwell_filter as maxwell_filter,
+                                       get_num_moments, _sss_basis)
+from mne.utils import _TempDir, run_tests_if_main, slow_test
+
+warnings.simplefilter('always')  # Always throw warnings
+
+data_path = op.join(testing.data_path(download=False))
+raw_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+sss_std_fname = op.join(data_path, 'SSS',
+                        'test_move_anon_raw_simp_stdOrigin_sss.fif')
+sss_nonstd_fname = op.join(data_path, 'SSS',
+                           'test_move_anon_raw_simp_nonStdOrigin_sss.fif')
+sss_bad_recon_fname = op.join(data_path, 'SSS',
+                              'test_move_anon_raw_bad_recon_sss.fif')
+
+
+@testing.requires_testing_data
+def test_maxwell_filter():
+    """Test multipolar moment and Maxwell filter"""
+
+    # TODO: future tests should integrate with mne/io/tests/test_proc_history
+
+    # Load testing data (raw, SSS std origin, SSS non-standard origin)
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
+    raw.load_data()
+    with warnings.catch_warnings(record=True):  # maxshield, naming
+        sss_std = Raw(sss_std_fname, allow_maxshield=True)
+        sss_nonStd = Raw(sss_nonstd_fname, allow_maxshield=True)
+        raw_err = Raw(raw_fname, proj=True,
+                      allow_maxshield=True).crop(0., 0.1, False)
+    assert_raises(RuntimeError, maxwell_filter, raw_err)
+
+    # Create coils
+    all_coils, _, _, meg_info = _prep_meg_channels(raw.info, ignore_ref=True,
+                                                   elekta_defs=True)
+    picks = [raw.info['ch_names'].index(ch) for ch in [coil['chname']
+                                                       for coil in all_coils]]
+    coils = [all_coils[ci] for ci in picks]
+    ncoils = len(coils)
+
+    int_order, ext_order = 8, 3
+    n_int_bases = int_order ** 2 + 2 * int_order
+    n_ext_bases = ext_order ** 2 + 2 * ext_order
+    nbases = n_int_bases + n_ext_bases
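+    # an order-L expansion has sum_{l=1}^{L} (2l + 1) = L**2 + 2*L terms,
+    # so (int_order, ext_order) = (8, 3) gives 80 + 15 = 95 bases in total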
+
+    # Check number of bases computed correctly
+    assert_equal(get_num_moments(int_order, ext_order), nbases)
+
+    # Check multipolar moment basis set
+    S_in, S_out = _sss_basis(origin=np.array([0, 0, 40]), coils=coils,
+                             int_order=int_order, ext_order=ext_order)
+    assert_equal(S_in.shape, (ncoils, n_int_bases), 'S_in has incorrect shape')
+    assert_equal(S_out.shape, (ncoils, n_ext_bases),
+                 'S_out has incorrect shape')
+
+    # Test sss computation at the standard head origin
+    raw_sss = maxwell_filter(raw, origin=[0., 0., 40.],
+                             int_order=int_order, ext_order=ext_order)
+
+    sss_std_data = sss_std[picks][0]
+    assert_array_almost_equal(raw_sss[picks][0], sss_std_data,
+                              decimal=11, err_msg='Maxwell filtered data at '
+                              'standard origin incorrect.')
+
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(sss_std_data * sss_std_data, axis=1))
+    error = raw_sss[picks][0] - sss_std_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) > 1000, 'SNR < 1000')
+
+    # Test sss computation at non-standard head origin
+    raw_sss = maxwell_filter(raw, origin=[0., 20., 20.],
+                             int_order=int_order, ext_order=ext_order)
+    sss_nonStd_data = sss_nonStd[picks][0]
+    assert_array_almost_equal(raw_sss[picks][0], sss_nonStd_data, decimal=11,
+                              err_msg='Maxwell filtered data at non-std '
+                              'origin incorrect.')
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(sss_nonStd_data * sss_nonStd_data, axis=1))
+    error = raw_sss[picks][0] - sss_nonStd_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) > 1000, 'SNR < 1000')
+
+    # Check against SSS functions from proc_history
+    sss_info = raw_sss.info['proc_history'][0]['max_info']
+    assert_equal(get_num_moments(int_order, 0),
+                 proc_history._get_sss_rank(sss_info))
+
+    # Degenerate cases
+    raw_bad = raw.copy()
+    raw_bad.info['comps'] = [0]
+    assert_raises(RuntimeError, maxwell_filter, raw_bad)
+
+
+@testing.requires_testing_data
+def test_maxwell_filter_additional():
+    """Test processing of Maxwell filtered data"""
+
+    # TODO: future tests should integrate with mne/io/tests/test_proc_history
+
+    # Load testing data (raw, SSS std origin, SSS non-standard origin)
+    data_path = op.join(testing.data_path(download=False))
+
+    file_name = 'test_move_anon'
+
+    raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
+
+    with warnings.catch_warnings(record=True):  # maxshield
+        # Use 2.0 seconds of data to get stable cov. estimate
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 2., False)
+
+    # Get MEG channels, compute Maxwell filtered data
+    raw.load_data()
+    raw.pick_types(meg=True, eeg=False)
+    int_order, ext_order = 8, 3
+    raw_sss = maxwell_filter(raw, int_order=int_order, ext_order=ext_order)
+
+    # Test io on processed data
+    tempdir = _TempDir()
+    test_outname = op.join(tempdir, 'test_raw_sss.fif')
+    raw_sss.save(test_outname)
+    raw_sss_loaded = Raw(test_outname, preload=True, proj=False,
+                         allow_maxshield=True)
+
+    # Some numerical imprecision since save uses 'single' fmt
+    assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
+                    rtol=1e-6, atol=1e-20)
+
+    # Test rank of covariance matrices for raw and SSS processed data
+    cov_raw = compute_raw_covariance(raw)
+    cov_sss = compute_raw_covariance(raw_sss)
+
+    scalings = None
+    cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
+    cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
+                                           scalings)
+
+    assert_equal(cov_raw_rank, raw.info['nchan'])
+    assert_equal(cov_sss_rank, get_num_moments(int_order, 0))
+
+
+@slow_test
+@testing.requires_testing_data
+def test_bads_reconstruction():
+    """Test reconstruction of channels marked as bad"""
+
+    with warnings.catch_warnings(record=True):  # maxshield, naming
+        sss_bench = Raw(sss_bad_recon_fname, allow_maxshield=True)
+
+    raw_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
+
+    # Set 30 random bad MEG channels (20 grad, 10 mag)
+    bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
+            'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
+            'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
+            'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
+            'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
+    raw.info['bads'] = bads
+
+    # Compute Maxwell filtered data
+    raw_sss = maxwell_filter(raw)
+    meg_chs = pick_types(raw_sss.info)
+    non_meg_chs = np.setdiff1d(np.arange(len(raw.ch_names)), meg_chs)
+    sss_bench_data = sss_bench[meg_chs][0]
+
+    # Some numerical imprecision since save uses 'single' fmt
+    assert_allclose(raw_sss[meg_chs][0], sss_bench_data,
+                    rtol=1e-12, atol=1e-4, err_msg='Maxwell filtered data '
+                    'with reconstructed bads is incorrect.')
+
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(raw_sss[meg_chs][0] ** 2, axis=1))
+    error = raw_sss[meg_chs][0] - sss_bench_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) >= 1000,
+                'SNR (%0.1f) < 1000' % np.mean(bench_rms / error_rms))
+    assert_allclose(raw_sss[non_meg_chs][0], raw[non_meg_chs][0])
+
+
+@testing.requires_testing_data
+def test_spatiotemporal_maxwell():
+    """Test spatiotemporal (tSSS) processing"""
+    # Load raw testing data
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True)
+
+    # Create coils
+    picks = pick_types(raw.info)
+
+    # Test that an error is raised if the window is longer than the data
+    assert_raises(ValueError, maxwell_filter, raw, st_dur=1000.)
+
+    # Check both 4 and 10 seconds because Elekta handles them differently
+    # This is to ensure that std/non-std tSSS windows are correctly handled
+    st_durs = [4., 10.]
+    for st_dur in st_durs:
+        # Load tSSS data depending on st_dur and get data
+        tSSS_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_' +
+                             'spatiotemporal_%0ds_sss.fif' % st_dur)
+
+        with warnings.catch_warnings(record=True):  # maxshield, naming
+            tsss_bench = Raw(tSSS_fname, allow_maxshield=True)
+            # Because Elekta's tSSS sometimes(!) lumps the tail window of data
+            # onto the previous buffer if it's shorter than st_dur, we have to
+            # crop the data here to compensate for Elekta's tSSS behavior.
+            if st_dur == 10.:
+                tsss_bench.crop(0, st_dur, copy=False)
+        tsss_bench_data = tsss_bench[picks, :][0]
+        del tsss_bench
+
+        # Test sss computation at the standard head origin. Same cropping issue
+        # as mentioned above.
+        if st_dur == 10.:
+            raw_tsss = maxwell_filter(raw.crop(0, st_dur), st_dur=st_dur)
+        else:
+            raw_tsss = maxwell_filter(raw, st_dur=st_dur)
+        assert_allclose(raw_tsss[picks][0], tsss_bench_data,
+                        rtol=1e-12, atol=1e-4, err_msg='Spatiotemporal (tSSS) '
+                        'maxwell filtered data at standard origin incorrect.')
+
+        # Confirm SNR is above 500. Single precision is part of the discrepancy
+        bench_rms = np.sqrt(np.mean(tsss_bench_data * tsss_bench_data, axis=1))
+        error = raw_tsss[picks][0] - tsss_bench_data
+        error_rms = np.sqrt(np.mean(error * error, axis=1))
+        assert_true(np.mean(bench_rms / error_rms) >= 500,
+                    'SNR (%0.1f) < 500' % np.mean(bench_rms / error_rms))
+
+    # Confirm we didn't modify other channels (like EEG chs)
+    non_picks = np.setdiff1d(np.arange(len(raw.ch_names)), picks)
+    assert_allclose(raw[non_picks, 0:raw_tsss.n_times][0],
+                    raw_tsss[non_picks, 0:raw_tsss.n_times][0])
+
+    # Degenerate cases
+    assert_raises(ValueError, maxwell_filter, raw, st_dur=10., st_corr=0.)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_peak_finder.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_peak_finder.py
new file mode 100644
index 0000000..56dbb2f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_peak_finder.py
@@ -0,0 +1,10 @@
+from numpy.testing import assert_array_equal
+
+from mne.preprocessing.peak_finder import peak_finder
+
+
+def test_peak_finder():
+    """Test the peak detection method"""
+    x = [0, 2, 5, 0, 6, -1]
+    peak_inds, peak_mags = peak_finder(x)
+    assert_array_equal(peak_inds, [2, 4])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ssp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ssp.py
new file mode 100644
index 0000000..1d5cd0a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_ssp.py
@@ -0,0 +1,103 @@
+import os.path as op
+import warnings
+
+from nose.tools import assert_true, assert_equal
+from numpy.testing import assert_array_almost_equal
+import numpy as np
+
+from mne.io import Raw
+from mne.io.proj import make_projector, activate_proj
+from mne.preprocessing.ssp import compute_proj_ecg, compute_proj_eog
+from mne.utils import run_tests_if_main
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+dur_use = 5.0
+eog_times = np.array([0.5, 2.3, 3.6, 14.5])
+
+
+def test_compute_proj_ecg():
+    """Test computation of ECG SSP projectors"""
+    raw = Raw(raw_fname).crop(0, 10, False)
+    raw.load_data()
+    for average in [False, True]:
+        # For speed, let's not filter here (must also not reject then)
+        projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                         ch_name='MEG 1531', bads=['MEG 2443'],
+                                         average=average, avg_ref=True,
+                                         no_proj=True, l_freq=None,
+                                         h_freq=None, reject=None,
+                                         tmax=dur_use, qrs_threshold=0.5)
+        assert_true(len(projs) == 7)
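+        # 7 = 2 grad + 2 mag + 2 eeg SSP projectors plus one average EEG
+        # reference projector (from avg_ref=True)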
+        # heart rate at least 0.5 Hz, but less than 3 Hz
+        assert_true(events.shape[0] > 0.5 * dur_use and
+                    events.shape[0] < 3 * dur_use)
+        # XXX: better tests
+
+        # without setting a bad channel, this should throw a warning
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                             ch_name='MEG 1531', bads=[],
+                                             average=average, avg_ref=True,
+                                             no_proj=True, l_freq=None,
+                                             h_freq=None, tmax=dur_use)
+            assert_equal(len(w), 1)
+        assert_equal(projs, None)
+
+
+def test_compute_proj_eog():
+    """Test computation of EOG SSP projectors"""
+    raw = Raw(raw_fname).crop(0, 10, False)
+    raw.load_data()
+    for average in [False, True]:
+        n_projs_init = len(raw.info['projs'])
+        projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                         bads=['MEG 2443'], average=average,
+                                         avg_ref=True, no_proj=False,
+                                         l_freq=None, h_freq=None,
+                                         reject=None, tmax=dur_use)
+        assert_true(len(projs) == (7 + n_projs_init))
+        assert_true(np.abs(events.shape[0] -
+                    np.sum(np.less(eog_times, dur_use))) <= 1)
+        # XXX: better tests
+
+        # This will throw a warning b/c simplefilter('always')
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                             average=average, bads=[],
+                                             avg_ref=True, no_proj=False,
+                                             l_freq=None, h_freq=None,
+                                             tmax=dur_use)
+            assert_equal(len(w), 1)
+        assert_equal(projs, None)
+
+
+def test_compute_proj_parallel():
+    """Test computation of ExG projectors using parallelization"""
+    raw_0 = Raw(raw_fname).crop(0, 10, False)
+    raw_0.load_data()
+    raw = raw_0.copy()
+    projs, _ = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
+                                bads=['MEG 2443'], average=False,
+                                avg_ref=True, no_proj=False, n_jobs=1,
+                                l_freq=None, h_freq=None, reject=None,
+                                tmax=dur_use)
+    raw_2 = raw_0.copy()
+    projs_2, _ = compute_proj_eog(raw_2, n_mag=2, n_grad=2, n_eeg=2,
+                                  bads=['MEG 2443'], average=False,
+                                  avg_ref=True, no_proj=False, n_jobs=2,
+                                  l_freq=None, h_freq=None, reject=None,
+                                  tmax=dur_use)
+    projs = activate_proj(projs)
+    projs_2 = activate_proj(projs_2)
+    projs, _, _ = make_projector(projs, raw_2.info['ch_names'],
+                                 bads=['MEG 2443'])
+    projs_2, _, _ = make_projector(projs_2, raw_2.info['ch_names'],
+                                   bads=['MEG 2443'])
+    assert_array_almost_equal(projs, projs_2, 10)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_stim.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_stim.py
new file mode 100644
index 0000000..eb290c4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_stim.py
@@ -0,0 +1,96 @@
+# Authors: Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_raises
+
+from mne.io import Raw
+from mne.io.pick import pick_types
+from mne.event import read_events
+from mne.epochs import Epochs
+from mne.preprocessing.stim import fix_stim_artifact
+
+data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_path, 'test_raw.fif')
+event_fname = op.join(data_path, 'test-eve.fif')
+
+
+def test_fix_stim_artifact():
+    """Test fix stim artifact"""
+    events = read_events(event_fname)
+
+    raw = Raw(raw_fname, preload=False)
+    assert_raises(RuntimeError, fix_stim_artifact, raw)
+
+    raw = Raw(raw_fname, preload=True)
+
+    # use window before stimulus in epochs
+    tmin, tmax, event_id = -0.2, 0.5, 1
+    picks = pick_types(raw.info, meg=True, eeg=True,
+                       eog=True, stim=False, exclude='bads')
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=None)
+    e_start = int(np.ceil(epochs.info['sfreq'] * epochs.tmin))
+    tmin, tmax = -0.045, -0.015
+    tmin_samp = int(-0.035 * epochs.info['sfreq']) - e_start
+    tmax_samp = int(-0.015 * epochs.info['sfreq']) - e_start
+
+    epochs = fix_stim_artifact(epochs, tmin=tmin, tmax=tmax, mode='linear')
+    data = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    diff_data0 = np.diff(data[0][0])
+    diff_data0 -= np.mean(diff_data0)
+    assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
+
+    epochs = fix_stim_artifact(epochs, tmin=tmin, tmax=tmax, mode='window')
+    data_from_epochs_fix = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    assert_true(np.all(data_from_epochs_fix) == 0.)
+
+    # use window before stimulus in raw
+    event_idx = np.where(events[:, 2] == 1)[0][0]
+    tmin, tmax = -0.045, -0.015
+    tmin_samp = int(-0.035 * raw.info['sfreq'])
+    tmax_samp = int(-0.015 * raw.info['sfreq'])
+    tidx = int(events[event_idx, 0] - raw.first_samp)
+
+    assert_raises(ValueError, fix_stim_artifact, raw, events=np.array([]))
+    raw = fix_stim_artifact(raw, events=None, event_id=1, tmin=tmin,
+                            tmax=tmax, mode='linear', stim_channel='STI 014')
+    data, times = raw[:, (tidx + tmin_samp):(tidx + tmax_samp)]
+    diff_data0 = np.diff(data[0])
+    diff_data0 -= np.mean(diff_data0)
+    assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
+
+    raw = fix_stim_artifact(raw, events, event_id=1, tmin=tmin,
+                            tmax=tmax, mode='window')
+    data, times = raw[:, (tidx + tmin_samp):(tidx + tmax_samp)]
+    assert_true(np.all(data) == 0.)
+
+    # get epochs from raw with fixed data
+    tmin, tmax, event_id = -0.2, 0.5, 1
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=None, baseline=None)
+    e_start = int(np.ceil(epochs.info['sfreq'] * epochs.tmin))
+    tmin_samp = int(-0.035 * epochs.info['sfreq']) - e_start
+    tmax_samp = int(-0.015 * epochs.info['sfreq']) - e_start
+    data_from_raw_fix = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    assert_true(np.all(data_from_raw_fix) == 0.)
+
+    # use window after stimulus
+    evoked = epochs.average()
+    tmin, tmax = 0.005, 0.045
+    tmin_samp = int(0.015 * evoked.info['sfreq']) - evoked.first
+    tmax_samp = int(0.035 * evoked.info['sfreq']) - evoked.first
+
+    evoked = fix_stim_artifact(evoked, tmin=tmin, tmax=tmax, mode='linear')
+    data = evoked.data[:, tmin_samp:tmax_samp]
+    diff_data0 = np.diff(data[0])
+    diff_data0 -= np.mean(diff_data0)
+    assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
+
+    evoked = fix_stim_artifact(evoked, tmin=tmin, tmax=tmax, mode='window')
+    data = evoked.data[:, tmin_samp:tmax_samp]
+    assert_true(np.all(data) == 0.)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_xdawn.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_xdawn.py
new file mode 100644
index 0000000..453ead0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/tests/test_xdawn.py
@@ -0,0 +1,145 @@
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+from nose.tools import (assert_equal, assert_raises)
+from numpy.testing import assert_array_equal
+from mne import (io, Epochs, read_events, pick_types,
+                 compute_raw_covariance)
+from mne.utils import requires_sklearn, run_tests_if_main
+from mne.preprocessing.xdawn import Xdawn
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+tmin, tmax = -0.1, 0.2
+event_id = dict(cond2=2, cond3=3)
+
+
+def _get_data():
+    raw = io.Raw(raw_fname, add_eeg_ref=False, verbose=False, preload=True)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=False, eeg=True, stim=False,
+                       ecg=False, eog=False,
+                       exclude='bads')[::8]
+    return raw, events, picks
+
+
+def test_xdawn_init():
+    """Test init of xdawn."""
+    # init xdawn with good parameters
+    Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None)
+    # init xdawn with bad parameters
+    assert_raises(ValueError, Xdawn, correct_overlap=42)
+
+
+def test_xdawn_fit():
+    """Test Xdawn fit."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+    # =========== Basic Fit test =================
+    # test base xdawn
+    xd = Xdawn(n_components=2, correct_overlap='auto',
+               signal_cov=None, reg=None)
+    xd.fit(epochs)
+    # with these parameters, the overlap correction must be False
+    assert_equal(xd.correct_overlap, False)
+    # without overlap correction we should get the averaged evoked
+    evoked = epochs['cond2'].average()
+    assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
+
+    # ========== with signal cov provided ====================
+    # provide covariance object
+    signal_cov = compute_raw_covariance(raw, picks=picks)
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    xd.fit(epochs)
+    # provide ndarray
+    signal_cov = np.eye(len(picks))
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    xd.fit(epochs)
+    # provide ndarray of bad shape
+    signal_cov = np.eye(len(picks) - 1)
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    assert_raises(ValueError, xd.fit, epochs)
+    # provide another type
+    signal_cov = 42
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    assert_raises(ValueError, xd.fit, epochs)
+    # fitting with both baseline correction and overlap correction should
+    # throw an error
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=(None, 0), verbose=False)
+
+    xd = Xdawn(n_components=2, correct_overlap=True)
+    assert_raises(ValueError, xd.fit, epochs)
+
+
+def test_xdawn_apply_transform():
+    """Test Xdawn apply and transform."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+    n_components = 2
+    # Fit Xdawn
+    xd = Xdawn(n_components=n_components, correct_overlap='auto')
+    xd.fit(epochs)
+
+    # apply on raw
+    xd.apply(raw)
+    # apply on epochs
+    xd.apply(epochs)
+    # apply on evoked
+    xd.apply(epochs.average())
+    # apply on other thing should raise an error
+    assert_raises(ValueError, xd.apply, 42)
+
+    # transform on epochs
+    xd.transform(epochs)
+    # transform on ndarray
+    xd.transform(epochs._data)
+    # transform on something else
+    assert_raises(ValueError, xd.transform, 42)
+
+
+@requires_sklearn
+def test_xdawn_regularization():
+    """Test Xdawn with regularization."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+
+    # test xdawn with overlap correction
+    xd = Xdawn(n_components=2, correct_overlap=True,
+               signal_cov=None, reg=0.1)
+    xd.fit(epochs)
+    # ========== with cov regularization ====================
+    # ledoit-wolf
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg='ledoit_wolf')
+    xd.fit(epochs)
+    # oas
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg='oas')
+    xd.fit(epochs)
+    # with shrinkage
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg=0.1)
+    xd.fit(epochs)
+    # with bad shrinkage
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg=2)
+    assert_raises(ValueError, xd.fit, epochs)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/xdawn.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/xdawn.py
new file mode 100644
index 0000000..a113e45
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/preprocessing/xdawn.py
@@ -0,0 +1,484 @@
+"""Xdawn implementation."""
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+
+import numpy as np
+from scipy import linalg
+
+from ..io.base import _BaseRaw
+from ..epochs import _BaseEpochs
+from .. import Covariance, EvokedArray, Evoked, EpochsArray
+from ..io.pick import pick_types
+from .ica import _get_fast_dot
+from ..utils import logger
+from ..decoding.mixin import TransformerMixin
+from ..cov import _regularized_covariance
+from ..channels.channels import ContainsMixin
+
+
+def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
+    """Least square estimation of evoked response from data.
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_channels, n_times)
+        The data from which to estimate the evoked responses.
+    events : ndarray, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be ignored.
+    event_id : dict
+        The id of the events to consider
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    sfreq : float
+        Sampling frequency.
+
+    Returns
+    -------
+    evokeds_data : dict of ndarray
+        A dict of evoked data for each event type in event_id.
+    toeplitz : dict of ndarray
+        A dict of Toeplitz matrices, one for each event type in event_id.
+    """
+    nmin = int(tmin * sfreq)
+    nmax = int(tmax * sfreq)
+
+    window = nmax - nmin
+    n_samples = data.shape[1]
+    toeplitz_mat = dict()
+    full_toep = list()
+    for eid in event_id:
+        # select events by type
+        ix_ev = events[:, -1] == event_id[eid]
+
+        # build toeplitz matrix
+        trig = np.zeros((n_samples, 1))
+        ix_trig = (events[ix_ev, 0]) + nmin
+        trig[ix_trig] = 1
+        toep_mat = linalg.toeplitz(trig[0:window], trig)
+        toeplitz_mat[eid] = toep_mat
+        full_toep.append(toep_mat)
+
+    # Concatenate toeplitz
+    full_toep = np.concatenate(full_toep)
+
+    # least square estimation
+    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
+    all_evokeds = np.dot(predictor, data.T)
+    all_evokeds = np.vsplit(all_evokeds, len(event_id))
+
+    # parse evoked response
+    evoked_data = dict()
+    for idx, eid in enumerate(event_id):
+        evoked_data[eid] = all_evokeds[idx].T
+
+    return evoked_data, toeplitz_mat
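+
+# In matrix form the estimation above solves X ~= A D in the least-squares
+# sense: D vertically stacks the per-event Toeplitz designs, A holds the
+# concatenated evoked responses, and `predictor` computes pinv(D D.T) D, so
+# `all_evokeds` = predictor X.T is the least-squares estimate of A.T.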
+
+
+def _check_overlapp(epochs):
+    """check if events are overlapped."""
+    isi = np.diff(epochs.events[:, 0])
+    window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
+    # Events are overlapped if the minimal inter-stimulus interval is smaller
+    # than the time window.
+    return isi.min() < window
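+
+# e.g. with sfreq=1000 Hz, tmin=-0.1 and tmax=0.5 the window is 600 samples,
+# so consecutive events closer than 600 samples count as overlapping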
+
+
+def _construct_signal_from_epochs(epochs):
+    """Reconstruct pseudo continuous signal from epochs."""
+    start = (np.min(epochs.events[:, 0]) +
+             int(epochs.tmin * epochs.info['sfreq']))
+    stop = (np.max(epochs.events[:, 0]) +
+            int(epochs.tmax * epochs.info['sfreq']) + 1)
+
+    n_samples = stop - start
+    epochs_data = epochs.get_data()
+    n_epochs, n_channels, n_times = epochs_data.shape
+    events_pos = epochs.events[:, 0] - epochs.events[0, 0]
+
+    data = np.zeros((n_channels, n_samples))
+    for idx in range(n_epochs):
+        onset = events_pos[idx]
+        offset = onset + n_times
+        data[:, onset:offset] = epochs_data[idx]
+
+    return data
+
+
+def least_square_evoked(epochs, return_toeplitz=False):
+    """Least square estimation of evoked response from a Epochs instance.
+
+    Parameters
+    ----------
+    epochs : Epochs instance
+        An instance of Epochs.
+    return_toeplitz : bool (default False)
+        If True, also return the Toeplitz matrices.
+
+    Returns
+    -------
+    evokeds : dict of Evoked instances
+        A dict of Evoked instances, one for each event type in
+        epochs.event_id.
+    toeplitz : dict of ndarray
+        If return_toeplitz is True, the Toeplitz matrix for each event
+        type in epochs.event_id.
+    """
+    if not isinstance(epochs, _BaseEpochs):
+        raise ValueError('epochs must be an instance of `mne.Epochs`')
+
+    events = epochs.events.copy()
+    events[:, 0] -= events[0, 0] + int(epochs.tmin * epochs.info['sfreq'])
+    data = _construct_signal_from_epochs(epochs)
+    evoked_data, toeplitz = _least_square_evoked(data, events, epochs.event_id,
+                                                 tmin=epochs.tmin,
+                                                 tmax=epochs.tmax,
+                                                 sfreq=epochs.info['sfreq'])
+    evokeds = dict()
+    info = cp.deepcopy(epochs.info)
+    for name, data in evoked_data.items():
+        n_events = len(events[events[:, 2] == epochs.event_id[name]])
+        evoked = EvokedArray(data, info, tmin=epochs.tmin,
+                             comment=name, nave=n_events)
+        evokeds[name] = evoked
+
+    if return_toeplitz:
+        return evokeds, toeplitz
+
+    return evokeds
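+
+
+# Editor's note: a minimal usage sketch for least_square_evoked, not part of
+# the original module. It assumes `epochs` is a preloaded mne.Epochs instance
+# built without baseline correction (required for overlap correction).
+def _example_least_square_evoked(epochs):
+    """Illustrative only: estimate overlap-corrected evoked responses."""
+    evokeds, toeplitz = least_square_evoked(epochs, return_toeplitz=True)
+    for name, evoked in evokeds.items():
+        # each value is an EvokedArray of shape (n_channels, n_times)
+        print('%s: nave=%d' % (name, evoked.nave))
+    return evokeds, toeplitz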
+
+
+class Xdawn(TransformerMixin, ContainsMixin):
+
+    """Implementation of the Xdawn Algorithm.
+
+    Xdawn is a spatial filtering method designed to improve the signal
+    to signal + noise ratio (SSNR) of the ERP responses. Xdawn was originaly
+    designed for P300 evoked potential by enhancing the target response with
+    respect to the non-target response. This implementation is a generalization
+    to any type of ERP.
+
+    Parameters
+    ----------
+    n_components : int (default 2)
+        The number of components to decompose M/EEG signals.
+    signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)
+        (default None). The signal covariance used for whitening of the data.
+        If None, the covariance is estimated from the epochs signal.
+    correct_overlap : 'auto' or bool (default 'auto')
+        Apply correction for overlapping ERPs when estimating the evoked
+        responses. If 'auto', overlap correction is enabled or disabled
+        based on the inter-event intervals in epochs.events.
+    reg : float | str | None (default None)
+        If not None, apply regularization when estimating the covariance.
+        If float, a shrinkage covariance is used (0 <= shrinkage <= 1).
+        If str, use optimal shrinkage via Ledoit-Wolf ('ledoit_wolf') or
+        Oracle Approximating Shrinkage ('oas').
+
+    Attributes
+    ----------
+    filters_ : dict of ndarray
+        If fit, the Xdawn components used to decompose the data for each event
+        type, else empty.
+    patterns_ : dict of ndarray
+        If fit, the Xdawn patterns used to restore M/EEG signals for each event
+        type, else empty.
+    evokeds_ : dict of evoked instance
+        If fit, the evoked response for each event type.
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    See Also
+    --------
+    ICA
+    CSP
+
+    References
+    ----------
+    [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
+    algorithm to enhance evoked potentials: application to brain-computer
+    interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
+
+    [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
+    August). Theoretical analysis of xDAWN algorithm: application to an
+    efficient sensor selection in a P300 BCI. In Signal Processing Conference,
+    2011 19th European (pp. 1382-1386). IEEE.
+    """
+
+    def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',
+                 reg=None):
+        """init xdawn."""
+        self.n_components = n_components
+        self.signal_cov = signal_cov
+        self.reg = reg
+        self.filters_ = dict()
+        self.patterns_ = dict()
+        self.evokeds_ = dict()
+
+        if correct_overlap not in ['auto', True, False]:
+            raise ValueError('correct_overlap must be a bool or "auto"')
+        self.correct_overlap = correct_overlap
+
+    def fit(self, epochs, y=None):
+        """Fit Xdawn from epochs.
+
+        Parameters
+        ----------
+        epochs : Epochs object
+            An instance of Epochs on which the Xdawn filters will be trained.
+        y : ndarray | None (default None)
+            Not used, here for compatibility with decoding API.
+
+        Returns
+        -------
+        self : Xdawn instance
+            The Xdawn instance.
+        """
+        if self.correct_overlap == 'auto':
+            self.correct_overlap = _check_overlapp(epochs)
+
+        # Extract signal covariance
+        if self.signal_cov is None:
+            if self.correct_overlap:
+                sig_data = _construct_signal_from_epochs(epochs)
+            else:
+                sig_data = np.hstack(epochs.get_data())
+            self.signal_cov_ = _regularized_covariance(sig_data, self.reg)
+        elif isinstance(self.signal_cov, Covariance):
+            self.signal_cov_ = self.signal_cov.data
+        elif isinstance(self.signal_cov, np.ndarray):
+            self.signal_cov_ = self.signal_cov
+        else:
+            raise ValueError('signal_cov must be None, a covariance instance '
+                             'or a ndarray')
+
+        # estimate the evoked covariances
+        self.evokeds_cov_ = dict()
+        if self.correct_overlap:
+            if epochs.baseline is not None:
+                raise ValueError('Baseline correction must be None if overlap '
+                                 'correction activated')
+            evokeds, toeplitz = least_square_evoked(epochs,
+                                                    return_toeplitz=True)
+        else:
+            evokeds = dict()
+            toeplitz = dict()
+            for eid in epochs.event_id:
+                evokeds[eid] = epochs[eid].average()
+                toeplitz[eid] = 1.0
+        self.evokeds_ = evokeds
+
+        for eid in epochs.event_id:
+            data = np.dot(evokeds[eid].data, toeplitz[eid])
+            self.evokeds_cov_[eid] = _regularized_covariance(data, self.reg)
+
+        # estimate the spatial filters
+        for eid in epochs.event_id:
+
+            if self.signal_cov_.shape != self.evokeds_cov_[eid].shape:
+                raise ValueError('Size of signal cov must be the same as the'
+                                 ' number of channels in epochs')
+
+            evals, evecs = linalg.eigh(self.evokeds_cov_[eid],
+                                       self.signal_cov_)
+            evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
+            evecs /= np.sqrt(np.sum(evecs ** 2, axis=0))
+
+            self.filters_[eid] = evecs
+            self.patterns_[eid] = linalg.inv(evecs.T)
+
+        # store some values
+        self.ch_names = epochs.ch_names
+        self.exclude = list(range(self.n_components, len(self.ch_names)))
+        self.event_id = epochs.event_id
+        return self
+
+    def transform(self, epochs):
+        """Apply Xdawn dim reduction.
+
+        Parameters
+        ----------
+        epochs : Epochs | ndarray, shape (n_epochs, n_channels, n_times)
+            Data on which Xdawn filters will be applied.
+
+        Returns
+        -------
+        X : ndarray, shape (n_epochs, n_components * event_types, n_times)
+            Spatially filtered signals.
+        """
+        if isinstance(epochs, _BaseEpochs):
+            data = epochs.get_data()
+        elif isinstance(epochs, np.ndarray):
+            data = epochs
+        else:
+            raise ValueError('Data input must be of Epoch '
+                             'type or numpy array')
+
+        # create full matrix of spatial filter
+        full_filters = list()
+        for filt in self.filters_.values():
+            full_filters.append(filt[:, 0:self.n_components])
+        full_filters = np.concatenate(full_filters, axis=1)
+
+        # Apply spatial filters
+        X = np.dot(full_filters.T, data)
+        X = X.transpose((1, 0, 2))
+        return X
+
+    def apply(self, inst, event_id=None, include=None, exclude=None):
+        """Remove selected components from the signal.
+
+        Given the unmixing matrix, transform data,
+        zero out components, and inverse transform the data.
+        This procedure will reconstruct M/EEG signals from which
+        the dynamics described by the excluded components is subtracted.
+
+        Parameters
+        ----------
+        inst : instance of Raw | Epochs | Evoked
+            The data to be processed.
+        event_id : dict | list of str | None (default None)
+            The kind of event to apply. If None, a dict of instances is
+            returned, one for each event type on which Xdawn has been fitted.
+        include : array_like of int | None (default None)
+            The indices referring to columns in the unmixing matrix. The
+            components to be kept. If None, the first n_components (as
+            defined in the Xdawn constructor) will be kept.
+        exclude : array_like of int | None (default None)
+            The indices referring to columns in the unmixing matrix. The
+            components to be zeroed out. If None, all the components except
+            the first n_components will be excluded.
+
+        Returns
+        -------
+        out : dict of instances
+            A dict of instances (of the same type as the inst input), one
+            for each event type in event_id.
+        """
+        if event_id is None:
+            event_id = self.event_id
+
+        if isinstance(inst, _BaseRaw):
+            out = self._apply_raw(raw=inst, include=include, exclude=exclude,
+                                  event_id=event_id)
+        elif isinstance(inst, _BaseEpochs):
+            out = self._apply_epochs(epochs=inst, include=include,
+                                     exclude=exclude, event_id=event_id)
+        elif isinstance(inst, Evoked):
+            out = self._apply_evoked(evoked=inst, include=include,
+                                     exclude=exclude, event_id=event_id)
+        else:
+            raise ValueError('Data input must be Raw, Epochs or Evoked type')
+        return out
+
+    def _apply_raw(self, raw, include, exclude, event_id):
+        """Aux method."""
+        if not raw.preload:
+            raise ValueError('Raw data must be preloaded to apply Xdawn')
+
+        picks = pick_types(raw.info, meg=False, include=self.ch_names,
+                           exclude='bads')
+        raws = dict()
+        for eid in event_id:
+            data = raw[picks, :][0]
+
+            data = self._pick_sources(data, include, exclude, eid)
+
+            raw_r = raw.copy()
+
+            raw_r[picks, :] = data
+            raws[eid] = raw_r
+        return raws
+
+    def _apply_epochs(self, epochs, include, exclude, event_id):
+        """Aux method."""
+        if not epochs.preload:
+            raise ValueError('Epochs must be preloaded to apply Xdawn')
+
+        picks = pick_types(epochs.info, meg=False, ref_meg=False,
+                           include=self.ch_names, exclude='bads')
+
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'xdawn.ch_names' % (len(self.ch_names),
+                                                   len(picks)))
+
+        epochs_dict = dict()
+        data = np.hstack(epochs.get_data()[:, picks])
+
+        for eid in event_id:
+
+            data_r = self._pick_sources(data, include, exclude, eid)
+            data_r = np.array(np.split(data_r, len(epochs.events), 1))
+            info_r = cp.deepcopy(epochs.info)
+            epochs_r = EpochsArray(data=data_r, info=info_r,
+                                   events=epochs.events, tmin=epochs.tmin,
+                                   event_id=epochs.event_id, verbose=False)
+            epochs_r.preload = True
+            epochs_dict[eid] = epochs_r
+
+        return epochs_dict
+
+    def _apply_evoked(self, evoked, include, exclude, event_id):
+        """Aux method."""
+        picks = pick_types(evoked.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where the evoked comes picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked does not match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide an Evoked object that\'s compatible '
+                               'with xdawn.ch_names' % (len(self.ch_names),
+                                                        len(picks)))
+
+        data = evoked.data[picks]
+        evokeds = dict()
+
+        for eid in event_id:
+
+            data_r = self._pick_sources(data, include, exclude, eid)
+            evokeds[eid] = evoked.copy()
+
+            # restore evoked
+            evokeds[eid].data[picks] = data_r
+
+        return evokeds
+
+    def _pick_sources(self, data, include, exclude, eid):
+        """Aux method."""
+        fast_dot = _get_fast_dot()
+        if exclude is None:
+            exclude = self.exclude
+        else:
+            exclude = list(set(list(self.exclude) + list(exclude)))
+
+        logger.info('Transforming to Xdawn space')
+
+        # Apply unmixing
+        sources = fast_dot(self.filters_[eid].T, data)
+
+        if include not in (None, []):
+            mask = np.ones(len(sources), dtype=np.bool)
+            mask[np.unique(include)] = False
+            sources[mask] = 0.
+            logger.info('Zeroing out %i Xdawn components' % mask.sum())
+        elif exclude not in (None, []):
+            exclude_ = np.unique(exclude)
+            sources[exclude_] = 0.
+            logger.info('Zeroing out %i Xdawn components' % len(exclude_))
+        logger.info('Inverse transforming to sensor space')
+        data = fast_dot(self.patterns_[eid], sources)
+
+        return data
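+
+
+# Editor's note: a minimal end-to-end sketch of the Xdawn workflow, not part
+# of the original module. It assumes `epochs` is a preloaded mne.Epochs
+# instance (without baseline correction if overlap correction is active).
+def _example_xdawn_workflow(epochs):
+    """Illustrative only: fit Xdawn, reduce dimension, and denoise."""
+    xd = Xdawn(n_components=2, correct_overlap='auto', reg='ledoit_wolf')
+    xd.fit(epochs)
+    X = xd.transform(epochs)  # (n_epochs, n_components * n_types, n_times)
+    # reconstruct signals keeping only the first n_components per event type
+    denoised = xd.apply(epochs)
+    return X, denoised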
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/proj.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/proj.py
new file mode 100644
index 0000000..c146331
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/proj.py
@@ -0,0 +1,396 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import linalg
+
+from . import io, Epochs
+from .utils import check_fname, logger, verbose
+from .io.pick import pick_types, pick_types_forward
+from .io.proj import Projection, _has_eeg_average_ref_proj
+from .event import make_fixed_length_events
+from .parallel import parallel_func
+from .cov import _check_n_samples
+from .forward import (is_fixed_orient, _subject_from_forward,
+                      convert_forward_solution)
+from .source_estimate import SourceEstimate, VolSourceEstimate
+from .io.proj import make_projector, make_eeg_average_ref_proj
+
+
+def read_proj(fname):
+    """Read projections from a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the projections vectors. It should end with
+        -proj.fif or -proj.fif.gz.
+
+    Returns
+    -------
+    projs : list
+        The list of projection vectors.
+
+    See Also
+    --------
+    write_proj
+    """
+    check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
+
+    ff, tree, _ = io.fiff_open(fname)
+    with ff as fid:
+        projs = io.proj._read_proj(fid, tree)
+    return projs
+
+
+def write_proj(fname, projs):
+    """Write projections to a FIF file.
+
+    Parameters
+    ----------
+    fname : string
+        The name of file containing the projections vectors. It should end with
+        -proj.fif or -proj.fif.gz.
+
+    projs : list
+        The list of projection vectors.
+
+    See Also
+    --------
+    read_proj
+    """
+    check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
+
+    fid = io.write.start_file(fname)
+    io.proj._write_proj(fid, projs)
+    io.write.end_file(fid)
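+
+
+# Editor's note: a minimal round-trip sketch, not part of the original
+# module. The file name is illustrative and must end in -proj.fif or
+# -proj.fif.gz.
+def _example_proj_roundtrip(projs):
+    """Illustrative only: write projections to disk and read them back."""
+    write_proj('sample-proj.fif', projs)
+    return read_proj('sample-proj.fif')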
+
+
+@verbose
+def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, verbose=None):
+    mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads')
+    grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads')
+    eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+
+    if (n_grad > 0) and len(grad_ind) == 0:
+        logger.info("No gradiometers found. Forcing n_grad to 0")
+        n_grad = 0
+    if (n_mag > 0) and len(mag_ind) == 0:
+        logger.info("No magnetometers found. Forcing n_mag to 0")
+        n_mag = 0
+    if (n_eeg > 0) and len(eeg_ind) == 0:
+        logger.info("No EEG channels found. Forcing n_eeg to 0")
+        n_eeg = 0
+
+    ch_names = info['ch_names']
+    grad_names, mag_names, eeg_names = ([ch_names[k] for k in ind]
+                                        for ind in [grad_ind, mag_ind,
+                                                    eeg_ind])
+
+    projs = []
+    for n, ind, names, desc in zip([n_grad, n_mag, n_eeg],
+                                   [grad_ind, mag_ind, eeg_ind],
+                                   [grad_names, mag_names, eeg_names],
+                                   ['planar', 'axial', 'eeg']):
+        if n == 0:
+            continue
+        data_ind = data[ind][:, ind]
+        U = linalg.svd(data_ind, full_matrices=False,
+                       overwrite_a=True)[0][:, :n]
+        for k, u in enumerate(U.T):
+            proj_data = dict(col_names=names, row_names=None,
+                             data=u[np.newaxis, :], nrow=1, ncol=u.size)
+            this_desc = "%s-%s-PCA-%02d" % (desc, desc_prefix, k + 1)
+            logger.info("Adding projection: %s" % this_desc)
+            proj = Projection(active=False, data=proj_data,
+                              desc=this_desc, kind=1)
+            projs.append(proj)
+
+    return projs
+
+
+@verbose
+def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
+                        desc_prefix=None, verbose=None):
+    """Compute SSP (spatial space projection) vectors on Epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs containing the artifact.
+    n_grad : int
+        Number of vectors for gradiometers.
+    n_mag : int
+        Number of vectors for magnetometers.
+    n_eeg : int
+        Number of vectors for EEG channels.
+    n_jobs : int
+        Number of jobs to use to compute covariance.
+    desc_prefix : str | None
+        The description prefix to use. If None, one will be created based on
+        the event_id, tmin, and tmax.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        List of projection vectors.
+
+    See Also
+    --------
+    compute_proj_raw, compute_proj_evoked
+    """
+    # compute data covariance
+    data = _compute_cov_epochs(epochs, n_jobs)
+    event_id = epochs.event_id
+    if event_id is None or len(list(event_id.keys())) == 0:
+        event_id = '0'
+    elif len(event_id.keys()) == 1:
+        event_id = str(list(event_id.values())[0])
+    else:
+        event_id = 'Multiple-events'
+    if desc_prefix is None:
+        desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
+    return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix)
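+
+
+# Editor's note: a minimal sketch of the typical artifact workflow, not part
+# of the original module. It assumes `epochs` was built around artifact
+# events (e.g. ECG or EOG onsets found with mne.preprocessing routines).
+def _example_proj_from_epochs(epochs):
+    """Illustrative only: two gradiometer/magnetometer SSP vectors each."""
+    projs = compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=0)
+    return projs  # add to raw/epochs with inst.add_proj(projs)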
+
+
+def _compute_cov_epochs(epochs, n_jobs):
+    """Helper function for computing epochs covariance"""
+    parallel, p_fun, _ = parallel_func(np.dot, n_jobs)
+    data = parallel(p_fun(e, e.T) for e in epochs)
+    n_epochs = len(data)
+    if n_epochs == 0:
+        raise RuntimeError('No good epochs found')
+
+    n_chan, n_samples = epochs.info['nchan'], len(epochs.times)
+    _check_n_samples(n_samples * n_epochs, n_chan)
+    data = sum(data)
+    return data
+
+
+@verbose
+def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, verbose=None):
+    """Compute SSP (spatial space projection) vectors on Evoked
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The Evoked obtained by averaging the artifact.
+    n_grad : int
+        Number of vectors for gradiometers.
+    n_mag : int
+        Number of vectors for magnetometers.
+    n_eeg : int
+        Number of vectors for EEG channels.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        List of projection vectors.
+
+    See Also
+    --------
+    compute_proj_raw, compute_proj_epochs
+    """
+    data = np.dot(evoked.data, evoked.data.T)  # compute data covariance
+    desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
+    return _compute_proj(data, evoked.info, n_grad, n_mag, n_eeg, desc_prefix)
+
+
+@verbose
+def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
+                     n_eeg=0, reject=None, flat=None, n_jobs=1, verbose=None):
+    """Compute SSP (spatial space projection) vectors on Raw
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object to use the data from.
+    start : float
+        Time (in sec) to start computing SSP.
+    stop : float
+        Time (in sec) to stop computing SSP.
+        None will go to the end of the file.
+    duration : float
+        Duration (in sec) to chunk data into for SSP.
+        If duration is None, data will not be chunked.
+    n_grad : int
+        Number of vectors for gradiometers.
+    n_mag : int
+        Number of vectors for magnetometers.
+    n_eeg : int
+        Number of vectors for EEG channels.
+    reject : dict | None
+        Epoch rejection configuration (see Epochs).
+    flat : dict | None
+        Epoch flat configuration (see Epochs).
+    n_jobs : int
+        Number of jobs to use to compute covariance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    projs : list
+        List of projection vectors.
+
+    See Also
+    --------
+    compute_proj_epochs, compute_proj_evoked
+    """
+    if duration is not None:
+        events = make_fixed_length_events(raw, 999, start, stop, duration)
+        epochs = Epochs(raw, events, None, tmin=0., tmax=duration,
+                        picks=pick_types(raw.info, meg=True, eeg=True,
+                                         eog=True, ecg=True, emg=True,
+                                         exclude='bads'),
+                        reject=reject, flat=flat)
+        data = _compute_cov_epochs(epochs, n_jobs)
+        info = epochs.info
+        if not stop:
+            stop = raw.n_times / raw.info['sfreq']
+    else:
+        # convert to sample indices
+        start = max(raw.time_as_index(start)[0], 0)
+        stop = raw.time_as_index(stop)[0] if stop else raw.n_times
+        stop = min(stop, raw.n_times)
+        data, times = raw[:, start:stop]
+        _check_n_samples(stop - start, data.shape[0])
+        data = np.dot(data, data.T)  # compute data covariance
+        info = raw.info
+        # convert back to times
+        start = start / raw.info['sfreq']
+        stop = stop / raw.info['sfreq']
+
+    desc_prefix = "Raw-%-.3f-%-.3f" % (start, stop)
+    projs = _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix)
+    return projs
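+
+
+# Editor's note: a minimal sketch for continuous (e.g. empty-room) data, not
+# part of the original module; `raw` is assumed to be an mne.io.Raw instance.
+def _example_proj_from_raw(raw):
+    """Illustrative only: compute SSP vectors on 1-s chunks of raw data."""
+    projs = compute_proj_raw(raw, start=0, stop=30, duration=1,
+                             n_grad=2, n_mag=2, n_eeg=0)
+    raw.add_proj(projs)  # attach; raw.apply_proj() would activate them
+    return projs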
+
+
+def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
+                    verbose=None):
+    """Compute sensitivity map
+
+    Such maps are used to know how much sources are visible by a type
+    of sensor, and how much projections shadow some sources.
+
+    Parameters
+    ----------
+    fwd : dict
+        The forward operator.
+    projs : list
+        List of projection vectors.
+    ch_type : 'grad' | 'mag' | 'eeg'
+        The type of sensors to use.
+    mode : str
+        The type of sensitivity map computed. See manual. Should be 'free',
+        'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening'
+        corresponding to the argument --map 1, 2, 3, 4, 5, 6 and 7 of the
+        command mne_sensitivity_map.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in fwd['info']['bads'].
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The sensitivity map as a SourceEstimate or VolSourceEstimate instance
+        for visualization.
+    """
+    # check strings
+    if ch_type not in ['eeg', 'grad', 'mag']:
+        raise ValueError("ch_type should be 'eeg', 'mag' or 'grad (got %s)"
+                         % ch_type)
+    if mode not in ['free', 'fixed', 'ratio', 'radiality', 'angle',
+                    'remaining', 'dampening']:
+        raise ValueError('Unknown mode type (got %s)' % mode)
+
+    # check forward
+    if is_fixed_orient(fwd, orig=True):
+        raise ValueError('fwd must be computed with free orientation')
+    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False,
+                                   verbose=False)
+    if not fwd['surf_ori'] or is_fixed_orient(fwd):
+        raise RuntimeError('Error converting solution, please notify '
+                           'mne-python developers')
+
+    # limit forward
+    if ch_type == 'eeg':
+        fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
+    else:
+        fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)
+
+    gain = fwd['sol']['data']
+
+    # Make sure EEG has average
+    if ch_type == 'eeg':
+        if projs is None or not _has_eeg_average_ref_proj(projs):
+            eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
+        else:
+            eeg_ave = []
+        projs = eeg_ave if projs is None else projs + eeg_ave
+
+    # Construct the projector
+    if projs is not None:
+        proj, ncomp, U = make_projector(projs, fwd['sol']['row_names'],
+                                        include_active=True)
+        # do projection for most types
+        if mode not in ['angle', 'remaining', 'dampening']:
+            gain = np.dot(proj, gain)
+
+    # can only run the last couple methods if there are projectors
+    elif mode in ['angle', 'remaining', 'dampening']:
+        raise ValueError('No projectors used, cannot compute %s' % mode)
+
+    n_sensors, n_dipoles = gain.shape
+    n_locations = n_dipoles // 3
+    sensitivity_map = np.empty(n_locations)
+
+    for k in range(n_locations):
+        gg = gain[:, 3 * k:3 * (k + 1)]
+        if mode != 'fixed':
+            s = linalg.svd(gg, full_matrices=False, compute_uv=False)
+        if mode == 'free':
+            sensitivity_map[k] = s[0]
+        else:
+            gz = linalg.norm(gg[:, 2])  # the normal component
+            if mode == 'fixed':
+                sensitivity_map[k] = gz
+            elif mode == 'ratio':
+                sensitivity_map[k] = gz / s[0]
+            elif mode == 'radiality':
+                sensitivity_map[k] = 1. - (gz / s[0])
+            else:
+                if mode == 'angle':
+                    co = linalg.norm(np.dot(gg[:, 2], U))
+                    sensitivity_map[k] = co / gz
+                else:
+                    p = linalg.norm(np.dot(proj, gg[:, 2]))
+                    if mode == 'remaining':
+                        sensitivity_map[k] = p / gz
+                    elif mode == 'dampening':
+                        sensitivity_map[k] = 1. - p / gz
+                    else:
+                        raise ValueError('Unknown mode type (got %s)' % mode)
+
+    # only normalize fixed and free methods
+    if mode in ['fixed', 'free']:
+        sensitivity_map /= np.max(sensitivity_map)
+
+    subject = _subject_from_forward(fwd)
+    if fwd['src'][0]['type'] == 'vol':  # volume source space
+        vertices = fwd['src'][0]['vertno']
+        SEClass = VolSourceEstimate
+    else:
+        vertices = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
+        SEClass = SourceEstimate
+    stc = SEClass(sensitivity_map[:, np.newaxis], vertices=vertices, tmin=0,
+                  tstep=1, subject=subject)
+    return stc
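+
+
+# Editor's note: a minimal sketch, not part of the original module. It
+# assumes `fwd` is a free-orientation forward solution and `projs` a list of
+# SSP projectors (or None).
+def _example_sensitivity_map(fwd, projs=None):
+    """Illustrative only: gradiometer sensitivity, normalized to [0, 1]."""
+    stc = sensitivity_map(fwd, projs=projs, ch_type='grad', mode='fixed')
+    return stc  # a (Vol)SourceEstimate, ready for visualization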
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/__init__.py
new file mode 100644
index 0000000..cee63e9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/__init__.py
@@ -0,0 +1,14 @@
+""" Module for realtime MEG data using mne_rt_server """
+
+# Authors: Christoph Dinh <chdinh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from .client import RtClient
+from .epochs import RtEpochs
+from .mockclient import MockRtClient
+from .fieldtrip_client import FieldTripClient
+from .stim_server_client import StimServer, StimClient
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/client.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/client.py
new file mode 100644
index 0000000..e17e102
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/client.py
@@ -0,0 +1,375 @@
+from __future__ import print_function
+# Authors: Christoph Dinh <chdinh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import socket
+import time
+from ..externals.six.moves import StringIO
+import threading
+
+import numpy as np
+
+from ..utils import logger, verbose
+from ..io.constants import FIFF
+from ..io.meas_info import read_meas_info
+from ..io.tag import Tag, read_tag
+from ..io.tree import make_dir_tree
+
+# Constants for fiff realtime fiff messages
+MNE_RT_GET_CLIENT_ID = 1
+MNE_RT_SET_CLIENT_ALIAS = 2
+
+
+def _recv_tag_raw(sock):
+    """Read a tag and the associated data from a socket
+
+    Parameters
+    ----------
+    sock : socket.socket
+        The socket from which to read the tag.
+
+    Returns
+    -------
+    tag : instance of Tag
+        The tag.
+    buff : str
+        The raw data of the tag (including header).
+    """
+    s = sock.recv(4 * 4)
+    if len(s) != 16:
+        raise RuntimeError('Not enough bytes received, something is wrong. '
+                           'Make sure the mne_rt_server is running.')
+    tag = Tag(*np.fromstring(s, '>i4'))
+    n_received = 0
+    rec_buff = [s]
+    while n_received < tag.size:
+        n_buffer = min(4096, tag.size - n_received)
+        this_buffer = sock.recv(n_buffer)
+        rec_buff.append(this_buffer)
+        n_received += len(this_buffer)
+
+    if n_received != tag.size:
+        raise RuntimeError('Not enough bytes received, something is wrong. '
+                           'Make sure the mne_rt_server is running.')
+
+    buff = ''.join(rec_buff)
+
+    return tag, buff
+
+
+def _buffer_recv_worker(rt_client, nchan):
+    """Worker thread that constantly receives buffers"""
+    try:
+        for raw_buffer in rt_client.raw_buffers(nchan):
+            rt_client._push_raw_buffer(raw_buffer)
+    except RuntimeError as err:
+        # something is wrong, the server stopped (or something)
+        rt_client._recv_thread = None
+        print('Buffer receive thread stopped: %s' % err)
+
+
+class RtClient(object):
+    """Realtime Client
+
+    Client to communicate with mne_rt_server
+
+    Parameters
+    ----------
+    host : str
+        Hostname (or IP address) of the host where mne_rt_server is running.
+
+    cmd_port : int
+        Port to use for the command connection.
+
+    data_port : int
+        Port to use for the data connection.
+
+    timeout : float
+        Communication timeout in seconds.
+
+    verbose : bool, str, int, or None
+        Log verbosity. See mne.verbose.
+    """
+    @verbose
+    def __init__(self, host, cmd_port=4217, data_port=4218, timeout=1.0,
+                 verbose=None):
+        self._host = host
+        self._data_port = data_port
+        self._cmd_port = cmd_port
+        self._timeout = timeout
+
+        try:
+            self._cmd_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._cmd_sock.settimeout(timeout)
+            self._cmd_sock.connect((host, cmd_port))
+            self._cmd_sock.setblocking(0)
+        except Exception:
+            raise RuntimeError('Setting up command connection (host: %s '
+                               'port: %d) failed. Make sure mne_rt_server '
+                               'is running. ' % (host, cmd_port))
+
+        try:
+            self._data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._data_sock.settimeout(timeout)
+            self._data_sock.connect((host, data_port))
+            self._data_sock.setblocking(1)
+        except Exception:
+            raise RuntimeError('Setting up data connection (host: %s '
+                               'port: %d) failed. Make sure mne_rt_server '
+                               'is running.' % (host, data_port))
+
+        self.verbose = verbose
+
+        # get my client ID
+        self._client_id = self.get_client_id()
+
+        self._recv_thread = None
+        self._recv_callbacks = list()
+
+    def _send_command(self, command):
+        """Send a command to the server
+
+        Parameters
+        ----------
+        command : str
+            The command to send.
+
+        Returns
+        -------
+        resp : str
+            The response from the server.
+        """
+
+        logger.debug('Sending command: %s' % command)
+        command += '\n'
+        self._cmd_sock.sendall(command.encode('utf-8'))
+
+        buf, chunk, begin = [], '', time.time()
+        while True:
+            # if we got some data, break once the timeout has elapsed
+            if buf and time.time() - begin > self._timeout:
+                break
+            # if we got no data at all, wait a little longer
+            elif time.time() - begin > self._timeout * 2:
+                break
+            try:
+                chunk = self._cmd_sock.recv(8192)
+                if chunk:
+                    buf.append(chunk)
+                    begin = time.time()
+                else:
+                    time.sleep(0.1)
+            except socket.error:
+                # non-blocking socket: no data available yet
+                pass
+
+        return ''.join(buf)
+
+    def _send_fiff_command(self, command, data=None):
+        """Send a command through the data connection as a fiff tag
+
+        Parameters
+        ----------
+        command : int
+            The command code.
+
+        data : str
+            Additional data to send.
+        """
+        # a fiff tag has a 16-byte header (kind, type, size, next) followed
+        # by the payload; avoid shadowing the builtins type() and next()
+        kind = FIFF.FIFF_MNE_RT_COMMAND
+        tag_type = FIFF.FIFFT_VOID
+        size = 4
+        if data is not None:
+            size += len(data)  # first 4 bytes are the command code
+        next_pos = 0
+
+        msg = np.array(kind, dtype='>i4').tostring()
+        msg += np.array(tag_type, dtype='>i4').tostring()
+        msg += np.array(size, dtype='>i4').tostring()
+        msg += np.array(next_pos, dtype='>i4').tostring()
+
+        msg += np.array(command, dtype='>i4').tostring()
+        if data is not None:
+            msg += np.array(data, dtype='>c').tostring()
+
+        self._data_sock.sendall(msg)
+
+    def get_measurement_info(self):
+        """Get the measurement information
+
+        Returns
+        -------
+        info : dict
+            The measurement information.
+        """
+        cmd = 'measinfo %d' % self._client_id
+        self._send_command(cmd)
+
+        buff = []
+        directory = []
+        pos = 0
+        while True:
+            tag, this_buff = _recv_tag_raw(self._data_sock)
+            tag.pos = pos
+            pos += 16 + tag.size
+            directory.append(tag)
+            buff.append(this_buff)
+            if tag.kind == FIFF.FIFF_BLOCK_END and tag.type == FIFF.FIFFT_INT:
+                val = np.fromstring(this_buff[-4:], dtype=">i4")
+                if val == FIFF.FIFFB_MEAS_INFO:
+                    break
+
+        buff = ''.join(buff)
+
+        fid = StringIO(buff)
+        tree, _ = make_dir_tree(fid, directory)
+        info, meas = read_meas_info(fid, tree)
+
+        return info
+
+    def set_client_alias(self, alias):
+        """Set client alias
+
+        Parameters
+        ----------
+        alias : str
+            The client alias.
+        """
+        self._send_fiff_command(MNE_RT_SET_CLIENT_ALIAS, alias)
+
+    def get_client_id(self):
+        """Get the client ID
+
+        Returns
+        -------
+        id : int
+            The client ID.
+        """
+        self._send_fiff_command(MNE_RT_GET_CLIENT_ID)
+
+        # the ID is sent as the answer
+        tag, buff = _recv_tag_raw(self._data_sock)
+        if (tag.kind == FIFF.FIFF_MNE_RT_CLIENT_ID and
+                tag.type == FIFF.FIFFT_INT):
+            client_id = int(np.fromstring(buff[-4:], dtype=">i4"))
+        else:
+            raise RuntimeError('wrong tag received')
+
+        return client_id
+
+    def start_measurement(self):
+        """Start the measurement"""
+        cmd = 'start %d' % self._client_id
+        self._send_command(cmd)
+
+    def stop_measurement(self):
+        """Stop the measurement"""
+        self._send_command('stop-all')
+
+    def start_receive_thread(self, nchan):
+        """Start the receive thread
+
+        If the measurement has not been started, it will also be started.
+
+        Parameters
+        ----------
+        nchan : int
+            The number of channels in the data.
+        """
+
+        if self._recv_thread is None:
+            self.start_measurement()
+
+            self._recv_thread = threading.Thread(target=_buffer_recv_worker,
+                                                 args=(self, nchan))
+            self._recv_thread.start()
+
+    def stop_receive_thread(self, stop_measurement=False):
+        """Stop the receive thread
+
+        Parameters
+        ----------
+        stop_measurement : bool
+            Also stop the measurement.
+        """
+        if self._recv_thread is not None:
+            # threading.Thread has no stop() method; dropping the reference
+            # lets the worker exit once the server stops sending buffers
+            self._recv_thread = None
+
+        if stop_measurement:
+            self.stop_measurement()
+
+    def register_receive_callback(self, callback):
+        """Register a raw buffer receive callback
+
+        Parameters
+        ----------
+        callback : callable
+            The callback. The raw buffer is passed as the first parameter
+            to callback.
+        """
+        if callback not in self._recv_callbacks:
+            self._recv_callbacks.append(callback)
+
+    def unregister_receive_callback(self, callback):
+        """Unregister a raw buffer receive callback
+
+        Parameters
+        ----------
+        callback : function
+            The callback to unregister.
+        """
+        if callback in self._recv_callbacks:
+            self._recv_callbacks.remove(callback)
+
+    def _push_raw_buffer(self, raw_buffer):
+        """Push raw buffer to clients using callbacks"""
+        for callback in self._recv_callbacks:
+            callback(raw_buffer)
+
+    def read_raw_buffer(self, nchan):
+        """Read a single buffer with raw data
+
+        Parameters
+        ----------
+        nchan : int
+            The number of channels (info['nchan']).
+
+        Returns
+        -------
+        raw_buffer : float array, shape=(nchan, n_times)
+            The raw data.
+        """
+        tag, this_buff = _recv_tag_raw(self._data_sock)
+
+        # skip tags until we get a data buffer
+        while tag.kind != FIFF.FIFF_DATA_BUFFER:
+            tag, this_buff = _recv_tag_raw(self._data_sock)
+
+        buff = StringIO(this_buff)
+        tag = read_tag(buff)
+        raw_buffer = tag.data.reshape(-1, nchan).T
+
+        return raw_buffer
+
+    def raw_buffers(self, nchan):
+        """Return an iterator over raw buffers
+
+        Parameters
+        ----------
+        nchan : int
+            The number of channels (info['nchan']).
+
+        Returns
+        -------
+        raw_buffer : generator
+            Generator for iteration over raw buffers.
+        """
+        while True:
+            raw_buffer = self.read_raw_buffer(nchan)
+            if raw_buffer is not None:
+                yield raw_buffer
+            else:
+                break
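+
+
+# Editor's note: a minimal sketch of receiving buffers with RtClient, not
+# part of the original module. It assumes an mne_rt_server is reachable on
+# localhost with the default ports.
+def _example_rt_client():
+    """Illustrative only: print the shape of each incoming raw buffer."""
+    client = RtClient('localhost')
+    info = client.get_measurement_info()
+    client.register_receive_callback(
+        lambda raw_buffer: print(raw_buffer.shape))
+    client.start_receive_thread(info['nchan'])
+    return client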
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/epochs.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/epochs.py
new file mode 100644
index 0000000..1bf0df7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/epochs.py
@@ -0,0 +1,420 @@
+# Authors: Christoph Dinh <chdinh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+import time
+import copy
+
+import numpy as np
+
+from .. import pick_channels
+from ..utils import logger, verbose
+from ..epochs import _BaseEpochs
+from ..event import _find_events
+
+
+class RtEpochs(_BaseEpochs):
+    """Realtime Epochs
+
+    Can receive epochs in real time from an RtClient.
+
+    For example, to get some epochs from a running mne_rt_server on
+    'localhost', you could use::
+
+        client = mne.realtime.RtClient('localhost')
+        event_id, tmin, tmax = 1, -0.2, 0.5
+
+        epochs = mne.realtime.RtEpochs(client, event_id, tmin, tmax)
+        epochs.start()  # start the measurement and start receiving epochs
+
+        evoked_1 = epochs.average()  # computed over all epochs
+        evoked_2 = epochs[-5:].average()  # computed over the last 5 epochs
+
+    Parameters
+    ----------
+    client : instance of mne.realtime.RtClient
+        The realtime client.
+    event_id : int | list of int
+        The id of the event to consider. If int, only events with the
+        ID specified by event_id are considered. Multiple event ID's
+        can be specified using a list.
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    stim_channel : string or list of string
+        Name of the stim channel or all the stim channels affected by
+        the trigger.
+    sleep_time : float
+        Time in seconds to wait between checking for new epochs when epochs
+        are requested and the receive queue is empty.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels are used).
+    name : string
+        Comment that describes the Evoked data created.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels))
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    proj : bool, optional
+        Apply SSP projection vectors
+    decim : int
+        Factor by which to downsample the data from the raw file upon import.
+        Warning: This simply selects every nth sample, data is not filtered
+        here. If data is not properly filtered, aliasing artifacts may occur.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    detrend : int | None
+        If 0 or 1, the data channels (MEG and EEG) will be detrended when
+        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
+        is no detrending. Note that detrending is performed before baseline
+        correction. If no DC offset is preferred (zeroth order detrending),
+        either turn off baseline correction, as this may introduce a DC
+        shift, or set baseline correction to use the entire time interval
+        (will yield equivalent results but be slower).
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    isi_max : float
+        The maximum time in seconds between epochs. If no epoch
+        arrives in the next isi_max seconds, iteration over RtEpochs stops.
+    find_events : dict
+        The arguments to the real-time `find_events` method as a dictionary.
+        If `find_events` is None, then default values are used.
+        Valid keys are 'output' | 'consecutive' | 'min_duration' | 'mask'.
+        Example (also default values)::
+
+            find_events = dict(output='onset', consecutive='increasing',
+                               min_duration=0, mask=0)
+
+        See mne.find_events for detailed explanation of these options.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to client.verbose.
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    event_id : dict
+        Names of conditions corresponding to event_ids.
+    ch_names : list of string
+        List of channels' names.
+    events : array, shape (n_events, 3)
+        The events associated with the epochs currently in the queue.
+    verbose : bool, str, int, or None
+        See above.
+    """
+    @verbose
+    def __init__(self, client, event_id, tmin, tmax, stim_channel='STI 014',
+                 sleep_time=0.1, baseline=(None, 0), picks=None,
+                 name='Unknown', reject=None, flat=None, proj=True,
+                 decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
+                 add_eeg_ref=True, isi_max=2., find_events=None, verbose=None):
+
+        info = client.get_measurement_info()
+
+        # the measurement info of the data as we receive it
+        self._client_info = copy.deepcopy(info)
+
+        verbose = client.verbose if verbose is None else verbose
+
+        # call _BaseEpochs constructor
+        super(RtEpochs, self).__init__(
+            info, None, None, event_id, tmin, tmax, baseline, picks=picks,
+            name=name, reject=reject, flat=flat, decim=decim,
+            reject_tmin=reject_tmin, reject_tmax=reject_tmax, detrend=detrend,
+            add_eeg_ref=add_eeg_ref, verbose=verbose, proj=True)
+
+        self._client = client
+
+        if not isinstance(stim_channel, list):
+            stim_channel = [stim_channel]
+
+        stim_picks = pick_channels(self._client_info['ch_names'],
+                                   include=stim_channel, exclude=[])
+
+        if len(stim_picks) == 0:
+            raise ValueError('No stim channel found to extract event '
+                             'triggers.')
+
+        self._stim_picks = stim_picks
+
+        # find_events default options
+        self._find_events_kwargs = dict(output='onset',
+                                        consecutive='increasing',
+                                        min_duration=0, mask=0)
+        # update default options if dictionary is provided
+        if find_events is not None:
+            self._find_events_kwargs.update(find_events)
+        min_samples = (self._find_events_kwargs['min_duration'] *
+                       self.info['sfreq'])
+        self._find_events_kwargs.pop('min_duration', None)
+        self._find_events_kwargs['min_samples'] = min_samples
+
+        self._sleep_time = sleep_time
+
+        # add calibration factors
+        cals = np.zeros(self._client_info['nchan'])
+        for k in range(self._client_info['nchan']):
+            cals[k] = (self._client_info['chs'][k]['range'] *
+                       self._client_info['chs'][k]['cal'])
+        self._cals = cals[:, None]
+
+        # FIFO queues for received epochs and events
+        self._epoch_queue = list()
+        self._events = list()
+
+        # variables needed for receiving raw buffers
+        self._last_buffer = None
+        self._first_samp = 0
+        self._event_backlog = list()
+
+        # Number of good and bad epochs received
+        self._n_good = 0
+        self._n_bad = 0
+
+        self._started = False
+        self._last_time = time.time()
+
+        self.isi_max = isi_max
+
+    @property
+    def events(self):
+        """The events associated with the epochs currently in the queue."""
+        return np.array(self._events)
+
+    def start(self):
+        """Start receiving epochs
+
+        The measurement will be started if it has not already been started.
+        """
+        if not self._started:
+            # register the callback
+            self._client.register_receive_callback(self._process_raw_buffer)
+
+            # start the measurement and the receive thread
+            nchan = self._client_info['nchan']
+            self._client.start_receive_thread(nchan)
+            self._started = True
+            self._last_time = np.inf  # init delay counter. Will stop iters
+
+    def stop(self, stop_receive_thread=False, stop_measurement=False):
+        """Stop receiving epochs
+
+        Parameters
+        ----------
+        stop_receive_thread : bool
+            Stop the receive thread. Note: Other RtEpochs instances will also
+            stop receiving epochs when the receive thread is stopped. The
+            receive thread will always be stopped if stop_measurement is True.
+
+        stop_measurement : bool
+            Also stop the measurement. Note: Other clients attached to the
+            server will also stop receiving data.
+        """
+        if self._started:
+            self._client.unregister_receive_callback(self._process_raw_buffer)
+            self._started = False
+
+        if stop_receive_thread or stop_measurement:
+            self._client.stop_receive_thread(stop_measurement=stop_measurement)
+
+    def next(self, return_event_id=False):
+        """To make iteration over epochs easy.
+
+        Parameters
+        ----------
+        return_event_id : bool
+            If True, return both an epoch and an event_id.
+
+        Returns
+        -------
+        epoch : instance of Epochs
+            The epoch.
+        event_id : int
+            The event id. Only returned if ``return_event_id`` is ``True``.
+        """
+        first = True
+        while True:
+            current_time = time.time()
+            if current_time > (self._last_time + self.isi_max):
+                logger.info('Time of %s seconds exceeded.' % self.isi_max)
+                raise StopIteration
+            if len(self._epoch_queue) > self._current:
+                epoch = self._epoch_queue[self._current]
+                event_id = self._events[self._current][-1]
+                self._current += 1
+                self._last_time = current_time
+                if return_event_id:
+                    return epoch, event_id
+                else:
+                    return epoch
+            if self._started:
+                if first:
+                    logger.info('Waiting for epoch %d' % (self._current + 1))
+                    first = False
+                time.sleep(self._sleep_time)
+            else:
+                raise RuntimeError('Not enough epochs in queue and currently '
+                                   'not receiving epochs, cannot get epochs!')
+
+    def _get_data(self):
+        """Return the data for n_epochs epochs"""
+
+        epochs = list()
+        for epoch in self:
+            epochs.append(epoch)
+
+        data = np.array(epochs)
+
+        return data
+
+    def _process_raw_buffer(self, raw_buffer):
+        """Process raw buffer (callback from RtClient)
+
+        Note: Do not print log messages during regular use. They would be
+        printed asynchronously, which is annoying when working in an
+        interactive shell.
+
+        Parameters
+        ----------
+        raw_buffer : array of float, shape=(nchan, n_times)
+            The raw buffer.
+        """
+        verbose = 'ERROR'
+        sfreq = self.info['sfreq']
+        n_samp = len(self._raw_times)
+
+        # relative start and stop positions in samples
+        tmin_samp = int(round(sfreq * self.tmin))
+        tmax_samp = tmin_samp + n_samp
+
+        last_samp = self._first_samp + raw_buffer.shape[1] - 1
+
+        # apply calibration without inplace modification
+        raw_buffer = self._cals * raw_buffer
+
+        # detect events
+        data = np.abs(raw_buffer[self._stim_picks]).astype(np.int)
+        data = np.atleast_2d(data)
+        buff_events = _find_events(data, self._first_samp, verbose=verbose,
+                                   **self._find_events_kwargs)
+
+        events = self._event_backlog
+        for event_id in self.event_id.values():
+            idx = np.where(buff_events[:, -1] == event_id)[0]
+            events.extend(zip(list(buff_events[idx, 0]),
+                              list(buff_events[idx, -1])))
+
+        events.sort()
+
+        event_backlog = list()
+        for event_samp, event_id in events:
+            epoch = None
+            if (event_samp + tmin_samp >= self._first_samp and
+                    event_samp + tmax_samp <= last_samp):
+                # easy case: whole epoch is in this buffer
+                start = event_samp + tmin_samp - self._first_samp
+                stop = event_samp + tmax_samp - self._first_samp
+                epoch = raw_buffer[:, start:stop]
+            elif (event_samp + tmin_samp < self._first_samp and
+                    event_samp + tmax_samp <= last_samp):
+                # have to use some samples from previous buffer
+                if self._last_buffer is None:
+                    continue
+                n_last = self._first_samp - (event_samp + tmin_samp)
+                n_this = n_samp - n_last
+                epoch = np.c_[self._last_buffer[:, -n_last:],
+                              raw_buffer[:, :n_this]]
+            elif event_samp + tmax_samp > last_samp:
+                # we need samples from the future
+                # we will process this epoch with the next buffer
+                event_backlog.append((event_samp, event_id))
+            else:
+                raise RuntimeError('Unhandled case.')
+
+            if epoch is not None:
+                self._append_epoch_to_queue(epoch, event_samp, event_id)
+
+        # set things up for processing of next buffer
+        self._event_backlog = event_backlog
+        n_buffer = raw_buffer.shape[1]
+        if self._last_buffer is None:
+            self._last_buffer = raw_buffer
+            self._first_samp = last_samp + 1
+        elif self._last_buffer.shape[1] <= n_samp + n_buffer:
+            self._last_buffer = np.c_[self._last_buffer, raw_buffer]
+        else:
+            # do not increase size of _last_buffer any further
+            self._first_samp = self._first_samp + n_buffer
+            self._last_buffer[:, :-n_buffer] = self._last_buffer[:, n_buffer:]
+            self._last_buffer[:, -n_buffer:] = raw_buffer
+
+    def _append_epoch_to_queue(self, epoch, event_samp, event_id):
+        """Append a (raw) epoch to queue
+
+        Note: Do not print log messages during regular use. It will be printed
+        asynchronously which is annyoing when working in an interactive shell.
+
+        Parameters
+        ----------
+        epoch : array of float, shape=(nchan, n_times)
+            The raw epoch (only calibration has been applied) over all
+            channels.
+        event_samp : int
+            The time in samples when the epoch occurred.
+        event_id : int
+            The event ID of the epoch.
+        """
+        # select the channels
+        epoch = epoch[self.picks, :]
+
+        # Detrend, baseline correct, decimate
+        epoch = self._detrend_offset_decim(epoch, verbose='ERROR')
+
+        # apply SSP
+        epoch = self._project_epoch(epoch)
+
+        # Decide if this is a good epoch
+        is_good, _ = self._is_good_epoch(epoch, verbose='ERROR')
+
+        if is_good:
+            self._epoch_queue.append(epoch)
+            self._events.append((event_samp, 0, event_id))
+            self._n_good += 1
+        else:
+            self._n_bad += 1
+
+    def __repr__(self):
+        s = 'good / bad epochs received: %d / %d, epochs in queue: %d'\
+            % (self._n_good, self._n_bad, len(self._epoch_queue))
+        s += ', tmin : %s (s)' % self.tmin
+        s += ', tmax : %s (s)' % self.tmax
+        s += ', baseline : %s' % str(self.baseline)
+        return '<RtEpochs  |  %s>' % s
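
For intuition, the cross-buffer branch above stitches the tail of the
previous buffer onto the head of the current one. A minimal standalone
sketch of that step (hypothetical sizes, not part of the module):

    import numpy as np

    nchan, n_samp = 2, 5                                  # epoch length
    last_buffer = np.arange(20).reshape(nchan, 10)        # previous buffer
    raw_buffer = np.arange(20, 40).reshape(nchan, 10)     # current buffer

    n_last = 3                          # samples owed by the old buffer
    n_this = n_samp - n_last            # samples from the new buffer
    epoch = np.c_[last_buffer[:, -n_last:], raw_buffer[:, :n_this]]
    assert epoch.shape == (nchan, n_samp)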
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/fieldtrip_client.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/fieldtrip_client.py
new file mode 100644
index 0000000..24820ea
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/fieldtrip_client.py
@@ -0,0 +1,351 @@
+# Author: Mainak Jas
+#
+# License: BSD (3-clause)
+
+import re
+import copy
+import time
+import threading
+import warnings
+import numpy as np
+
+from ..io.constants import FIFF
+from ..io.meas_info import _empty_info
+from ..io.pick import pick_info
+from ..epochs import EpochsArray
+from ..utils import logger
+from ..externals.FieldTrip import Client as FtClient
+
+
+def _buffer_recv_worker(ft_client):
+    """Worker thread that constantly receives buffers."""
+
+    try:
+        for raw_buffer in ft_client.iter_raw_buffers():
+            ft_client._push_raw_buffer(raw_buffer)
+    except RuntimeError as err:
+        # something is wrong, the server stopped (or something)
+        ft_client._recv_thread = None
+        print('Buffer receive thread stopped: %s' % err)
+
+
+class FieldTripClient(object):
+    """ Realtime FieldTrip client
+
+    Parameters
+    ----------
+    info : dict | None
+        The measurement info read in from a file. If None, it is guessed from
+        the Fieldtrip Header object.
+    host : str
+        Hostname (or IP address) of the host where Fieldtrip buffer is running.
+    port : int
+        Port to use for the connection.
+    wait_max : float
+        Maximum time (in seconds) to wait for Fieldtrip buffer to start.
+    tmin : float | None
+        Time instant to start receiving buffers. If None, start from the latest
+        samples available.
+    tmax : float
+        Time instant to stop receiving buffers.
+    buffer_size : int
+        Size of each buffer in terms of number of samples.
+    verbose : bool, str, int, or None
+        Log verbosity (see mne.verbose).
+    """
+    def __init__(self, info=None, host='localhost', port=1972, wait_max=30,
+                 tmin=None, tmax=np.inf, buffer_size=1000, verbose=None):
+        self.verbose = verbose
+
+        self.info = info
+        self.wait_max = wait_max
+        self.tmin = tmin
+        self.tmax = tmax
+        self.buffer_size = buffer_size
+
+        self.host = host
+        self.port = port
+
+        self._recv_thread = None
+        self._recv_callbacks = list()
+
+    def __enter__(self):
+        # instantiate Fieldtrip client and connect
+        self.ft_client = FtClient()
+
+        # connect to FieldTrip buffer
+        logger.info("FieldTripClient: Waiting for server to start")
+        start_time, current_time = time.time(), time.time()
+        success = False
+        while current_time < (start_time + self.wait_max):
+            try:
+                self.ft_client.connect(self.host, self.port)
+                logger.info("FieldTripClient: Connected")
+                success = True
+                break
+            except Exception:
+                current_time = time.time()
+                time.sleep(0.1)
+
+        if not success:
+            raise RuntimeError('Could not connect to FieldTrip Buffer')
+
+        # retrieve header
+        logger.info("FieldTripClient: Retrieving header")
+        start_time, current_time = time.time(), time.time()
+        while current_time < (start_time + self.wait_max):
+            self.ft_header = self.ft_client.getHeader()
+            if self.ft_header is None:
+                current_time = time.time()
+                time.sleep(0.1)
+            else:
+                break
+
+        if self.ft_header is None:
+            raise RuntimeError('Failed to retrieve Fieldtrip header!')
+        else:
+            logger.info("FieldTripClient: Header retrieved")
+
+        self.info = self._guess_measurement_info()
+        self.ch_names = self.ft_header.labels
+
+        # find start and end samples
+
+        sfreq = self.info['sfreq']
+
+        if self.tmin is None:
+            self.tmin_samp = max(0, self.ft_header.nSamples - 1)
+        else:
+            self.tmin_samp = int(round(sfreq * self.tmin))
+
+        if self.tmax != np.inf:
+            self.tmax_samp = int(round(sfreq * self.tmax))
+        else:
+            self.tmax_samp = np.iinfo(np.uint32).max
+
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.ft_client.disconnect()
+
+    def _guess_measurement_info(self):
+        """
+        Creates a minimal Info dictionary required for epoching, averaging
+        et al.
+        """
+
+        if self.info is None:
+
+            warnings.warn('Info dictionary not provided. Trying to guess it '
+                          'from FieldTrip Header object')
+
+            info = _empty_info()  # create info dictionary
+
+            # modify info attributes according to the FieldTrip Header object
+            info['nchan'] = self.ft_header.nChannels
+            info['sfreq'] = self.ft_header.fSample
+            info['ch_names'] = self.ft_header.labels
+
+            info['comps'] = list()
+            info['projs'] = list()
+            info['bads'] = list()
+
+            # channel dictionary list
+            info['chs'] = []
+
+            for idx, ch in enumerate(info['ch_names']):
+                this_info = dict()
+
+                this_info['scanno'] = idx
+
+                # extract numerical part of channel name
+                this_info['logno'] = int(re.findall(r'[^\W\d_]+|\d+', ch)[-1])
+
+                if ch.startswith('EEG'):
+                    this_info['kind'] = FIFF.FIFFV_EEG_CH
+                elif ch.startswith('MEG'):
+                    this_info['kind'] = FIFF.FIFFV_MEG_CH
+                elif ch.startswith('MCG'):
+                    this_info['kind'] = FIFF.FIFFV_MCG_CH
+                elif ch.startswith('EOG'):
+                    this_info['kind'] = FIFF.FIFFV_EOG_CH
+                elif ch.startswith('EMG'):
+                    this_info['kind'] = FIFF.FIFFV_EMG_CH
+                elif ch.startswith('STI'):
+                    this_info['kind'] = FIFF.FIFFV_STIM_CH
+                elif ch.startswith('ECG'):
+                    this_info['kind'] = FIFF.FIFFV_ECG_CH
+                elif ch.startswith('MISC'):
+                    this_info['kind'] = FIFF.FIFFV_MISC_CH
+
+                # Fieldtrip already does calibration
+                this_info['range'] = 1.0
+                this_info['cal'] = 1.0
+
+                this_info['ch_name'] = ch
+                this_info['loc'] = None
+
+                if ch.startswith('EEG'):
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                elif ch.startswith('MEG'):
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                else:
+                    this_info['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
+
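+                # Assumed Neuromag/Vectorview naming: MEG channel names
+                # ending in '1' are magnetometers (unit T); '2' and '3'
+                # are planar gradiometers (unit T/m)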
+                if ch.startswith('MEG') and ch.endswith('1'):
+                    this_info['unit'] = FIFF.FIFF_UNIT_T
+                elif ch.startswith('MEG') and (ch.endswith('2') or
+                                               ch.endswith('3')):
+                    this_info['unit'] = FIFF.FIFF_UNIT_T_M
+                else:
+                    this_info['unit'] = FIFF.FIFF_UNIT_V
+
+                this_info['unit_mul'] = 0
+
+                info['chs'].append(this_info)
+
+        else:
+
+            # XXX: the data in real-time mode and offline mode
+            # does not match unless this is done
+            self.info['projs'] = list()
+
+            # FieldTrip buffer already does the calibration
+            for this_info in self.info['chs']:
+                this_info['range'] = 1.0
+                this_info['cal'] = 1.0
+                this_info['unit_mul'] = 0
+
+            info = copy.deepcopy(self.info)
+
+        return info
+
+    def get_measurement_info(self):
+        """Returns the measurement info.
+
+        Returns
+        -------
+        self.info : dict
+            The measurement info.
+        """
+        return self.info
+
+    def get_data_as_epoch(self, n_samples=1024, picks=None):
+        """Returns last n_samples from current time.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of samples to fetch.
+        picks : array-like of int | None
+            If None, all channels are kept; otherwise only the channels
+            given by the indices in picks are kept.
+
+        Returns
+        -------
+        epoch : instance of Epochs
+            The samples fetched as an Epochs object.
+
+        See Also
+        --------
+        Epochs.iter_evoked
+        """
+        ft_header = self.ft_client.getHeader()
+        last_samp = ft_header.nSamples - 1
+        start = last_samp - n_samples + 1
+        stop = last_samp
+        events = np.expand_dims(np.array([start, 1, 1]), axis=0)
+
+        # get the data
+        data = self.ft_client.getData([start, stop]).transpose()
+
+        # create epoch from data
+        info = self.info
+        if picks is not None:
+            info = pick_info(info, picks, copy=True)
+            data = data[picks]
+        epoch = EpochsArray(data[np.newaxis], info, events)
+
+        return epoch
+
+    def register_receive_callback(self, callback):
+        """Register a raw buffer receive callback.
+
+        Parameters
+        ----------
+        callback : callable
+            The callback. The raw buffer is passed as the first parameter
+            to callback.
+        """
+        if callback not in self._recv_callbacks:
+            self._recv_callbacks.append(callback)
+
+    def unregister_receive_callback(self, callback):
+        """Unregister a raw buffer receive callback
+
+        Parameters
+        ----------
+        callback : callable
+            The callback to unregister.
+        """
+        if callback in self._recv_callbacks:
+            self._recv_callbacks.remove(callback)
+
+    def _push_raw_buffer(self, raw_buffer):
+        """Push raw buffer to clients using callbacks."""
+        for callback in self._recv_callbacks:
+            callback(raw_buffer)
+
+    def start_receive_thread(self, nchan):
+        """Start the receive thread.
+
+        If the measurement has not been started, it will also be started.
+
+        Parameters
+        ----------
+        nchan : int
+            The number of channels in the data.
+        """
+
+        if self._recv_thread is None:
+
+            self._recv_thread = threading.Thread(target=_buffer_recv_worker,
+                                                 args=(self, ))
+            self._recv_thread.daemon = True
+            self._recv_thread.start()
+
+    def stop_receive_thread(self, stop_measurement=False):
+        """Stop the receive thread
+
+        Parameters
+        ----------
+        stop_measurement : bool
+            Also stop the measurement.
+        """
+        if self._recv_thread is not None:
+            self._recv_thread.stop()
+            self._recv_thread = None
+
+    def iter_raw_buffers(self):
+        """Return an iterator over raw buffers
+
+        Returns
+        -------
+        raw_buffer : generator
+            Generator for iteration over raw buffers.
+        """
+
+        iter_times = zip(range(self.tmin_samp, self.tmax_samp,
+                               self.buffer_size),
+                         range(self.tmin_samp + self.buffer_size - 1,
+                               self.tmax_samp, self.buffer_size))
+
+        for ii, (start, stop) in enumerate(iter_times):
+
+            # wait for correct number of samples to be available
+            self.ft_client.wait(stop, np.iinfo(np.uint32).max,
+                                np.iinfo(np.uint32).max)
+
+            # get the samples
+            raw_buffer = self.ft_client.getData([start, stop]).transpose()
+
+            yield raw_buffer
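
Taken together, a typical client session looks roughly like this (a sketch
only; it assumes a FieldTrip buffer is already serving data on
localhost:1972):

    import mne
    from mne.realtime import FieldTripClient

    with FieldTripClient(host='localhost', port=1972,
                         tmax=150., wait_max=10) as rt_client:
        info = rt_client.get_measurement_info()
        picks = mne.pick_types(info, meg='grad', eeg=False)
        # fetch the most recent second of data as an Epochs object
        epoch = rt_client.get_data_as_epoch(n_samples=int(info['sfreq']),
                                            picks=picks)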
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/mockclient.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/mockclient.py
new file mode 100644
index 0000000..8795b88
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/mockclient.py
@@ -0,0 +1,190 @@
+# Authors: Mainak Jas <mainak at neuro.hut.fi>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import copy
+import numpy as np
+from ..event import find_events
+
+
+class MockRtClient(object):
+    """Mock Realtime Client
+
+    Parameters
+    ----------
+    raw : instance of Raw object
+        The raw object which simulates the RtClient
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    def __init__(self, raw, verbose=None):
+        self.raw = raw
+        self.info = copy.deepcopy(self.raw.info)
+        self.verbose = verbose
+
+        self._current = dict()  # pointer to current index for the event
+        self._last = dict()  # Last index for the event
+
+    def get_measurement_info(self):
+        """Returns the measurement info.
+
+        Returns
+        -------
+        self.info : dict
+            The measurement info.
+        """
+        return self.info
+
+    def send_data(self, epochs, picks, tmin, tmax, buffer_size):
+        """Read from raw object and send them to RtEpochs for processing.
+
+        Parameters
+        ----------
+        epochs : instance of RtEpochs
+            The epochs object.
+        picks : array-like of int
+            Indices of channels.
+        tmin : float
+            Time instant to start receiving buffers.
+        tmax : float
+            Time instant to stop receiving buffers.
+        buffer_size : int
+            Size of each buffer in terms of number of samples.
+        """
+        # this is important to emulate a thread, instead of automatically
+        # or constantly sending data, we will invoke this explicitly to send
+        # the next buffer
+
+        sfreq = self.info['sfreq']
+        tmin_samp = int(round(sfreq * tmin))
+        tmax_samp = int(round(sfreq * tmax))
+
+        iter_times = zip(list(range(tmin_samp, tmax_samp, buffer_size)),
+                         list(range(tmin_samp + buffer_size, tmax_samp,
+                                    buffer_size)))
+
+        for ii, (start, stop) in enumerate(iter_times):
+            # channels are picked in _append_epoch_to_queue. No need to pick
+            # here
+            data, times = self.raw[:, start:stop]
+
+            # to undo the calibration done in _process_raw_buffer
+            cals = np.array([[self.info['chs'][k]['range'] *
+                              self.info['chs'][k]['cal'] for k in picks]]).T
+
+            data[picks, :] = data[picks, :] / cals
+
+            epochs._process_raw_buffer(data)
+
+    # The following methods do not seem to be important for this use case,
+    # but they need to be present for the emulation to work because
+    # RtEpochs expects them to be there.
+
+    def get_event_data(self, event_id, tmin, tmax, picks, stim_channel=None,
+                       min_duration=0):
+        """Simulate the data for a particular event-id.
+
+        The epochs corresponding to a particular event-id are returned. The
+        method remembers the epoch that was returned in the previous call and
+        returns the next epoch in sequence. Once all epochs corresponding to
+        an event-id have been exhausted, the method returns None.
+
+        Parameters
+        ----------
+        event_id : int
+            The id of the event to consider.
+        tmin : float
+            Start time before event.
+        tmax : float
+            End time after event.
+        picks : array-like of int
+            Indices of channels.
+        stim_channel : None | string | list of string
+            Name of the stim channel or all the stim channels
+            affected by the trigger. If None, the config variables
+            'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
+            etc. are read. If these are not found, it will default to
+            'STI 014'.
+        min_duration : float
+            The minimum duration of a change in the events channel required
+            to consider it as an event (in seconds).
+
+        Returns
+        -------
+        data : 2D array, shape (n_channels, n_times)
+            The epoch data being simulated.
+        """
+
+        # Get the list of all events
+        events = find_events(self.raw, stim_channel=stim_channel,
+                             verbose=False, output='onset',
+                             consecutive='increasing',
+                             min_duration=min_duration)
+
+        # Get the list of only the specified event
+        idx = np.where(events[:, -1] == event_id)[0]
+        event_samp = events[idx, 0]
+
+        # Only do this the first time for each event type
+        if event_id not in self._current:
+
+            # Initialize pointer for the event to 0
+            self._current[event_id] = 0
+            self._last[event_id] = len(event_samp)
+
+        # relative start and stop positions in samples
+        tmin_samp = int(round(self.info['sfreq'] * tmin))
+        tmax_samp = int(round(self.info['sfreq'] * tmax)) + 1
+
+        if self._current[event_id] < self._last[event_id]:
+
+            # Select the current event from the events list
+            ev_samp = event_samp[self._current[event_id]]
+
+            # absolute start and stop positions in samples
+            start = ev_samp + tmin_samp - self.raw.first_samp
+            stop = ev_samp + tmax_samp - self.raw.first_samp
+
+            self._current[event_id] += 1  # increment pointer
+
+            data, _ = self.raw[picks, start:stop]
+
+            return data
+
+        else:
+            return None
+
+    def register_receive_callback(self, x):
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
+        pass
+
+    def start_receive_thread(self, x):
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
+        pass
+
+    def unregister_receive_callback(self, x):
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
+        pass
+
+    def _stop_receive_thread(self):
+        """API boilerplate"""
+        pass
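
A minimal offline replay with the mock client (a sketch; raw_fname stands
for any preloadable raw FIF file with a 'STI 014' stim channel):

    import mne
    from mne.realtime import MockRtClient, RtEpochs

    raw = mne.io.Raw(raw_fname, preload=True)
    picks = mne.pick_types(raw.info, meg='grad', stim=True)
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, 1, tmin=-0.2, tmax=0.5, picks=picks)
    rt_epochs.start()
    # push the first 10 s of data through the pipeline explicitly
    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)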
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/stim_server_client.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/stim_server_client.py
new file mode 100644
index 0000000..f06cf0d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/stim_server_client.py
@@ -0,0 +1,314 @@
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+# License: BSD (3-clause)
+
+from ..externals.six.moves import queue
+import time
+import socket
+from ..externals.six.moves import socketserver
+import threading
+
+import numpy as np
+
+from ..utils import logger, verbose
+
+
+class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
+    """Creates a threaded TCP server
+
+    Parameters
+    ----------
+    server_address : str
+        Address on which the server is listening.
+    request_handler_class : subclass of BaseRequestHandler
+        The request handler class; here _TriggerHandler, which defines
+        the handle method.
+    stim_server : instance of StimServer
+        The StimServer instance that the handlers report back to.
+    """
+
+    def __init__(self, server_address, request_handler_class,
+                 stim_server):
+
+        # Basically, this server is the same as a normal TCPServer class
+        # except that it has an additional attribute stim_server
+
+        # Create the server without binding or activating it here
+        # (bind_and_activate=False); binding happens in __enter__
+        socketserver.TCPServer.__init__(self, server_address,
+                                        request_handler_class,
+                                        False)
+
+        self.stim_server = stim_server
+
+
+class _TriggerHandler(socketserver.BaseRequestHandler):
+    """Request handler on the server side."""
+
+    def handle(self):
+        """Method to handle requests on the server side."""
+
+        self.request.settimeout(None)
+
+        while self.server.stim_server._running:
+            data = self.request.recv(1024)  # clip input at 1Kb
+            data = data.decode()  # need to turn it into a string (Py3k)
+
+            if data == 'add client':
+                # Add stim_server._client
+                client_id = self.server.stim_server \
+                                ._add_client(self.client_address[0],
+                                             self)
+
+                # Instantiate queue for communication between threads
+                # Note: new queue for each handler
+                if not hasattr(self, '_tx_queue'):
+                    self._tx_queue = queue.Queue()
+
+                self.request.sendall("Client added".encode('utf-8'))
+
+                # Mark the client as running
+                for client in self.server.stim_server._clients:
+                    if client['id'] == client_id:
+                        client['running'] = True
+
+            elif data == 'get trigger':
+
+                # Pop triggers and send them
+                if (self._tx_queue.qsize() > 0 and
+                        hasattr(self.server.stim_server, '_clients')):
+
+                    trigger = self._tx_queue.get()
+                    self.request.sendall(str(trigger).encode('utf-8'))
+                else:
+                    self.request.sendall("Empty".encode('utf-8'))
+
+
+class StimServer(object):
+    """Stimulation Server
+
+    Server to communicate with StimClient(s).
+
+    Parameters
+    ----------
+    ip : str
+        IP address of the host where StimServer is running.
+    port : int
+        The port to which the stimulation server must bind.
+    n_clients : int
+        The number of clients which will connect to the server.
+
+    See Also
+    --------
+    StimClient
+    """
+
+    def __init__(self, ip='localhost', port=4218, n_clients=1):
+
+        # Start a threaded TCP server, binding to localhost on specified port
+        self._data = _ThreadedTCPServer((ip, port),
+                                        _TriggerHandler, self)
+        self.n_clients = n_clients
+
+    def __enter__(self):
+        # This is done to avoid "[Errno 98] Address already in use"
+        self._data.allow_reuse_address = True
+        self._data.server_bind()
+        self._data.server_activate()
+
+        # Start a thread for the server
+        self._thread = threading.Thread(target=self._data.serve_forever)
+
+        # Ctrl-C will cleanly kill all spawned threads
+        # Once the main thread exits, other threads will exit
+        self._thread.daemon = True
+        self._thread.start()
+
+        self._running = False
+        self._clients = list()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.shutdown()
+
+    @verbose
+    def start(self, timeout=np.inf, verbose=None):
+        """Method to start the server.
+
+        Parameters
+        ----------
+        timeout : float
+            Maximum time to wait for clients to be added.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+
+        # Start server
+        if not self._running:
+            logger.info('RtServer: Start')
+            self._running = True
+
+            start_time = time.time()  # init delay counter.
+
+            # wait till n_clients are added
+            while (len(self._clients) < self.n_clients):
+                current_time = time.time()
+
+                if (current_time > start_time + timeout):
+                    raise StopIteration
+
+                time.sleep(0.1)
+
+    @verbose
+    def _add_client(self, ip, sock, verbose=None):
+        """Add client.
+
+        Parameters
+        ----------
+        ip : str
+            IP address of the client.
+        sock : instance of _TriggerHandler
+            The request handler serving the client connection (it carries
+            the per-client trigger queue).
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+
+        logger.info("Adding client with ip = %s" % ip)
+
+        client = dict(ip=ip, id=len(self._clients), running=False, socket=sock)
+        self._clients.append(client)
+
+        return client['id']
+
+    @verbose
+    def shutdown(self, verbose=None):
+        """Method to shutdown the client and server.
+
+        Parameters
+        ----------
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+
+        logger.info("Shutting down ...")
+
+        # stop running all the clients
+        if hasattr(self, '_clients'):
+            for client in self._clients:
+                client['running'] = False
+
+        self._running = False
+
+        self._data.shutdown()
+        self._data.server_close()
+        self._data.socket.close()
+
+    @verbose
+    def add_trigger(self, trigger, verbose=None):
+        """Method to add a trigger.
+
+        Parameters
+        ----------
+        trigger : int
+            The trigger to be added to the queue for sending to StimClient.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        StimClient.get_trigger
+        """
+
+        for client in self._clients:
+            client_id = client['id']
+            logger.info("Sending trigger %d to client %d"
+                        % (trigger, client_id))
+            client['socket']._tx_queue.put(trigger)
+
+
+class StimClient(object):
+    """Stimulation Client
+
+    Client to communicate with StimServer
+
+    Parameters
+    ----------
+    host : str
+        Hostname (or IP address) of the host where StimServer is running.
+    port : int
+        Port to use for the connection.
+    timeout : float
+        Communication timeout in seconds.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    StimServer
+    """
+
+    @verbose
+    def __init__(self, host, port=4218, timeout=5.0, verbose=None):
+        self._host = host
+        self._port = port
+
+        try:
+            logger.info("Setting up client socket")
+            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._sock.settimeout(timeout)
+            self._sock.connect((host, port))
+
+            logger.info("Establishing connection with server")
+            data = "add client".encode('utf-8')
+            n_sent = self._sock.send(data)
+            if n_sent != len(data):
+                raise RuntimeError('Could not communicate with server')
+            resp = self._sock.recv(1024).decode()  # turn bytes into str (Py3k)
+
+            if resp == 'Client added':
+                logger.info("Connection established")
+            else:
+                raise RuntimeError('Client not added')
+
+        except Exception:
+            raise RuntimeError('Setting up acquisition <-> stimulation '
+                               'computer connection (host: %s '
+                               'port: %d) failed. Make sure StimServer '
+                               'is running.' % (host, port))
+
+    def close(self):
+        """Close the socket object"""
+        self._sock.close()
+
+    @verbose
+    def get_trigger(self, timeout=5.0, verbose=None):
+        """Method to get triggers from StimServer.
+
+        Parameters
+        ----------
+        timeout : float
+            Maximum time to wait for a valid trigger from the server.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        StimServer.add_trigger
+        """
+        start_time = time.time()  # init delay counter used for the timeout
+
+        while True:
+            try:
+                current_time = time.time()
+
+                # Give up after the timeout
+                if current_time > (start_time + timeout):
+                    logger.info("received nothing")
+                    return None
+
+                self._sock.send("get trigger".encode('utf-8'))
+                trigger = self._sock.recv(1024).decode()  # str (Py3k)
+
+                if trigger != 'Empty':
+                    logger.info("received trigger %s" % str(trigger))
+                    return int(trigger)
+
+            except RuntimeError as err:
+                logger.info('Cannot receive triggers: %s' % (err))
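
The intended wiring between the two classes, sketched below. In practice
the server and the client run on different machines (or threads, as in the
test further down), since start() blocks until all clients have connected:

    from mne.realtime import StimServer, StimClient

    # stimulation computer
    with StimServer('localhost', port=4218, n_clients=1) as stim_server:
        stim_server.start(timeout=10.)  # blocks until the client connects
        stim_server.add_trigger(20)     # queue trigger 20 for all clients

    # acquisition computer (runs concurrently with the block above)
    stim_client = StimClient('localhost', port=4218)
    trigger = stim_client.get_trigger()  # -> 20
    stim_client.close()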
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_fieldtrip_client.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_fieldtrip_client.py
new file mode 100644
index 0000000..c17a4a5
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_fieldtrip_client.py
@@ -0,0 +1,89 @@
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import time
+import os
+import threading
+import subprocess
+import warnings
+import os.path as op
+
+from nose.tools import assert_true, assert_equal
+
+import mne
+from mne.utils import requires_neuromag2ft, run_tests_if_main
+from mne.realtime import FieldTripClient
+from mne.externals.six.moves import queue
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.realpath(op.join(base_dir, 'test_raw.fif'))
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _run_buffer(kill_signal, neuromag2ft_fname):
+    # Works with neuromag2ft-3.0.2
+    cmd = (neuromag2ft_fname, '--file', raw_fname, '--speed', '4.0')
+
+    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+    # Let measurement continue for the entire duration
+    kill_signal.get(timeout=10.0)
+    process.terminate()
+
+
+@requires_neuromag2ft
+def test_fieldtrip_client():
+    """Test fieldtrip_client"""
+
+    neuromag2ft_fname = op.realpath(op.join(os.environ['NEUROMAG2FT_ROOT'],
+                                    'neuromag2ft'))
+
+    kill_signal = queue.Queue()
+    thread = threading.Thread(target=_run_buffer, args=(kill_signal,
+                                                        neuromag2ft_fname))
+    thread.daemon = True
+    thread.start()
+    time.sleep(0.25)
+
+    try:
+        # Start the FieldTrip buffer
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            with FieldTripClient(host='localhost', port=1972,
+                                 tmax=5, wait_max=1) as rt_client:
+                tmin_samp1 = rt_client.tmin_samp
+
+        time.sleep(1)  # Pause measurement
+        assert_true(len(w) >= 1)
+
+        # Start the FieldTrip buffer again
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            with FieldTripClient(host='localhost', port=1972,
+                                 tmax=5, wait_max=1) as rt_client:
+                raw_info = rt_client.get_measurement_info()
+
+                tmin_samp2 = rt_client.tmin_samp
+                picks = mne.pick_types(raw_info, meg='grad', eeg=False,
+                                       stim=False, eog=False)
+                epoch = rt_client.get_data_as_epoch(n_samples=5, picks=picks)
+                n_channels, n_samples = epoch.get_data().shape[1:]
+
+                epoch2 = rt_client.get_data_as_epoch(n_samples=5, picks=picks)
+                n_channels2, n_samples2 = epoch2.get_data().shape[1:]
+
+        assert_true(tmin_samp2 > tmin_samp1)
+        assert_true(len(w) >= 1)
+        assert_equal(n_samples, 5)
+        assert_equal(n_samples2, 5)
+        assert_equal(n_channels, len(picks))
+        assert_equal(n_channels2, len(picks))
+        kill_signal.put(False)  # stop the buffer
+    except:
+        kill_signal.put(False)  # stop the buffer even if tests fail
+        raise
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_mockclient.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_mockclient.py
new file mode 100644
index 0000000..4dbb860
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_mockclient.py
@@ -0,0 +1,144 @@
+import os.path as op
+
+from nose.tools import assert_true
+from numpy.testing import assert_array_equal
+
+import mne
+from mne import Epochs, read_events, pick_channels
+from mne.utils import run_tests_if_main
+from mne.realtime import MockRtClient, RtEpochs
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+
+events = read_events(event_name)
+
+
+def test_mockclient():
+    """Test the RtMockClient."""
+
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
+    event_id, tmin, tmax = 1, -0.2, 0.5
+
+    epochs = Epochs(raw, events[:7], event_id=event_id, tmin=tmin, tmax=tmax,
+                    picks=picks, baseline=(None, 0), preload=True)
+    data = epochs.get_data()
+
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         isi_max=0.5)
+
+    rt_epochs.start()
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+
+    rt_data = rt_epochs.get_data()
+
+    assert_true(rt_data.shape == data.shape)
+    assert_array_equal(rt_data, data)
+
+
+def test_get_event_data():
+    """Test emulation of realtime data stream."""
+
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
+    event_id, tmin, tmax = 2, -0.1, 0.3
+    epochs = Epochs(raw, events, event_id=event_id,
+                    tmin=tmin, tmax=tmax, picks=picks, baseline=None,
+                    preload=True, proj=False)
+
+    data = epochs.get_data()[0, :, :]
+
+    rt_client = MockRtClient(raw)
+    rt_data = rt_client.get_event_data(event_id=event_id, tmin=tmin,
+                                       tmax=tmax, picks=picks,
+                                       stim_channel='STI 014')
+
+    assert_array_equal(rt_data, data)
+
+
+def test_find_events():
+    """Test find_events in rt_epochs."""
+
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
+    event_id = [0, 5, 6]
+    tmin, tmax = -0.2, 0.5
+
+    stim_channel = 'STI 014'
+    stim_channel_idx = pick_channels(raw.info['ch_names'],
+                                     include=[stim_channel])
+
+    # Reset some data for ease of comparison
+    raw._first_samps[0] = 0
+    raw.info['sfreq'] = 1000
+    # Test that we can handle consecutive events with no gap
+    raw._data[stim_channel_idx, :] = 0
+    raw._data[stim_channel_idx, 500:520] = 5
+    raw._data[stim_channel_idx, 520:530] = 6
+    raw._data[stim_channel_idx, 530:532] = 5
+    raw._data[stim_channel_idx, 540] = 6
+    raw._update_times()
+
+    # consecutive=False
+    find_events = dict(consecutive=False)
+
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 1)
+
+    # consecutive=True
+    find_events = dict(consecutive=True)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6, 5, 6]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 3)
+
+    # min_duration=0.002
+    find_events = dict(consecutive=False, min_duration=0.002)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 0)
+
+    # output='step', consecutive=True
+    find_events = dict(output='step', consecutive=True)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6, 5, 0, 6, 0]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 5)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_stim_client_server.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_stim_client_server.py
new file mode 100644
index 0000000..b0e5835
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/realtime/tests/test_stim_client_server.py
@@ -0,0 +1,84 @@
+import threading
+import time
+from nose.tools import assert_equal, assert_raises, assert_true
+
+from mne.realtime import StimServer, StimClient
+from mne.externals.six.moves import queue
+from mne.utils import requires_good_network, run_tests_if_main
+
+
+_server = None
+_have_put_in_trigger = False
+_max_wait = 10.
+
+
+@requires_good_network
+def test_connection():
+    """Test TCP/IP connection for StimServer <-> StimClient.
+    """
+    global _server, _have_put_in_trigger
+
+    # have to start a thread to simulate the effect of two
+    # different computers since stim_server.start() is designed to
+    # be a blocking method
+
+    # use separate queues because timing matters
+    trig_queue1 = queue.Queue()
+    trig_queue2 = queue.Queue()
+
+    # start a thread to emulate 1st client
+    thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,))
+    thread1.daemon = True
+
+    # start another thread to emulate 2nd client
+    thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,))
+    thread2.daemon = True
+
+    thread1.start()
+    thread2.start()
+    with StimServer('localhost', port=4218, n_clients=2) as stim_server:
+        _server = stim_server
+        stim_server.start(timeout=10.0)  # don't allow test to hang
+
+        # Add the trigger to the queue for both clients
+        stim_server.add_trigger(20)
+        _have_put_in_trigger = True  # monkey patch
+
+        # the assert_equal must be in the test_connection() method
+        # Hence communication between threads is necessary
+        trig1 = trig_queue1.get(timeout=_max_wait)
+        trig2 = trig_queue2.get(timeout=_max_wait)
+        assert_equal(trig1, 20)
+
+        # test if both clients receive the same trigger
+        assert_equal(trig1, trig2)
+
+    # test timeout for stim_server
+    with StimServer('localhost', port=4218) as stim_server:
+        assert_raises(StopIteration, stim_server.start, 0.1)
+
+
+def _connect_client(trig_queue):
+    """Helper method that instantiates the StimClient.
+    """
+    # just wait till the main thread reaches stim_server.start()
+    t0 = time.time()
+    while (time.time() - t0 < _max_wait and
+           (_server is None or not _server._running)):
+        time.sleep(0.01)
+    assert_true(_server is not None and _server._running)
+
+    # instantiate StimClient
+    stim_client = StimClient('localhost', port=4218)
+
+    # wait for script to reach stim_server.add_trigger()
+    t0 = time.time()
+    while (time.time() - t0 < _max_wait and not _have_put_in_trigger):
+        time.sleep(0.01)
+    assert_true(_have_put_in_trigger)
+
+    trig_queue.put(stim_client.get_trigger())
+    stim_client.close()
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/report.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/report.py
new file mode 100644
index 0000000..f812263
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/report.py
@@ -0,0 +1,1824 @@
+"""Generate html report from MNE database
+"""
+
+# Authors: Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import fnmatch
+import re
+import codecs
+import time
+from glob import glob
+import warnings
+import base64
+from datetime import datetime as dt
+
+import numpy as np
+
+from . import read_evokeds, read_events, pick_types, read_cov
+from .io import Raw, read_info
+from .utils import _TempDir, logger, verbose, get_subjects_dir
+from .viz import plot_events, plot_trans, plot_cov
+from .viz._3d import _plot_mri_contours
+from .forward import read_forward_solution
+from .epochs import read_epochs
+from .minimum_norm import read_inverse_operator
+from .parallel import parallel_func, check_n_jobs
+
+from .externals.tempita import HTMLTemplate, Template
+from .externals.six import BytesIO
+from .externals.six import moves, PY3
+
+VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
+                    '-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
+                    '-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
+                    '-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
+                    '-ave.fif', '-ave.fif.gz', 'T1.mgz']
+SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
+                 'mri', 'forward', 'inverse']
+
+###############################################################################
+# PLOTTING FUNCTIONS
+
+
+def _fig_to_img(function=None, fig=None, image_format='png',
+                scale=None, **kwargs):
+    """Wrapper function to plot figure and create a binary image"""
+
+    import matplotlib.pyplot as plt
+    from matplotlib.figure import Figure
+    if not isinstance(fig, Figure) and function is None:
+        from scipy.misc import imread
+        mayavi = None
+        try:
+            from mayavi import mlab  # noqa, mlab imported
+            import mayavi
+        except:  # on some systems importing Mayavi raises SystemExit (!)
+            warnings.warn('Could not import mayavi. Trying to render '
+                          '`mayavi.core.scene.Scene` figure instances'
+                          ' will throw an error.')
+        tempdir = _TempDir()
+        temp_fname = op.join(tempdir, 'test')
+        if fig.scene is not None:
+            fig.scene.save_png(temp_fname)
+            img = imread(temp_fname)
+            os.remove(temp_fname)
+        else:  # Testing mode
+            img = np.zeros((2, 2, 3))
+
+        mayavi.mlab.close(fig)
+        fig = plt.figure()
+        plt.imshow(img)
+        plt.axis('off')
+
+    if function is not None:
+        plt.close('all')
+        fig = function(**kwargs)
+    output = BytesIO()
+    if scale is not None:
+        _scale_mpl_figure(fig, scale)
+    fig.savefig(output, format=image_format, bbox_inches='tight',
+                dpi=fig.get_dpi())
+    plt.close(fig)
+    output = output.getvalue()
+    return (output if image_format == 'svg' else
+            base64.b64encode(output).decode('ascii'))
+
+
+def _scale_mpl_figure(fig, scale):
+    """Magic scaling helper
+
+    Keeps font-size and artist sizes constant
+    0.5 : current font - 4pt
+    2.0 : current font + 4pt
+
+    XXX it's unclear why this works, but good to go for most cases
+    """
+    fig.set_size_inches(fig.get_size_inches() * scale)
+    fig.set_dpi(fig.get_dpi() * scale)
+    import matplotlib as mpl
+    if scale >= 1:
+        sfactor = scale ** 2
+    elif scale < 1:
+        sfactor = -((1. / scale) ** 2)
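+    # e.g. scale=2.0 -> sfactor=+4 (every font grows by 4 pt);
+    #      scale=0.5 -> sfactor=-4 (every font shrinks by 4 pt)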
+    for text in fig.findobj(mpl.text.Text):
+        fs = text.get_fontsize()
+        new_size = fs + sfactor
+        if new_size <= 0:
+            raise ValueError('could not rescale matplotlib fonts, consider '
+                             'increasing "scale"')
+        text.set_fontsize(new_size)
+
+    fig.canvas.draw()
+
+
+def _figs_to_mrislices(sl, n_jobs, **kwargs):
+    import matplotlib.pyplot as plt
+    plt.close('all')
+    use_jobs = min(n_jobs, max(1, len(sl)))
+    parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
+    outs = parallel(p_fun(slices=s, **kwargs)
+                    for s in np.array_split(sl, use_jobs))
+    for o in outs[1:]:
+        outs[0] += o
+    return outs[0]
+
+
+def _iterate_trans_views(function, **kwargs):
+    """Auxiliary function to iterate over views in trans fig.
+    """
+    from scipy.misc import imread
+    import matplotlib.pyplot as plt
+    import mayavi
+    fig = function(**kwargs)
+
+    assert isinstance(fig, mayavi.core.scene.Scene)
+
+    views = [(90, 90), (0, 90), (0, -90)]
+    fig2, axes = plt.subplots(1, len(views))
+    for view, ax in zip(views, axes):
+        mayavi.mlab.view(view[0], view[1])
+        # XXX: save_bmp / save_png / ...
+        tempdir = _TempDir()
+        temp_fname = op.join(tempdir, 'test.png')
+        if fig.scene is not None:
+            fig.scene.save_png(temp_fname)
+            im = imread(temp_fname)
+        else:  # Testing mode
+            im = np.zeros((2, 2, 3))
+        ax.imshow(im)
+        ax.axis('off')
+
+    mayavi.mlab.close(fig)
+    img = _fig_to_img(fig=fig2)
+    return img
+
+###############################################################################
+# TOC FUNCTIONS
+
+
+def _is_bad_fname(fname):
+    """Auxiliary function for identifying bad file naming patterns
+       and highlighting them in red in the TOC.
+    """
+    if fname.endswith('(whitened)'):
+        fname = fname[:-11]
+
+    if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
+        return 'red'
+    else:
+        return ''
+
+
+def _get_toc_property(fname):
+    """Auxiliary function to assign class names to TOC
+       list elements to allow toggling with buttons.
+    """
+    if fname.endswith(('-eve.fif', '-eve.fif.gz')):
+        div_klass = 'events'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
+        div_klass = 'evoked'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
+        div_klass = 'covariance'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('raw.fif', 'raw.fif.gz',
+                         'sss.fif', 'sss.fif.gz')):
+        div_klass = 'raw'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
+        div_klass = 'trans'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
+        div_klass = 'forward'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
+        div_klass = 'inverse'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
+        div_klass = 'epochs'
+        tooltip = fname
+        text = op.basename(fname)
+    elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
+        div_klass = 'mri'
+        tooltip = 'MRI'
+        text = 'MRI'
+    elif fname.endswith(('bem')):
+        div_klass = 'mri'
+        tooltip = 'MRI'
+        text = 'MRI'
+    elif fname.endswith('(whitened)'):
+        div_klass = 'evoked'
+        tooltip = fname
+        text = op.basename(fname[:-11]) + '(whitened)'
+    else:
+        div_klass = fname.split('-#-')[1]
+        tooltip = fname.split('-#-')[0]
+        text = fname.split('-#-')[0]
+
+    return div_klass, tooltip, text
+
+
+def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
+    """Auxiliary function to parallel process in batch mode.
+    """
+    htmls, report_fnames, report_sectionlabels = [], [], []
+
+    def _update_html(html, report_fname, report_sectionlabel):
+        """Update the lists above."""
+        htmls.append(html)
+        report_fnames.append(report_fname)
+        report_sectionlabels.append(report_sectionlabel)
+
+    for fname in fnames:
+        logger.info("Rendering : %s"
+                    % op.join('...' + report.data_path[-20:],
+                              fname))
+        try:
+            if fname.endswith(('raw.fif', 'raw.fif.gz',
+                               'sss.fif', 'sss.fif.gz')):
+                html = report._render_raw(fname)
+                report_fname = fname
+                report_sectionlabel = 'raw'
+            elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
+                html = report._render_forward(fname)
+                report_fname = fname
+                report_sectionlabel = 'forward'
+            elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
+                html = report._render_inverse(fname)
+                report_fname = fname
+                report_sectionlabel = 'inverse'
+            elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
+                if cov is not None:
+                    html = report._render_whitened_evoked(fname, cov, baseline)
+                    report_fname = fname + ' (whitened)'
+                    report_sectionlabel = 'evoked'
+                    _update_html(html, report_fname, report_sectionlabel)
+
+                html = report._render_evoked(fname, baseline)
+                report_fname = fname
+                report_sectionlabel = 'evoked'
+            elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
+                html = report._render_eve(fname, sfreq)
+                report_fname = fname
+                report_sectionlabel = 'events'
+            elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
+                html = report._render_epochs(fname)
+                report_fname = fname
+                report_sectionlabel = 'epochs'
+            elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
+                  report.info_fname is not None):
+                html = report._render_cov(fname, info)
+                report_fname = fname
+                report_sectionlabel = 'covariance'
+            elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
+                  report.info_fname is not None and report.subjects_dir
+                  is not None and report.subject is not None):
+                html = report._render_trans(fname, report.data_path, info,
+                                            report.subject,
+                                            report.subjects_dir)
+                report_fname = fname
+                report_sectionlabel = 'trans'
+            else:
+                html = None
+                report_fname = None
+                report_sectionlabel = None
+        except Exception as e:
+            if on_error == 'warn':
+                logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
+            elif on_error == 'raise':
+                raise
+            html = None
+            report_fname = None
+            report_sectionlabel = None
+        _update_html(html, report_fname, report_sectionlabel)
+
+    return htmls, report_fnames, report_sectionlabels
+
+###############################################################################
+# IMAGE FUNCTIONS
+
+
+def _build_image(data, cmap='gray'):
+    """Build an image encoded in base64.
+    """
+
+    import matplotlib.pyplot as plt
+    from matplotlib.figure import Figure
+    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+
+    figsize = data.shape[::-1]
+    if figsize[0] == 1:
+        figsize = tuple(figsize[1:])
+        data = data[:, :, 0]
+    fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
+    FigureCanvas(fig)
+    cmap = getattr(plt.cm, cmap, plt.cm.gray)
+    fig.figimage(data, cmap=cmap)
+    output = BytesIO()
+    fig.savefig(output, dpi=1.0, format='png')
+    return base64.b64encode(output.getvalue()).decode('ascii')
+
+
+def _iterate_sagittal_slices(array, limits=None):
+    """Iterate sagittal slice.
+    """
+    shape = array.shape[0]
+    for ind in range(shape):
+        if limits and ind not in limits:
+            continue
+        yield ind, array[ind, :, :]
+
+
+def _iterate_axial_slices(array, limits=None):
+    """Iterate axial slice.
+    """
+    shape = array.shape[1]
+    for ind in range(shape):
+        if limits and ind not in limits:
+            continue
+        yield ind, array[:, ind, :]
+
+
+def _iterate_coronal_slices(array, limits=None):
+    """Iterate coronal slice.
+    """
+    shape = array.shape[2]
+    for ind in range(shape):
+        if limits and ind not in limits:
+            continue
+        yield ind, np.flipud(np.rot90(array[:, :, ind]))
+
+
+def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
+                        image_format='png'):
+    """Auxiliary function for parallel processing of mri slices.
+    """
+    img_klass = 'slideimg-%s' % name
+
+    caption = u'Slice %s %s' % (name, ind)
+    slice_id = '%s-%s-%s' % (name, global_id, ind)
+    div_klass = 'span12 %s' % slides_klass
+    img = _build_image(data, cmap=cmap)
+    first = (ind == 0)
+    html = _build_html_image(img, slice_id, div_klass,
+                             img_klass, caption, first)
+    return ind, html
+
+
+###############################################################################
+# HTML functions
+
+def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
+    """Build a html image from a slice array.
+    """
+    html = []
+    add_style = u'' if show else u'style="display: none"'
+    html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
+    html.append(u'<div class="thumbnail">')
+    html.append(u'<img class="%s" alt="" style="width:90%%;" '
+                'src="data:image/png;base64,%s">'
+                % (img_klass, img))
+    html.append(u'</div>')
+    if caption:
+        html.append(u'<h4>%s</h4>' % caption)
+    html.append(u'</li>')
+    return u'\n'.join(html)
+
+slider_template = HTMLTemplate(u"""
+<script>$("#{{slider_id}}").slider({
+                       range: "min",
+                       /*orientation: "vertical",*/
+                       min: {{minvalue}},
+                       max: {{maxvalue}},
+                       step: {{step}},
+                       value: {{startvalue}},
+                       create: function(event, ui) {
+                       $(".{{klass}}").hide();
+                       $("#{{klass}}-{{startvalue}}").show();},
+                       stop: function(event, ui) {
+                       var list_value = $("#{{slider_id}}").slider("value");
+                       $(".{{klass}}").hide();
+                       $("#{{klass}}-"+list_value).show();}
+                       })</script>
+""")
+
+slider_full_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{title}}</h4>
+<div class="thumbnail">
+    <ul><li class="slider">
+        <div class="row">
+            <div class="col-md-6 col-md-offset-3">
+                <div id="{{slider_id}}"></div>
+                <ul class="thumbnails">
+                    {{image_html}}
+                </ul>
+                {{html}}
+            </div>
+        </div>
+    </li></ul>
+</div>
+</li>
+""")
+
+
+def _build_html_slider(slices_range, slides_klass, slider_id,
+                       start_value=None):
+    """Build an html slider for a given slices range and a slices klass.
+    """
+    if start_value is None:
+        start_value = slices_range[len(slices_range) // 2]
+    return slider_template.substitute(slider_id=slider_id,
+                                      klass=slides_klass,
+                                      step=slices_range[1] - slices_range[0],
+                                      minvalue=slices_range[0],
+                                      maxvalue=slices_range[-1],
+                                      startvalue=start_value)
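+
+# For instance (illustrative values only), a slider over every second slice:
+#
+#     html = _build_html_slider(range(0, 10, 2), 'axial-1', 'select-axial-1')
+#
+# produces a jQuery UI slider with min=0, max=8, step=2 that starts at the
+# middle slice (4).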
+
+
+###############################################################################
+# HTML scan renderer
+
+header_template = Template(u"""
+<!DOCTYPE html>
+<html lang="fr">
+<head>
+{{include}}
+<script type="text/javascript">
+
+        /* 't' toggles the show/hide state of all sections */
+        $(document).on('keydown', function (event) {
+            if (event.which == 84)
+                $('.has_toggle').trigger('click');
+        });
+
+        function togglebutton(class_name){
+            $(class_name).toggle();
+
+            if ($(class_name + '-btn').hasClass('active'))
+                $(class_name + '-btn').removeClass('active');
+            else
+                $(class_name + '-btn').addClass('active');
+        }
+
+        /* Scroll down on click to #id so that caption is not hidden
+        by navbar */
+        var shiftWindow = function() { scrollBy(0, -60) };
+        if (location.hash) shiftWindow();
+        window.addEventListener("hashchange", shiftWindow);
+
+        </script>
+<style type="text/css">
+
+body {
+    line-height: 1.5em;
+    font-family: arial, sans-serif;
+}
+
+h1 {
+    font-size: 30px;
+    text-align: center;
+}
+
+h4 {
+    text-align: center;
+}
+
+@link-color:       @brand-primary;
+@link-hover-color: darken(@link-color, 15%);
+
+a{
+    color: @link-color;
+    &:hover {
+        color: @link-hover-color;
+        text-decoration: underline;
+  }
+}
+
+li{
+    list-style-type:none;
+}
+
+#wrapper {
+    text-align: left;
+    margin: 5em auto;
+    width: 700px;
+}
+
+#container{
+    position: relative;
+}
+
+#content{
+    margin-left: 22%;
+    margin-top: 60px;
+    width: 75%;
+}
+
+#toc {
+  margin-top: navbar-height;
+  position: fixed;
+  width: 20%;
+  height: 90%;
+  overflow: auto;
+}
+
+#toc li {
+    overflow: hidden;
+    padding-bottom: 2px;
+    margin-left: 20px;
+}
+
+#toc span {
+    float: left;
+    padding: 0 2px 3px 0;
+}
+
+div.footer {
+    background-color: #C0C0C0;
+    color: #000000;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+</style>
+</head>
+<body>
+
+<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
+    <div class="container-fluid">
+        <div class="navbar-header navbar-left">
+            <ul class="nav nav-pills"><li class="active">
+                <a class="navbar-btn" data-toggle="collapse"
+                data-target="#viewnavbar" href="javascript:void(0)">
+                ></a></li></ul>
+    </div>
+        <h3 class="navbar-text" style="color:white">{{title}}</h3>
+        <ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
+        id="viewnavbar">
+
+        {{for section in sections}}
+
+        <li class="active {{sectionvars[section]}}-btn">
+           <a href="javascript:void(0)"
+           onclick="togglebutton('.{{sectionvars[section]}}')"
+           class="has_toggle">
+    {{section if section != 'mri' else 'MRI'}}
+           </a>
+        </li>
+
+        {{endfor}}
+
+        </ul>
+    </div>
+</nav>
+""")
+
+footer_template = HTMLTemplate(u"""
+</div>
+<div class="footer">
+        © Copyright 2012-{{current_year}}, MNE Developers.
+      Created on {{date}}.
+      Powered by <a href="http://martinos.org/mne">MNE</a>.
+</div>
+</body>
+</html>
+""")
+
+html_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+    <h4>{{caption}}</h4>
+    <div class="thumbnail">{{html}}</div>
+</li>
+""")
+
+image_template = Template(u"""
+
+{{default interactive = False}}
+{{default width = 50}}
+{{default id = False}}
+{{default image_format = 'png'}}
+{{default scale = None}}
+{{default comment = None}}
+
+<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
+{{if not show}}style="display: none"{{endif}}>
+
+{{if caption}}
+<h4>{{caption}}</h4>
+{{endif}}
+<div class="thumbnail">
+{{if not interactive}}
+    {{if image_format == 'png'}}
+        {{if scale is not None}}
+            <img alt="" style="width:{{width}}%;"
+             src="data:image/png;base64,{{img}}">
+        {{else}}
+            <img alt=""
+             src="data:image/png;base64,{{img}}">
+        {{endif}}
+    {{elif image_format == 'gif'}}
+        {{if scale is not None}}
+            <img alt="" style="width:{{width}}%;"
+             src="data:image/gif;base64,{{img}}">
+        {{else}}
+            <img alt=""
+             src="data:image/gif;base64,{{img}}">
+        {{endif}}
+    {{elif image_format == 'svg'}}
+        <div style="text-align:center;">
+            {{img}}
+        </div>
+    {{endif}}
+    {{if comment is not None}}
+        <br><br>
+        <div style="text-align:center;">
+            <style>
+                p.test {word-wrap: break-word;}
+            </style>
+            <p class="test">
+                {{comment}}
+            </p>
+        </div>
+    {{endif}}
+{{else}}
+    <center>{{interactive}}</center>
+{{endif}}
+</div>
+</li>
+""")
+
+repr_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{caption}}</h4><hr>
+{{repr}}
+<hr></li>
+""")
+
+raw_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{caption}}</h4>
+<table class="table table-hover">
+    <tr>
+        <th>Measurement date</th>
+        {{if meas_date is not None}}
+        <td>{{meas_date}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Experimenter</th>
+        {{if info['experimenter'] is not None}}
+        <td>{{info['experimenter']}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Digitized points</th>
+        {{if info['dig'] is not None}}
+        <td>{{len(info['dig'])}} points</td>
+        {{else}}
+        <td>Not available</td>
+        {{endif}}
+    </tr>
+    <tr>
+        <th>Good channels</th>
+        <td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
+            and {{n_eeg}} EEG channels</td>
+    </tr>
+    <tr>
+        <th>Bad channels</th>
+        {{if info['bads'] is not None}}
+        <td>{{', '.join(info['bads'])}}</td>
+        {{else}}<td>None</td>{{endif}}
+    </tr>
+    <tr>
+        <th>EOG channels</th>
+        <td>{{eog}}</td>
+    </tr>
+    <tr>
+        <th>ECG channels</th>
+        <td>{{ecg}}</td>
+    </tr>
+    <tr>
+        <th>Measurement time range</th>
+        <td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
+    </tr>
+    <tr>
+        <th>Sampling frequency</th>
+        <td>{{u'%0.2f' % info['sfreq']}} Hz</td>
+    </tr>
+    <tr>
+        <th>Highpass</th>
+        <td>{{u'%0.2f' % info['highpass']}} Hz</td>
+    </tr>
+     <tr>
+        <th>Lowpass</th>
+        <td>{{u'%0.2f' % info['lowpass']}} Hz</td>
+    </tr>
+</table>
+</li>
+""")
+
+
+toc_list = Template(u"""
+<li class="{{div_klass}}">
+    {{if id}}
+        <a href="javascript:void(0)" onclick="window.location.hash={{id}};">
+    {{endif}}
+<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
+{{if id}}</a>{{endif}}
+</li>
+""")
+
+
+def _check_scale(scale):
+    """Helper to ensure valid scale value is passed"""
+    if np.isscalar(scale) and scale <= 0:
+        raise ValueError('scale must be positive, not %s' % scale)
+
+
+class Report(object):
+    """Object for rendering HTML
+
+    Parameters
+    ----------
+    info_fname : str
+        Name of the file containing the info dictionary.
+    subjects_dir : str | None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+    subject : str | None
+        Subject name.
+    title : str
+        Title of the report.
+    cov_fname : str
+        Name of the file containing the noise covariance.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction for evokeds.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    To toggle the show/hide state of all sections in the html report, press
+    't'.
+
+    .. versionadded:: 0.8.0
+    """
+
+    def __init__(self, info_fname=None, subjects_dir=None,
+                 subject=None, title=None, cov_fname=None, baseline=None,
+                 verbose=None):
+
+        self.info_fname = info_fname
+        self.cov_fname = cov_fname
+        self.baseline = baseline
+        self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
+        self.subject = subject
+        self.title = title
+        self.verbose = verbose
+
+        self.initial_id = 0
+        self.html = []
+        self.fnames = []  # List of file names rendered
+        self.sections = []  # List of sections
+        self._sectionlabels = []  # Section labels
+        self._sectionvars = {}  # Section variable names in js
+        # boolean to specify if sections should be ordered in natural
+        # order of processing (raw -> events ... -> inverse)
+        self._sort_sections = False
+
+        self._init_render()  # Initialize the renderer
+
+    def _get_id(self):
+        """Get id of plot.
+        """
+        self.initial_id += 1
+        return self.initial_id
+
+    def _validate_input(self, items, captions, section, comments=None):
+        """Validate input.
+        """
+        if not isinstance(items, (list, tuple)):
+            items = [items]
+        if not isinstance(captions, (list, tuple)):
+            captions = [captions]
+        if not isinstance(comments, (list, tuple)):
+            if comments is None:
+                comments = [comments] * len(captions)
+            else:
+                comments = [comments]
+        if len(comments) != len(items):
+            raise ValueError('Comments and report items must have the same '
+                             'length or comments should be None.')
+        elif len(captions) != len(items):
+            raise ValueError('Captions and report items must have the same '
+                             'length.')
+
+        # Book-keeping of section names
+        if section not in self.sections:
+            self.sections.append(section)
+            self._sectionvars[section] = _clean_varnames(section)
+
+        return items, captions, comments
+
+    def _add_figs_to_section(self, figs, captions, section='custom',
+                             image_format='png', scale=None, comments=None):
+        """Auxiliary method for `add_section` and `add_figs_to_section`.
+        """
+
+        figs, captions, comments = self._validate_input(figs, captions,
+                                                        section, comments)
+        _check_scale(scale)
+        for fig, caption, comment in zip(figs, captions, comments):
+            caption = 'custom plot' if caption == '' else caption
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+            img_klass = self._sectionvars[section]
+
+            img = _fig_to_img(fig=fig, scale=scale,
+                              image_format=image_format)
+            html = image_template.substitute(img=img, id=global_id,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             show=True,
+                                             image_format=image_format,
+                                             comment=comment)
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(html)
+
+    def add_figs_to_section(self, figs, captions, section='custom',
+                            scale=None, image_format='png', comments=None):
+        """Append custom user-defined figures.
+
+        Parameters
+        ----------
+        figs : list of figures.
+            Each figure in the list can be an instance of
+            matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
+            or np.ndarray (images read in using scipy.imread).
+        captions : list of str
+            A list of captions to the figures.
+        section : str
+            Name of the section. If section already exists, the figures
+            will be appended to the end of the section.
+        scale : float | None | callable
+            Scale the images maintaining the aspect ratio.
+            If None, no scaling is applied. If float, scale will determine
+            the relative scaling (might not work for scale <= 1 depending on
+            font sizes). If function, should take a figure object as input
+            parameter. Defaults to None.
+        image_format : {'png', 'svg'}
+            The image format to be used for the report. Defaults to 'png'.
+        comments : None | str | list of str
+            A string of text or a list of strings of text to be appended after
+            the figure.
+        """
+        return self._add_figs_to_section(figs=figs, captions=captions,
+                                         section=section, scale=scale,
+                                         image_format=image_format,
+                                         comments=comments)
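+
+    # A minimal usage sketch (the figure is a stand-in, not part of this
+    # module):
+    #
+    #     import matplotlib.pyplot as plt
+    #     fig = plt.figure()
+    #     plt.plot([1, 2, 3])
+    #     report.add_figs_to_section(fig, captions='A line', section='custom')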
+
+    def add_images_to_section(self, fnames, captions, scale=None,
+                              section='custom', comments=None):
+        """Append custom user-defined images.
+
+        Parameters
+        ----------
+        fnames : str | list of str
+            A filename or a list of filenames from which images are read.
+            Images can be PNG, GIF or SVG.
+        captions : str | list of str
+            A caption or a list of captions to the images.
+        scale : float | None
+            Scale the images maintaining the aspect ratio.
+            Defaults to None. If None, no scaling will be applied.
+        section : str
+            Name of the section. If section already exists, the images
+            will be appended to the end of the section.
+        comments : None | str | list of str
+            A string of text or a list of strings of text to be appended after
+            the image.
+        """
+        # Note: using scipy.misc is equivalent because scipy internally
+        # imports PIL anyway. It's not possible to redirect image output
+        # to binary string using scipy.misc.
+        fnames, captions, comments = self._validate_input(fnames, captions,
+                                                          section, comments)
+        _check_scale(scale)
+
+        for fname, caption, comment in zip(fnames, captions, comments):
+            caption = 'custom plot' if caption == '' else caption
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+            img_klass = self._sectionvars[section]
+
+            image_format = os.path.splitext(fname)[1][1:]
+            image_format = image_format.lower()
+
+            if image_format not in ['png', 'gif', 'svg']:
+                raise ValueError("Unknown image format. Only 'png', 'gif' or "
+                                 "'svg' are supported. Got %s" % image_format)
+
+            # Convert image to binary string.
+            output = BytesIO()
+            with open(fname, 'rb') as f:
+                output.write(f.read())
+            img = base64.b64encode(output.getvalue()).decode('ascii')
+            html = image_template.substitute(img=img, id=global_id,
+                                             image_format=image_format,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             width=scale,
+                                             comment=comment,
+                                             show=True)
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(html)
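+
+    # For instance (hypothetical file name; PNG, GIF and SVG are accepted):
+    #
+    #     report.add_images_to_section('topomap.png', captions='Topomap',
+    #                                  scale=50)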
+
+    def add_htmls_to_section(self, htmls, captions, section='custom'):
+        """Append htmls to the report.
+
+        Parameters
+        ----------
+        htmls : str | list of str
+            An html str or a list of html str.
+        captions : str | list of str
+            A caption or a list of captions to the htmls.
+        section : str
+            Name of the section. If section already exists, the images
+            will be appended to the end of the section.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        htmls, captions, _ = self._validate_input(htmls, captions, section)
+        for html, caption in zip(htmls, captions):
+            caption = 'custom plot' if caption == '' else caption
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(
+                html_template.substitute(div_klass=div_klass, id=global_id,
+                                         caption=caption, html=html))
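+
+    # For instance, embedding a hand-written snippet (illustrative only):
+    #
+    #     report.add_htmls_to_section('<b>Subject passed QC</b>',
+    #                                 captions='QC', section='custom')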
+
+    def add_bem_to_section(self, subject, caption='BEM', section='bem',
+                           decim=2, n_jobs=1, subjects_dir=None):
+        """Renders a bem slider html str.
+
+        Parameters
+        ----------
+        subject : str
+            Subject name.
+        caption : str
+            A caption for the bem.
+        section : str
+            Name of the section. If section already exists, the bem
+            will be appended to the end of the section.
+        decim : int
+            Use this decimation factor for generating MRI/BEM images
+            (since it can be time consuming).
+        n_jobs : int
+            Number of jobs to run in parallel.
+        subjects_dir : str | None
+            Path to the SUBJECTS_DIR. If None, the path is obtained by using
+            the environment variable SUBJECTS_DIR.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        caption = 'custom plot' if caption == '' else caption
+        html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
+                                decim=decim, n_jobs=n_jobs, section=section,
+                                caption=caption)
+        html, caption, _ = self._validate_input(html, caption, section)
+        sectionvar = self._sectionvars[section]
+
+        self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
+        self._sectionlabels.append(sectionvar)
+        self.html.extend(html)
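+
+    # A hedged example (assumes a FreeSurfer subject 'sample' with BEM
+    # surfaces under the given subjects_dir):
+    #
+    #     report.add_bem_to_section('sample', caption='BEM surfaces',
+    #                               subjects_dir='/path/to/subjects')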
+
+    def add_slider_to_section(self, figs, captions=None, section='custom',
+                              title='Slider', scale=None, image_format='png'):
+        """Renders a slider of figs to the report.
+
+        Parameters
+        ----------
+        figs : list of figures.
+            Each figure in the list can be an instance of
+            matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
+            or np.ndarray (images read in using scipy.imread).
+        captions : list of str | list of float | None
+            A list of captions to the figures. If float, a str will be
+            constructed as `%f s`. If None, it will default to
+            `Data slice %d`.
+        section : str
+            Name of the section. If section already exists, the figures
+            will be appended to the end of the section.
+        title : str
+            The title of the slider.
+        scale : float | None | callable
+            Scale the images maintaining the aspect ratio.
+            If None, no scaling is applied. If float, scale will determine
+            the relative scaling (might not work for scale <= 1 depending on
+            font sizes). If function, should take a figure object as input
+            parameter. Defaults to None.
+        image_format : {'png', 'svg'}
+            The image format to be used for the report. Defaults to 'png'.
+
+        Notes
+        -----
+        .. versionadded:: 0.10.0
+        """
+
+        _check_scale(scale)
+        if not isinstance(figs[0], list):
+            figs = [figs]
+        else:
+            raise NotImplementedError('`add_slider_to_section` '
+                                      'can only add one slider at a time.')
+        figs, _, _ = self._validate_input(figs, section, section)
+
+        sectionvar = self._sectionvars[section]
+        self._sectionlabels.append(sectionvar)
+        global_id = self._get_id()
+        img_klass = self._sectionvars[section]
+        name = 'slider'
+
+        html = []
+        slides_klass = '%s-%s' % (name, global_id)
+        div_klass = 'span12 %s' % slides_klass
+
+        if isinstance(figs[0], list):
+            figs = figs[0]
+        sl = np.arange(0, len(figs))
+        slices = []
+        img_klass = 'slideimg-%s' % name
+
+        if captions is None:
+            captions = ['Data slice %d' % ii for ii in sl]
+        elif isinstance(captions, (list, tuple, np.ndarray)):
+            if len(figs) != len(captions):
+                raise ValueError('Captions must be the same length as the '
+                                 'number of slides.')
+            if isinstance(captions[0], (float, int)):
+                captions = ['%0.3f s' % caption for caption in captions]
+        else:
+            raise TypeError('Captions must be None or an iterable of '
+                            'float, int, or str. Got %s' % type(captions))
+        for ii, (fig, caption) in enumerate(zip(figs, captions)):
+            img = _fig_to_img(fig=fig, scale=scale, image_format=image_format)
+            slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
+            first = ii == 0
+            slices.append(_build_html_image(img, slice_id, div_klass,
+                          img_klass, caption, first))
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        # Render the slices
+        image_html = u'\n'.join(slices)
+        html.append(_build_html_slider(sl, slides_klass, slider_id,
+                                       start_value=0))
+        html = '\n'.join(html)
+
+        slider_klass = sectionvar
+        self.html.append(
+            slider_full_template.substitute(id=global_id, title=title,
+                                            div_klass=slider_klass,
+                                            slider_id=slider_id, html=html,
+                                            image_html=image_html))
+
+        self.fnames.append('%s-#-%s-#-custom' % (section, sectionvar))
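+
+    # A minimal sketch (stand-in figures; float captions become '%0.3f s'
+    # labels):
+    #
+    #     import matplotlib.pyplot as plt
+    #     figs = [plt.figure() for _ in range(3)]
+    #     report.add_slider_to_section(figs, captions=[0.0, 0.1, 0.2],
+    #                                  title='Evoked over time')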
+
+    ###########################################################################
+    # HTML rendering
+    def _render_one_axis(self, slices_iter, name, global_id, cmap,
+                         n_elements, n_jobs):
+        """Render one axis of the array.
+        """
+        global_id = global_id or name
+        html = []
+        slices, slices_range = [], []
+        html.append(u'<div class="col-xs-6 col-md-4">')
+        slides_klass = '%s-%s' % (name, global_id)
+
+        use_jobs = min(n_jobs, max(1, n_elements))
+        parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
+        r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
+                     for ind, data in slices_iter)
+        slices_range, slices = zip(*r)
+
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        html.append(u'<div id="%s"></div>' % slider_id)
+        html.append(u'<ul class="thumbnails">')
+        # Render the slices
+        html.append(u'\n'.join(slices))
+        html.append(u'</ul>')
+        html.append(_build_html_slider(slices_range, slides_klass, slider_id))
+        html.append(u'</div>')
+        return '\n'.join(html)
+
+    ###########################################################################
+    # global rendering functions
+    @verbose
+    def _init_render(self, verbose=None):
+        """Initialize the renderer.
+        """
+
+        inc_fnames = ['/usr/share/javascript/jquery/jquery.min.js',
+                      '/usr/share/javascript/jquery-ui/jquery-ui.min.js',
+                      'bootstrap.min.js', 'jquery-ui.min.css',
+                      'bootstrap.min.css']
+
+        include = list()
+        for inc_fname in inc_fnames:
+            logger.info('Embedding : %s' % inc_fname)
+            if not os.path.isabs(inc_fname):
+                inc_fname = op.join(op.dirname(__file__), 'html', inc_fname)
+            f = open(inc_fname, 'r')
+            if PY3:
+                f_contents = f.read()
+            else:
+                f_contents = f.read().decode('UTF-8')
+            if inc_fname.endswith('.js'):
+                include.append(u'<script type="text/javascript">' +
+                               f_contents + u'</script>')
+            elif inc_fname.endswith('.css'):
+                include.append(u'<style type="text/css">' +
+                               f_contents + u'</style>')
+            f.close()
+
+        self.include = ''.join(include)
+
+    @verbose
+    def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
+                     sort_sections=True, on_error='warn', verbose=None):
+        """Renders all the files in the folder.
+
+        Parameters
+        ----------
+        data_path : str
+            Path to the folder containing data whose HTML report will be
+            created.
+        pattern : str | list of str
+            Filename pattern(s) to include in the report.
+            Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
+            files.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        mri_decim : int
+            Use this decimation factor for generating MRI/BEM images
+            (since it can be time consuming).
+        sort_sections : bool
+            If True, sort sections in the order: raw -> events -> epochs
+            -> evoked -> covariance -> trans -> mri -> forward -> inverse.
+        on_error : str
+            What to do if a file cannot be rendered. Can be 'ignore',
+            'warn' (default), or 'raise'.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        """
+        valid_errors = ['ignore', 'warn', 'raise']
+        if on_error not in valid_errors:
+            raise ValueError('on_error must be one of %s, not %s'
+                             % (valid_errors, on_error))
+        self._sort = sort_sections
+
+        n_jobs = check_n_jobs(n_jobs)
+        self.data_path = data_path
+
+        if self.title is None:
+            self.title = 'MNE Report for ...%s' % self.data_path[-20:]
+
+        if not isinstance(pattern, (list, tuple)):
+            pattern = [pattern]
+
+        # iterate through the possible patterns
+        fnames = list()
+        for p in pattern:
+            fnames.extend(_recursive_search(self.data_path, p))
+
+        if self.info_fname is not None:
+            info = read_info(self.info_fname)
+            sfreq = info['sfreq']
+        else:
+            warnings.warn('`info_fname` not provided. Cannot render '
+                          '-cov.fif(.gz) and -trans.fif(.gz) files.')
+            info, sfreq = None, None
+
+        cov = None
+        if self.cov_fname is not None:
+            cov = read_cov(self.cov_fname)
+        baseline = self.baseline
+
+        # render plots in parallel; check that n_jobs <= # of files
+        logger.info('Iterating over %s potential files (this may take some '
+                    'time)' % len(fnames))
+        use_jobs = min(n_jobs, max(1, len(fnames)))
+        parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
+        r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
+                     for fname in np.array_split(fnames, use_jobs))
+        htmls, report_fnames, report_sectionlabels = zip(*r)
+
+        # combine results from n_jobs discarding plots not rendered
+        self.html = [html for html in sum(htmls, []) if html is not None]
+        self.fnames = [fname for fname in sum(report_fnames, []) if
+                       fname is not None]
+        self._sectionlabels = [slabel for slabel in
+                               sum(report_sectionlabels, [])
+                               if slabel is not None]
+
+        # find unique section labels
+        self.sections = sorted(set(self._sectionlabels))
+        self._sectionvars = dict(zip(self.sections, self.sections))
+
+        # render mri
+        if self.subjects_dir is not None and self.subject is not None:
+            logger.info('Rendering BEM')
+            self.html.append(self._render_bem(self.subject, self.subjects_dir,
+                                              mri_decim, n_jobs))
+            self.fnames.append('bem')
+            self._sectionlabels.append('mri')
+        else:
+            warnings.warn('`subjects_dir` and `subject` not provided.'
+                          ' Cannot render MRI and -trans.fif(.gz) files.')
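+
+    # A typical invocation (paths and file names are hypothetical):
+    #
+    #     report = Report(info_fname='sample_audvis-ave.fif',
+    #                     subjects_dir='/path/to/subjects', subject='sample')
+    #     report.parse_folder('/path/to/data',
+    #                         pattern=['*raw.fif', '*-ave.fif'])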
+
+    def save(self, fname=None, open_browser=True, overwrite=False):
+        """Save html report and open it in browser.
+
+        Parameters
+        ----------
+        fname : str
+            File name of the report.
+        open_browser : bool
+            Open html browser after saving if True.
+        overwrite : bool
+            If True, overwrite report if it already exists.
+        """
+
+        if fname is None:
+            if not hasattr(self, 'data_path'):
+                self.data_path = op.dirname(__file__)
+                warnings.warn('`data_path` not provided. Using %s instead'
+                              % self.data_path)
+            fname = op.realpath(op.join(self.data_path, 'report.html'))
+        else:
+            fname = op.realpath(fname)
+
+        self._render_toc()
+
+        html = footer_template.substitute(date=time.strftime("%B %d, %Y"),
+                                          current_year=time.strftime("%Y"))
+        self.html.append(html)
+
+        if not overwrite and op.isfile(fname):
+            msg = ('Report already exists at location %s. '
+                   'Overwrite it (y/[n])? '
+                   % fname)
+            answer = moves.input(msg)
+            if answer.lower() == 'y':
+                overwrite = True
+
+        if overwrite or not op.isfile(fname):
+            logger.info('Saving report to location %s' % fname)
+            fobj = codecs.open(fname, 'w', 'utf-8')
+            fobj.write(_fix_global_ids(u''.join(self.html)))
+            fobj.close()
+
+            # remove header, TOC and footer to allow more saves
+            self.html.pop(0)
+            self.html.pop(0)
+            self.html.pop()
+
+        if open_browser:
+            import webbrowser
+            webbrowser.open_new_tab('file://' + fname)
+
+        return fname
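+
+    # For example (hedged; without fname the report lands next to the parsed
+    # data as report.html):
+    #
+    #     report.save('report.html', open_browser=False, overwrite=True)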
+
+    @verbose
+    def _render_toc(self, verbose=None):
+        """Render the Table of Contents.
+        """
+
+        logger.info('Rendering : Table of Contents')
+
+        html_toc = u'<div id="container">'
+        html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
+
+        global_id = 1
+
+        # Reorder self.sections to reflect natural ordering
+        if self._sort_sections:
+            sections = list(set(self.sections) & set(SECTION_ORDER))
+            custom = [section for section in self.sections if section
+                      not in SECTION_ORDER]
+            order = [sections.index(section) for section in SECTION_ORDER if
+                     section in sections]
+            self.sections = np.array(sections)[order].tolist() + custom
+
+        # Sort by section
+        html, fnames, sectionlabels = [], [], []
+        for section in self.sections:
+            logger.info('%s' % section)
+            for sectionlabel, this_html, fname in (zip(self._sectionlabels,
+                                                   self.html, self.fnames)):
+                if self._sectionvars[section] == sectionlabel:
+                    html.append(this_html)
+                    fnames.append(fname)
+                    sectionlabels.append(sectionlabel)
+                    logger.info('\t... %s' % fname[-20:])
+                    color = _is_bad_fname(fname)
+                    div_klass, tooltip, text = _get_toc_property(fname)
+
+                    # loop through conditions for evoked
+                    if fname.endswith(('-ave.fif', '-ave.fif.gz',
+                                      '(whitened)')):
+                        text = os.path.basename(fname)
+                        if fname.endswith('(whitened)'):
+                            fname = fname[:-11]
+                        # XXX: remove redundant read_evokeds
+                        evokeds = read_evokeds(fname, verbose=False)
+
+                        html_toc += toc_list.substitute(
+                            div_klass=div_klass, id=None, tooltip=fname,
+                            color='#428bca', text=text)
+
+                        html_toc += u'<li class="evoked"><ul>'
+                        for ev in evokeds:
+                            html_toc += toc_list.substitute(
+                                div_klass=div_klass, id=global_id,
+                                tooltip=fname, color=color, text=ev.comment)
+                            global_id += 1
+                        html_toc += u'</ul></li>'
+
+                    elif fname.endswith(tuple(VALID_EXTENSIONS +
+                                        ['bem', 'custom'])):
+                        html_toc += toc_list.substitute(div_klass=div_klass,
+                                                        id=global_id,
+                                                        tooltip=tooltip,
+                                                        color=color,
+                                                        text=text)
+                        global_id += 1
+
+        html_toc += u'\n</ul></div>'
+        html_toc += u'<div id="content">'
+
+        # The sorted html (according to section)
+        self.html = html
+        self.fnames = fnames
+        self._sectionlabels = sectionlabels
+
+        html_header = header_template.substitute(title=self.title,
+                                                 include=self.include,
+                                                 sections=self.sections,
+                                                 sectionvars=self._sectionvars)
+        self.html.insert(0, html_header)  # Insert header at position 0
+        self.html.insert(1, html_toc)  # insert TOC
+
+    def _render_array(self, array, global_id=None, cmap='gray',
+                      limits=None, n_jobs=1):
+        """Render mri without bem contours.
+        """
+        html = []
+        html.append(u'<div class="row">')
+        # Axial
+        limits = limits or {}
+        axial_limit = limits.get('axial')
+        axial_slices_gen = _iterate_axial_slices(array, axial_limit)
+        html.append(
+            self._render_one_axis(axial_slices_gen, 'axial',
+                                  global_id, cmap, array.shape[1], n_jobs))
+        # Sagittal
+        sagittal_limit = limits.get('sagittal')
+        sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
+        html.append(
+            self._render_one_axis(sagittal_slices_gen, 'sagittal',
+                                  global_id, cmap, array.shape[1], n_jobs))
+        html.append(u'</div>')
+        html.append(u'<div class="row">')
+        # Coronal
+        coronal_limit = limits.get('coronal')
+        coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
+        html.append(
+            self._render_one_axis(coronal_slices_gen, 'coronal',
+                                  global_id, cmap, array.shape[1], n_jobs))
+        # Close section
+        html.append(u'</div>')
+        return '\n'.join(html)
+
+    def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
+                             shape, orientation='coronal', decim=2, n_jobs=1):
+        """Render one axis of bem contours.
+        """
+        orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+        orientation_axis = orientation_name2axis[orientation]
+        n_slices = shape[orientation_axis]
+        orig_size = np.roll(shape, orientation_axis)[[1, 2]]
+
+        name = orientation
+        html = []
+        html.append(u'<div class="col-xs-6 col-md-4">')
+        slides_klass = '%s-%s' % (name, global_id)
+
+        sl = np.arange(0, n_slices, decim)
+        kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
+                      orientation=orientation, img_output=orig_size)
+        imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
+        slices = []
+        img_klass = 'slideimg-%s' % name
+        div_klass = 'span12 %s' % slides_klass
+        for ii, img in enumerate(imgs):
+            slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
+            caption = u'Slice %s %s' % (name, sl[ii])
+            first = ii == 0
+            slices.append(_build_html_image(img, slice_id, div_klass,
+                          img_klass, caption, first))
+
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        html.append(u'<div id="%s"></div>' % slider_id)
+        html.append(u'<ul class="thumbnails">')
+        # Render the slices
+        html.append(u'\n'.join(slices))
+        html.append(u'</ul>')
+        html.append(_build_html_slider(sl, slides_klass, slider_id))
+        html.append(u'</div>')
+        return '\n'.join(html)
+
+    def _render_image(self, image, cmap='gray', n_jobs=1):
+        """Render one slice of mri without bem.
+        """
+        import nibabel as nib
+
+        global_id = self._get_id()
+
+        if 'mri' not in self.sections:
+            self.sections.append('mri')
+            self._sectionvars['mri'] = 'mri'
+
+        nim = nib.load(image)
+        data = nim.get_data()
+        shape = data.shape
+        limits = {'sagittal': range(0, shape[0], 2),
+                  'axial': range(0, shape[1], 2),
+                  'coronal': range(0, shape[2], 2)}
+        name = op.basename(image)
+        html = u'<li class="mri" id="%d">\n' % global_id
+        html += u'<h2>%s</h2>\n' % name
+        html += self._render_array(data, global_id=global_id,
+                                   cmap=cmap, limits=limits,
+                                   n_jobs=n_jobs)
+        html += u'</li>\n'
+        return html
+
+    def _render_raw(self, raw_fname):
+        """Render raw.
+        """
+        global_id = self._get_id()
+        div_klass = 'raw'
+        caption = u'Raw : %s' % raw_fname
+
+        raw = Raw(raw_fname)
+
+        n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
+        n_grad = len(pick_types(raw.info, meg='grad'))
+        n_mag = len(pick_types(raw.info, meg='mag'))
+        pick_eog = pick_types(raw.info, meg=False, eog=True)
+        if len(pick_eog) > 0:
+            eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
+        else:
+            eog = 'Not available'
+        pick_ecg = pick_types(raw.info, meg=False, ecg=True)
+        if len(pick_ecg) > 0:
+            ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
+        else:
+            ecg = 'Not available'
+        meas_date = raw.info['meas_date']
+        if meas_date is not None:
+            meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
+        tmin = raw.first_samp / raw.info['sfreq']
+        tmax = raw.last_samp / raw.info['sfreq']
+
+        html = raw_template.substitute(div_klass=div_klass,
+                                       id=global_id,
+                                       caption=caption,
+                                       info=raw.info,
+                                       meas_date=meas_date,
+                                       n_eeg=n_eeg, n_grad=n_grad,
+                                       n_mag=n_mag, eog=eog,
+                                       ecg=ecg, tmin=tmin, tmax=tmax)
+        return html
+
+    def _render_forward(self, fwd_fname):
+        """Render forward.
+        """
+        div_klass = 'forward'
+        caption = u'Forward: %s' % fwd_fname
+        fwd = read_forward_solution(fwd_fname)
+        repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
+        global_id = self._get_id()
+        html = repr_template.substitute(div_klass=div_klass,
+                                        id=global_id,
+                                        caption=caption,
+                                        repr=repr_fwd)
+        return html
+
+    def _render_inverse(self, inv_fname):
+        """Render inverse.
+        """
+        div_klass = 'inverse'
+        caption = u'Inverse: %s' % inv_fname
+        inv = read_inverse_operator(inv_fname)
+        repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
+        global_id = self._get_id()
+        html = repr_template.substitute(div_klass=div_klass,
+                                        id=global_id,
+                                        caption=caption,
+                                        repr=repr_inv)
+        return html
+
+    def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
+        """Render evoked.
+        """
+        evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
+
+        html = []
+        for ev in evokeds:
+            global_id = self._get_id()
+
+            kwargs = dict(show=False)
+            img = _fig_to_img(ev.plot, **kwargs)
+
+            caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
+            div_klass = 'evoked'
+            img_klass = 'evoked'
+            show = True
+            html.append(image_template.substitute(img=img, id=global_id,
+                                                  div_klass=div_klass,
+                                                  img_klass=img_klass,
+                                                  caption=caption,
+                                                  show=show))
+            has_types = []
+            if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
+                has_types.append('eeg')
+            if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
+                has_types.append('grad')
+            if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
+                has_types.append('mag')
+            for ch_type in has_types:
+                kwargs.update(ch_type=ch_type)
+                img = _fig_to_img(ev.plot_topomap, **kwargs)
+                caption = u'Topomap (ch_type = %s)' % ch_type
+                html.append(image_template.substitute(img=img,
+                                                      div_klass=div_klass,
+                                                      img_klass=img_klass,
+                                                      caption=caption,
+                                                      show=show))
+
+        return '\n'.join(html)
+
+    def _render_eve(self, eve_fname, sfreq=None):
+        """Render events.
+        """
+        global_id = self._get_id()
+        events = read_events(eve_fname)
+
+        kwargs = dict(events=events, sfreq=sfreq, show=False)
+        img = _fig_to_img(plot_events, **kwargs)
+
+        caption = 'Events : ' + eve_fname
+        div_klass = 'events'
+        img_klass = 'events'
+        show = True
+
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_epochs(self, epo_fname):
+        """Render epochs.
+        """
+        global_id = self._get_id()
+
+        epochs = read_epochs(epo_fname)
+        kwargs = dict(subject=self.subject, show=False)
+        img = _fig_to_img(epochs.plot_drop_log, **kwargs)
+        caption = 'Epochs : ' + epo_fname
+        div_klass = 'epochs'
+        img_klass = 'epochs'
+        show = True
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_cov(self, cov_fname, info_fname):
+        """Render cov.
+        """
+        global_id = self._get_id()
+        cov = read_cov(cov_fname)
+        fig, _ = plot_cov(cov, info_fname, show=False)
+        img = _fig_to_img(fig=fig)
+        caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
+        div_klass = 'covariance'
+        img_klass = 'covariance'
+        show = True
+        html = image_template.substitute(img=img, id=global_id,
+                                         div_klass=div_klass,
+                                         img_klass=img_klass,
+                                         caption=caption,
+                                         show=show)
+        return html
+
+    def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
+        """Show whitened evoked.
+        """
+        global_id = self._get_id()
+
+        evokeds = read_evokeds(evoked_fname, verbose=False)
+
+        html = []
+        for ev in evokeds:
+
+            ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
+                              verbose=False)
+
+            global_id = self._get_id()
+
+            kwargs = dict(noise_cov=noise_cov, show=False)
+            img = _fig_to_img(ev.plot_white, **kwargs)
+
+            caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
+            div_klass = 'evoked'
+            img_klass = 'evoked'
+            show = True
+            html.append(image_template.substitute(img=img, id=global_id,
+                                                  div_klass=div_klass,
+                                                  img_klass=img_klass,
+                                                  caption=caption,
+                                                  show=show))
+        return '\n'.join(html)
+
+    def _render_trans(self, trans, path, info, subject,
+                      subjects_dir, image_format='png'):
+        """Render trans.
+        """
+        kwargs = dict(info=info, trans=trans, subject=subject,
+                      subjects_dir=subjects_dir)
+        try:
+            img = _iterate_trans_views(function=plot_trans, **kwargs)
+        except IOError:
+            img = _iterate_trans_views(function=plot_trans, source='head',
+                                       **kwargs)
+
+        if img is not None:
+            global_id = self._get_id()
+            caption = 'Trans : ' + trans
+            div_klass = 'trans'
+            img_klass = 'trans'
+            show = True
+            html = image_template.substitute(img=img, id=global_id,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             width=75,
+                                             show=show)
+            return html
+
+    def _render_bem(self, subject, subjects_dir, decim, n_jobs,
+                    section='mri', caption='BEM'):
+        """Render mri+bem.
+        """
+        import nibabel as nib
+
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+        # Get the MRI filename
+        mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+        if not op.isfile(mri_fname):
+            warnings.warn('MRI file "%s" does not exist' % mri_fname)
+
+        # Get the BEM surface filenames
+        bem_path = op.join(subjects_dir, subject, 'bem')
+
+        if not op.isdir(bem_path):
+            warnings.warn('Subject bem directory "%s" does not exist' %
+                          bem_path)
+            return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
+
+        surf_fnames = []
+        for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
+            surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
+            if len(surf_fname) > 0:
+                surf_fname = surf_fname[0]
+            else:
+                warnings.warn('No surface found for %s.' % surf_name)
+                return self._render_image(mri_fname, cmap='gray')
+            surf_fnames.append(surf_fname)
+
+        # XXX : find a better way to get max range of slices
+        nim = nib.load(mri_fname)
+        data = nim.get_data()
+        shape = data.shape
+        del data  # free up memory
+
+        html = u''
+
+        global_id = self._get_id()
+
+        if section == 'mri' and 'mri' not in self.sections:
+            self.sections.append('mri')
+            self._sectionvars['mri'] = 'mri'
+
+        name = caption
+
+        html += u'<li class="mri" id="%d">\n' % global_id
+        html += u'<h2>%s</h2>\n' % name
+        html += u'<div class="row">'
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'axial', decim, n_jobs)
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'sagittal', decim, n_jobs)
+        html += u'</div><div class="row">'
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'coronal', decim, n_jobs)
+        html += u'</div>'
+        html += u'</li>\n'
+        return html
+
+
+def _clean_varnames(s):
+
+    # Remove invalid characters
+    s = re.sub('[^0-9a-zA-Z_]', '', s)
+
+    # add report_ at the beginning so that the javascript class names
+    # are valid ones
+    return 'report_' + s
+
+
+def _recursive_search(path, pattern):
+    """Auxiliary function for recursive_search of the directory.
+    """
+    filtered_files = list()
+    for dirpath, dirnames, files in os.walk(path):
+        for f in fnmatch.filter(files, pattern):
+            # only the following file types are supported
+            # this ensures equitable distribution of jobs
+            if f.endswith(tuple(VALID_EXTENSIONS)):
+                filtered_files.append(op.realpath(op.join(dirpath, f)))
+
+    return filtered_files
+
+
+def _fix_global_ids(html):
+    """Auxiliary function for fixing the global_ids after reordering in
+       _render_toc().
+    """
+    html = re.sub(r'id="\d+"', 'id="###"', html)
+    global_id = 1
+    while len(re.findall('id="###"', html)) > 0:
+        html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
+        global_id += 1
+    return html
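+
+# For instance (illustrative string), ids are renumbered in document order:
+#
+#     _fix_global_ids(u'<li id="7">a</li><li id="3">b</li>')
+#     # -> u'<li id="1">a</li><li id="2">b</li>'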
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/selection.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/selection.py
new file mode 100644
index 0000000..cef816a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/selection.py
@@ -0,0 +1,111 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+from os import path
+
+from .utils import logger, verbose
+from .externals import six
+
+
+@verbose
+def read_selection(name, fname=None, verbose=None):
+    """Read channel selection from file
+
+    By default, the selections used in mne_browse_raw are supported.
+    Additional selections can be added by specifying a selection file (e.g.
+    produced using mne_browse_raw) using the fname parameter.
+
+    The name parameter can be a string or a list of strings. The returned
+    selection will be the combination of all selections in the file where
+    (at least) one element in name is a substring of the selection name in
+    the file. For example, "name = ['temporal', 'Right-frontal']" will produce
+    a combination of "Left-temporal", "Right-temporal", and "Right-frontal".
+
+    The included selections are:
+
+        * ``Vertex``
+        * ``Left-temporal``
+        * ``Right-temporal``
+        * ``Left-parietal``
+        * ``Right-parietal``
+        * ``Left-occipital``
+        * ``Right-occipital``
+        * ``Left-frontal``
+        * ``Right-frontal``
+
+
+    Parameters
+    ----------
+    name : string or list of string
+        Name of the selection. If it is a list, the selections are combined.
+    fname : string
+        Filename of the selection file (if None, built-in selections are used).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    sel : list of string
+        List with channel names in the selection.
+    """
+
+    # convert name to list of string
+    if isinstance(name, tuple):
+        name = list(name)
+
+    if not isinstance(name, list):
+        name = [name]
+
+    # use built-in selections by default
+    if fname is None:
+        fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')
+
+    if not path.exists(fname):
+        raise ValueError('The file %s does not exist.' % fname)
+
+    # use this to make sure we find at least one match for each name
+    name_found = {}
+    for n in name:
+        name_found[n] = False
+
+    fid = open(fname, 'r')
+    sel = []
+
+    for line in fid:
+        line = line.strip()
+
+        # skip blank lines and comments
+        if len(line) == 0 or line[0] == '#':
+            continue
+
+        # get the name of the selection in the file
+        pos = line.find(':')
+        if pos < 0:
+            logger.info('":" delimiter not found in selections file, '
+                        'skipping line')
+            continue
+
+        sel_name_file = line[:pos]
+
+        # search for substring match with name provided
+        for n in name:
+            if sel_name_file.find(n) >= 0:
+                sel.extend(line[pos + 1:].split('|'))
+                name_found[n] = True
+                break
+
+    fid.close()
+
+    # make sure we found at least one match for each name
+    for n, found in six.iteritems(name_found):
+        if not found:
+            raise ValueError('No match for selection name "%s" found' % n)
+
+    # make the selection a sorted list with unique elements
+    sel = list(set(sel))
+    sel.sort()
+
+    return sel
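+
+
+# A minimal usage sketch: combining two of the built-in selections listed
+# above. The exact channel names depend on the selection file, so the
+# result shown is illustrative only.
+#
+# >>> sel = read_selection(['Left-temporal', 'Right-temporal'])
+# >>> sel[0]  # e.g. 'MEG 0111'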
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/__init__.py
new file mode 100644
index 0000000..081654b
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/__init__.py
@@ -0,0 +1,9 @@
+"""Data simulation code
+"""
+
+from .evoked import (generate_evoked, generate_noise_evoked, add_noise_evoked,
+                     simulate_evoked, simulate_noise_evoked)
+from .raw import simulate_raw
+from .source import (select_source_in_label, generate_sparse_stc, generate_stc,
+                     simulate_sparse_stc)
+from .metrics import source_estimate_quantification
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/evoked.py
new file mode 100644
index 0000000..d349706
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/evoked.py
@@ -0,0 +1,214 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+import copy
+import warnings
+
+import numpy as np
+
+from ..io.pick import pick_channels_cov
+from ..forward import apply_forward
+from ..utils import check_random_state, verbose, _time_mask, deprecated
+
+
+ at deprecated('"generate_evoked" is deprecated and will be removed in '
+            'MNE-0.11. Please use simulate_evoked instead')
+def generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None,
+                    tmax=None, iir_filter=None, random_state=None,
+                    verbose=None):
+    """Generate noisy evoked data
+
+    Parameters
+    ----------
+    fwd : dict
+        a forward solution.
+    stc : SourceEstimate object
+        The source time courses.
+    evoked : None | Evoked object
+        An instance of evoked used as template.
+    cov : Covariance object
+        The noise covariance
+    snr : float
+        signal to noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) ).
+    tmin : float | None
+        Start of the time interval to estimate SNR. If None, the first
+        time point is used.
+    tmax : float | None
+        End of the time interval to estimate SNR. If None, the last
+        time point is used.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked object
+        The simulated evoked data
+    """
+    return simulate_evoked(fwd, stc, evoked.info, cov, snr, tmin,
+                           tmax, iir_filter, random_state, verbose)
+
+
+@verbose
+def simulate_evoked(fwd, stc, info, cov, snr=3., tmin=None, tmax=None,
+                    iir_filter=None, random_state=None, verbose=None):
+    """Generate noisy evoked data
+
+    Parameters
+    ----------
+    fwd : dict
+        a forward solution.
+    stc : SourceEstimate object
+        The source time courses.
+    info : dict
+        Measurement info to generate the evoked.
+    cov : Covariance object
+        The noise covariance.
+    snr : float
+        signal to noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) ).
+    tmin : float | None
+        Start of the time interval to estimate SNR. If None, the first
+        time point is used.
+    tmax : float | None
+        End of the time interval to estimate SNR. If None, the last
+        time point is used.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked object
+        The simulated evoked data
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    evoked = apply_forward(fwd, stc, info)
+    if snr < np.inf:
+        noise = simulate_noise_evoked(evoked, cov, iir_filter, random_state)
+        evoked_noise = add_noise_evoked(evoked, noise, snr,
+                                        tmin=tmin, tmax=tmax)
+    else:
+        evoked_noise = evoked
+    return evoked_noise
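+
+# A minimal usage sketch (the file name and the `stc`, `info`, and `cov`
+# variables are hypothetical; `stc` must share its sample rate with `info`):
+#
+# >>> fwd = mne.read_forward_solution('sample-fwd.fif', force_fixed=True)
+# >>> evoked = simulate_evoked(fwd, stc, info, cov, snr=6.,
+# ...                          iir_filter=[1, -0.9])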
+
+
+ at deprecated('"generate_noise_evoked" is deprecated and will be removed in '
+            'MNE-0.11. Please use simulate_noise_evoked instead')
+def generate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
+    """Creates noise as a multivariate Gaussian
+
+    The spatial covariance of the noise is given from the cov matrix.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        An instance of evoked used as template.
+    cov : instance of Covariance
+        The noise covariance.
+    iir_filter : None | array
+        IIR filter coefficients (denominator as it is an AR filter).
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    noise : evoked object
+        an instance of evoked
+    """
+    return simulate_noise_evoked(evoked, cov, iir_filter, random_state)
+
+
+def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
+    """Creates noise as a multivariate Gaussian
+
+    The spatial covariance of the noise is given from the cov matrix.
+
+    Parameters
+    ----------
+    evoked : evoked object
+        an instance of evoked used as template
+    cov : Covariance object
+        The noise covariance
+    iir_filter : None | array
+        IIR filter coefficients (denominator)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    noise : evoked object
+        an instance of evoked
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    noise = evoked.copy()
+    noise.data = _generate_noise(evoked.info, cov, iir_filter, random_state,
+                                 evoked.data.shape[1])[0]
+    return noise
+
+
+def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None):
+    """Helper to create spatially colored and temporally IIR-filtered noise"""
+    from scipy.signal import lfilter
+    noise_cov = pick_channels_cov(cov, include=info['ch_names'], exclude=[])
+    rng = check_random_state(random_state)
+    c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
+    mu_channels = np.zeros(len(c))
+    # we almost always get a positive semidefinite warning here, so squash it
+    with warnings.catch_warnings(record=True):
+        noise = rng.multivariate_normal(mu_channels, c, n_samples).T
+    if iir_filter is not None:
+        if zi is None:
+            zi = np.zeros((len(c), len(iir_filter) - 1))
+        noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
+    else:
+        zf = None
+    return noise, zf
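+
+# Sketch of the zi/zf handoff (`info`, `cov`, and `rng` are hypothetical):
+# feeding the final filter state of one call into the next keeps the
+# AR-colored noise continuous across chunk boundaries, which is how
+# simulate_raw (later in this changeset) generates noise chunk by chunk.
+#
+# >>> chunk1, zf = _generate_noise(info, cov, [1, -0.9], rng, 500)
+# >>> chunk2, zf = _generate_noise(info, cov, [1, -0.9], rng, 500, zi=zf)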
+
+
+def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
+    """Adds noise to evoked object with specified SNR.
+
+    SNR is computed in the interval from tmin to tmax.
+
+    Parameters
+    ----------
+    evoked : Evoked object
+        An instance of evoked with signal
+    noise : Evoked object
+        An instance of evoked with noise
+    snr : float
+        signal to noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) )
+    tmin : float
+        Start of the time interval over which the SNR is computed.
+    tmax : float
+        End of the time interval over which the SNR is computed.
+
+    Returns
+    -------
+    evoked_noise : Evoked object
+        An instance of evoked corrupted by noise
+    """
+    evoked = copy.deepcopy(evoked)
+    tmask = _time_mask(evoked.times, tmin, tmax)
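+    # tmp is the current SNR in dB over the masked interval; scaling the
+    # noise amplitude by 10 ** ((tmp - snr) / 20) changes its power by
+    # 10 ** ((tmp - snr) / 10), leaving a power ratio of exactly snr dB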
+    tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
+                        np.mean((noise.data ** 2).ravel()))
+    noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
+    evoked.data += noise.data
+    return evoked
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/metrics.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/metrics.py
new file mode 100644
index 0000000..aede064
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/metrics.py
@@ -0,0 +1,68 @@
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.linalg import norm
+
+# TODO: Add more localization accuracy functions. For example, distance between
+#       true dipole position (in simulated stc) and the centroid of the
+#       estimated activity.
+
+
+def _check_stc(stc1, stc2):
+    """Helper for checking that stcs are compatible"""
+    if stc1.data.shape != stc2.data.shape:
+        raise ValueError('Data in stcs must have the same size')
+    if not np.all(stc1.times == stc2.times):
+        raise ValueError('Times of two stcs must match.')
+
+
+def source_estimate_quantification(stc1, stc2, metric='rms'):
+    """Helper function to calculate matrix similarities.
+
+    Parameters
+    ----------
+    stc1 : SourceEstimate
+        First source estimate for comparison.
+    stc2 : SourceEstimate
+        Second source estimate for comparison.
+    metric : str
+        Metric to calculate, 'rms' or 'cosine'.
+
+    Returns
+    -------
+    score : float | array
+        Calculated metric.
+
+    Notes
+    -----
+    Metric calculation has multiple options:
+
+        * rms: Root mean square of difference between stc data matrices.
+        * cosine: Normalized correlation of all elements in stc data matrices.
+
+    .. versionadded:: 0.10.0
+    """
+    known_metrics = ['rms', 'cosine']
+    if metric not in known_metrics:
+        raise ValueError('metric must be a str from the known metrics: '
+                         '"rms" or "cosine"')
+
+    # Check that the data are the same size; comparing distributed and
+    # sparse estimates is not supported so far.
+    _check_stc(stc1, stc2)
+    data1, data2 = stc1.data, stc2.data
+
+    # Calculate root mean square difference between two matrices
+    if metric == 'rms':
+        score = np.sqrt(np.mean((data1 - data2) ** 2))
+
+    # Calculate correlation coefficient between matrix elements
+    elif metric == 'cosine':
+        score = 1. - (np.dot(data1.flatten(), data2.flatten()) /
+                      (norm(data1) * norm(data2)))
+    return score
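+
+# A minimal self-check sketch (`stc` is any hypothetical SourceEstimate):
+# comparing an estimate with itself gives an rms score of exactly 0 and a
+# cosine score of (numerically) 0.
+#
+# >>> source_estimate_quantification(stc, stc, metric='rms')
+# 0.0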
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/raw.py
new file mode 100644
index 0000000..39a16c7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/raw.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+# Authors: Mark Wronkiewicz <wronk at uw.edu>
+#          Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import warnings
+from copy import deepcopy
+
+from .evoked import _generate_noise
+from ..event import _get_stim_channel
+from ..io.pick import pick_types, pick_info, pick_channels
+from ..source_estimate import VolSourceEstimate
+from ..cov import make_ad_hoc_cov, read_cov
+from ..bem import fit_sphere_to_headshape, make_sphere_model, read_bem_solution
+from ..io import RawArray, _BaseRaw
+from ..chpi import get_chpi_positions, _get_hpi_info
+from ..io.constants import FIFF
+from ..forward import (_magnetic_dipole_field_vec, _merge_meg_eeg_fwds,
+                       _stc_src_sel, convert_forward_solution,
+                       _prepare_for_forward, _prep_meg_channels,
+                       _compute_forwards, _to_forward_dict)
+from ..transforms import _get_mri_head_t, transform_surface_to
+from ..source_space import _ensure_src, _points_outside_surface
+from ..source_estimate import _BaseSourceEstimate
+from ..utils import logger, verbose, check_random_state
+from ..externals.six import string_types
+
+
+def _log_ch(start, info, ch):
+    """Helper to log channel information"""
+    if ch is not None:
+        extra, just, ch = ' stored on channel:', 50, info['ch_names'][ch]
+    else:
+        extra, just, ch = ' not stored', 0, ''
+    logger.info((start + extra).ljust(just) + ch)
+
+
+@verbose
+def simulate_raw(raw, stc, trans, src, bem, cov='simple',
+                 blink=False, ecg=False, chpi=False, head_pos=None,
+                 mindist=1.0, interp='cos2', iir_filter=None, n_jobs=1,
+                 random_state=None, verbose=None):
+    """Simulate raw data with head movements
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw template to use for simulation. The ``info``, ``times``,
+        and potentially ``first_samp`` properties will be used.
+    stc : instance of SourceEstimate
+        The source estimate to use to simulate data. Must have the same
+        sample rate as the raw data.
+    trans : dict | str | None
+        Either a transformation filename (usually made using mne_analyze)
+        or an info dict (usually opened using read_trans()).
+        If string, an ending of `.fif` or `.fif.gz` will be assumed to
+        be in FIF format, any other ending will be assumed to be a text
+        file with a 4x4 transformation matrix (like the `--trans` MNE-C
+        option). If trans is None, an identity transform will be used.
+    src : str | instance of SourceSpaces
+        Source space corresponding to the stc. If string, should be a source
+        space filename. Can also be an instance of loaded or generated
+        SourceSpaces.
+    bem : str | dict
+        BEM solution corresponding to the stc. If string, should be a BEM
+        solution filename (e.g., "sample-5120-5120-5120-bem-sol.fif").
+    cov : instance of Covariance | str | None
+        The sensor covariance matrix used to generate noise. If None,
+        no noise will be added. If 'simple', a basic (diagonal) ad-hoc
+        noise covariance will be used. If a string, then the covariance
+        will be loaded.
+    blink : bool
+        If True, add simulated blink artifacts. See Notes for details.
+    ecg : bool
+        If True, add simulated ECG artifacts. See Notes for details.
+    chpi : bool
+        If True, simulate continuous head position indicator information.
+        Valid cHPI information must be encoded in ``raw.info['hpi_meas']``
+        to use this option.
+
+        .. warning:: This feature is currently experimental.
+
+    head_pos : None | str | dict | tuple
+        Name of the position estimates file. Should be in the format of
+        the files produced by maxfilter. If dict, keys should
+        be the time points and entries should be 4x4 ``dev_head_t``
+        matrices. If None, the original head position (from
+        ``info['dev_head_t']``) will be used. If tuple, should have the
+        same format as data returned by `get_chpi_positions`.
+    mindist : float
+        Minimum distance between sources and the inner skull boundary
+        to use during forward calculation.
+    interp : str
+        Either 'cos2', 'linear', or 'zero', the type of forward-solution
+        interpolation to use between forward solutions at different
+        head positions.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
+    n_jobs : int
+        Number of jobs to use.
+    random_state : None | int | np.random.RandomState
+        The random generator state used for blink, ECG, and sensor
+        noise randomization.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of Raw
+        The simulated raw file.
+
+    Notes
+    -----
+    Events coded with the position number (starting at 1) will be stored
+    in the trigger channel (if available) at times corresponding to t=0
+    in the ``stc``.
+
+    The resulting SNR will be determined by the structure of the noise
+    covariance, the amplitudes of ``stc``, and the head position(s) provided.
+
+    The blink and ECG artifacts are generated by 1) placing impulses at
+    random times of activation, and 2) convolving with activation kernel
+    functions. In both cases, the scale-factors of the activation functions
+    (and for the resulting EOG and ECG channel traces) were chosen based on
+    visual inspection to yield amplitudes generally consistent with those
+    seen in experimental data. Noisy versions of the blink and ECG
+    activations will be stored in the first EOG and ECG channel in the
+    raw file, respectively, if they exist.
+
+    For blink artifacts:
+
+        1. Random activation times are drawn from an inhomogeneous Poisson
+           process whose blink rate oscillates between 4.5 blinks/minute
+           and 17 blinks/minute based on the low (reading) and high (resting)
+           blink rates from [1]_.
+        2. The activation kernel is a 250 ms Hanning window.
+        3. Two activated dipoles are located in the z=0 plane (in head
+           coordinates) at ±30 degrees away from the y axis (nasion).
+        4. Activations affect MEG and EEG channels.
+
+    For ECG artifacts:
+
+        1. Random inter-beat intervals are drawn from a uniform distribution
+           of times corresponding to 40 and 80 beats per minute.
+        2. The activation function is the sum of three Hanning windows with
+           varying durations and scales to make a more complex waveform.
+        3. The activated dipole is located one (estimated) head radius to
+           the left (-x) of head center and three head radii below (+z)
+           head center; this dipole is oriented in the +x direction.
+        4. Activations only affect MEG channels.
+
+    .. versionadded:: 0.10.0
+
+    References
+    ----------
+    .. [1] Bentivoglio et al. "Analysis of blink rate patterns in normal
+           subjects" Movement Disorders, 1997 Nov;12(6):1028-34.
+    """
+    if not isinstance(raw, _BaseRaw):
+        raise TypeError('raw should be an instance of Raw')
+    times, info, first_samp = raw.times, raw.info, raw.first_samp
+    raw_verbose = raw.verbose
+
+    # Check for common flag errors and try to override
+    if not isinstance(stc, _BaseSourceEstimate):
+        raise TypeError('stc must be a SourceEstimate')
+    if not np.allclose(info['sfreq'], 1. / stc.tstep):
+        raise ValueError('stc and info must have same sample rate')
+    if len(stc.times) <= 2:  # to ensure event encoding works
+        raise ValueError('stc must have at least three time points')
+
+    stim = len(pick_types(info, meg=False, stim=True)) > 0
+
+    rng = check_random_state(random_state)
+    if interp not in ('cos2', 'linear', 'zero'):
+        raise ValueError('interp must be "cos2", "linear", or "zero"')
+
+    if head_pos is None:  # use pos from file
+        dev_head_ts = [info['dev_head_t']] * 2
+        offsets = np.array([0, len(times)])
+        interp = 'zero'
+    # Use position data to simulate head movement
+    else:
+        if isinstance(head_pos, string_types):
+            head_pos = get_chpi_positions(head_pos, verbose=False)
+        if isinstance(head_pos, tuple):  # can be an already-loaded pos file
+            transs, rots, ts = head_pos
+            ts -= first_samp / info['sfreq']  # MF files need reref
+            dev_head_ts = [np.r_[np.c_[r, t[:, np.newaxis]], [[0, 0, 0, 1]]]
+                           for r, t in zip(rots, transs)]
+            del transs, rots
+        elif isinstance(head_pos, dict):
+            ts = np.array(list(head_pos.keys()), float)
+            ts.sort()
+            dev_head_ts = [head_pos[float(tt)] for tt in ts]
+        else:
+            raise TypeError('unknown head_pos type %s' % type(head_pos))
+        bad = ts < 0
+        if bad.any():
+            raise RuntimeError('All position times must be >= 0, found '
+                               '%s/%s < 0' % (bad.sum(), len(bad)))
+        bad = ts > times[-1]
+        if bad.any():
+            raise RuntimeError('All position times must be <= t_end (%0.1f '
+                               'sec), found %s/%s bad values (is this a split '
+                               'file?)' % (times[-1], bad.sum(), len(bad)))
+        if ts[0] > 0:
+            ts = np.r_[[0.], ts]
+            dev_head_ts.insert(0, info['dev_head_t']['trans'])
+        dev_head_ts = [{'trans': d, 'to': info['dev_head_t']['to'],
+                        'from': info['dev_head_t']['from']}
+                       for d in dev_head_ts]
+        if ts[-1] < times[-1]:
+            dev_head_ts.append(dev_head_ts[-1])
+            ts = np.r_[ts, [times[-1]]]
+        offsets = raw.time_as_index(ts)
+        offsets[-1] = len(times)  # fix for roundoff error
+        assert offsets[-2] != offsets[-1]
+        del ts
+
+    src = _ensure_src(src, verbose=False)
+    if isinstance(bem, string_types):
+        bem = read_bem_solution(bem, verbose=False)
+    if isinstance(cov, string_types):
+        if cov == 'simple':
+            cov = make_ad_hoc_cov(info, verbose=False)
+        else:
+            cov = read_cov(cov, verbose=False)
+    assert np.array_equal(offsets, np.unique(offsets))
+    assert len(offsets) == len(dev_head_ts)
+    approx_events = int((len(times) / info['sfreq']) /
+                        (stc.times[-1] - stc.times[0]))
+    logger.info('Provided parameters will provide approximately %s event%s'
+                % (approx_events, '' if approx_events == 1 else 's'))
+
+    # Extract necessary info
+    meeg_picks = pick_types(info, meg=True, eeg=True, exclude=[])  # for sim
+    meg_picks = pick_types(info, meg=True, eeg=False, exclude=[])  # for CHPI
+    fwd_info = pick_info(info, meeg_picks)
+    fwd_info['projs'] = []  # Ensure no 'projs' applied
+    logger.info('Setting up raw simulation: %s position%s, "%s" interpolation'
+                % (len(dev_head_ts), 's' if len(dev_head_ts) != 1 else '',
+                   interp))
+
+    verts = stc.vertices
+    verts = [verts] if isinstance(stc, VolSourceEstimate) else verts
+    src = _restrict_source_space_to(src, verts)
+
+    # array used to store result
+    raw_data = np.zeros((len(info['ch_names']), len(times)))
+
+    # figure out our cHPI, ECG, and blink dipoles
+    R, r0 = fit_sphere_to_headshape(info, verbose=False)[:2]
+    R /= 1000.
+    r0 /= 1000.
+    ecg_rr = blink_rrs = exg_bem = hpi_rrs = None
+    ecg = ecg and len(meg_picks) > 0
+    chpi = chpi and len(meg_picks) > 0
+    if chpi:
+        hpi_freqs, hpi_rrs, hpi_pick, hpi_on = _get_hpi_info(info)[:4]
+        hpi_nns = hpi_rrs / np.sqrt(np.sum(hpi_rrs * hpi_rrs,
+                                           axis=1))[:, np.newaxis]
+        # turn on cHPI in file
+        raw_data[hpi_pick, :] = hpi_on
+        _log_ch('cHPI status bits enabled and', info, hpi_pick)
+    if blink or ecg:
+        exg_bem = make_sphere_model(r0, head_radius=R,
+                                    relative_radii=(0.97, 0.98, 0.99, 1.),
+                                    sigmas=(0.33, 1.0, 0.004, 0.33),
+                                    verbose=False)
+    if blink:
+        # place dipoles in the z=0 plane, 30 degrees away from the y axis
+        blink_rrs = np.array([[np.cos(np.pi / 3.), np.sin(np.pi / 3.), 0.],
+                              [-np.cos(np.pi / 3.), np.sin(np.pi / 3), 0.]])
+        blink_rrs /= np.sqrt(np.sum(blink_rrs *
+                                    blink_rrs, axis=1))[:, np.newaxis]
+        blink_rrs *= 0.96 * R
+        blink_rrs += r0
+        # oriented upward
+        blink_nns = np.array([[0., 0., 1.], [0., 0., 1.]])
+        # Blink times drawn from an inhomogeneous Poisson process
+        # by 1) creating the rate and 2) pulling random numbers
+        blink_rate = (1 + np.cos(2 * np.pi * 1. / 60. * times)) / 2.
+        blink_rate *= 12.5 / 60.
+        blink_rate += 4.5 / 60.
+        blink_data = rng.rand(len(times)) < blink_rate / info['sfreq']
+        blink_data = blink_data * (rng.rand(len(times)) + 0.5)  # varying amps
+        # Activation kernel is a simple Hanning window
+        blink_kernel = np.hanning(int(0.25 * info['sfreq']))
+        blink_data = np.convolve(blink_data, blink_kernel,
+                                 'same')[np.newaxis, :] * 1e-7
+        # Add rescaled noisy data to EOG ch
+        ch = pick_types(info, meg=False, eeg=False, eog=True)
+        noise = rng.randn(blink_data.shape[1]) * 5e-6
+        if len(ch) >= 1:
+            ch = ch[-1]
+            raw_data[ch, :] = blink_data * 1e3 + noise
+        else:
+            ch = None
+        _log_ch('Blinks simulated and trace', info, ch)
+        del blink_kernel, blink_rate, noise
+    if ecg:
+        ecg_rr = np.array([[-R, 0, -3 * R]])
+        max_beats = int(np.ceil(times[-1] * 80. / 60.))
+        # activation times with intervals drawn from a uniform distribution
+        # based on activation rates between 40 and 80 beats per minute
+        cardiac_idx = np.cumsum(rng.uniform(60. / 80., 60. / 40., max_beats) *
+                                info['sfreq']).astype(int)
+        cardiac_idx = cardiac_idx[cardiac_idx < len(times)]
+        cardiac_data = np.zeros(len(times))
+        cardiac_data[cardiac_idx] = 1
+        # kernel is the sum of three Hanning windows
+        cardiac_kernel = np.concatenate([
+            2 * np.hanning(int(0.04 * info['sfreq'])),
+            -0.3 * np.hanning(int(0.05 * info['sfreq'])),
+            0.2 * np.hanning(int(0.26 * info['sfreq']))], axis=-1)
+        ecg_data = np.convolve(cardiac_data, cardiac_kernel,
+                               'same')[np.newaxis, :] * 15e-8
+        # Add rescaled noisy data to ECG ch
+        ch = pick_types(info, meg=False, eeg=False, ecg=True)
+        noise = rng.randn(ecg_data.shape[1]) * 1.5e-5
+        if len(ch) >= 1:
+            ch = ch[-1]
+            raw_data[ch, :] = ecg_data * 2e3 + noise
+        else:
+            ch = None
+        _log_ch('ECG simulated and trace', info, ch)
+        del cardiac_data, cardiac_kernel, max_beats, cardiac_idx
+
+    stc_event_idx = np.argmin(np.abs(stc.times))
+    if stim:
+        event_ch = pick_channels(info['ch_names'],
+                                 _get_stim_channel(None, info))[0]
+        raw_data[event_ch, :] = 0.
+    else:
+        event_ch = None
+    _log_ch('Event information', info, event_ch)
+    used = np.zeros(len(times), bool)
+    stc_indices = np.arange(len(times)) % len(stc.times)
+    raw_data[meeg_picks, :] = 0.
+    hpi_mag = 70e-9
+    last_fwd = last_fwd_chpi = last_fwd_blink = last_fwd_ecg = src_sel = None
+    zf = None  # final filter conditions for the noise
+    # don't process these any more if no MEG present
+    for fi, (fwd, fwd_blink, fwd_ecg, fwd_chpi) in \
+        enumerate(_iter_forward_solutions(
+            fwd_info, trans, src, bem, exg_bem, dev_head_ts, mindist,
+            hpi_rrs, blink_rrs, ecg_rr, n_jobs)):
+        # must be fixed orientation
+        # XXX eventually we could speed this up by allowing the forward
+        # solution code to only compute the normal direction
+        fwd = convert_forward_solution(fwd, surf_ori=True,
+                                       force_fixed=True, verbose=False)
+        if blink:
+            fwd_blink = fwd_blink['sol']['data']
+            for ii in range(len(blink_rrs)):
+                fwd_blink[:, ii] = np.dot(fwd_blink[:, 3 * ii:3 * (ii + 1)],
+                                          blink_nns[ii])
+            fwd_blink = fwd_blink[:, :len(blink_rrs)]
+            fwd_blink = fwd_blink.sum(axis=1)[:, np.newaxis]
+        # just use one arbitrary direction
+        if ecg:
+            fwd_ecg = fwd_ecg['sol']['data'][:, [0]]
+
+        # align cHPI magnetic dipoles in approx. radial direction
+        if chpi:
+            for ii in range(len(hpi_rrs)):
+                fwd_chpi[:, ii] = np.dot(fwd_chpi[:, 3 * ii:3 * (ii + 1)],
+                                         hpi_nns[ii])
+            fwd_chpi = fwd_chpi[:, :len(hpi_rrs)].copy()
+
+        if src_sel is None:
+            src_sel = _stc_src_sel(fwd['src'], stc)
+            verts = stc.vertices
+            verts = [verts] if isinstance(stc, VolSourceEstimate) else verts
+            diff_ = sum([len(v) for v in verts]) - len(src_sel)
+            if diff_ != 0:
+                warnings.warn('%s STC vertices omitted due to fwd calculation'
+                              % (diff_,))
+        if last_fwd is None:
+            last_fwd, last_fwd_blink, last_fwd_ecg, last_fwd_chpi = \
+                fwd, fwd_blink, fwd_ecg, fwd_chpi
+            continue
+
+        # set up interpolation
+        n_pts = offsets[fi] - offsets[fi - 1]
+        if interp == 'zero':
+            interps = None
+        else:
+            if interp == 'linear':
+                interps = np.linspace(1, 0, n_pts, endpoint=False)
+            else:  # interp == 'cos2':
+                interps = np.cos(0.5 * np.pi * np.arange(n_pts)) ** 2
+            interps = np.array([interps, 1 - interps])
+
+        assert not used[offsets[fi - 1]:offsets[fi]].any()
+        event_idxs = np.where(stc_indices[offsets[fi - 1]:offsets[fi]] ==
+                              stc_event_idx)[0] + offsets[fi - 1]
+        if stim:
+            raw_data[event_ch, event_idxs] = fi
+
+        logger.info('  Simulating data for %0.3f-%0.3f sec with %s event%s'
+                    % (tuple(offsets[fi - 1:fi + 1] / info['sfreq']) +
+                       (len(event_idxs), '' if len(event_idxs) == 1 else 's')))
+
+        # Process data in large chunks to save on memory
+        chunk_size = 10000
+        chunks = np.concatenate((np.arange(offsets[fi - 1], offsets[fi],
+                                           chunk_size), [offsets[fi]]))
+        for start, stop in zip(chunks[:-1], chunks[1:]):
+            assert stop - start <= chunk_size
+
+            used[start:stop] = True
+            if interp == 'zero':
+                this_interp = None
+            else:
+                this_interp = interps[:, start - chunks[0]:stop - chunks[0]]
+            time_sl = slice(start, stop)
+            this_t = np.arange(start, stop) / info['sfreq']
+            stc_idxs = stc_indices[time_sl]
+
+            # simulate brain data
+            raw_data[meeg_picks, time_sl] = \
+                _interp(last_fwd['sol']['data'], fwd['sol']['data'],
+                        stc.data[:, stc_idxs][src_sel], this_interp)
+
+            # add sensor noise, ECG, blink, cHPI
+            if cov is not None:
+                noise, zf = _generate_noise(fwd_info, cov, iir_filter, rng,
+                                            len(stc_idxs), zi=zf)
+                raw_data[meeg_picks, time_sl] += noise
+            if blink:
+                raw_data[meeg_picks, time_sl] += \
+                    _interp(last_fwd_blink, fwd_blink, blink_data[:, time_sl],
+                            this_interp)
+            if ecg:
+                raw_data[meg_picks, time_sl] += \
+                    _interp(last_fwd_ecg, fwd_ecg, ecg_data[:, time_sl],
+                            this_interp)
+            if chpi:
+                sinusoids = np.zeros((len(hpi_freqs), len(stc_idxs)))
+                for fidx, freq in enumerate(hpi_freqs):
+                    sinusoids[fidx] = 2 * np.pi * freq * this_t
+                    sinusoids[fidx] = hpi_mag * np.sin(sinusoids[fidx])
+                raw_data[meg_picks, time_sl] += \
+                    _interp(last_fwd_chpi, fwd_chpi, sinusoids, this_interp)
+
+        assert used[offsets[fi - 1]:offsets[fi]].all()
+
+        # prepare for next iteration
+        last_fwd, last_fwd_blink, last_fwd_ecg, last_fwd_chpi = \
+            fwd, fwd_blink, fwd_ecg, fwd_chpi
+    assert used.all()
+    raw = RawArray(raw_data, info, verbose=False)
+    raw.verbose = raw_verbose
+    logger.info('Done')
+    return raw
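+
+# A minimal usage sketch (the trans file name is hypothetical): with
+# cov='simple' an ad-hoc diagonal noise covariance is used, and with
+# head_pos=None the head stays fixed at info['dev_head_t'].
+#
+# >>> raw_sim = simulate_raw(raw, stc, 'sample-trans.fif', src, bem,
+# ...                        cov='simple', blink=True, ecg=True)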
+
+
+def _iter_forward_solutions(info, trans, src, bem, exg_bem, dev_head_ts,
+                            mindist, hpi_rrs, blink_rrs, ecg_rrs, n_jobs):
+    """Calculate a forward solution for a subject"""
+    mri_head_t, trans = _get_mri_head_t(trans)
+    logger.info('Setting up forward solutions')
+    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
+        update_kwargs, bem = _prepare_for_forward(
+            src, mri_head_t, info, bem, mindist, n_jobs, verbose=False)
+    del (src, mindist)
+
+    eegfwd = _compute_forwards(rr, bem, [eegels], [None],
+                               [None], ['eeg'], n_jobs, verbose=False)[0]
+    eegfwd = _to_forward_dict(eegfwd, eegnames)
+    if blink_rrs is not None:
+        eegblink = _compute_forwards(blink_rrs, exg_bem, [eegels], [None],
+                                     [None], ['eeg'], n_jobs,
+                                     verbose=False)[0]
+        eegblink = _to_forward_dict(eegblink, eegnames)
+
+    # short circuit here if there are no MEG channels (don't need to iterate)
+    if len(pick_types(info, meg=True)) == 0:
+        eegfwd.update(**update_kwargs)
+        for _ in dev_head_ts:
+            yield eegfwd, eegblink, None, None
+        return
+
+    coord_frame = FIFF.FIFFV_COORD_HEAD
+    if not bem['is_sphere']:
+        idx = np.where(np.array([s['id'] for s in bem['surfs']]) ==
+                       FIFF.FIFFV_BEM_SURF_ID_BRAIN)[0]
+        assert len(idx) == 1
+        bem_surf = transform_surface_to(bem['surfs'][idx[0]], coord_frame,
+                                        mri_head_t)
+    for ti, dev_head_t in enumerate(dev_head_ts):
+        # Could be *slightly* more efficient not to do this N times,
+        # but the cost here is tiny compared to actual fwd calculation
+        logger.info('Computing gain matrix for transform #%s/%s'
+                    % (ti + 1, len(dev_head_ts)))
+        info = deepcopy(info)
+        info['dev_head_t'] = dev_head_t
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, True, [], False, verbose=False)
+
+        # Make sure our sensors are all outside our BEM
+        coil_rr = [coil['r0'] for coil in megcoils]
+        if not bem['is_sphere']:
+            outside = _points_outside_surface(coil_rr, bem_surf, n_jobs,
+                                              verbose=False)
+        else:
+            d = coil_rr - bem['r0']
+            outside = np.sqrt(np.sum(d * d, axis=1)) > bem.radius
+        if not outside.all():
+            raise RuntimeError('%s MEG sensors collided with inner skull '
+                               'surface for transform %s'
+                               % (np.sum(~outside), ti))
+
+        # Compute forward
+        megfwd = _compute_forwards(rr, bem, [megcoils], [compcoils],
+                                   [meg_info], ['meg'], n_jobs,
+                                   verbose=False)[0]
+        megfwd = _to_forward_dict(megfwd, megnames)
+        fwd = _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=False)
+        fwd.update(**update_kwargs)
+
+        fwd_blink = fwd_ecg = fwd_chpi = None
+        if blink_rrs is not None:
+            megblink = _compute_forwards(blink_rrs, exg_bem, [megcoils],
+                                         [compcoils], [meg_info], ['meg'],
+                                         n_jobs, verbose=False)[0]
+            megblink = _to_forward_dict(megblink, megnames)
+            fwd_blink = _merge_meg_eeg_fwds(megblink, eegblink, verbose=False)
+        if ecg_rrs is not None:
+            megecg = _compute_forwards(ecg_rrs, exg_bem, [megcoils],
+                                       [compcoils], [meg_info], ['meg'],
+                                       n_jobs, verbose=False)[0]
+            fwd_ecg = _to_forward_dict(megecg, megnames)
+        if hpi_rrs is not None:
+            fwd_chpi = _magnetic_dipole_field_vec(hpi_rrs, megcoils).T
+        yield fwd, fwd_blink, fwd_ecg, fwd_chpi
+
+
+def _restrict_source_space_to(src, vertices):
+    """Helper to trim down a source space"""
+    assert len(src) == len(vertices)
+    src = deepcopy(src)
+    for s, v in zip(src, vertices):
+        s['inuse'].fill(0)
+        s['nuse'] = len(v)
+        s['vertno'] = v
+        s['inuse'][s['vertno']] = 1
+        del s['pinfo']
+        del s['nuse_tri']
+        del s['use_tris']
+        del s['patch_inds']
+    return src
+
+
+def _interp(data_1, data_2, stc_data, interps):
+    """Helper to interpolate"""
+    out_data = np.dot(data_1, stc_data)
+    if interps is not None:
+        out_data *= interps[0]
+        data_1 = np.dot(data_2, stc_data)
+        data_1 *= interps[1]
+        out_data += data_1
+        del data_1
+    return out_data
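+
+# Sketch of the cross-fade above (small hypothetical shapes): the output is
+# interps[0] * np.dot(data_1, stc_data) + interps[1] * np.dot(data_2,
+# stc_data), and the two weight rows sum to one, so the gain blends
+# smoothly between consecutive head positions.
+#
+# >>> w = np.array([np.linspace(1, 0, 4), np.linspace(0, 1, 4)])
+# >>> _interp(np.eye(2), 2 * np.eye(2), np.ones((2, 4)), w)  # ramps 1 -> 2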
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/source.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/source.py
new file mode 100644
index 0000000..45293fe
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/source.py
@@ -0,0 +1,329 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Daniel Strohmeier <daniel.strohmeier at tu-ilmenau.de>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..source_estimate import SourceEstimate, VolSourceEstimate
+from ..source_space import _ensure_src
+from ..utils import check_random_state, deprecated, logger
+from ..externals.six.moves import zip
+
+
+def select_source_in_label(src, label, random_state=None):
+    """Select source positions using a label
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    label : Label
+        the label (read with mne.read_label)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    lh_vertno : list
+        selected source coefficients on the left hemisphere
+    rh_vertno : list
+        selected source coefficients on the right hemisphere
+    """
+    lh_vertno = list()
+    rh_vertno = list()
+
+    rng = check_random_state(random_state)
+
+    if label.hemi == 'lh':
+        src_sel_lh = np.intersect1d(src[0]['vertno'], label.vertices)
+        idx_select = rng.randint(0, len(src_sel_lh), 1)
+        lh_vertno.append(src_sel_lh[idx_select][0])
+    else:
+        src_sel_rh = np.intersect1d(src[1]['vertno'], label.vertices)
+        idx_select = rng.randint(0, len(src_sel_rh), 1)
+        rh_vertno.append(src_sel_rh[idx_select][0])
+
+    return lh_vertno, rh_vertno
+
+
+ at deprecated('"generate_sparse_stc" is deprecated and will be removed in'
+            'MNE-0.11. Please use simulate_sparse_stc instead')
+def generate_sparse_stc(src, labels, stc_data, tmin, tstep, random_state=None):
+    """Generate sparse sources time courses from waveforms and labels
+
+    This function randomly selects a single vertex in each label and assigns
+    a waveform from stc_data to it.
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
+    if len(labels) != len(stc_data):
+        raise ValueError('labels and stc_data must have the same length')
+
+    rng = check_random_state(random_state)
+    vertno = [[], []]
+    lh_data = list()
+    rh_data = list()
+    for label_data, label in zip(stc_data, labels):
+        lh_vertno, rh_vertno = select_source_in_label(src, label, rng)
+        vertno[0] += lh_vertno
+        vertno[1] += rh_vertno
+        if len(lh_vertno) != 0:
+            lh_data.append(np.atleast_2d(label_data))
+        elif len(rh_vertno) != 0:
+            rh_data.append(np.atleast_2d(label_data))
+        else:
+            raise ValueError('No vertno found.')
+
+    vertno = [np.array(v) for v in vertno]
+
+    # the data is in the order left, right
+    data = list()
+    if len(vertno[0]) != 0:
+        idx = np.argsort(vertno[0])
+        vertno[0] = vertno[0][idx]
+        data.append(np.concatenate(lh_data)[idx])
+
+    if len(vertno[1]) != 0:
+        idx = np.argsort(vertno[1])
+        vertno[1] = vertno[1][idx]
+        data.append(np.concatenate(rh_data)[idx])
+
+    data = np.concatenate(data)
+
+    stc = SourceEstimate(data, vertices=vertno, tmin=tmin, tstep=tstep)
+
+    return stc
+
+
+def simulate_sparse_stc(src, n_dipoles, times,
+                        data_fun=lambda t: 1e-7 * np.sin(20 * np.pi * t),
+                        labels=None, random_state=None):
+    """Generate sparse (n_dipoles) sources time courses from data_fun
+
+    This function randomly selects n_dipoles vertices in the whole cortex
+    or one single vertex in each label if labels is not None. It uses data_fun
+    to generate waveforms for each vertex.
+
+    Parameters
+    ----------
+    src : instance of SourceSpaces
+        The source space.
+    n_dipoles : int
+        Number of dipoles to simulate.
+    times : array
+        Time array
+    data_fun : callable
+        Function to generate the waveforms. The default is a 100 nAm, 10 Hz
+        sinusoid as ``1e-7 * np.sin(20 * pi * t)``. The function should take
+        as input the array of time samples in seconds and return an array of
+        the same length containing the time courses.
+    labels : None | list of Labels
+        The labels. The default is None. If the number of labels differs
+        from n_dipoles, min(n_dipoles, len(labels)) dipoles are simulated.
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    rng = check_random_state(random_state)
+    src = _ensure_src(src, verbose=False)
+    data = np.zeros((n_dipoles, len(times)))
+    for i_dip in range(n_dipoles):
+        data[i_dip, :] = data_fun(times)
+
+    if labels is None:
+        # can be vol or surface source space
+        offsets = np.linspace(0, n_dipoles, len(src) + 1).astype(int)
+        n_dipoles_ss = np.diff(offsets)
+        # don't use .choice b/c not on old numpy
+        vs = [s['vertno'][np.sort(rng.permutation(np.arange(s['nuse']))[:n])]
+              for n, s in zip(n_dipoles_ss, src)]
+        datas = data
+    else:
+        if n_dipoles != len(labels):
+            logger.warning('The number of labels is different from the number '
+                           'of dipoles. %s dipole(s) will be generated.'
+                           % min(n_dipoles, len(labels)))
+        labels = labels[:n_dipoles] if n_dipoles < len(labels) else labels
+
+        vertno = [[], []]
+        lh_data = [np.empty((0, data.shape[1]))]
+        rh_data = [np.empty((0, data.shape[1]))]
+        for i, label in enumerate(labels):
+            lh_vertno, rh_vertno = select_source_in_label(src, label, rng)
+            vertno[0] += lh_vertno
+            vertno[1] += rh_vertno
+            if len(lh_vertno) != 0:
+                lh_data.append(data[i][np.newaxis])
+            elif len(rh_vertno) != 0:
+                rh_data.append(data[i][np.newaxis])
+            else:
+                raise ValueError('No vertno found.')
+        vs = [np.array(v) for v in vertno]
+        datas = [np.concatenate(d) for d in [lh_data, rh_data]]
+        # need to sort each hemi by vertex number
+        for ii in range(2):
+            order = np.argsort(vs[ii])
+            vs[ii] = vs[ii][order]
+            if len(order) > 0:  # fix for old numpy
+                datas[ii] = datas[ii][order]
+        datas = np.concatenate(datas)
+
+    tmin, tstep = times[0], np.diff(times[:2])[0]
+    assert datas.shape == data.shape
+    cls = SourceEstimate if len(vs) == 2 else VolSourceEstimate
+    stc = cls(datas, vertices=vs, tmin=tmin, tstep=tstep)
+    return stc
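+
+# A minimal usage sketch (`src` is a hypothetical SourceSpaces instance):
+# simulate three dipoles carrying a 40 Hz, 10 nAm sinusoid.
+#
+# >>> times = np.arange(0, 0.5, 1e-3)
+# >>> stc = simulate_sparse_stc(src, n_dipoles=3, times=times,
+# ...                           data_fun=lambda t: 1e-8 * np.sin(80 * np.pi * t))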
+
+
+ at deprecated('"generate_stc" is deprecated and will be removed in'
+            'MNE-0.11. Please use simulate_stc instead')
+def generate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
+    """Generate sources time courses from waveforms and labels
+
+    This function generates a source estimate with extended sources by
+    filling the labels with the waveforms given in stc_data.
+
+    By default, the vertices within a label are assigned the same waveform.
+    The waveforms can be scaled for each vertex by using the label values
+    and value_fun. E.g.,
+
+    # create a source label where the values are the distance from the center
+    labels = circular_source_labels('sample', 0, 10, 0)
+
+    # sources with decaying strength (x will be the distance from the center)
+    fun = lambda x: np.exp(- x / 10)
+    stc = generate_stc(src, labels, stc_data, tmin, tstep, fun)
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    value_fun : function
+        Function to apply to the label values
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
+    return simulate_stc(src, labels, stc_data, tmin, tstep, value_fun)
+
+
+def simulate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
+    """Simulate sources time courses from waveforms and labels
+
+    This function generates a source estimate with extended sources by
+    filling the labels with the waveforms given in stc_data.
+
+    By default, the vertices within a label are assigned the same waveform.
+    The waveforms can be scaled for each vertex by using the label values
+    and value_fun. E.g.,
+
+    # create a source label where the values are the distance from the center
+    labels = circular_source_labels('sample', 0, 10, 0)
+
+    # sources with decaying strength (x will be the distance from the center)
+    fun = lambda x: np.exp(- x / 10)
+    stc = simulate_stc(src, labels, stc_data, tmin, tstep, fun)
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    value_fun : function
+        Function to apply to the label values
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
+    if len(labels) != len(stc_data):
+        raise ValueError('labels and stc_data must have the same length')
+
+    vertno = [[], []]
+    stc_data_extended = [[], []]
+    hemi_to_ind = {'lh': 0, 'rh': 1}
+    for i, label in enumerate(labels):
+        hemi_ind = hemi_to_ind[label.hemi]
+        src_sel = np.intersect1d(src[hemi_ind]['vertno'],
+                                 label.vertices)
+        if value_fun is not None:
+            idx_sel = np.searchsorted(label.vertices, src_sel)
+            values_sel = np.array([value_fun(v) for v in
+                                   label.values[idx_sel]])
+
+            data = np.outer(values_sel, stc_data[i])
+        else:
+            data = np.tile(stc_data[i], (len(src_sel), 1))
+
+        vertno[hemi_ind].append(src_sel)
+        stc_data_extended[hemi_ind].append(np.atleast_2d(data))
+
+    # format the vertno list
+    for idx in (0, 1):
+        if len(vertno[idx]) > 1:
+            vertno[idx] = np.concatenate(vertno[idx])
+        elif len(vertno[idx]) == 1:
+            vertno[idx] = vertno[idx][0]
+    vertno = [np.array(v) for v in vertno]
+
+    # the data is in the order left, right
+    data = list()
+    if len(vertno[0]) != 0:
+        idx = np.argsort(vertno[0])
+        vertno[0] = vertno[0][idx]
+        data.append(np.concatenate(stc_data_extended[0])[idx])
+
+    if len(vertno[1]) != 0:
+        idx = np.argsort(vertno[1])
+        vertno[1] = vertno[1][idx]
+        data.append(np.concatenate(stc_data_extended[1])[idx])
+
+    data = np.concatenate(data)
+
+    stc = SourceEstimate(data, vertices=vertno, tmin=tmin, tstep=tstep)
+    return stc
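+
+# A minimal usage sketch (`labels` is a hypothetical list of Labels, e.g.
+# from mne.read_labels_from_annot): every vertex of each label is filled
+# with the corresponding row of stc_data.
+#
+# >>> stc_data = np.zeros((len(labels), len(times)))
+# >>> stc_data[:, 100:200] = 1e-9  # a 1 nAm boxcar in every label
+# >>> stc = simulate_stc(src, labels, stc_data, tmin=0., tstep=1e-3)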
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_evoked.py
new file mode 100644
index 0000000..262a670
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_evoked.py
@@ -0,0 +1,75 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_raises
+import warnings
+
+from mne.datasets import testing
+from mne import read_forward_solution
+from mne.simulation import simulate_sparse_stc, simulate_evoked
+from mne import read_cov
+from mne.io import Raw
+from mne import pick_types_forward, read_evokeds
+from mne.utils import run_tests_if_main
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+fwd_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                    'data', 'test_raw.fif')
+ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                    'data', 'test-ave.fif')
+cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                    'data', 'test-cov.fif')
+
+
+@testing.requires_testing_data
+def test_simulate_evoked():
+    """ Test simulation of evoked data """
+
+    raw = Raw(raw_fname)
+    fwd = read_forward_solution(fwd_fname, force_fixed=True)
+    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
+    cov = read_cov(cov_fname)
+
+    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
+    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
+
+    snr = 6  # dB
+    tmin = -0.1
+    sfreq = 1000.  # Hz
+    tstep = 1. / sfreq
+    n_samples = 600
+    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
+
+    # Generate times series for 2 dipoles
+    stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)
+    stc._data *= 1e-9
+
+    # Generate noisy evoked data
+    iir_filter = [1, -0.9]
+    evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr,
+                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+    assert_array_almost_equal(evoked.times, stc.times)
+    assert_true(len(evoked.data) == len(fwd['sol']['data']))
+
+    # make a vertex that doesn't exist in fwd, should throw error
+    stc_bad = stc.copy()
+    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
+    stc_bad.vertices[0][0] = mv + 1
+    assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
+                  evoked_template.info, cov, snr, tmin=0.0, tmax=0.2)
+    evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
+                               tmin=0.0, tmax=0.2)
+    evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
+                               tmin=0.0, tmax=0.2)
+    assert_array_equal(evoked_1.data, evoked_2.data)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_metrics.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_metrics.py
new file mode 100644
index 0000000..c6915ea
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_metrics.py
@@ -0,0 +1,52 @@
+# Author: Yousra Bekhti <yousra.bekhti at gmail.com>
+#         Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+from nose.tools import assert_true, assert_raises
+import warnings
+
+from mne import read_source_spaces
+from mne.datasets import testing
+from mne.simulation import simulate_sparse_stc, source_estimate_quantification
+from mne.utils import run_tests_if_main
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+src_fname = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-oct-6-src.fif')
+
+
+ at testing.requires_testing_data
+def test_metrics():
+    """Test simulation metrics"""
+    src = read_source_spaces(src_fname)
+    times = np.arange(600) / 1000.
+    rng = np.random.RandomState(42)
+    stc1 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
+    stc2 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
+    E1_rms = source_estimate_quantification(stc1, stc1, metric='rms')
+    E2_rms = source_estimate_quantification(stc2, stc2, metric='rms')
+    E1_cos = source_estimate_quantification(stc1, stc1, metric='cosine')
+    E2_cos = source_estimate_quantification(stc2, stc2, metric='cosine')
+
+    # ### Tests to add
+    assert_true(E1_rms == 0.)
+    assert_true(E2_rms == 0.)
+    assert_almost_equal(E1_cos, 0.)
+    assert_almost_equal(E2_cos, 0.)
+    stc_bad = stc2.copy().crop(0, 0.5)
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc_bad)
+    stc_bad = stc2.copy()
+    stc_bad.times -= 0.1
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc_bad)
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc2,
+                  metric='foo')
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_raw.py
new file mode 100644
index 0000000..186ae3e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_raw.py
@@ -0,0 +1,248 @@
+# Authors: Mark Wronkiewicz <wronk at uw.edu>
+#          Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+from nose.tools import assert_true, assert_raises
+
+from mne import (read_source_spaces, pick_types, read_trans, read_cov,
+                 make_sphere_model, create_info, setup_volume_source_space)
+from mne.chpi import (_calculate_chpi_positions, get_chpi_positions,
+                      _get_hpi_info)
+from mne.tests.test_chpi import _compare_positions
+from mne.datasets import testing
+from mne.simulation import simulate_sparse_stc, simulate_raw
+from mne.io import Raw, RawArray
+from mne.time_frequency import compute_raw_psd
+from mne.utils import _TempDir, run_tests_if_main, requires_version, slow_test
+from mne.fixes import isclose
+
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+cov_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-cov.fif')
+trans_fname = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+bem_path = op.join(data_path, 'subjects', 'sample', 'bem')
+src_fname = op.join(bem_path, 'sample-oct-2-src.fif')
+bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif')
+
+raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_subsampled.pos')
+
+
+def _make_stc(raw, src):
+    """Helper to make a STC"""
+    seed = 42
+    sfreq = raw.info['sfreq']  # Hz
+    tstep = 1. / sfreq
+    n_samples = len(raw.times) // 10
+    times = np.arange(0, n_samples) * tstep
+    stc = simulate_sparse_stc(src, 10, times, random_state=seed)
+    return stc
+
+
+def _get_data():
+    """Helper to get some starting data"""
+    # raw with ECG channel
+    raw = Raw(raw_fname).crop(0., 5.0).load_data()
+    data_picks = pick_types(raw.info, meg=True, eeg=True)
+    other_picks = pick_types(raw.info, meg=False, stim=True, eog=True)
+    picks = np.sort(np.concatenate((data_picks[::16], other_picks)))
+    raw = raw.pick_channels([raw.ch_names[p] for p in picks])
+    ecg = RawArray(np.zeros((1, len(raw.times))),
+                   create_info(['ECG 063'], raw.info['sfreq'], 'ecg'))
+    for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass',
+                'filename', 'dig'):
+        ecg.info[key] = raw.info[key]
+    raw.add_channels([ecg])
+
+    src = read_source_spaces(src_fname)
+    trans = read_trans(trans_fname)
+    sphere = make_sphere_model('auto', 'auto', raw.info)
+    stc = _make_stc(raw, src)
+    return raw, src, stc, trans, sphere
+
+
+@testing.requires_testing_data
+def test_simulate_raw_sphere():
+    """Test simulation of raw data with sphere model"""
+    seed = 42
+    raw, src, stc, trans, sphere = _get_data()
+    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)
+
+    # head pos
+    head_pos_sim = dict()
+    # these will be at 1., 2., ... sec
+    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
+
+    for time_key, shift in enumerate(shifts):
+        # Create 4x4 matrix transform and normalize
+        temp_trans = deepcopy(raw.info['dev_head_t'])
+        temp_trans['trans'][:3, 3] += shift
+        head_pos_sim[time_key + 1.] = temp_trans['trans']
+
+    #
+    # Test raw simulation with basic parameters
+    #
+    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
+                           head_pos=head_pos_sim,
+                           blink=True, ecg=True, random_state=seed)
+    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
+                             cov_fname, head_pos=head_pos_sim,
+                             blink=True, ecg=True, random_state=seed)
+    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
+    # Test IO on processed data
+    tempdir = _TempDir()
+    test_outname = op.join(tempdir, 'sim_test_raw.fif')
+    raw_sim.save(test_outname)
+
+    raw_sim_loaded = Raw(test_outname, preload=True, proj=False,
+                         allow_maxshield=True)
+    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
+    del raw_sim, raw_sim_2
+    # with no cov (no noise) but with artifacts, most time periods should match
+    # but the EOG/ECG channels should not
+    for ecg, eog in ((True, False), (False, True), (True, True)):
+        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
+                                 cov=None, head_pos=head_pos_sim,
+                                 blink=eog, ecg=ecg, random_state=seed)
+        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
+                                 cov=None, head_pos=head_pos_sim,
+                                 blink=False, ecg=False, random_state=seed)
+        picks = np.arange(len(raw.ch_names))
+        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
+        these_picks = np.setdiff1d(picks, diff_picks)
+        close = isclose(raw_sim_3[these_picks][0],
+                        raw_sim_4[these_picks][0], atol=1e-20)
+        assert_true(np.mean(close) > 0.7)
+        far = ~isclose(raw_sim_3[diff_picks][0],
+                       raw_sim_4[diff_picks][0], atol=1e-20)
+        assert_true(np.mean(far) > 0.99)
+    del raw_sim_3, raw_sim_4
+
+    # make sure it works with EEG-only and MEG-only
+    raw_sim_meg = simulate_raw(raw.pick_types(meg=True, eeg=False, copy=True),
+                               stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_eeg = simulate_raw(raw.pick_types(meg=False, eeg=True, copy=True),
+                               stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_meeg = simulate_raw(raw.pick_types(meg=True, eeg=True, copy=True),
+                                stc, trans, src, sphere, cov=None,
+                                ecg=True, blink=True, random_state=seed)
+    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
+                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
+    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg
+
+    # check that different interpolations are similar given small movements
+    raw_sim_cos = simulate_raw(raw, stc, trans, src, sphere,
+                               head_pos=head_pos_sim,
+                               random_state=seed)
+    raw_sim_lin = simulate_raw(raw, stc, trans, src, sphere,
+                               head_pos=head_pos_sim, interp='linear',
+                               random_state=seed)
+    assert_allclose(raw_sim_cos[:][0], raw_sim_lin[:][0],
+                    rtol=1e-5, atol=1e-20)
+    del raw_sim_cos, raw_sim_lin
+
+    # Make impossible transform (translate up into helmet) and ensure failure
+    head_pos_sim_err = deepcopy(head_pos_sim)
+    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  ecg=False, blink=False, head_pos=head_pos_sim_err)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
+                  bem_fname, ecg=False, blink=False,
+                  head_pos=head_pos_sim_err)
+    # other degenerate conditions
+    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
+    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
+    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
+                  trans, src, sphere)
+    stc_bad = stc.copy()
+    stc_bad.tstep += 0.1
+    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  chpi=True)  # no cHPI info
+    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
+                  interp='foo')
+    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=1.)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=pos_fname)  # ends up with t>t_end
+    head_pos_sim_err = deepcopy(head_pos_sim)
+    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=head_pos_sim_err)
+
+
+@testing.requires_testing_data
+def test_simulate_raw_bem():
+    """Test simulation of raw data with BEM"""
+    seed = 42
+    raw, src, stc, trans, sphere = _get_data()
+    raw_sim_sph = simulate_raw(raw, stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_bem = simulate_raw(raw, stc, trans, src, bem_fname, cov=None,
+                               ecg=True, blink=True, random_state=seed,
+                               n_jobs=2)
+    # some components (especially radial) might not match that well,
+    # so just make sure that most components have high correlation
+    assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
+    picks = pick_types(raw.info, meg=True, eeg=True)
+    n_ch = len(picks)
+    corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
+    assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
+    assert_true(np.median(np.diag(corr[:n_ch, -n_ch:])) > 0.9)
+
+
+@slow_test
+@requires_version('numpy', '1.7')
+@requires_version('scipy', '0.12')
+@testing.requires_testing_data
+def test_simulate_raw_chpi():
+    """Test simulation of raw data with cHPI"""
+    with warnings.catch_warnings(record=True):  # MaxShield
+        raw = Raw(raw_chpi_fname, allow_maxshield=True)
+    sphere = make_sphere_model('auto', 'auto', raw.info)
+    # make sparse spherical source space
+    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
+    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
+    stc = _make_stc(raw, src)
+    # simulate data with cHPI on
+    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
+    # need to trim extra samples off this one
+    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
+                            head_pos=pos_fname)
+    # test cHPI indication
+    hpi_freqs, _, hpi_pick, hpi_on, _ = _get_hpi_info(raw.info)
+    assert_allclose(raw_sim[hpi_pick][0], 0.)
+    assert_allclose(raw_chpi[hpi_pick][0], hpi_on)
+    # test that the cHPI signals produce reasonable values
+    psd_sim, freqs_sim = compute_raw_psd(raw_sim)
+    psd_chpi, freqs_chpi = compute_raw_psd(raw_chpi)
+    assert_array_equal(freqs_sim, freqs_chpi)
+    freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    assert_allclose(psd_sim[picks_eeg], psd_chpi[picks_eeg], atol=1e-20)
+    assert_true((psd_chpi[picks_meg][:, freq_idx] >
+                 100 * psd_sim[picks_meg][:, freq_idx]).all())
+    # test localization based on cHPI information
+    trans_sim, rot_sim, t_sim = _calculate_chpi_positions(raw_chpi)
+    trans, rot, t = get_chpi_positions(pos_fname)
+    t -= raw.first_samp / raw.info['sfreq']
+    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
+                       max_dist=0.005)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_source.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_source.py
new file mode 100644
index 0000000..ee6eb84
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/simulation/tests/test_source.py
@@ -0,0 +1,201 @@
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true
+
+from mne.datasets import testing
+from mne import read_label, read_forward_solution, pick_types_forward
+from mne.label import Label
+from mne.simulation.source import simulate_stc, simulate_sparse_stc
+from mne.utils import run_tests_if_main
+
+
+data_path = testing.data_path(download=False)
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+label_names = ['Aud-lh', 'Aud-rh', 'Vis-rh']
+
+label_names_single_hemi = ['Aud-rh', 'Vis-rh']
+
+
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=True, eeg=False)
+    return fwd
+
+
+@testing.requires_testing_data
+def test_simulate_stc():
+    """ Test generation of source estimate """
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
+    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
+                         '%s.label' % label)) for label in label_names]
+    mylabels = []
+    for i, label in enumerate(labels):
+        new_label = Label(vertices=label.vertices,
+                          pos=label.pos,
+                          values=2 * i * np.ones(len(label.values)),
+                          hemi=label.hemi,
+                          comment=label.comment)
+        mylabels.append(new_label)
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels), n_times))
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+
+    for label in labels:
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertices[0])
+
+        assert_true(np.all(stc.data[idx] == 1.0))
+        assert_true(stc.data[idx].shape[1] == n_times)
+
+    # test with function
+    def fun(x):
+        return x ** 2
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+
+    # the labels have values 2 * i (0, 2 and 4), which fun squares
+
+    for i, label in enumerate(labels):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertices[0])
+
+        res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
+        assert_array_almost_equal(stc.data[idx], res)
+
+
+@testing.requires_testing_data
+def test_simulate_sparse_stc():
+    """ Test generation of sparse source estimate """
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
+    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
+                         '%s.label' % label)) for label in label_names]
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+    times = np.arange(n_times, dtype=np.float) * tstep + tmin
+
+    stc_1 = simulate_sparse_stc(fwd['src'], len(labels), times,
+                                labels=labels, random_state=0)
+
+    assert_true(stc_1.data.shape[0] == len(labels))
+    assert_true(stc_1.data.shape[1] == n_times)
+
+    # make sure we get the same result when using the same seed
+    stc_2 = simulate_sparse_stc(fwd['src'], len(labels), times,
+                                labels=labels, random_state=0)
+
+    assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
+    assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
+
+
+@testing.requires_testing_data
+def test_generate_stc_single_hemi():
+    """ Test generation of source estimate, single hemi """
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
+    labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
+                                             'labels', '%s.label' % label))
+                          for label in label_names_single_hemi]
+    mylabels = []
+    for i, label in enumerate(labels_single_hemi):
+        new_label = Label(vertices=label.vertices,
+                          pos=label.pos,
+                          values=2 * i * np.ones(len(label.values)),
+                          hemi=label.hemi,
+                          comment=label.comment)
+        mylabels.append(new_label)
+
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+
+    stc_data = np.ones((len(labels_single_hemi), n_times))
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+
+    for label in labels_single_hemi:
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertices[0])
+
+        assert_true(np.all(stc.data[idx] == 1.0))
+        assert_true(stc.data[idx].shape[1] == n_times)
+
+    # test with function
+    def fun(x):
+        return x ** 2
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+
+    # the labels have values 2 * i (0 and 2), which fun squares
+
+    for i, label in enumerate(labels_single_hemi):
+        if label.hemi == 'lh':
+            hemi_idx = 0
+        else:
+            hemi_idx = 1
+
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
+
+        if hemi_idx == 1:
+            idx += len(stc.vertices[0])
+
+        res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
+        assert_array_almost_equal(stc.data[idx], res)
+
+
+@testing.requires_testing_data
+def test_simulate_sparse_stc_single_hemi():
+    """ Test generation of sparse source estimate """
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
+    n_times = 10
+    tmin = 0
+    tstep = 1e-3
+    times = np.arange(n_times, dtype=np.float) * tstep + tmin
+
+    labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
+                                             'labels', '%s.label' % label))
+                          for label in label_names_single_hemi]
+
+    stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
+                                labels=labels_single_hemi, random_state=0)
+
+    assert_true(stc_1.data.shape[0] == len(labels_single_hemi))
+    assert_true(stc_1.data.shape[1] == n_times)
+
+    # make sure we get the same result when using the same seed
+    stc_2 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
+                                labels=labels_single_hemi, random_state=0)
+
+    assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
+    assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_estimate.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_estimate.py
new file mode 100644
index 0000000..7c20c71
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_estimate.py
@@ -0,0 +1,2856 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Mads Jensen <mje.mads at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import copy
+from math import ceil
+import warnings
+
+import numpy as np
+from scipy import linalg, sparse
+from scipy.sparse import coo_matrix
+
+from .filter import resample
+from .evoked import _get_peak
+from .parallel import parallel_func
+from .surface import (read_surface, _get_ico_surface, read_morph_map,
+                      _compute_nearest, mesh_edges)
+from .source_space import (_ensure_src, _get_morph_src_reordering,
+                           _ensure_src_subject)
+from .utils import (get_subjects_dir, _check_subject, logger, verbose,
+                    _time_mask)
+from .viz import plot_source_estimates
+from .fixes import in1d, sparse_block_diag
+from .io.base import ToDataFrameMixin
+from .externals.six.moves import zip
+from .externals.six import string_types
+from .externals.h5io import read_hdf5, write_hdf5
+
+
+def _read_stc(filename):
+    """ Aux Function
+    """
+    fid = open(filename, 'rb')
+
+    stc = dict()
+
+    fid.seek(0, 2)  # go to end of file
+    file_length = fid.tell()
+    fid.seek(0, 0)  # go to beginning of file
+
+    # read tmin in ms
+    stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
+    stc['tmin'] /= 1000.0
+
+    # read sampling rate in ms
+    stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1))
+    stc['tstep'] /= 1000.0
+
+    # read number of vertices/sources
+    vertices_n = int(np.fromfile(fid, dtype=">u4", count=1))
+
+    # read the source vector
+    stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)
+
+    # read the number of timepts
+    data_n = int(np.fromfile(fid, dtype=">u4", count=1))
+
+    if (vertices_n and  # vertices_n can be 0 (empty stc)
+            ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
+        raise ValueError('incorrect stc file size')
+
+    # read the data matrix
+    stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
+    stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
+
+    # close the file
+    fid.close()
+    return stc
+
+
+def _write_stc(filename, tmin, tstep, vertices, data):
+    """Write an STC file
+
+    Parameters
+    ----------
+    filename : string
+        The name of the STC file.
+    tmin : float
+        The first time point of the data in seconds.
+    tstep : float
+        Time between frames in seconds.
+    vertices : array of integers
+        Vertex indices (0 based).
+    data : 2D array
+        The data matrix (nvert * ntime).
+    """
+    fid = open(filename, 'wb')
+
+    # write start time in ms
+    fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
+    # write sampling rate in ms
+    fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
+    # write number of vertices
+    fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
+    # write the vertex indices
+    fid.write(np.array(vertices, dtype='>u4').tostring())
+
+    # write the number of timepts
+    fid.write(np.array(data.shape[1], dtype='>u4').tostring())
+    #
+    # write the data
+    #
+    fid.write(np.array(data.T, dtype='>f4').tostring())
+
+    # close the file
+    fid.close()
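+
+# A minimal round-trip sketch (illustrative, not part of the module):
+# write a tiny STC file with _write_stc and read it back with _read_stc;
+# the file name and shapes are hypothetical.
+#
+#     data = np.ones((3, 2))                    # 3 vertices, 2 time points
+#     _write_stc('/tmp/ex-lh.stc', tmin=0., tstep=0.001,
+#                vertices=np.array([0, 5, 9]), data=data)
+#     stc = _read_stc('/tmp/ex-lh.stc')
+#     assert stc['data'].shape == (3, 2)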
+
+
+def _read_3(fid):
+    """ Read 3 byte integer from file
+    """
+    data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
+
+    out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
+
+    return out
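+
+# Worked example (illustrative): the bytes [0x12, 0x34, 0x56] decode as
+# (0x12 << 16) + (0x34 << 8) + 0x56 == 0x123456 == 1193046; _write_3
+# (defined below) performs the inverse shift-and-mask.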
+
+
+def _read_w(filename):
+    """Read a w file and return as dict
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename : string
+        The name of the w file.
+
+    Returns
+    -------
+    data: dict
+        The w structure. It has the following keys:
+           vertices       vertex indices (0 based)
+           data           The data matrix (nvert long)
+    """
+
+    with open(filename, 'rb', buffering=0) as fid:  # buffering=0 for np bug
+        # skip first 2 bytes
+        fid.read(2)
+
+        # read number of vertices/sources (3 byte integer)
+        vertices_n = int(_read_3(fid))
+
+        vertices = np.zeros((vertices_n), dtype=np.int32)
+        data = np.zeros((vertices_n), dtype=np.float32)
+
+        # read the vertices and data
+        for i in range(vertices_n):
+            vertices[i] = _read_3(fid)
+            data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
+
+        w = dict()
+        w['vertices'] = vertices
+        w['data'] = data
+
+    return w
+
+
+def _write_3(fid, val):
+    """ Write 3 byte integer to file
+    """
+
+    f_bytes = np.zeros((3), dtype=np.uint8)
+
+    f_bytes[0] = (val >> 16) & 255
+    f_bytes[1] = (val >> 8) & 255
+    f_bytes[2] = val & 255
+
+    fid.write(f_bytes.tostring())
+
+
+def _write_w(filename, vertices, data):
+    """Read a w file
+
+    w files contain activations or source reconstructions for a single time
+    point.
+
+    Parameters
+    ----------
+    filename: string
+        The name of the w file.
+    vertices: array of int
+        Vertex indices (0 based).
+    data: 1D array
+        The data array (nvert).
+    """
+
+    assert(len(vertices) == len(data))
+
+    fid = open(filename, 'wb')
+
+    # write 2 zero bytes
+    fid.write(np.zeros((2), dtype=np.uint8).tostring())
+
+    # write number of vertices/sources (3 byte integer)
+    vertices_n = len(vertices)
+    _write_3(fid, vertices_n)
+
+    # write the vertices and data
+    for i in range(vertices_n):
+        _write_3(fid, vertices[i])
+        # XXX: without float() endianness is wrong, not sure why
+        fid.write(np.array(float(data[i]), dtype='>f4').tostring())
+
+    # close the file
+    fid.close()
+
+
+def read_source_estimate(fname, subject=None):
+    """Read a soure estimate object
+
+    Parameters
+    ----------
+    fname : str
+        Path to (a) source-estimate file(s).
+    subject : str | None
+        Name of the subject the source estimate(s) is (are) from.
+        It is good practice to set this attribute to avoid combining
+        incompatible labels and SourceEstimates (e.g., ones from other
+        subjects). Note that due to file specification limitations, the
+        subject name isn't saved to or loaded from files written to disk.
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The source estimate object loaded from file.
+
+    Notes
+    -----
+     - for volume source estimates, ``fname`` should provide the path to a
+       single file named '*-vl.stc' or '*-vol.stc'
+     - for surface source estimates, ``fname`` should either provide the
+       path to the file corresponding to a single hemisphere ('*-lh.stc',
+       '*-rh.stc') or only specify the asterisk part in these patterns. In any
+       case, the function expects files for both hemispheres with names
+       following this pattern.
+     - for single time point .w files, ``fname`` should follow the same
+       pattern as for surface estimates, except that files are named
+       '*-lh.w' and '*-rh.w'.
+    """
+    fname_arg = fname
+
+    # make sure corresponding file(s) can be found
+    ftype = None
+    if os.path.exists(fname):
+        if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
+                fname.endswith('-vl.w') or fname.endswith('-vol.w'):
+            ftype = 'volume'
+        elif fname.endswith('.stc'):
+            ftype = 'surface'
+            if fname.endswith(('-lh.stc', '-rh.stc')):
+                fname = fname[:-7]
+            else:
+                err = ("Invalid .stc filename: %r; needs to end with "
+                       "hemisphere tag ('...-lh.stc' or '...-rh.stc')"
+                       % fname)
+                raise IOError(err)
+        elif fname.endswith('.w'):
+            ftype = 'w'
+            if fname.endswith(('-lh.w', '-rh.w')):
+                fname = fname[:-5]
+            else:
+                err = ("Invalid .w filename: %r; needs to end with "
+                       "hemisphere tag ('...-lh.w' or '...-rh.w')"
+                       % fname)
+                raise IOError(err)
+        elif fname.endswith('-stc.h5'):
+            ftype = 'h5'
+            fname = fname[:-7]
+        else:
+            raise RuntimeError('Unknown extension for file %s' % fname_arg)
+
+    if ftype != 'volume':
+        stc_exist = [os.path.exists(f)
+                     for f in [fname + '-rh.stc', fname + '-lh.stc']]
+        w_exist = [os.path.exists(f)
+                   for f in [fname + '-rh.w', fname + '-lh.w']]
+        h5_exist = os.path.exists(fname + '-stc.h5')
+        if all(stc_exist) and (ftype != 'w'):
+            ftype = 'surface'
+        elif all(w_exist):
+            ftype = 'w'
+        elif h5_exist:
+            ftype = 'h5'
+        elif any(stc_exist) or any(w_exist):
+            raise IOError("Hemisphere missing for %r" % fname_arg)
+        else:
+            raise IOError("SourceEstimate File(s) not found for: %r"
+                          % fname_arg)
+
+    # read the files
+    if ftype == 'volume':  # volume source space
+        if fname.endswith('.stc'):
+            kwargs = _read_stc(fname)
+        elif fname.endswith('.w'):
+            kwargs = _read_w(fname)
+            kwargs['data'] = kwargs['data'][:, np.newaxis]
+            kwargs['tmin'] = 0.0
+            kwargs['tstep'] = 0.0
+        else:
+            raise IOError('Volume source estimate must end with .stc or .w')
+    elif ftype == 'surface':  # stc file with surface source spaces
+        lh = _read_stc(fname + '-lh.stc')
+        rh = _read_stc(fname + '-rh.stc')
+        assert lh['tmin'] == rh['tmin']
+        assert lh['tstep'] == rh['tstep']
+        kwargs = lh.copy()
+        kwargs['data'] = np.r_[lh['data'], rh['data']]
+        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
+    elif ftype == 'w':  # w file with surface source spaces
+        lh = _read_w(fname + '-lh.w')
+        rh = _read_w(fname + '-rh.w')
+        kwargs = lh.copy()
+        kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
+        kwargs['vertices'] = [lh['vertices'], rh['vertices']]
+        # w files only have a single time point
+        kwargs['tmin'] = 0.0
+        kwargs['tstep'] = 1.0
+    elif ftype == 'h5':
+        kwargs = read_hdf5(fname + '-stc.h5', title='mnepython')
+
+    if ftype != 'volume':
+        # Make sure the vertices are ordered
+        vertices = kwargs['vertices']
+        if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
+            sidx = [np.argsort(verts) for verts in vertices]
+            vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
+            data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
+            kwargs['vertices'] = vertices
+            kwargs['data'] = data
+
+    if 'subject' not in kwargs:
+        kwargs['subject'] = subject
+    if subject is not None and subject != kwargs['subject']:
+        raise RuntimeError('provided subject name "%s" does not match '
+                           'subject name from the file "%s'
+                           % (subject, kwargs['subject']))
+
+    if ftype == 'volume':
+        stc = VolSourceEstimate(**kwargs)
+    else:
+        stc = SourceEstimate(**kwargs)
+
+    return stc
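+
+# Usage sketch (file names hypothetical): for a surface estimate saved as
+# 'mydata-lh.stc' / 'mydata-rh.stc', the stem or either hemisphere file
+# name can be passed:
+#
+#     stc = read_source_estimate('mydata', subject='sample')
+#     stc = read_source_estimate('mydata-lh.stc', subject='sample')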
+
+
+def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
+    """Helper function to generate a surface, volume or mixed source estimate
+    """
+
+    if isinstance(vertices, list) and len(vertices) == 2:
+        # make a surface source estimate
+        stc = SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep,
+                             subject=subject)
+    elif isinstance(vertices, np.ndarray) or (isinstance(vertices, list) and
+                                              len(vertices) == 1):
+        stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,
+                                tstep=tstep, subject=subject)
+    elif isinstance(vertices, list) and len(vertices) > 2:
+        # make a mixed source estimate
+        stc = MixedSourceEstimate(data, vertices=vertices, tmin=tmin,
+                                  tstep=tstep, subject=subject)
+    else:
+        raise ValueError('vertices has to be either a list with one or more '
+                         'arrays or an array')
+    return stc
+
+
+def _verify_source_estimate_compat(a, b):
+    """Make sure two SourceEstimates are compatible for arith. operations"""
+    compat = False
+    if len(a.vertices) == len(b.vertices):
+        if all(np.array_equal(av, vv)
+               for av, vv in zip(a.vertices, b.vertices)):
+            compat = True
+    if not compat:
+        raise ValueError('Cannot combine SourceEstimates that do not have the '
+                         'same vertices. Consider using stc.expand().')
+    if a.subject != b.subject:
+        raise ValueError('source estimates do not have the same subject '
+                         'names, %r and %r' % (a.subject, b.subject))
+
+
+class _BaseSourceEstimate(ToDataFrameMixin, object):
+    """Abstract base class for source estimates
+
+    Parameters
+    ----------
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : array | list of two arrays
+        Vertex numbers corresponding to the data.
+    tmin : float
+        Time point of the first sample in data.
+    tstep : float
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertices : array or list of arrays of shape (n_dipoles,)
+        The indices of the dipoles in the different source spaces. Can
+        be an array if there is only one source space (e.g., for volumes).
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
+    """
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
+        kernel, sens_data = None, None
+        if isinstance(data, tuple):
+            if len(data) != 2:
+                raise ValueError('If data is a tuple it has to be length 2')
+            kernel, sens_data = data
+            data = None
+            if kernel.shape[1] != sens_data.shape[0]:
+                raise ValueError('kernel and sens_data have invalid '
+                                 'dimensions')
+
+        if isinstance(vertices, list):
+            if not all(isinstance(v, np.ndarray) for v in vertices):
+                raise ValueError('Vertices, if a list, must contain numpy '
+                                 'arrays')
+
+            if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
+                raise ValueError('Vertices must be ordered in increasing '
+                                 'order.')
+
+            n_src = sum([len(v) for v in vertices])
+
+            if len(vertices) == 1:
+                vertices = vertices[0]
+        elif isinstance(vertices, np.ndarray):
+            n_src = len(vertices)
+        else:
+            raise ValueError('Vertices must be a list or numpy array')
+
+        # safeguard the user against doing something silly
+        if data is not None and data.shape[0] != n_src:
+            raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
+                             'must match' % (n_src, data.shape[0]))
+
+        self._data = data
+        self.tmin = tmin
+        self.tstep = tstep
+        self.vertices = vertices
+        self.verbose = verbose
+        self._kernel = kernel
+        self._sens_data = sens_data
+        self._kernel_removed = False
+        self.times = None
+        self._update_times()
+        self.subject = _check_subject(None, subject, False)
+
+    def _remove_kernel_sens_data_(self):
+        """Remove kernel and sensor space data and compute self._data
+        """
+        if self._kernel is not None or self._sens_data is not None:
+            self._kernel_removed = True
+            self._data = np.dot(self._kernel, self._sens_data)
+            self._kernel = None
+            self._sens_data = None
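+
+    # Sketch of the lazy-kernel construction this method resolves (shapes
+    # hypothetical): passing data=(kernel, sens_data) defers the dot
+    # product until the full source-space data are first needed.
+    #
+    #     kernel = np.random.randn(24, 305)      # (n_dipoles, n_sensors)
+    #     sens_data = np.random.randn(305, 100)  # (n_sensors, n_times)
+    #     vertices = [np.arange(12), np.arange(12)]
+    #     stc = SourceEstimate((kernel, sens_data), vertices, tmin=0.,
+    #                          tstep=1e-3)
+    #     stc.data.shape  # (24, 100), i.e. np.dot(kernel, sens_data).shape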
+
+    def crop(self, tmin=None, tmax=None):
+        """Restrict SourceEstimate to a time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            The first time point in seconds. If None the first present is used.
+        tmax : float | None
+            The last time point in seconds. If None the last present is used.
+        """
+        mask = _time_mask(self.times, tmin, tmax)
+        self.tmin = self.times[np.where(mask)[0][0]]
+        if self._kernel is not None and self._sens_data is not None:
+            self._sens_data = self._sens_data[:, mask]
+        else:
+            self._data = self._data[:, mask]
+
+        self._update_times()
+        return self  # return self for chaining methods
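+
+    # Example (sketch): keep only the 0.0 to 0.2 s window on a copy,
+    # chaining off the returned instance.
+    #
+    #     stc_short = stc.copy().crop(0., 0.2)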
+
+    @verbose
+    def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
+                 verbose=None):
+        """Resample data
+
+        Parameters
+        ----------
+        sfreq : float
+            New sample rate to use.
+        npad : int
+            Amount to pad the start and end of the data.
+        window : string or tuple
+            Window to use in resampling. See scipy.signal.resample.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+
+        Notes
+        -----
+        For some data, it may be more accurate to use npad=0 to reduce
+        artifacts. This is dataset dependent -- check your data!
+
+        Note that the sample rate of the original data is inferred from tstep.
+        """
+        # resampling in sensor instead of source space gives a somewhat
+        # different result, so we don't allow it
+        self._remove_kernel_sens_data_()
+
+        o_sfreq = 1.0 / self.tstep
+        self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
+
+        # adjust indirectly affected variables
+        self.tstep = 1.0 / sfreq
+        self._update_times()
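+
+    # Example (sketch): an stc with tstep=0.001 (effective rate 1000 Hz)
+    # can be downsampled in place to 250 Hz:
+    #
+    #     stc.resample(250.)
+    #     stc.tstep  # now 0.004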
+
+    @property
+    def data(self):
+        """Numpy array of source estimate data"""
+        if self._data is None:
+            # compute the solution the first time the data is accessed and
+            # remove the kernel and sensor data
+            self._remove_kernel_sens_data_()
+        return self._data
+
+    @property
+    def shape(self):
+        """Shape of the data"""
+        if self._data is not None:
+            return self._data.shape
+        return (self._kernel.shape[0], self._sens_data.shape[1])
+
+    def _update_times(self):
+        """Update the times attribute after changing tmin, tmax, or tstep"""
+        self.times = self.tmin + (self.tstep * np.arange(self.shape[1]))
+
+    def __add__(self, a):
+        stc = copy.deepcopy(self)
+        stc += a
+        return stc
+
+    def __iadd__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, _BaseSourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data += a.data
+        else:
+            self._data += a
+        return self
+
+    def mean(self):
+        """Make a summary stc file with mean power between tmin and tmax.
+
+        Returns
+        -------
+        stc : instance of SourceEstimate
+            A new stc with a single time point holding the time-averaged
+            data; the original instance is unchanged.
+        """
+        data = self.data
+        tmax = self.tmin + self.tstep * data.shape[1]
+        tmin = (self.tmin + tmax) / 2.
+        tstep = tmax - self.tmin
+        mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis],
+                                  vertices=self.vertices, tmin=tmin,
+                                  tstep=tstep, subject=self.subject)
+        return mean_stc
+
+    def __sub__(self, a):
+        stc = copy.deepcopy(self)
+        stc -= a
+        return stc
+
+    def __isub__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, _BaseSourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data -= a.data
+        else:
+            self._data -= a
+        return self
+
+    def __truediv__(self, a):
+        return self.__div__(a)
+
+    def __div__(self, a):
+        stc = copy.deepcopy(self)
+        stc /= a
+        return stc
+
+    def __itruediv__(self, a):
+        return self.__idiv__(a)
+
+    def __idiv__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, _BaseSourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data /= a.data
+        else:
+            self._data /= a
+        return self
+
+    def __mul__(self, a):
+        stc = copy.deepcopy(self)
+        stc *= a
+        return stc
+
+    def __imul__(self, a):
+        self._remove_kernel_sens_data_()
+        if isinstance(a, _BaseSourceEstimate):
+            _verify_source_estimate_compat(self, a)
+            self._data *= a.data
+        else:
+            self._data *= a
+        return self
+
+    def __pow__(self, a):
+        stc = copy.deepcopy(self)
+        stc **= a
+        return stc
+
+    def __ipow__(self, a):
+        self._remove_kernel_sens_data_()
+        self._data **= a
+        return self
+
+    def __radd__(self, a):
+        return self + a
+
+    def __rsub__(self, a):
+        return self - a
+
+    def __rmul__(self, a):
+        return self * a
+
+    def __rdiv__(self, a):
+        return self / a
+
+    def __neg__(self):
+        stc = copy.deepcopy(self)
+        stc._remove_kernel_sens_data_()
+        stc._data *= -1
+        return stc
+
+    def __pos__(self):
+        return self
+
+    def sqrt(self):
+        """Take the square root
+
+        Returns
+        -------
+        stc : instance of SourceEstimate
+            A copy of the SourceEstimate with sqrt(data).
+        """
+        return self ** (0.5)
+
+    def copy(self):
+        """Return copy of SourceEstimate instance"""
+        return copy.deepcopy(self)
+
+    def bin(self, width, tstart=None, tstop=None, func=np.mean):
+        """Returns a SourceEstimate object with data summarized over time bins
+
+        The data are summarized in consecutive time bins of ``width``
+        seconds. This method is intended for visualization only. No filter
+        is applied to the data before binning, making the method
+        inappropriate as a tool for downsampling data.
+
+        Parameters
+        ----------
+        width : scalar
+            Width of the individual bins in seconds.
+        tstart : scalar | None
+            Time point where the first bin starts. The default is the first
+            time point of the stc.
+        tstop : scalar | None
+            Last possible time point contained in a bin (if the last bin would
+            be shorter than width it is dropped). The default is the last time
+            point of the stc.
+        func : callable
+            Function that is applied to summarize the data. Needs to accept a
+            numpy.array as first input and an ``axis`` keyword argument.
+
+        Returns
+        -------
+        stc : instance of SourceEstimate
+            The binned SourceEstimate.
+        """
+        if tstart is None:
+            tstart = self.tmin
+        if tstop is None:
+            tstop = self.times[-1]
+
+        times = np.arange(tstart, tstop + self.tstep, width)
+        nv, _ = self.shape
+        nt = len(times) - 1
+        data = np.empty((nv, nt), dtype=self.data.dtype)
+        for i in range(nt):
+            idx = (self.times >= times[i]) & (self.times < times[i + 1])
+            data[:, i] = func(self.data[:, idx], axis=1)
+
+        tmin = times[0] + width / 2.
+        stc = _make_stc(data, vertices=self.vertices,
+                        tmin=tmin, tstep=width, subject=self.subject)
+        return stc
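+
+    # Example (sketch): summarize an existing stc in 50 ms bins, using the
+    # default mean or any other reducing function.
+    #
+    #     stc_mean = stc.bin(0.05)
+    #     stc_max = stc.bin(0.05, func=np.max)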
+
+    def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
+        """Get data after a linear (time) transform has been applied
+
+        The transform is applied to each source time course independently.
+
+        Parameters
+        ----------
+        func : callable
+            The transform to be applied, including parameters (see, e.g.,
+            `mne.fixes.partial`). The first parameter of the function is the
+            input data. The first return value is the transformed data,
+            remaining outputs are ignored. The first dimension of the
+            transformed data has to be the same as the first dimension of the
+            input data.
+        idx : array | None
+            Indices of source time courses for which to compute transform.
+            If None, all time courses are used.
+        tmin_idx : int | None
+            Index of first time point to include. If None, the index of the
+            first time point is used.
+        tmax_idx : int | None
+            Index of the first time point not to include. If None, time points
+            up to (and including) the last time point are included.
+
+        Returns
+        -------
+        data_t : ndarray
+            The transformed data.
+
+        Notes
+        -----
+        Applying transforms can be significantly faster if the
+        SourceEstimate object was created using "(kernel, sens_data)" for
+        the "data" parameter, as the transform is applied in sensor space.
+        Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
+        this automatically (if possible).
+        """
+
+        if idx is None:
+            # use all time courses by default
+            idx = slice(None, None)
+
+        if self._kernel is None and self._sens_data is None:
+            if self._kernel_removed:
+                warnings.warn('Performance can be improved by not accessing '
+                              'the data attribute before calling this method.')
+
+            # transform source space data directly
+            data_t = func(self.data[idx, tmin_idx:tmax_idx])
+
+            if isinstance(data_t, tuple):
+                # use only first return value
+                data_t = data_t[0]
+        else:
+            # apply transform in sensor space
+            sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
+
+            if isinstance(sens_data_t, tuple):
+                # use only first return value
+                sens_data_t = sens_data_t[0]
+
+            # apply inverse
+            data_shape = sens_data_t.shape
+            if len(data_shape) > 2:
+                # flatten the last dimensions
+                sens_data_t = sens_data_t.reshape(data_shape[0],
+                                                  np.prod(data_shape[1:]))
+
+            data_t = np.dot(self._kernel[idx, :], sens_data_t)
+
+            # restore original shape if necessary
+            if len(data_shape) > 2:
+                data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
+
+        return data_t
+
+    def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
+        """Apply linear transform
+
+        The transform is applied to each source time course independently.
+
+        Parameters
+        ----------
+        func : callable
+            The transform to be applied, including parameters (see, e.g.,
+            mne.fixes.partial). The first parameter of the function is the
+            input data. The first two dimensions of the transformed data
+            should be (i) vertices and (ii) time.  Transforms which yield 3D
+            output (e.g. time-frequency transforms) are valid, so long as the
+            first two dimensions are vertices and time.  In this case, the
+            copy parameter (see below) must be True and a list of
+            SourceEstimates, rather than a single instance of SourceEstimate,
+            will be returned, one for each index of the 3rd dimension of the
+            transformed data.  In the case of transforms yielding 2D output
+            (e.g. filtering), the user has the option of modifying the input
+            inplace (copy = False) or returning a new instance of
+            SourceEstimate (copy = True) with the transformed data.
+        idx : array | None
+            Indices of source time courses for which to compute transform.
+            If None, all time courses are used.
+        tmin : float | int | None
+            First time point to include (ms). If None, self.tmin is used.
+        tmax : float | int | None
+            Last time point to include (ms). If None, self.tmax is used.
+        copy : bool
+            If True, return a new instance of SourceEstimate instead of
+            modifying the input inplace.
+
+        Returns
+        -------
+        stcs : instance of SourceEstimate | list
+            The transformed stc or, in the case of transforms which yield
+            N-dimensional output (where N > 2), a list of stcs. For a list,
+            copy must be True.
+
+        Notes
+        -----
+        Applying transforms can be significantly faster if the
+        SourceEstimate object was created using "(kernel, sens_data)" for
+        the "data" parameter, as the transform is applied in sensor space.
+        Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
+        this automatically (if possible).
+        """
+
+        # min and max data indices to include
+        times = np.round(1000 * self.times)
+        t_idx = np.where(_time_mask(times, tmin, tmax))[0]
+        if tmin is None:
+            tmin_idx = None
+        else:
+            tmin_idx = t_idx[0]
+
+        if tmax is None:
+            tmax_idx = None
+        else:
+            tmax_idx = t_idx[-1]
+
+        data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
+                                     tmax_idx=tmax_idx)
+
+        # account for change in n_vertices
+        if idx is not None:
+            idx_lh = idx[idx < len(self.lh_vertno)]
+            idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
+            verts_lh = self.lh_vertno[idx_lh]
+            verts_rh = self.rh_vertno[idx_rh]
+        else:
+            verts_lh = self.lh_vertno
+            verts_rh = self.rh_vertno
+        verts = [verts_lh, verts_rh]
+
+        tmin_idx = 0 if tmin_idx is None else tmin_idx
+        tmax_idx = -1 if tmax_idx is None else tmax_idx
+
+        tmin = self.times[tmin_idx]
+
+        times = np.arange(self.times[tmin_idx],
+                          self.times[tmax_idx] + self.tstep / 2, self.tstep)
+
+        if data_t.ndim > 2:
+            # return list of stcs if transformed data has dimensionality > 2
+            if copy:
+                stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
+                                       self.tstep, self.subject)
+                        for a in range(data_t.shape[-1])]
+            else:
+                raise ValueError('copy must be True if transformed data has '
+                                 'more than 2 dimensions')
+        else:
+            # return new or overwritten stc
+            stcs = self if not copy else self.copy()
+            stcs._data, stcs.vertices = data_t, verts
+            stcs.tmin, stcs.times = tmin, times
+
+        return stcs
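+
+    # Example (sketch): rectify all time courses in place or on a copy;
+    # np.abs preserves the required (vertices, time) layout.
+    #
+    #     stc.transform(np.abs)                       # in place
+    #     stc_abs = stc.transform(np.abs, copy=True)  # new instance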
+
+
+class SourceEstimate(_BaseSourceEstimate):
+    """Container for surface source estimates
+
+    Parameters
+    ----------
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : list of two arrays
+        Vertex numbers corresponding to the data.
+    tmin : scalar
+        Time point of the first sample in data.
+    tstep : scalar
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertices : list of two arrays of shape (n_dipoles,)
+        The indices of the dipoles in the left and right source space.
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
+    """
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
+
+        if not (isinstance(vertices, list) and len(vertices) == 2):
+            raise ValueError('Vertices, if a list, must contain two '
+                             'numpy arrays')
+
+        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
+                                     tstep=tstep, subject=subject,
+                                     verbose=verbose)
+
+    @verbose
+    def save(self, fname, ftype='stc', verbose=None):
+        """Save the source estimates to a file
+
+        Parameters
+        ----------
+        fname : string
+            The stem of the file name. The file names used for surface source
+            spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
+            and "-rh.w") to the stem provided, for the left and the right
+            hemisphere, respectively.
+        ftype : string
+            File format to use. Allowed values are "stc" (default), "w",
+            and "h5". The "w" format only supports a single time point.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        """
+        if ftype not in ('stc', 'w', 'h5'):
+            raise ValueError('ftype must be "stc", "w", or "h5", not "%s"'
+                             % ftype)
+
+        lh_data = self.data[:len(self.lh_vertno)]
+        rh_data = self.data[-len(self.rh_vertno):]
+
+        if ftype == 'stc':
+            logger.info('Writing STC to disk...')
+            _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
+                       vertices=self.lh_vertno, data=lh_data)
+            _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
+                       vertices=self.rh_vertno, data=rh_data)
+        elif ftype == 'w':
+            if self.shape[1] != 1:
+                raise ValueError('w files can only contain a single time '
+                                 'point')
+            logger.info('Writing STC to disk (w format)...')
+            _write_w(fname + '-lh.w', vertices=self.lh_vertno,
+                     data=lh_data[:, 0])
+            _write_w(fname + '-rh.w', vertices=self.rh_vertno,
+                     data=rh_data[:, 0])
+        elif ftype == 'h5':
+            write_hdf5(fname + '-stc.h5',
+                       dict(vertices=self.vertices, data=self.data,
+                            tmin=self.tmin, tstep=self.tstep,
+                            subject=self.subject), title='mnepython')
+        logger.info('[done]')
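+
+    # Usage sketch (stem name hypothetical): hemisphere suffixes are
+    # appended automatically.
+    #
+    #     stc.save('mydata')               # mydata-lh.stc, mydata-rh.stc
+    #     stc.save('mydata', ftype='h5')   # mydata-stc.h5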
+
+    def __repr__(self):
+        if isinstance(self.vertices, list):
+            nv = sum([len(v) for v in self.vertices])
+        else:
+            nv = self.vertices.size
+        s = "%d vertices" % nv
+        if self.subject is not None:
+            s += ", subject : %s" % self.subject
+        s += ", tmin : %s (ms)" % (1e3 * self.tmin)
+        s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
+        s += ", tstep : %s (ms)" % (1e3 * self.tstep)
+        s += ", data size : %s x %s" % self.shape
+        return "<SourceEstimate  |  %s>" % s
+
+    @property
+    def lh_data(self):
+        return self.data[:len(self.lh_vertno)]
+
+    @property
+    def rh_data(self):
+        return self.data[len(self.lh_vertno):]
+
+    @property
+    def lh_vertno(self):
+        return self.vertices[0]
+
+    @property
+    def rh_vertno(self):
+        return self.vertices[1]
+
+    def _hemilabel_stc(self, label):
+
+        if label.hemi == 'lh':
+            stc_vertices = self.vertices[0]
+        else:
+            stc_vertices = self.vertices[1]
+
+        # find index of the Label's vertices
+        idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
+
+        # find output vertices
+        vertices = stc_vertices[idx]
+
+        # find data
+        if label.hemi == 'rh':
+            values = self.data[idx + len(self.vertices[0])]
+        else:
+            values = self.data[idx]
+
+        return vertices, values
+
+    def in_label(self, label):
+        """Returns a SourceEstimate object restricted to a label
+
+        SourceEstimate contains the time course of
+        activation of all sources inside the label.
+
+        Parameters
+        ----------
+        label : Label | BiHemiLabel
+            The label (as created for example by mne.read_label). If the label
+            does not match any sources in the SourceEstimate, a ValueError is
+            raised.
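+
+        Examples
+        --------
+        A sketch; the label file name is hypothetical:
+
+        >>> label = mne.read_label('Aud-lh.label')  # doctest: +SKIP
+        >>> stc_aud = stc.in_label(label)  # doctest: +SKIP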
+        """
+        # make sure label and stc are compatible
+        if label.subject is not None and self.subject is not None \
+                and label.subject != self.subject:
+            raise RuntimeError('label and stc must have same subject names, '
+                               'currently "%s" and "%s"' % (label.subject,
+                                                            self.subject))
+
+        if label.hemi == 'both':
+            lh_vert, lh_val = self._hemilabel_stc(label.lh)
+            rh_vert, rh_val = self._hemilabel_stc(label.rh)
+            vertices = [lh_vert, rh_vert]
+            values = np.vstack((lh_val, rh_val))
+        elif label.hemi == 'lh':
+            lh_vert, values = self._hemilabel_stc(label)
+            vertices = [lh_vert, np.array([], int)]
+        elif label.hemi == 'rh':
+            rh_vert, values = self._hemilabel_stc(label)
+            vertices = [np.array([], int), rh_vert]
+        else:
+            raise TypeError("Expected Label or BiHemiLabel; got %r" % label)
+
+        if sum([len(v) for v in vertices]) == 0:
+            raise ValueError('No vertices match the label in the stc file')
+
+        label_stc = SourceEstimate(values, vertices=vertices,
+                                   tmin=self.tmin, tstep=self.tstep,
+                                   subject=self.subject)
+        return label_stc
+
+    def expand(self, vertices):
+        """Expand SourceEstimate to include more vertices
+
+        This will add rows to stc.data (zero-filled) and modify stc.vertices
+        to include all vertices in stc.vertices and the input vertices.
+
+        Parameters
+        ----------
+        vertices : list of array
+            New vertices to add. Can also contain old values.
+
+        Returns
+        -------
+        stc : instance of SourceEstimate
+            The modified stc (note: the method operates in-place).
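+
+        Examples
+        --------
+        A sketch; the vertex numbers here are made up:
+
+        >>> stc.expand([np.array([0, 1, 2]), np.array([], int)])  # doctest: +SKIP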
+        """
+        if not isinstance(vertices, list):
+            raise TypeError('vertices must be a list')
+        if not len(self.vertices) == len(vertices):
+            raise ValueError('vertices must have the same length as '
+                             'stc.vertices')
+
+        # can no longer use kernel and sensor data
+        self._remove_kernel_sens_data_()
+
+        inserters = list()
+        offsets = [0]
+        for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
+            v_new = np.setdiff1d(v_new, v_old)
+            inds = np.searchsorted(v_old, v_new)
+            # newer numpy might overwrite inds after np.insert, copy here
+            inserters += [inds.copy()]
+            offsets += [len(v_old)]
+            self.vertices[vi] = np.insert(v_old, inds, v_new)
+        inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
+        inds = np.concatenate(inds)
+        new_data = np.zeros((len(inds), self._data.shape[1]))
+        self._data = np.insert(self._data, inds, new_data, axis=0)
+        return self
+
+    @verbose
+    def extract_label_time_course(self, labels, src, mode='mean_flip',
+                                  allow_empty=False, verbose=None):
+        """Extract label time courses for lists of labels
+
+        This function will extract one time course for each label. The way the
+        time courses are extracted depends on the mode parameter.
+
+        Valid values for mode are:
+
+            - 'mean': Average within each label.
+            - 'mean_flip': Average within each label with sign flip depending
+              on source orientation.
+            - 'pca_flip': Apply an SVD to the time courses within each label
+              and use the scaled and sign-flipped first right-singular vector
+              as the label time course. The scaling is performed such that the
+              power of the label time course is the same as the average
+              per-vertex time course power within the label. The sign of the
+              resulting time course is adjusted by multiplying it with
+              "sign(dot(u, flip))" where u is the first left-singular vector,
+              and flip is a sign-flip vector based on the vertex normals. This
+              procedure ensures that the phase does not randomly change by 180
+              degrees from one stc to the next.
+            - 'max': Max value within each label.
+
+        Parameters
+        ----------
+        labels : Label | list of Label
+            The labels for which to extract the time courses.
+        src : list
+            Source spaces for left and right hemisphere.
+        mode : str
+            Extraction mode, see explanation above.
+        allow_empty : bool
+            Instead of emitting an error, return all-zero time course for
+            labels that do not have any vertices in the source estimate.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        label_tc : array, shape=(len(labels), n_times)
+            Extracted time course for each label.
+
+        See Also
+        --------
+        extract_label_time_course : extract time courses for multiple STCs
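+
+        Examples
+        --------
+        A sketch; the subject name and annotation are assumptions:
+
+        >>> labels = mne.read_labels_from_annot('sample')  # doctest: +SKIP
+        >>> tc = stc.extract_label_time_course(labels, src,
+        ...                                    mode='mean_flip')  # doctest: +SKIP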
+        """
+        label_tc = extract_label_time_course(self, labels, src, mode=mode,
+                                             return_generator=False,
+                                             allow_empty=allow_empty,
+                                             verbose=verbose)
+
+        return label_tc
+
+    def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
+                       subjects_dir=None):
+        """Return the vertex on a given surface that is at the center of mass
+        of  the activity in stc. Note that all activity must occur in a single
+        hemisphere, otherwise an error is returned. The "mass" of each point in
+        space for computing the spatial center of mass is computed by summing
+        across time, and vice-versa for each point in time in computing the
+        temporal center of mass. This is useful for quantifying spatio-temporal
+        cluster locations, especially when combined with the function
+        mne.source_space.vertex_to_mni().
+
+        Parameters
+        ----------
+        subject : string | None
+            The subject the stc is defined for.
+        hemi : int, or None
+            Calculate the center of mass for the left (0) or right (1)
+            hemisphere. If None, one of the hemispheres must be all zeroes,
+            and the center of mass will be calculated for the other
+            hemisphere (useful for getting COM for clusters).
+        restrict_vertices : bool, or array of int
+            If True, returned vertex will be one from stc. Otherwise, it could
+            be any vertex from surf. If an array of int, the returned vertex
+            will come from that array. For most accurate estimates, do not
+            restrict vertices.
+        subjects_dir : str, or None
+            Path to the SUBJECTS_DIR. If None, the path is obtained by using
+            the environment variable SUBJECTS_DIR.
+
+        Returns
+        -------
+        vertex : int
+            Vertex of the spatial center of mass for the inferred hemisphere,
+            with each vertex weighted by the sum of the stc across time. For a
+            boolean stc, then, this would be weighted purely by the duration
+            each vertex was active.
+        hemi : int
+            Hemisphere the vertex was taken from.
+        t : float
+            Time of the temporal center of mass (weighted by the sum across
+            source vertices).
+
+        References
+        ----------
+        Used in Larson and Lee, "The cortical dynamics underlying effective
+        switching of auditory spatial attention", NeuroImage 2012.
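+
+        Examples
+        --------
+        A sketch; 'sample' is an assumed FreeSurfer subject:
+
+        >>> vertex, hemi, t = stc.center_of_mass('sample')  # doctest: +SKIP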
+        """
+        subject = _check_subject(self.subject, subject)
+
+        values = np.sum(self.data, axis=1)  # sum across time
+        vert_inds = [np.arange(len(self.vertices[0])),
+                     np.arange(len(self.vertices[1])) + len(self.vertices[0])]
+        if hemi is None:
+            hemi = np.where(np.array([np.sum(values[vi])
+                            for vi in vert_inds]))[0]
+            if not len(hemi) == 1:
+                raise ValueError('Could not infer hemisphere')
+            hemi = hemi[0]
+        if hemi not in [0, 1]:
+            raise ValueError('hemi must be 0 or 1')
+
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+        values = values[vert_inds[hemi]]
+
+        hemis = ['lh', 'rh']
+        surf = os.path.join(subjects_dir, subject, 'surf',
+                            hemis[hemi] + '.sphere')
+
+        if isinstance(surf, string_types):  # read in surface
+            surf = read_surface(surf)
+
+        if restrict_vertices is False:
+            restrict_vertices = np.arange(surf[0].shape[0])
+        elif restrict_vertices is True:
+            restrict_vertices = self.vertices[hemi]
+
+        if np.any(self.data < 0):
+            raise ValueError('Cannot compute COM with negative values')
+
+        pos = surf[0][self.vertices[hemi], :].T
+        c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
+
+        # Find the vertex closest to the COM
+        vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
+                                            c_o_m) ** 2, axis=1)))
+        vertex = restrict_vertices[vertex]
+
+        # do time center of mass by using the values across space
+        masses = np.sum(self.data, axis=0).astype(float)
+        t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
+        t = self.tmin + self.tstep * t_ind
+        return vertex, hemi, t
+
+    def plot(self, subject=None, surface='inflated', hemi='lh',
+             colormap='auto', time_label='time=%0.2f ms',
+             smoothing_steps=10, transparent=None, alpha=1.0,
+             time_viewer=False, config_opts=None, subjects_dir=None,
+             figure=None, views='lat', colorbar=True, clim='auto'):
+        """Plot SourceEstimates with PySurfer
+
+        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+        which will automatically be set by this function. Plotting multiple
+        SourceEstimates with different values for subjects_dir will cause
+        PySurfer to use the wrong FreeSurfer surfaces when using methods of
+        the returned Brain object. It is therefore recommended to set the
+        SUBJECTS_DIR environment variable or always use the same value for
+        subjects_dir (within the same Python session).
+
+        Parameters
+        ----------
+        subject : str | None
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT. If None stc.subject will be used. If that
+            is None, the environment will be used.
+        surface : str
+            The type of surface (inflated, white etc.).
+        hemi : str, 'lh' | 'rh' | 'split' | 'both'
+            The hemisphere to display.
+        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
+            Name of colormap to use or a custom look up table. If array, must
+            be (n x 3) or (n x 4) array with RGB or RGBA values between
+            0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
+            based on whether 'lims' or 'pos_lims' are specified in `clim`.
+        time_label : str
+            How to print info about the time instant visualized.
+        smoothing_steps : int
+            The amount of smoothing.
+        transparent : bool | None
+            If True, use a linear transparency between fmin and fmid.
+            None will choose automatically based on colormap type.
+        alpha : float
+            Alpha value to apply globally to the overlay.
+        time_viewer : bool
+            Display time viewer GUI.
+        config_opts : dict
+            Keyword arguments for Brain initialization.
+            See pysurfer.viz.Brain.
+        subjects_dir : str
+            The path to the FreeSurfer subjects reconstructions.
+            It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
+        figure : instance of mayavi.core.scene.Scene | None
+            If None, the last figure will be cleaned and a new figure will
+            be created.
+        views : str | list
+            View to use. See surfer.Brain().
+        colorbar : bool
+            If True, display colorbar on scene.
+        clim : str | dict
+            Colorbar properties specification. If 'auto', set clim
+            automatically based on data percentiles. If dict, should contain:
+
+                kind : str
+                    Flag to specify type of limits. 'value' or 'percent'.
+                lims : list | np.ndarray | tuple of float, 3 elements
+                    Note: Only use this if 'colormap' is not 'mne'.
+                    Left, middle, and right bound for colormap.
+                pos_lims : list | np.ndarray | tuple of float, 3 elements
+                    Note: Only use this if 'colormap' is 'mne'.
+                    Left, middle, and right bound for colormap. Positive values
+                    will be mirrored directly across zero during colormap
+                    construction to obtain negative control points.
+
+        Returns
+        -------
+        brain : Brain
+            An instance of surfer.viz.Brain from PySurfer.
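+
+        Examples
+        --------
+        A sketch; the subject and clim values are illustrative:
+
+        >>> brain = stc.plot('sample', hemi='both',
+        ...                  clim=dict(kind='value', lims=[3, 6, 9]))  # doctest: +SKIP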
+        """
+        brain = plot_source_estimates(self, subject, surface=surface,
+                                      hemi=hemi, colormap=colormap,
+                                      time_label=time_label,
+                                      smoothing_steps=smoothing_steps,
+                                      transparent=transparent, alpha=alpha,
+                                      time_viewer=time_viewer,
+                                      config_opts=config_opts,
+                                      subjects_dir=subjects_dir, figure=figure,
+                                      views=views, colorbar=colorbar,
+                                      clim=clim)
+        return brain
+
+    @verbose
+    def to_original_src(self, src_orig, subject_orig=None,
+                        subjects_dir=None, verbose=None):
+        """Return a SourceEstimate from morphed source to the original subject
+
+        Parameters
+        ----------
+        src_orig : instance of SourceSpaces
+            The original source spaces that were morphed to the current
+            subject.
+        subject_orig : str | None
+            The original subject. For most source spaces this shouldn't need
+            to be provided, since it is stored in the source space itself.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        morph_source_spaces
+
+        Notes
+        -----
+        .. versionadded:: 0.10.0
+        """
+        if self.subject is None:
+            raise ValueError('stc.subject must be set')
+        src_orig = _ensure_src(src_orig)
+        subject_orig = _ensure_src_subject(src_orig, subject_orig)
+        data_idx, vertices = _get_morph_src_reordering(
+            self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
+        return SourceEstimate(self._data[data_idx], vertices,
+                              self.tmin, self.tstep, subject_orig)
+
+    @verbose
+    def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
+              buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
+              verbose=None):
+        """Morph a source estimate from one subject to another
+
+        Parameters
+        ----------
+        subject_to : string
+            Name of the subject on which to morph as named in the SUBJECTS_DIR
+        grade : int, list (of two arrays), or None
+            Resolution of the icosahedral mesh (typically 5). If None, all
+            vertices will be used (potentially filling the surface). If a list,
+            then values will be morphed to the set of vertices specified in
+            grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            standard grade 5 source space) can be substantially faster than
+            computing vertex locations. Note that if subject='fsaverage'
+            and grade=5, this set of vertices will automatically be used
+            (instead of computed) for speed, since this is a common morph.
+            NOTE : If sparse=True, grade has to be set to None.
+        smooth : int or None
+            Number of iterations for the smoothing of the surface data.
+            If None, smooth is automatically defined to fill the surface
+            with non-zero values.
+        subjects_dir : string, or None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        buffer_size : int
+            Morph data in chunks of `buffer_size` time instants.
+            Saves memory when morphing long time intervals.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        subject_from : string
+            Name of the original subject as named in the SUBJECTS_DIR.
+            If None, self.subject will be used.
+        sparse : bool
+            Morph as a sparse source estimate. If True the only
+            parameters used are subject_to and subject_from,
+            and grade has to be None.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        stc_to : SourceEstimate
+            Source estimate for the destination subject.
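+
+        Examples
+        --------
+        A sketch morphing to fsaverage; the smoothing value is arbitrary:
+
+        >>> stc_fs = stc.morph('fsaverage', grade=5, smooth=5)  # doctest: +SKIP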
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        if sparse:
+            if grade is not None:
+                raise RuntimeError('grade must be set to None if sparse=True.')
+            return _morph_sparse(self, subject_from, subject_to, subjects_dir)
+        else:
+            return morph_data(subject_from, subject_to, self, grade, smooth,
+                              subjects_dir, buffer_size, n_jobs, verbose)
+
+    def morph_precomputed(self, subject_to, vertices_to, morph_mat,
+                          subject_from=None):
+        """Morph source estimate between subjects using a precomputed matrix
+
+        Parameters
+        ----------
+        subject_to : string
+            Name of the subject on which to morph as named in the SUBJECTS_DIR.
+        vertices_to : list of array of int
+            The vertices on the destination subject's brain.
+        morph_mat : sparse matrix
+            The morphing matrix, usually from compute_morph_matrix.
+        subject_from : string | None
+            Name of the original subject as named in the SUBJECTS_DIR.
+            If None, self.subject will be used.
+
+        Returns
+        -------
+        stc_to : SourceEstimate
+            Source estimate for the destination subject.
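+
+        Examples
+        --------
+        A sketch; the subject names are assumptions and the matrix comes
+        from compute_morph_matrix:
+
+        >>> vertices_to = mne.grade_to_vertices('fsaverage', grade=5)  # doctest: +SKIP
+        >>> mm = mne.compute_morph_matrix('sample', 'fsaverage',
+        ...                               stc.vertices, vertices_to)  # doctest: +SKIP
+        >>> stc_fs = stc.morph_precomputed('fsaverage', vertices_to, mm)  # doctest: +SKIP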
+        """
+        subject_from = _check_subject(self.subject, subject_from)
+        return morph_data_precomputed(subject_from, subject_to, self,
+                                      vertices_to, morph_mat)
+
+    def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
+                 vert_as_index=False, time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        hemi : {'lh', 'rh', None}
+            The hemi to be considered. If None, the entire source space is
+            considered.
+        tmin : float | None
+            The minimum point in time to be considered for peak detection.
+        tmax : float | None
+            The maximum point in time to be considered for peak detection.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        vert_as_index : bool
+            Whether to return the vertex index instead of its ID.
+            Defaults to False.
+        time_as_index : bool
+            Whether to return the time index instead of the latency.
+            Defaults to False.
+
+        Returns
+        -------
+        pos : int
+            The vertex exhibiting the maximum response, either ID or index.
+        latency : float | int
+            The time point of the maximum response, either latency in seconds
+            or index.
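+
+        Examples
+        --------
+        A sketch restricted to the left hemisphere:
+
+        >>> vertno, latency = stc.get_peak(hemi='lh', mode='abs')  # doctest: +SKIP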
+        """
+        data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
+        vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
+                  None: np.concatenate(self.vertices)}[hemi]
+
+        vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
+
+        return (vert_idx if vert_as_index else vertno[vert_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
+
+class VolSourceEstimate(_BaseSourceEstimate):
+    """Container for volume source estimates
+
+    Parameters
+    ----------
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : array
+        Vertex numbers corresponding to the data.
+    tmin : scalar
+        Time point of the first sample in data.
+    tstep : scalar
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertices : array of shape (n_dipoles,)
+        The indices of the dipoles in the source space.
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
+
+        if not (isinstance(vertices, np.ndarray) or
+                isinstance(vertices, list) and len(vertices) == 1):
+            raise ValueError('Vertices must be a numpy array or a list with '
+                             'one array')
+
+        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
+                                     tstep=tstep, subject=subject,
+                                     verbose=verbose)
+
+    @verbose
+    def save(self, fname, ftype='stc', verbose=None):
+        """Save the source estimates to a file
+
+        Parameters
+        ----------
+        fname : string
+            The stem of the file name. The stem is extended with "-vl.stc"
+            or "-vl.w".
+        ftype : string
+            File format to use. Allowed values are "stc" (default) and "w".
+            The "w" format only supports a single time point.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+            Defaults to self.verbose.
+        """
+        if ftype not in ['stc', 'w']:
+            raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
+
+        if ftype == 'stc':
+            logger.info('Writing STC to disk...')
+            if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
+                fname += '-vl.stc'
+            _write_stc(fname, tmin=self.tmin, tstep=self.tstep,
+                       vertices=self.vertices, data=self.data)
+        elif ftype == 'w':
+            logger.info('Writing STC to disk (w format)...')
+            if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
+                fname += '-vl.w'
+            _write_w(fname, vertices=self.vertices, data=self.data)
+
+        logger.info('[done]')
+
+    def save_as_volume(self, fname, src, dest='mri', mri_resolution=False):
+        """Save a volume source estimate in a nifti file
+
+        Parameters
+        ----------
+        fname : string
+            The name of the generated nifti file.
+        src : list
+            The list of source spaces (should be of length 1).
+        dest : 'mri' | 'surf'
+            If 'mri' the volume is defined in the coordinate system of
+            the original T1 image. If 'surf' the coordinate system
+            of the FreeSurfer surface is used (Surface RAS).
+        mri_resolution : bool
+            If True the image is saved in MRI resolution.
+            WARNING: if you have many time points the file produced can be
+            huge.
+
+        Returns
+        -------
+        img : instance of Nifti1Image
+            The image object.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        return save_stc_as_volume(fname, self, src, dest=dest,
+                                  mri_resolution=mri_resolution)
+
+    def as_volume(self, src, dest='mri', mri_resolution=False):
+        """Export volume source estimate as a nifti object
+
+        Parameters
+        ----------
+        src : list
+            The list of source spaces (should be of length 1).
+        dest : 'mri' | 'surf'
+            If 'mri' the volume is defined in the coordinate system of
+            the original T1 image. If 'surf' the coordinate system
+            of the FreeSurfer surface is used (Surface RAS).
+        mri_resolution : bool
+            If True the image is saved in MRI resolution.
+            WARNING: if you have many time points the file produced can be
+            huge.
+
+        Returns
+        -------
+        img : instance of Nifti1Image
+            The image object.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        return save_stc_as_volume(None, self, src, dest=dest,
+                                  mri_resolution=mri_resolution)
+
+    def __repr__(self):
+        if isinstance(self.vertices, list):
+            nv = sum([len(v) for v in self.vertices])
+        else:
+            nv = self.vertices.size
+        s = "%d vertices" % nv
+        if self.subject is not None:
+            s += ", subject : %s" % self.subject
+        s += ", tmin : %s (ms)" % (1e3 * self.tmin)
+        s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
+        s += ", tstep : %s (ms)" % (1e3 * self.tstep)
+        s += ", data size : %s x %s" % self.shape
+        return "<VolSourceEstimate  |  %s>" % s
+
+    def get_peak(self, tmin=None, tmax=None, mode='abs',
+                 vert_as_index=False, time_as_index=False):
+        """Get location and latency of peak amplitude
+
+        Parameters
+        ----------
+        tmin : float | None
+            The minimum point in time to be considered for peak detection.
+        tmax : float | None
+            The maximum point in time to be considered for peak detection.
+        mode : {'pos', 'neg', 'abs'}
+            How to deal with the sign of the data. If 'pos' only positive
+            values will be considered. If 'neg' only negative values will
+            be considered. If 'abs' absolute values will be considered.
+            Defaults to 'abs'.
+        vert_as_index : bool
+            Whether to return the vertex index instead of its ID.
+            Defaults to False.
+        time_as_index : bool
+            Whether to return the time index instead of the latency.
+            Defaults to False.
+
+        Returns
+        -------
+        pos : int
+            The vertex exhibiting the maximum response, either ID or index.
+        latency : float | int
+            The time point of the maximum response, either latency in seconds
+            or index.
+        """
+
+        vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
+                                       mode)
+
+        return (vert_idx if vert_as_index else self.vertices[vert_idx],
+                time_idx if time_as_index else self.times[time_idx])
+
+
+class MixedSourceEstimate(_BaseSourceEstimate):
+    """Container for mixed surface and volume source estimates
+
+    Parameters
+    ----------
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : list of arrays
+        Vertex numbers corresponding to the data.
+    tmin : scalar
+        Time point of the first sample in data.
+    tstep : scalar
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertices : list of arrays of shape (n_dipoles,)
+        The indices of the dipoles in each source space.
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
+
+        if not isinstance(vertices, list) or len(vertices) < 2:
+            raise ValueError('Vertices must be a list of numpy arrays with '
+                             'one array per source space.')
+
+        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
+                                     tstep=tstep, subject=subject,
+                                     verbose=verbose)
+
+    def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
+                     colormap='auto', time_label='time=%02.f ms',
+                     smoothing_steps=10,
+                     transparent=None, alpha=1.0, time_viewer=False,
+                     config_opts=None, subjects_dir=None, figure=None,
+                     views='lat', colorbar=True, clim='auto'):
+        """Plot surface source estimates with PySurfer
+
+        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+        which will automatically be set by this function. Plotting multiple
+        SourceEstimates with different values for subjects_dir will cause
+        PySurfer to use the wrong FreeSurfer surfaces when using methods of
+        the returned Brain object. It is therefore recommended to set the
+        SUBJECTS_DIR environment variable or always use the same value for
+        subjects_dir (within the same Python session).
+
+        Parameters
+        ----------
+        src : SourceSpaces
+            The source spaces to plot.
+        subject : str | None
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT. If None stc.subject will be used. If that
+            is None, the environment will be used.
+        surface : str
+            The type of surface (inflated, white etc.).
+        hemi : str, 'lh' | 'rh' | 'split' | 'both'
+            The hemisphere to display. Using 'both' or 'split' requires
+            PySurfer version 0.4 or above.
+        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
+            Name of colormap to use. See `plot_source_estimates`.
+        time_label : str
+            How to print info about the time instant visualized.
+        smoothing_steps : int
+            The amount of smoothing.
+        transparent : bool | None
+            If True, use a linear transparency between fmin and fmid.
+            None will choose automatically based on colormap type.
+        alpha : float
+            Alpha value to apply globally to the overlay.
+        time_viewer : bool
+            Display time viewer GUI.
+        config_opts : dict
+            Keyword arguments for Brain initialization.
+            See pysurfer.viz.Brain.
+        subjects_dir : str
+            The path to the FreeSurfer subjects reconstructions.
+            It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
+        figure : instance of mayavi.core.scene.Scene | None
+            If None, the last figure will be cleaned and a new figure will
+            be created.
+        views : str | list
+            View to use. See surfer.Brain().
+        colorbar : bool
+            If True, display colorbar on scene.
+        clim : str | dict
+            Colorbar properties specification. See `plot_source_estimates`.
+
+        Returns
+        -------
+        brain : Brain
+            An instance of surfer.viz.Brain from PySurfer.
+        """
+
+        # extract surface source spaces
+        src = _ensure_src(src)
+        surf = [s for s in src if s['type'] == 'surf']
+        if len(surf) != 2:
+            raise ValueError('Source space must contain exactly two surfaces.')
+
+        # extract surface source estimate
+        data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
+        vertices = [s['vertno'] for s in surf]
+
+        stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
+                             self.subject, self.verbose)
+
+        return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
+                                     colormap=colormap, time_label=time_label,
+                                     smoothing_steps=smoothing_steps,
+                                     transparent=transparent, alpha=alpha,
+                                     time_viewer=time_viewer,
+                                     config_opts=config_opts,
+                                     subjects_dir=subjects_dir, figure=figure,
+                                     views=views, colorbar=colorbar, clim=clim)
+
+
+###############################################################################
+# Morphing
+
+
+@verbose
+def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
+                  warn=True, verbose=None):
+    """Morph data from one subject's source space to another
+
+    Parameters
+    ----------
+    data : array, or csr sparse matrix
+        A n_vertices x n_times (or other dimension) dataset to morph.
+    idx_use : array of int
+        Vertices from the original subject's data.
+    e : sparse matrix
+        The mesh edges of the "from" subject.
+    smooth : int
+        Number of smoothing iterations to perform. A hard limit of 100 is
+        also imposed.
+    n_vertices : int
+        Number of vertices.
+    nearest : array of int
+        Vertices on the destination surface to use.
+    maps : sparse matrix
+        Morph map from one subject to the other.
+    warn : bool
+        If True, warn if not all vertices were used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    data_morphed : array, or csr sparse matrix
+        The morphed data (same type as input).
+    """
+
+    n_iter = 99  # max nb of smoothing iterations (minus one)
+    if smooth is not None:
+        if smooth <= 0:
+            raise ValueError('The number of smoothing operations ("smooth") '
+                             'has to be at least 1.')
+        smooth -= 1
+    # make sure we're in CSR format
+    e = e.tocsr()
+    if sparse.issparse(data):
+        use_sparse = True
+        if not isinstance(data, sparse.csr_matrix):
+            data = data.tocsr()
+    else:
+        use_sparse = False
+    done = False
+    # do the smoothing
+    for k in range(n_iter + 1):
+        # get the row sum
+        mult = np.zeros(e.shape[1])
+        mult[idx_use] = 1
+        idx_use_data = idx_use
+        data_sum = e * mult
+
+        # new indices are non-zero sums
+        idx_use = np.where(data_sum)[0]
+
+        # typically want to make the next iteration have these indices
+        idx_out = idx_use
+
+        # figure out if this is the last iteration
+        if smooth is None:
+            if k == n_iter or len(idx_use) >= n_vertices:
+                # stop when vertices filled
+                idx_out = None
+                done = True
+        elif k == smooth:
+            idx_out = None
+            done = True
+
+        # do standard smoothing multiplication
+        data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
+
+        if done is True:
+            break
+
+        # do standard normalization
+        if use_sparse:
+            data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
+        else:
+            data /= data_sum[idx_use][:, None]
+
+    # do special normalization for last iteration
+    if use_sparse:
+        data_sum[data_sum == 0] = 1
+        data.data /= data_sum.repeat(np.diff(data.indptr))
+    else:
+        data[idx_use, :] /= data_sum[idx_use][:, None]
+    if len(idx_use) != len(data_sum) and warn:
+        warnings.warn('%s/%s vertices not included in smoothing, consider '
+                      'increasing the number of steps'
+                      % (len(data_sum) - len(idx_use), len(data_sum)))
+
+    logger.info('    %d smooth iterations done.' % (k + 1))
+    data_morphed = maps[nearest, :] * data
+    return data_morphed
+
+
+def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
+    """Helper for morphing
+
+    Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
+    but faster.
+    """
+    if len(idx_use_data) < e.shape[1]:
+        if use_sparse:
+            data = e[:, idx_use_data] * data
+        else:
+            # constructing a new sparse matrix is faster than sub-indexing
+            # e[:, idx_use_data]!
+            col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
+            d_sparse = sparse.csr_matrix((data.ravel(),
+                                          (row.ravel(), col.ravel())),
+                                         shape=(e.shape[1], data.shape[1]))
+            data = e * d_sparse
+            data = np.asarray(data.todense())
+    else:
+        data = e * data
+
+    # trim data
+    if idx_use_out is not None:
+        data = data[idx_use_out]
+    return data
+
+
+def _get_subject_sphere_tris(subject, subjects_dir):
+    spheres = [os.path.join(subjects_dir, subject, 'surf',
+                            xh + '.sphere.reg') for xh in ['lh', 'rh']]
+    tris = [read_surface(s)[1] for s in spheres]
+    return tris
+
+
+def _sparse_argmax_nnz_row(csr_mat):
+    """Return index of the maximum non-zero index in each row
+    """
+    n_rows = csr_mat.shape[0]
+    idx = np.empty(n_rows, dtype=np.int)
+    for k in range(n_rows):
+        row = csr_mat[k].tocoo()
+        idx[k] = row.col[np.argmax(row.data)]
+    return idx
+
+
+def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
+    """Morph sparse source estimates to an other subject
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The sparse STC.
+    subject_from : str
+        The subject on which stc is defined.
+    subject_to : str
+        The target subject.
+    subjects_dir : str
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    stc_morph : SourceEstimate
+        The morphed source estimates.
+    """
+    maps = read_morph_map(subject_to, subject_from, subjects_dir)
+    stc_morph = stc.copy()
+    stc_morph.subject = subject_to
+
+    cnt = 0
+    for k, hemi in enumerate(['lh', 'rh']):
+        if stc.vertices[k].size > 0:
+            map_hemi = maps[k]
+            vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertices[k]])
+            order = np.argsort(vertno_k)
+            n_active_hemi = len(vertno_k)
+            data_hemi = stc_morph._data[cnt:cnt + n_active_hemi]
+            stc_morph._data[cnt:cnt + n_active_hemi] = data_hemi[order]
+            stc_morph.vertices[k] = vertno_k[order]
+            cnt += n_active_hemi
+        else:
+            stc_morph.vertices[k] = np.array([], int)
+
+    return stc_morph
+
+
+@verbose
+def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
+               subjects_dir=None, buffer_size=64, n_jobs=1, warn=True,
+               verbose=None):
+    """Morph a source estimate from one subject to another
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR
+    stc_from : SourceEstimate
+        Source estimates for subject "from" to morph
+    grade : int, list (of two arrays), or None
+        Resolution of the icosahedral mesh (typically 5). If None, all
+        vertices will be used (potentially filling the surface). If a list,
+        then values will be morphed to the set of vertices specified in
+        grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+        standard grade 5 source space) can be substantially faster than
+        computing vertex locations. Note that if subject='fsaverage'
+        and grade=5, this set of vertices will automatically be used
+        (instead of computed) for speed, since this is a common morph.
+    smooth : int or None
+        Number of iterations for the smoothing of the surface data.
+        If None, smooth is automatically defined to fill the surface
+        with non-zero values.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    buffer_size : int
+        Morph data in chunks of `buffer_size` time instants.
+        Saves memory when morphing long time intervals.
+    n_jobs : int
+        Number of jobs to run in parallel
+    warn : bool
+        If True, warn if not all vertices were used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    stc_to : SourceEstimate
+        Source estimate for the destination subject.
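+
+    Examples
+    --------
+    A sketch; the subject names assume a standard SUBJECTS_DIR:
+
+    >>> stc_fs = mne.morph_data('sample', 'fsaverage', stc, grade=5)  # doctest: +SKIP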
+    """
+    if not isinstance(stc_from, SourceEstimate):
+        raise ValueError('Morphing is only possible with surface source '
+                         'estimates')
+
+    logger.info('Morphing data...')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
+    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
+    maps = read_morph_map(subject_from, subject_to, subjects_dir)
+
+    # morph the data
+    data = [stc_from.lh_data, stc_from.rh_data]
+    data_morphed = [None, None]
+
+    n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))
+
+    parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)
+
+    for hemi in [0, 1]:
+        e = mesh_edges(tris[hemi])
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+        idx_use = stc_from.vertices[hemi]
+        if len(idx_use) == 0:
+            continue
+        data_morphed[hemi] = np.concatenate(
+            parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
+                                     n_vertices, nearest[hemi], maps[hemi],
+                                     warn=warn)
+                     for data_buffer
+                     in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
+
+    vertices = [nearest[0], nearest[1]]
+    if data_morphed[0] is None:
+        if data_morphed[1] is None:
+            data = np.r_[[], []]
+            vertices = [np.array([], int), np.array([], int)]
+        else:
+            data = data_morphed[1]
+            vertices = [np.array([], int), vertices[1]]
+    elif data_morphed[1] is None:
+        data = data_morphed[0]
+        vertices = [vertices[0], np.array([], int)]
+    else:
+        data = np.r_[data_morphed[0], data_morphed[1]]
+
+    stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
+                            subject=subject_to, verbose=stc_from.verbose)
+    logger.info('[done]')
+
+    return stc_to
+
+
+@verbose
+def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
+                         smooth=None, subjects_dir=None, warn=True,
+                         verbose=None):
+    """Get a matrix that morphs data from one subject to another
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR
+    vertices_from : list of arrays of int
+        Vertices for each hemisphere (LH, RH) for subject_from
+    vertices_to : list of arrays of int
+        Vertices for each hemisphere (LH, RH) for subject_to
+    smooth : int or None
+        Number of iterations for the smoothing of the surface data.
+        If None, smooth is automatically defined to fill the surface
+        with non-zero values.
+    subjects_dir : string
+        Path to SUBJECTS_DIR if it is not set in the environment
+    warn : bool
+        If True, warn if not all vertices were used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    morph_matrix : sparse matrix
+        Matrix that morphs data from subject_from to subject_to
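+
+    Examples
+    --------
+    A sketch; the returned matrix can be reused for many STCs of the
+    same subject:
+
+    >>> vertices_to = mne.grade_to_vertices('fsaverage', grade=5)  # doctest: +SKIP
+    >>> mm = mne.compute_morph_matrix('sample', 'fsaverage',
+    ...                               stc.vertices, vertices_to)  # doctest: +SKIP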
+    """
+    logger.info('Computing morph matrix...')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    tris = _get_subject_sphere_tris(subject_from, subjects_dir)
+    maps = read_morph_map(subject_from, subject_to, subjects_dir)
+
+    morpher = [None] * 2
+    for hemi in [0, 1]:
+        e = mesh_edges(tris[hemi])
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+        idx_use = vertices_from[hemi]
+        if len(idx_use) == 0:
+            morpher[hemi] = []
+            continue
+        m = sparse.eye(len(idx_use), len(idx_use), format='csr')
+        morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
+                                      vertices_to[hemi], maps[hemi], warn=warn)
+    # be careful about zero-length arrays
+    if isinstance(morpher[0], list):
+        morpher = morpher[1]
+    elif isinstance(morpher[1], list):
+        morpher = morpher[0]
+    else:
+        morpher = sparse_block_diag(morpher, format='csr')
+    logger.info('[done]')
+    return morpher
+
+
+@verbose
+def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
+                      verbose=None):
+    """Convert a grade to source space vertices for a given subject
+
+    Parameters
+    ----------
+    subject : str
+        Name of the subject
+    grade : int, list (of two arrays), or None
+        Resolution of the icosahedral mesh (typically 5). If None, all
+        vertices will be used (potentially filling the surface). If a list,
+        then values will be morphed to the set of vertices specified in
+        grade[0] and grade[1]. Note that specifying the vertices (e.g.,
+        grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+        standard grade 5 source space) can be substantially faster than
+        computing vertex locations. Note that if subject='fsaverage'
+        and grade=5, this set of vertices will automatically be used
+        (instead of computed) for speed, since this is a common morph.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    vertices : list of arrays of int
+        Vertex numbers for LH and RH
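+
+    Examples
+    --------
+    A sketch for the common fsaverage case:
+
+    >>> vertices = mne.grade_to_vertices('fsaverage', grade=5)  # doctest: +SKIP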
+    """
+    # add special case for fsaverage for speed
+    if subject == 'fsaverage' and grade == 5:
+        return [np.arange(10242), np.arange(10242)]
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    spheres_to = [os.path.join(subjects_dir, subject, 'surf',
+                               xh + '.sphere.reg') for xh in ['lh', 'rh']]
+    lhs, rhs = [read_surface(s)[0] for s in spheres_to]
+
+    if grade is not None:  # fill a subset of vertices
+        if isinstance(grade, list):
+            if not len(grade) == 2:
+                raise ValueError('grade as a list must have two elements '
+                                 '(arrays of output vertices)')
+            vertices = grade
+        else:
+            # find which vertices to use in "to mesh"
+            ico = _get_ico_tris(grade, return_surf=True)
+            lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
+            rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
+
+            # Compute nearest vertices in high dim mesh
+            parallel, my_compute_nearest, _ = \
+                parallel_func(_compute_nearest, n_jobs)
+            lhs, rhs, rr = [a.astype(np.float32)
+                            for a in [lhs, rhs, ico['rr']]]
+            vertices = parallel(my_compute_nearest(xhs, rr)
+                                for xhs in [lhs, rhs])
+            # Make sure the vertices are ordered
+            vertices = [np.sort(verts) for verts in vertices]
+    else:  # potentially fill the surface
+        vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
+
+    return vertices
+
+
+def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
+                           morph_mat):
+    """Morph source estimate between subjects using a precomputed matrix
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR.
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR.
+    stc_from : SourceEstimate
+        Source estimates for subject "from" to morph.
+    vertices_to : list of array of int
+        The vertices on the destination subject's brain.
+    morph_mat : sparse matrix
+        The morphing matrix, typically from compute_morph_matrix.
+
+    Returns
+    -------
+    stc_to : SourceEstimate
+        Source estimate for the destination subject.
+    """
+    if not sparse.issparse(morph_mat):
+        raise ValueError('morph_mat must be a sparse matrix')
+
+    if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
+        raise ValueError('vertices_to must be a list of length 2')
+
+    if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
+        raise ValueError('number of vertices in vertices_to must match '
+                         'morph_mat.shape[0]')
+    if not stc_from.data.shape[0] == morph_mat.shape[1]:
+        raise ValueError('stc_from.data.shape[0] must be the same as '
+                         'morph_mat.shape[1]')
+
+    if stc_from.subject is not None and stc_from.subject != subject_from:
+        raise ValueError('stc_from.subject and subject_from must match')
+    data = morph_mat * stc_from.data
+    stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep,
+                            verbose=stc_from.verbose, subject=subject_to)
+    return stc_to
+
+
+@verbose
+def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
+    """Compute connectivity for a source space activation over time
+
+    Parameters
+    ----------
+    src : source space
+        The source space.
+    n_times : int
+        Number of time instants.
+    dist : float, or None
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors. If None, immediate neighbors
+        are extracted from an ico surface.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N+1 to 2N are the
+        vertices at time 2, etc.
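+
+    Examples
+    --------
+    A sketch; src and stc are assumed to exist:
+
+    >>> connectivity = mne.spatio_temporal_src_connectivity(
+    ...     src, n_times=len(stc.times))  # doctest: +SKIP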
+    """
+    if dist is None:
+        if src[0]['use_tris'] is None:
+            raise RuntimeError("The source space does not appear to be an ico "
+                               "surface. Connectivity cannot be extracted from"
+                               " non-ico source spaces.")
+        used_verts = [np.unique(s['use_tris']) for s in src]
+        lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
+        rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
+        tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1))
+        connectivity = spatio_temporal_tris_connectivity(tris, n_times)
+
+        # deal with source space only using a subset of vertices
+        masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
+        if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
+            raise ValueError('Used vertices do not match connectivity shape')
+        if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
+            raise ValueError('Vertex mask does not match number of vertices')
+        masks = np.concatenate(masks)
+        missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
+        if missing:
+            warnings.warn('%0.1f%% of original source space vertices have been'
+                          ' omitted, tri-based connectivity will have holes.\n'
+                          'Consider using distance-based connectivity or '
+                          'morphing data to all source space vertices.'
+                          % missing)
+            masks = np.tile(masks, n_times)
+            masks = np.where(masks)[0]
+            connectivity = connectivity.tocsr()
+            connectivity = connectivity[masks]
+            connectivity = connectivity[:, masks]
+            # return to original format
+            connectivity = connectivity.tocoo()
+
+        return connectivity
+    else:  # use distances computed and saved in the source space file
+        return spatio_temporal_dist_connectivity(src, n_times, dist)
+
+
+@verbose
+def grade_to_tris(grade, verbose=None):
+    """Get tris defined for a certain grade
+
+    Parameters
+    ----------
+    grade : int
+        Grade of an icosahedral mesh.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    tris : array
+        Array of tris for both hemispheres (right-hemisphere vertex
+        indices offset past the left), suitable for use in
+        spatio_temporal_tris_connectivity.
+    """
+    a = _get_ico_tris(grade, None, False)
+    tris = np.concatenate((a, a + (np.max(a) + 1)))
+    return tris
+
+
+@verbose
+def spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,
+                                      verbose=None):
+    """Compute connectivity from triangles and time instants
+
+    Parameters
+    ----------
+    tris : array
+        N x 3 array defining triangles.
+    n_times : int
+        Number of time points.
+    remap_vertices : bool
+        Reassign vertex indices based on unique values. Useful
+        to process a subset of triangles. Defaults to False.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N + 1 to 2N are the
+        vertices at time 2, etc.
+    """
+    if remap_vertices:
+        logger.info('Reassigning vertex indices.')
+        tris = np.searchsorted(np.unique(tris), tris)
+
+    edges = mesh_edges(tris).tocoo()
+    return _get_connectivity_from_edges(edges, n_times)
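+
+# A minimal sketch combining grade_to_tris with the function above (ico-5
+# has 10242 vertices per hemisphere, so all indices are already contiguous):
+#
+#     >>> tris = grade_to_tris(5)  # tris for both hemispheres
+#     >>> connectivity = spatio_temporal_tris_connectivity(tris, n_times=2)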
+
+
+@verbose
+def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
+    """Compute connectivity from distances in a source space and time instants
+
+    Parameters
+    ----------
+    src : source space
+        The source space must have distances between vertices computed,
+        such that src[n]['dist'] is a valid distance matrix. This can be
+        obtained using MNE with a call to mne_add_patch_info with the
+        --dist option.
+    n_times : int
+        Number of time points.
+    dist : float
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatio-temporal
+        graph structure. If N is the number of vertices in the
+        source space, the first N nodes in the graph are the
+        vertices at time 1, the nodes from N + 1 to 2N are the
+        vertices at time 2, etc.
+    """
+    if src[0]['dist'] is None:
+        raise RuntimeError('src must have distances included, consider using\n'
+                           'mne_add_patch_info with --dist argument')
+    edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
+                              for s in src])
+    edges.data[:] = np.less_equal(edges.data, dist)
+    # clean it up and put it in coo format
+    edges = edges.tocsr()
+    edges.eliminate_zeros()
+    edges = edges.tocoo()
+    return _get_connectivity_from_edges(edges, n_times)
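+
+# A minimal usage sketch (assumes src carries precomputed distances, e.g.
+# from mne_add_patch_info --dist; note dist is given in meters):
+#
+#     >>> connectivity = spatio_temporal_dist_connectivity(src, n_times=10,
+#     ...                                                  dist=0.005)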
+
+
+@verbose
+def spatial_src_connectivity(src, dist=None, verbose=None):
+    """Compute connectivity for a source space activation
+
+    Parameters
+    ----------
+    src : source space
+        The source space.
+    dist : float, or None
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors. If None, immediate neighbors
+        are extracted from an ico surface.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_src_connectivity(src, 1, dist)
+
+
+@verbose
+def spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):
+    """Compute connectivity from triangles
+
+    Parameters
+    ----------
+    tris : array
+        N x 3 array defining triangles.
+    remap_vertices : bool
+        Reassign vertex indices based on unique values. Useful
+        to process a subset of triangles. Defaults to False.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)
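+
+# Worked example for the spatial-only case: a single triangle connects all
+# three vertex pairs in both directions, giving a symmetric 3 x 3 matrix:
+#
+#     >>> tris = np.array([[0, 1, 2]])
+#     >>> connectivity = spatial_tris_connectivity(tris)
+#     >>> connectivity.todense()  # nonzero at (0,1), (1,0), (0,2), ...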
+
+
+def spatial_dist_connectivity(src, dist, verbose=None):
+    """Compute connectivity from distances in a source space
+
+    Parameters
+    ----------
+    src : source space
+        The source space must have distances between vertices computed,
+        such that src[n]['dist'] is a valid distance matrix. This can be
+        obtained using MNE with a call to mne_add_patch_info with the
+        --dist option.
+    dist : float
+        Maximal geodesic distance (in m) between vertices in the
+        source space to consider neighbors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    connectivity : sparse COO matrix
+        The connectivity matrix describing the spatial graph structure.
+    """
+    return spatio_temporal_dist_connectivity(src, 1, dist)
+
+
+@verbose
+def _get_connectivity_from_edges(edges, n_times, verbose=None):
+    """Given edges sparse matrix, create connectivity matrix"""
+    n_vertices = edges.shape[0]
+    logger.info("-- number of connected vertices : %d" % n_vertices)
+    nnz = edges.col.size
+    aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)
+    col = (edges.col[None, :] + aux).ravel()
+    row = (edges.row[None, :] + aux).ravel()
+    if n_times > 1:  # add temporal edges
+        o = (n_vertices * np.arange(n_times - 1)[:, None] +
+             np.arange(n_vertices)[None, :]).ravel()
+        d = (n_vertices * np.arange(1, n_times)[:, None] +
+             np.arange(n_vertices)[None, :]).ravel()
+        row = np.concatenate((row, o, d))
+        col = np.concatenate((col, d, o))
+    data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
+                   dtype=np.int)
+    connectivity = coo_matrix((data, (row, col)),
+                              shape=(n_times * n_vertices, ) * 2)
+    return connectivity
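+
+# Worked example of the node layout used above: with n_vertices = 3 and
+# n_times = 2, spatial node v at time t maps to graph index t * 3 + v, so
+# the temporal edges added are (0, 3), (1, 4), (2, 5) in both directions.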
+
+
+@verbose
+def _get_ico_tris(grade, verbose=None, return_surf=False):
+    """Get triangles for ico surface."""
+    ico = _get_ico_surface(grade)
+    if not return_surf:
+        return ico['tris']
+    else:
+        return ico
+
+
+def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
+    """Save a volume source estimate in a nifti file
+
+    Parameters
+    ----------
+    fname : string | None
+        The name of the generated nifti file. If None, the image is only
+        returned and not saved.
+    stc : instance of VolSourceEstimate
+        The source estimate
+    src : list
+        The list of source spaces (should actually be of length 1)
+    dest : 'mri' | 'surf'
+        If 'mri' the volume is defined in the coordinate system of
+        the original T1 image. If 'surf' the coordinate system
+        of the FreeSurfer surface is used (Surface RAS).
+    mri_resolution : bool
+        If True the image is saved in MRI resolution.
+        WARNING: if you have many time points the file produced can be
+        huge.
+
+    Returns
+    -------
+    img : instance of Nifti1Image
+        The image object.
+    """
+    if not isinstance(stc, VolSourceEstimate):
+        raise Exception('Only volume source estimates can be saved as '
+                        'volumes')
+
+    n_times = stc.data.shape[1]
+    shape = src[0]['shape']
+    shape3d = (shape[2], shape[1], shape[0])
+    shape = (n_times, shape[2], shape[1], shape[0])
+    vol = np.zeros(shape)
+    mask3d = src[0]['inuse'].reshape(shape3d).astype(np.bool)
+
+    if mri_resolution:
+        mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
+                       src[0]['mri_width'])
+        mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
+                     src[0]['mri_width'])
+        mri_vol = np.zeros(mri_shape)
+        interpolator = src[0]['interpolator']
+
+    for k, v in enumerate(vol):
+        v[mask3d] = stc.data[:, k]
+        if mri_resolution:
+            mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)
+
+    if mri_resolution:
+        vol = mri_vol
+
+    vol = vol.T
+
+    if mri_resolution:
+        affine = src[0]['vox_mri_t']['trans'].copy()
+    else:
+        affine = src[0]['src_mri_t']['trans'].copy()
+    if dest == 'mri':
+        affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
+    affine[:3] *= 1e3
+
+    try:
+        import nibabel as nib  # lazy import to avoid dependency
+    except ImportError:
+        raise ImportError("nibabel is required to save volume images.")
+
+    header = nib.nifti1.Nifti1Header()
+    header.set_xyzt_units('mm', 'msec')
+    header['pixdim'][4] = 1e3 * stc.tstep
+    with warnings.catch_warnings(record=True):  # nibabel<->numpy warning
+        img = nib.Nifti1Image(vol, affine, header=header)
+        if fname is not None:
+            nib.save(img, fname)
+    return img
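+
+# A minimal usage sketch (hypothetical names; assumes a volume source
+# estimate vol_stc and its matching volume source space vol_src exist):
+#
+#     >>> img = save_stc_as_volume('stc.nii.gz', vol_stc, vol_src)
+#     >>> img = save_stc_as_volume(None, vol_stc, vol_src)  # no file saved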
+
+
+def _get_label_flip(labels, label_vertidx, src):
+    """Helper function to get sign-flip for labels"""
+    # do the import here to avoid circular dependency
+    from .label import label_sign_flip
+    # get the sign-flip vector for every label
+    label_flip = list()
+    for label, vertidx in zip(labels, label_vertidx):
+        if label.hemi == 'both':
+            raise ValueError('BiHemiLabel not supported when using sign-flip')
+        if vertidx is not None:
+            flip = label_sign_flip(label, src)[:, None]
+        else:
+            flip = None
+        label_flip.append(flip)
+
+    return label_flip
+
+
+@verbose
+def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
+                                   allow_empty=False, verbose=None):
+    """Generator for extract_label_time_course"""
+
+    n_labels = len(labels)
+
+    # get vertices from source space, they have to be the same as in the stcs
+    vertno = [s['vertno'] for s in src]
+    nvert = [len(vn) for vn in vertno]
+
+    # do the initialization
+    label_vertidx = list()
+    for label in labels:
+        if label.hemi == 'both':
+            # handle BiHemiLabel
+            sub_labels = [label.lh, label.rh]
+        else:
+            sub_labels = [label]
+        this_vertidx = list()
+        for slabel in sub_labels:
+            if slabel.hemi == 'lh':
+                this_vertno = np.intersect1d(vertno[0], slabel.vertices)
+                vertidx = np.searchsorted(vertno[0], this_vertno)
+            elif slabel.hemi == 'rh':
+                this_vertno = np.intersect1d(vertno[1], slabel.vertices)
+                vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
+            else:
+                raise ValueError('label %s has invalid hemi' % label.name)
+            this_vertidx.append(vertidx)
+
+        # convert it to an array
+        this_vertidx = np.concatenate(this_vertidx)
+        if len(this_vertidx) == 0:
+            msg = ('source space does not contain any vertices for label %s'
+                   % label.name)
+            if not allow_empty:
+                raise ValueError(msg)
+            else:
+                logger.warning(msg + '. Assigning all-zero time series to '
+                               'label.')
+            this_vertidx = None  # to later check if label is empty
+
+        label_vertidx.append(this_vertidx)
+
+    # mode-dependent initialization
+    if mode == 'mean':
+        pass  # we have this here to catch invalid values for mode
+    elif mode == 'mean_flip':
+        # get the sign-flip vector for every label
+        label_flip = _get_label_flip(labels, label_vertidx, src)
+    elif mode == 'pca_flip':
+        # get the sign-flip vector for every label
+        label_flip = _get_label_flip(labels, label_vertidx, src)
+    elif mode == 'max':
+        pass  # we calculate the maximum value later
+    else:
+        raise ValueError('%s is an invalid mode' % mode)
+
+    # loop through source estimates and extract time series
+    for stc in stcs:
+        # make sure the stc is compatible with the source space
+        if len(stc.vertices[0]) != nvert[0] or \
+                len(stc.vertices[1]) != nvert[1]:
+            raise ValueError('stc not compatible with source space')
+        if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):
+            raise ValueError('stc not compatible with source space')
+
+        logger.info('Extracting time courses for %d labels (mode: %s)'
+                    % (n_labels, mode))
+
+        # do the extraction
+        label_tc = np.zeros((n_labels, stc.data.shape[1]),
+                            dtype=stc.data.dtype)
+        if mode == 'mean':
+            for i, vertidx in enumerate(label_vertidx):
+                if vertidx is not None:
+                    label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
+        elif mode == 'mean_flip':
+            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
+                                                    label_flip)):
+                if vertidx is not None:
+                    label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
+        elif mode == 'pca_flip':
+            for i, (vertidx, flip) in enumerate(zip(label_vertidx,
+                                                    label_flip)):
+                if vertidx is not None:
+                    U, s, V = linalg.svd(stc.data[vertidx, :],
+                                         full_matrices=False)
+                    # determine sign-flip
+                    sign = np.sign(np.dot(U[:, 0], flip))
+
+                    # use average power in label for scaling
+                    scale = linalg.norm(s) / np.sqrt(len(vertidx))
+
+                    label_tc[i] = sign * scale * V[0]
+        elif mode == 'max':
+            for i, vertidx in enumerate(label_vertidx):
+                if vertidx is not None:
+                    label_tc[i] = np.max(np.abs(stc.data[vertidx, :]), axis=0)
+        else:
+            raise ValueError('%s is an invalid mode' % mode)
+
+        # this is a generator!
+        yield label_tc
+
+
+@verbose
+def extract_label_time_course(stcs, labels, src, mode='mean_flip',
+                              allow_empty=False, return_generator=False,
+                              verbose=None):
+    """Extract label time course for lists of labels and source estimates
+
+    This function will extract one time course for each label and source
+    estimate. The way the time courses are extracted depends on the mode
+    parameter.
+
+    Valid values for mode are:
+
+        - 'mean': Average within each label.
+        - 'mean_flip': Average within each label with sign flip depending
+          on source orientation.
+        - 'pca_flip': Apply an SVD to the time courses within each label
+          and use the scaled and sign-flipped first right-singular vector
+          as the label time course. The scaling is performed such that the
+          power of the label time course is the same as the average
+          per-vertex time course power within the label. The sign of the
+          resulting time course is adjusted by multiplying it with
+          "sign(dot(u, flip))" where u is the first left-singular vector,
+          and flip is a sign-flip vector based on the vertex normals. This
+          procedure ensures that the phase does not randomly change by 180
+          degrees from one stc to the next.
+        - 'max': Max value within each label.
+
+
+    Parameters
+    ----------
+    stcs : SourceEstimate | list (or generator) of SourceEstimate
+        The source estimates from which to extract the time course.
+    labels : Label | list of Label
+        The labels for which to extract the time course.
+    src : list
+        Source spaces for left and right hemisphere.
+    mode : str
+        Extraction mode, see explanation above.
+    allow_empty : bool
+        Instead of emitting an error, return all-zero time courses for labels
+        that do not have any vertices in the source estimate.
+    return_generator : bool
+        If True, a generator instead of a list is returned.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    label_tc : array | list (or generator) of array, shape=(len(labels), n_times)
+        Extracted time course for each label and source estimate.
+    """  # noqa
+    # convert inputs to lists
+    if isinstance(stcs, SourceEstimate):
+        stcs = [stcs]
+        return_several = False
+        return_generator = False
+    else:
+        return_several = True
+
+    if not isinstance(labels, list):
+        labels = [labels]
+
+    label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
+                                              allow_empty=allow_empty)
+
+    if not return_generator:
+        # do the extraction and return a list
+        label_tc = list(label_tc)
+
+    if not return_several:
+        # input was a single SourceEstimate, return a single array
+        label_tc = label_tc[0]
+
+    return label_tc
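+
+# A minimal usage sketch (hypothetical names; assumes source estimates,
+# labels and the matching source spaces are already in memory):
+#
+#     >>> label_tc = extract_label_time_course(stcs, labels, src,
+#     ...                                      mode='mean_flip')
+#     >>> # one (len(labels), n_times) array per input source estimate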
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_space.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_space.py
new file mode 100644
index 0000000..4d99e0e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/source_space.py
@@ -0,0 +1,2584 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os
+import os.path as op
+from scipy import sparse, linalg
+from copy import deepcopy
+
+from .io.constants import FIFF
+from .io.tree import dir_tree_find
+from .io.tag import find_tag, read_tag
+from .io.open import fiff_open
+from .io.write import (start_block, end_block, write_int,
+                       write_float_sparse_rcs, write_string,
+                       write_float_matrix, write_int_matrix,
+                       write_coord_trans, start_file, end_file, write_id)
+from .bem import read_bem_surfaces
+from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
+                      _tessellate_sphere_surf, _get_surf_neighbors,
+                      _read_surface_geom, _normalize_vectors,
+                      _complete_surface_info, _compute_nearest,
+                      fast_cross_3d, _fast_cross_nd_sum, mesh_dist,
+                      _triangle_neighbors)
+from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
+                    has_nibabel, check_fname, logger, verbose,
+                    check_version, _get_call_line)
+from .fixes import in1d, partial, gzip_open, meshgrid
+from .parallel import parallel_func, check_n_jobs
+from .transforms import (invert_transform, apply_trans, _print_coord_trans,
+                         combine_transforms, _get_mri_head_t,
+                         _coord_frame_name, Transform)
+from .externals.six import string_types
+
+
+def _get_lut():
+    """Helper to get the FreeSurfer LUT"""
+    data_dir = op.join(op.dirname(__file__), 'data')
+    lut_fname = op.join(data_dir, 'FreeSurferColorLUT.txt')
+    return np.genfromtxt(lut_fname, dtype=None,
+                         usecols=(0, 1), names=['id', 'name'])
+
+
+def _get_lut_id(lut, label, use_lut):
+    """Helper to convert a label to a LUT ID number"""
+    if not use_lut:
+        return 1
+    assert isinstance(label, string_types)
+    mask = (lut['name'] == label.encode('utf-8'))
+    assert mask.sum() == 1
+    return lut['id'][mask]
+
+
+class SourceSpaces(list):
+    """Represent a list of source space
+
+    Currently implemented as a list of dictionaries containing the source
+    space information
+
+    Parameters
+    ----------
+    source_spaces : list
+        A list of dictionaries containing the source space information.
+    info : dict
+        Dictionary with information about the creation of the source space
+        file. Has keys 'working_dir' and 'command_line'.
+
+    Attributes
+    ----------
+    info : dict
+        Dictionary with information about the creation of the source space
+        file. Has keys 'working_dir' and 'command_line'.
+    """
+    def __init__(self, source_spaces, info=None):
+        super(SourceSpaces, self).__init__(source_spaces)
+        if info is None:
+            self.info = dict()
+        else:
+            self.info = dict(info)
+
+    def __repr__(self):
+        ss_repr = []
+        for ss in self:
+            ss_type = ss['type']
+            if ss_type == 'vol':
+                if 'seg_name' in ss:
+                    r = ("'vol' (%s), n_used=%i"
+                         % (ss['seg_name'], ss['nuse']))
+                else:
+                    r = ("'vol', shape=%s, n_used=%i"
+                         % (repr(ss['shape']), ss['nuse']))
+            elif ss_type == 'surf':
+                r = "'surf', n_vertices=%i, n_used=%i" % (ss['np'], ss['nuse'])
+            else:
+                r = "%r" % ss_type
+            coord_frame = ss['coord_frame']
+            if isinstance(coord_frame, np.ndarray):
+                coord_frame = coord_frame[0]
+            r += ', coordinate_frame=%s' % _coord_frame_name(coord_frame)
+            ss_repr.append('<%s>' % r)
+        ss_repr = ', '.join(ss_repr)
+        return "<SourceSpaces: [{ss}]>".format(ss=ss_repr)
+
+    def __add__(self, other):
+        return SourceSpaces(list.__add__(self, other))
+
+    def copy(self):
+        """Make a copy of the source spaces
+
+        Returns
+        -------
+        src : instance of SourceSpaces
+            The copied source spaces.
+        """
+        src = deepcopy(self)
+        return src
+
+    def save(self, fname):
+        """Save the source spaces to a fif file
+
+        Parameters
+        ----------
+        fname : str
+            File to write.
+        """
+        write_source_spaces(fname, self)
+
+    @verbose
+    def export_volume(self, fname, include_surfaces=True,
+                      include_discrete=True, dest='mri', trans=None,
+                      mri_resolution=False, use_lut=True, verbose=None):
+        """Exports source spaces to nifti or mgz file
+
+        Parameters
+        ----------
+        fname : str
+            Name of nifti or mgz file to write.
+        include_surfaces : bool
+            If True, include surface source spaces.
+        include_discrete : bool
+            If True, include discrete source spaces.
+        dest : 'mri' | 'surf'
+            If 'mri' the volume is defined in the coordinate system of the
+            original T1 image. If 'surf' the coordinate system of the
+            FreeSurfer surface is used (Surface RAS).
+        trans : dict, str, or None
+            Either a transformation filename (usually made using mne_analyze)
+            or an info dict (usually opened using read_trans()).
+            If string, an ending of `.fif` or `.fif.gz` will be assumed to be
+            in FIF format, any other ending will be assumed to be a text file
+            with a 4x4 transformation matrix (like the `--trans` MNE-C
+            option). Must be provided if source spaces are in head
+            coordinates and include_surfaces and mri_resolution are True.
+        mri_resolution : bool
+            If True, the image is saved in MRI resolution
+            (e.g. 256 x 256 x 256).
+        use_lut : bool
+            If True, assigns a numeric value to each source space that
+            corresponds to a color on the freesurfer lookup table.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Notes
+        -----
+        This method requires nibabel.
+        """
+
+        # import nibabel or raise error
+        try:
+            import nibabel as nib
+        except ImportError:
+            raise ImportError('This function requires nibabel.')
+
+        # Check coordinate frames of each source space
+        coord_frames = np.array([s['coord_frame'] for s in self])
+
+        # Raise error if trans is not provided when head coordinates are used
+        # and mri_resolution and include_surfaces are true
+        if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
+            coords = 'head'  # all sources in head coordinates
+            if mri_resolution and include_surfaces:
+                if trans is None:
+                    raise ValueError('trans containing mri to head transform '
+                                     'must be provided if mri_resolution and '
+                                     'include_surfaces are true and surfaces '
+                                     'are in head coordinates')
+
+            elif trans is not None:
+                logger.info('trans is not needed and will not be used unless '
+                            'include_surfaces and mri_resolution are True.')
+
+        elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
+            coords = 'mri'  # all sources in mri coordinates
+            if trans is not None:
+                logger.info('trans is not needed and will not be used unless '
+                            'sources are in head coordinates.')
+        # Raise error if all sources are not in the same space, or sources are
+        # not in mri or head coordinates
+        else:
+            raise ValueError('All sources must be in head coordinates or all '
+                             'sources must be in mri coordinates.')
+
+        # use lookup table to assign values to source spaces
+        logger.info('Reading FreeSurfer lookup table')
+        # read the lookup table
+        lut = _get_lut()
+
+        # Setup a dictionary of source types
+        src_types = dict(volume=[], surface=[], discrete=[])
+
+        # Populate dictionary of source types
+        for src in self:
+            # volume sources
+            if src['type'] == 'vol':
+                src_types['volume'].append(src)
+            # surface sources
+            elif src['type'] == 'surf':
+                src_types['surface'].append(src)
+            # discrete sources
+            elif src['type'] == 'discrete':
+                src_types['discrete'].append(src)
+            # raise an error if dealing with source type other than volume
+            # surface or discrete
+            else:
+                raise ValueError('Unrecognized source type: %s.' % src['type'])
+
+        # Get shape, inuse array and interpolation matrix from volume sources
+        first_vol = True  # mark the first volume source
+        # Loop through the volume sources
+        for vs in src_types['volume']:
+            # read the lookup table value for segmented volume
+            if 'seg_name' not in vs:
+                raise ValueError('Volume sources should be segments, '
+                                 'not the entire volume.')
+            # find the color value for this volume
+            i = _get_lut_id(lut, vs['seg_name'], use_lut)
+
+            if first_vol:
+                # get the inuse array
+                if mri_resolution:
+                    # read the mri file used to generate volumes
+                    aseg = nib.load(vs['mri_file'])
+
+                    # get the voxel space shape
+                    shape3d = (vs['mri_height'], vs['mri_depth'],
+                               vs['mri_width'])
+
+                    # get the values for this volume
+                    inuse = i * (aseg.get_data() == i).astype(int)
+                    # store as a 1D array, reversing the axis order
+                    # (ndarray.ravel does not accept an axis tuple)
+                    inuse = inuse.transpose(2, 1, 0).ravel()
+
+                else:
+                    inuse = i * vs['inuse']
+
+                    # get the volume source space shape
+                    shape = vs['shape']
+
+                    # read the shape in reverse order
+                    # (otherwise results are scrambled)
+                    shape3d = (shape[2], shape[1], shape[0])
+
+                first_vol = False
+
+            else:
+                # update the inuse array
+                if mri_resolution:
+
+                    # get the values for this volume
+                    use = i * (aseg.get_data() == i).astype(int)
+                    inuse += use.transpose(2, 1, 0).ravel()
+                else:
+                    inuse += i * vs['inuse']
+
+        # Raise error if there are no volume source spaces
+        if first_vol:
+            raise ValueError('Source spaces must contain at least one volume.')
+
+        # create 3d grid in the MRI_VOXEL coordinate frame
+        # len of inuse array should match shape regardless of mri_resolution
+        assert len(inuse) == np.prod(shape3d)
+
+        # setup the image in 3d space
+        img = inuse.reshape(shape3d).T
+
+        # include surface and/or discrete source spaces
+        if include_surfaces or include_discrete:
+
+            # setup affine transform for source spaces
+            if mri_resolution:
+                # get the MRI to MRI_VOXEL transform
+                affine = invert_transform(vs['vox_mri_t'])
+            else:
+                # get the MRI to SOURCE (MRI_VOXEL) transform
+                affine = invert_transform(vs['src_mri_t'])
+
+            # modify affine if in head coordinates
+            if coords == 'head':
+
+                # read mri -> head transformation
+                mri_head_t = _get_mri_head_t(trans)[0]
+
+                # get the HEAD to MRI transform
+                head_mri_t = invert_transform(mri_head_t)
+
+                # combine transforms, from HEAD to MRI_VOXEL
+                affine = combine_transforms(head_mri_t, affine,
+                                            'head', 'mri_voxel')
+
+            # loop through the surface source spaces
+            if include_surfaces:
+
+                # get the surface names (assumes left, right order; may want
+                # to add these names during source space generation)
+                surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']
+
+                for i, surf in enumerate(src_types['surface']):
+                    # convert vertex positions from their native space
+                    # (either HEAD or MRI) to MRI_VOXEL space
+                    srf_rr = apply_trans(affine['trans'], surf['rr'])
+                    # convert to numeric indices
+                    ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)
+                    # clip indices outside of volume space
+                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
+                                         0)
+                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
+                                         0)
+                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
+                                         0)
+                    # compare original and clipped indices
+                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
+                                       iz_orig != iz_clip)).any(0).sum()
+                    # warn the user about clipped vertices
+                    if n_diff > 0:
+                        logger.warning('%s surface vertices lay outside '
+                                       'of volume space. Consider using a '
+                                       'larger volume space.' % n_diff)
+                    # get surface id or use default value
+                    i = _get_lut_id(lut, surf_names[i], use_lut)
+                    # update image to include surface voxels
+                    img[ix_clip, iy_clip, iz_clip] = i
+
+            # loop through discrete source spaces
+            if include_discrete:
+                for i, disc in enumerate(src_types['discrete']):
+                    # convert vertex positions from their native space
+                    # (either HEAD or MRI) to MRI_VOXEL space
+                    disc_rr = apply_trans(affine['trans'], disc['rr'])
+                    # convert to numeric indices
+                    ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
+                    # clip indices outside of volume space
+                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
+                                         0)
+                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
+                                         0)
+                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
+                                         0)
+                    # compare original and clipped indices
+                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
+                                       iz_orig != iz_clip)).any(0).sum()
+                    # warn the user about clipped vertices
+                    if n_diff > 0:
+                        logger.warning('%s discrete vertices lay outside '
+                                       'of volume space. Consider using a '
+                                       'larger volume space.' % n_diff)
+                    # set default value
+                    img[ix_clip, iy_clip, iz_clip] = 1
+                    if use_lut:
+                        logger.info('Discrete sources do not have values on '
+                                    'the lookup table. Defaulting to 1.')
+
+        # calculate affine transform for image (MRI_VOXEL to RAS)
+        if mri_resolution:
+            # MRI_VOXEL to MRI transform
+            transform = vs['vox_mri_t'].copy()
+        else:
+            # MRI_VOXEL to MRI transform
+            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
+            transform = vs['src_mri_t'].copy()
+        if dest == 'mri':
+            # combine with MRI to RAS transform
+            transform = combine_transforms(transform, vs['mri_ras_t'],
+                                           transform['from'],
+                                           vs['mri_ras_t']['to'])
+        # now setup the affine for volume image
+        affine = transform['trans']
+        # make sure affine converts from m to mm
+        affine[:3] *= 1e3
+
+        # save volume data
+
+        # setup image for file
+        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
+            # setup the nifti header
+            hdr = nib.Nifti1Header()
+            hdr.set_xyzt_units('mm')
+            # save the nifti image
+            img = nib.Nifti1Image(img, affine, header=hdr)
+        elif fname.endswith('.mgz'):  # save as mgh
+            # convert to float32 (float64 not currently supported)
+            img = img.astype('float32')
+            # save the mgh image
+            img = nib.freesurfer.mghformat.MGHImage(img, affine)
+        else:
+            raise ValueError('Unrecognized file extension')
+
+        # write image to file
+        nib.save(img, fname)
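+
+    # A minimal usage sketch for the method above (hypothetical file names;
+    # assumes a mixed source space with at least one volume source space,
+    # in head coordinates):
+    #
+    #     >>> src.export_volume('mixed-src.nii', mri_resolution=True,
+    #     ...                   trans='sample-trans.fif')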
+
+
+def _add_patch_info(s):
+    """Patch information in a source space
+
+    Generate the patch information from the 'nearest' vector in
+    a source space. For vertex in the source space it provides
+    the list of neighboring vertices in the high resolution
+    triangulation.
+
+    Parameters
+    ----------
+    s : dict
+        The source space.
+    """
+    nearest = s['nearest']
+    if nearest is None:
+        s['pinfo'] = None
+        s['patch_inds'] = None
+        return
+
+    logger.info('    Computing patch statistics...')
+
+    indn = np.argsort(nearest)
+    nearest_sorted = nearest[indn]
+
+    steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
+    starti = np.r_[[0], steps]
+    stopi = np.r_[steps, [len(nearest)]]
+
+    pinfo = list()
+    for start, stop in zip(starti, stopi):
+        pinfo.append(np.sort(indn[start:stop]))
+    s['pinfo'] = pinfo
+
+    # compute patch indices of the in-use source space vertices
+    patch_verts = nearest_sorted[steps - 1]
+    s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
+
+    logger.info('    Patch information added...')
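+
+# Worked example of the structure built above: for
+# nearest = np.array([0, 0, 1, 1, 1]), s['pinfo'] becomes
+# [array([0, 1]), array([2, 3, 4])], i.e. one sorted array of
+# high-resolution vertices per patch.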
+
+
+@verbose
+def _read_source_spaces_from_tree(fid, tree, patch_stats=False,
+                                  verbose=None):
+    """Read the source spaces from a FIF file
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open file descriptor.
+    tree : dict
+        The FIF tree structure if source is a file id.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : SourceSpaces
+        The source spaces.
+    """
+    #   Find all source spaces
+    spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
+    if len(spaces) == 0:
+        raise ValueError('No source spaces found')
+
+    src = list()
+    for s in spaces:
+        logger.info('    Reading a source space...')
+        this = _read_one_source_space(fid, s)
+        logger.info('    [done]')
+        if patch_stats:
+            _complete_source_space_info(this)
+
+        src.append(this)
+
+    logger.info('    %d source spaces read' % len(spaces))
+    return SourceSpaces(src)
+
+
+@verbose
+def read_source_spaces(fname, patch_stats=False, verbose=None):
+    """Read the source spaces from a FIF file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end with -src.fif or
+        -src.fif.gz.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : SourceSpaces
+        The source spaces.
+
+    See Also
+    --------
+    write_source_spaces, setup_source_space, setup_volume_source_space
+    """
+    # be more permissive on read than write (fwd/inv can contain src)
+    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
+                                        '-fwd.fif', '-fwd.fif.gz',
+                                        '-inv.fif', '-inv.fif.gz'))
+
+    ff, tree, _ = fiff_open(fname)
+    with ff as fid:
+        src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
+                                            verbose=verbose)
+        src.info['fname'] = fname
+        node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+        if node:
+            node = node[0]
+            for p in range(node['nent']):
+                kind = node['directory'][p].kind
+                pos = node['directory'][p].pos
+                tag = read_tag(fid, pos)
+                if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
+                    src.info['working_dir'] = tag.data
+                elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
+                    src.info['command_line'] = tag.data
+    return src
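+
+# A minimal read/write round trip (hypothetical file names):
+#
+#     >>> src = read_source_spaces('sample-oct-6-src.fif')
+#     >>> src.save('sample-copy-src.fif')  # uses write_source_spaces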
+
+
+@verbose
+def _read_one_source_space(fid, this, verbose=None):
+    """Read one source space
+    """
+    FIFF_BEM_SURF_NTRI = 3104
+    FIFF_BEM_SURF_TRIANGLES = 3106
+
+    res = dict()
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
+    if tag is None:
+        res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
+    else:
+        res['id'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
+    if tag is None:
+        raise ValueError('Unknown source space type')
+    else:
+        src_type = int(tag.data)
+        if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
+            res['type'] = 'surf'
+        elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
+            res['type'] = 'vol'
+        elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
+            res['type'] = 'discrete'
+        else:
+            raise ValueError('Unknown source space type (%d)' % src_type)
+
+    if res['type'] == 'vol':
+
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
+        if tag is not None:
+            res['shape'] = tuple(tag.data)
+
+        tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
+        if tag is not None:
+            res['src_mri_t'] = tag.data
+
+        parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            # MNE 2.7.3 (and earlier) didn't store necessary information
+            # about volume coordinate translations. Although there is a
+            # FIFF_COORD_TRANS in the higher level of the FIFF file, this
+            # doesn't contain all the info we need. Safer to return an
+            # error unless a user really wants us to add backward compat.
+            raise ValueError('Cannot find parent MRI location. The volume '
+                             'source space may have been made with an MNE '
+                             'version that is too old (<= 2.7.3). Consider '
+                             'updating and regenerating the inverse.')
+
+        mri = parent_mri[0]
+        for d in mri['directory']:
+            if d.kind == FIFF.FIFF_COORD_TRANS:
+                tag = read_tag(fid, d.pos)
+                trans = tag.data
+                if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
+                    res['vox_mri_t'] = tag.data
+                if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
+                    res['mri_ras_t'] = tag.data
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
+        if tag is not None:
+            res['interpolator'] = tag.data
+        else:
+            logger.info("Interpolation matrix for MRI not found.")
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
+        if tag is not None:
+            res['mri_file'] = tag.data
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
+        if tag is not None:
+            res['mri_width'] = int(tag.data)
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
+        if tag is not None:
+            res['mri_height'] = int(tag.data)
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
+        if tag is not None:
+            res['mri_depth'] = int(tag.data)
+
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
+        if tag is not None:
+            res['mri_volume_name'] = tag.data
+
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
+        if tag is not None:
+            nneighbors = tag.data
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
+            offset = 0
+            neighbors = []
+            for n in nneighbors:
+                neighbors.append(tag.data[offset:offset + n])
+                offset += n
+            res['neighbor_vert'] = neighbors
+
+        tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
+        if tag is not None:
+            res['seg_name'] = tag.data
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    if tag is None:
+        raise ValueError('Number of vertices not found')
+
+    res['np'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF_BEM_SURF_NTRI)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
+        if tag is None:
+            res['ntri'] = 0
+        else:
+            res['ntri'] = int(tag.data)
+    else:
+        res['ntri'] = tag.data
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        raise ValueError('Coordinate frame information not found')
+
+    res['coord_frame'] = tag.data
+
+    #   Vertices, normals, and triangles
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
+    if tag is None:
+        raise ValueError('Vertex data not found')
+
+    res['rr'] = tag.data.astype(np.float)  # double precision for mayavi
+    if res['rr'].shape[0] != res['np']:
+        raise ValueError('Vertex information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+    if tag is None:
+        raise ValueError('Vertex normals not found')
+
+    res['nn'] = tag.data
+    if res['nn'].shape[0] != res['np']:
+        raise ValueError('Vertex normal information is incorrect')
+
+    if res['ntri'] > 0:
+        tag = find_tag(fid, this, FIFF_BEM_SURF_TRIANGLES)
+        if tag is None:
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
+            if tag is None:
+                raise ValueError('Triangulation not found')
+            else:
+                res['tris'] = tag.data - 1  # index start at 0 in Python
+        else:
+            res['tris'] = tag.data - 1  # index start at 0 in Python
+
+        if res['tris'].shape[0] != res['ntri']:
+            raise ValueError('Triangulation information is incorrect')
+    else:
+        res['tris'] = None
+
+    #   Which vertices are active
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
+    if tag is None:
+        res['nuse'] = 0
+        res['inuse'] = np.zeros(res['nuse'], dtype=np.int)
+        res['vertno'] = None
+    else:
+        res['nuse'] = int(tag.data)
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
+        if tag is None:
+            raise ValueError('Source selection information missing')
+
+        res['inuse'] = tag.data.astype(np.int).T
+        if len(res['inuse']) != res['np']:
+            raise ValueError('Incorrect number of entries in source space '
+                             'selection')
+
+        res['vertno'] = np.where(res['inuse'])[0]
+
+    #   Use triangulation
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
+    if tag1 is None or tag2 is None:
+        res['nuse_tri'] = 0
+        res['use_tris'] = None
+    else:
+        res['nuse_tri'] = tag1.data
+        res['use_tris'] = tag2.data - 1  # index start at 0 in Python
+
+    #   Patch-related information
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
+
+    if tag1 is None or tag2 is None:
+        res['nearest'] = None
+        res['nearest_dist'] = None
+    else:
+        res['nearest'] = tag1.data
+        res['nearest_dist'] = tag2.data.T
+
+    _add_patch_info(res)
+
+    #   Distances
+    tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
+    tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
+    if tag1 is None or tag2 is None:
+        res['dist'] = None
+        res['dist_limit'] = None
+    else:
+        res['dist'] = tag1.data
+        res['dist_limit'] = tag2.data
+        #   only the upper triangle is stored; make the matrix symmetric
+        res['dist'] = res['dist'] + res['dist'].T
+    if res['dist'] is not None:
+        logger.info('    Distance information added...')
+
+    tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
+    if tag is not None:
+        res['subject_his_id'] = tag.data
+
+    return res
+
+
+@verbose
+def _complete_source_space_info(this, verbose=None):
+    """Add more info on surface
+    """
+    #   Main triangulation
+    logger.info('    Completing triangulation info...')
+    this['tri_area'] = np.zeros(this['ntri'])
+    r1 = this['rr'][this['tris'][:, 0], :]
+    r2 = this['rr'][this['tris'][:, 1], :]
+    r3 = this['rr'][this['tris'][:, 2], :]
+    this['tri_cent'] = (r1 + r2 + r3) / 3.0
+    this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
+    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
+    this['tri_area'] = size / 2.0
+    this['tri_nn'] /= size[:, None]
+    logger.info('[done]')
+
+    #   Selected triangles
+    logger.info('    Completing selection triangulation info...')
+    if this['nuse_tri'] > 0:
+        r1 = this['rr'][this['use_tris'][:, 0], :]
+        r2 = this['rr'][this['use_tris'][:, 1], :]
+        r3 = this['rr'][this['use_tris'][:, 2], :]
+        this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
+        this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
+        this['use_tri_area'] = np.sqrt(np.sum(this['use_tri_nn'] ** 2, axis=1)
+                                       ) / 2.0
+    logger.info('[done]')
+
+
+def find_source_space_hemi(src):
+    """Return the hemisphere id for a source space
+
+    Parameters
+    ----------
+    src : dict
+        The source space to investigate
+
+    Returns
+    -------
+    hemi : int
+        Deduced hemisphere id
+    """
+    xave = src['rr'][:, 0].sum()
+
+    if xave < 0:
+        hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
+    else:
+        hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
+
+    return hemi
+
+
+def label_src_vertno_sel(label, src):
+    """ Find vertex numbers and indices from label
+
+    Parameters
+    ----------
+    label : Label
+        Source space label
+    src : dict
+        Source space
+
+    Returns
+    -------
+    vertices : list of length 2
+        Vertex numbers for lh and rh
+    src_sel : array of int (len(src_sel) = len(vertices[0]) + len(vertices[1]))
+        Indices of the selected vertices in the source space
+    """
+    if src[0]['type'] != 'surf':
+        raise ValueError('Labels are only supported with surface source '
+                         'spaces')
+
+    vertno = [src[0]['vertno'], src[1]['vertno']]
+
+    if label.hemi == 'lh':
+        vertno_sel = np.intersect1d(vertno[0], label.vertices)
+        src_sel = np.searchsorted(vertno[0], vertno_sel)
+        vertno[0] = vertno_sel
+        vertno[1] = np.array([], int)
+    elif label.hemi == 'rh':
+        vertno_sel = np.intersect1d(vertno[1], label.vertices)
+        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
+        vertno[0] = np.array([], int)
+        vertno[1] = vertno_sel
+    elif label.hemi == 'both':
+        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
+        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
+        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
+        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
+        src_sel = np.hstack((src_sel_lh, src_sel_rh))
+        vertno = [vertno_sel_lh, vertno_sel_rh]
+    else:
+        raise Exception("Unknown hemisphere type")
+
+    return vertno, src_sel
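+
+# A minimal usage sketch (assumes a surface source space src and a
+# left-hemisphere Label object are in memory):
+#
+#     >>> vertices, src_sel = label_src_vertno_sel(label, src)
+#     >>> # vertices[1] is empty for a purely left-hemisphere label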
+
+
+def _get_vertno(src):
+    return [s['vertno'] for s in src]
+
+
+###############################################################################
+# Write routines
+
+@verbose
+def _write_source_spaces_to_fid(fid, src, verbose=None):
+    """Write the source spaces to a FIF file
+
+    Parameters
+    ----------
+    fid : file descriptor
+        An open file descriptor.
+    src : list
+        The list of source spaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    for s in src:
+        logger.info('    Write a source space...')
+        start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
+        _write_one_source_space(fid, s, verbose)
+        end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
+        logger.info('    [done]')
+    logger.info('    %d source spaces written' % len(src))
+
+
+@verbose
+def write_source_spaces(fname, src, verbose=None):
+    """Write source spaces to a file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end with -src.fif or
+        -src.fif.gz.
+    src : SourceSpaces
+        The source spaces (as returned by read_source_spaces).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_source_spaces
+    """
+    check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))
+
+    fid = start_file(fname)
+    start_block(fid, FIFF.FIFFB_MNE)
+
+    if src.info:
+        start_block(fid, FIFF.FIFFB_MNE_ENV)
+
+        write_id(fid, FIFF.FIFF_BLOCK_ID)
+
+        data = src.info.get('working_dir', None)
+        if data:
+            write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
+        data = src.info.get('command_line', None)
+        if data:
+            write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
+
+        end_block(fid, FIFF.FIFFB_MNE_ENV)
+
+    _write_source_spaces_to_fid(fid, src, verbose)
+
+    end_block(fid, FIFF.FIFFB_MNE)
+    end_file(fid)
+
+
+def _write_one_source_space(fid, this, verbose=None):
+    """Write one source space"""
+    if this['type'] == 'surf':
+        src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
+    elif this['type'] == 'vol':
+        src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
+    elif this['type'] == 'discrete':
+        src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
+    else:
+        raise ValueError('Unknown source space type (%s)' % this['type'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
+    if this['id'] >= 0:
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
+
+    data = this.get('subject_his_id', None)
+    if data:
+        write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
+    write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
+
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
+    write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
+
+    #   Which vertices are active
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
+    write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
+
+    if this['ntri'] > 0:
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
+        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
+                         this['tris'] + 1)
+
+    if this['type'] != 'vol' and this['use_tris'] is not None:
+        #   Use triangulation
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
+        write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
+                         this['use_tris'] + 1)
+
+    if this['type'] == 'vol':
+        neighbor_vert = this.get('neighbor_vert', None)
+        if neighbor_vert is not None:
+            nneighbors = np.array([len(n) for n in neighbor_vert])
+            neighbors = np.concatenate(neighbor_vert)
+            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
+            write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
+
+        write_coord_trans(fid, this['src_mri_t'])
+
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
+
+        start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        write_coord_trans(fid, this['mri_ras_t'])
+        write_coord_trans(fid, this['vox_mri_t'])
+
+        mri_volume_name = this.get('mri_volume_name', None)
+        if mri_volume_name is not None:
+            write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
+
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
+                               this['interpolator'])
+
+        if 'mri_file' in this and this['mri_file'] is not None:
+            write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
+                         this['mri_file'])
+
+        write_int(fid, FIFF.FIFF_MRI_WIDTH, this['mri_width'])
+        write_int(fid, FIFF.FIFF_MRI_HEIGHT, this['mri_height'])
+        write_int(fid, FIFF.FIFF_MRI_DEPTH, this['mri_depth'])
+
+        end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+
+    #   Patch-related information
+    if this['nearest'] is not None:
+        write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
+        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
+                           this['nearest_dist'])
+
+    #   Distances
+    if this['dist'] is not None:
+        # Save only upper triangular portion of the matrix
+        dists = this['dist'].copy()
+        dists = sparse.triu(dists, format=dists.format)
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
+        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
+                           this['dist_limit'])
+
+    #   Segmentation data
+    if this['type'] == 'vol' and ('seg_name' in this):
+        # Save the name of the segment
+        write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
+
+
+##############################################################################
+# Surface to MNI conversion
+
+@verbose
+def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
+                  verbose=None):
+    """Convert the array of vertices for a hemisphere to MNI coordinates
+
+    Parameters
+    ----------
+    vertices : int, or list of int
+        Vertex number(s) to convert
+    hemis : int, or list of int
+        Hemisphere(s) the vertices belong to
+    subject : string
+        Name of the subject to load surfaces from.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    mode : string | None
+        Either 'nibabel' or 'freesurfer' for the software to use to
+        obtain the transforms. If None, 'nibabel' is tried first, falling
+        back to 'freesurfer' if it fails. Results should be equivalent with
+        either option, but nibabel may be quicker (and more pythonic).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    coordinates : n_vertices x 3 array of float
+        The MNI coordinates (in mm) of the vertices
+
+    Notes
+    -----
+    This function requires either nibabel (in Python) or Freesurfer
+    (with utility "mri_info") to be correctly installed.
+    """
+    if not has_freesurfer() and not has_nibabel():
+        raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
+                           'correctly installed and accessible from Python')
+
+    if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray):
+        vertices = [vertices]
+
+    if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray):
+        hemis = [hemis] * len(vertices)
+
+    if not len(hemis) == len(vertices):
+        raise ValueError('hemi and vertices must match in length')
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
+             for h in ['lh', 'rh']]
+
+    # read surface locations in MRI space
+    rr = [read_surface(s)[0] for s in surfs]
+
+    # take point locations in MRI space and convert to MNI coordinates
+    xfm = _read_talxfm(subject, subjects_dir, mode)
+    data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
+    return apply_trans(xfm['trans'], data)
+
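+# Example (illustrative sketch, not part of the original changeset): with a
+# hypothetical FreeSurfer subject 'sample' and SUBJECTS_DIR set, vertex_to_mni
+# can convert two left-hemisphere vertices like this (subject name and vertex
+# numbers are assumptions for demonstration only):
+#
+#     >>> from mne import vertex_to_mni
+#     >>> coords = vertex_to_mni([315, 712], hemis=0, subject='sample')
+#     >>> coords.shape  # one (x, y, z) MNI coordinate in mm per vertex
+#     (2, 3)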
+
+@verbose
+def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
+    """Read MNI transform from FreeSurfer talairach.xfm file
+
+    Adapted from freesurfer m-files. Altered to deal with Norig
+    and Torig correctly.
+    """
+    if mode is not None and mode not in ['nibabel', 'freesurfer']:
+        raise ValueError('mode must be "nibabel" or "freesurfer"')
+    fname = op.join(subjects_dir, subject, 'mri', 'transforms',
+                    'talairach.xfm')
+    # read the RAS to MNI transform from talairach.xfm
+    with open(fname, 'r') as fid:
+        logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)
+
+        # read lines until we get the string 'Linear_Transform', which precedes
+        # the data transformation matrix
+        got_it = False
+        comp = 'Linear_Transform'
+        for line in fid:
+            if line[:len(comp)] == comp:
+                # we have the right line, so don't read any more
+                got_it = True
+                break
+
+        if got_it:
+            xfm = list()
+            # read the transformation matrix (3x4)
+            for ii, line in enumerate(fid):
+                digs = [float(s) for s in line.strip('\n;').split()]
+                xfm.append(digs)
+                if ii == 2:
+                    break
+            xfm.append([0., 0., 0., 1.])
+            xfm = np.array(xfm, dtype=float)
+        else:
+            raise ValueError('failed to find \'Linear_Transform\' string in '
+                             'xfm file:\n%s' % fname)
+
+    # Setup the RAS to MNI transform
+    ras_mni_t = {'from': FIFF.FIFFV_MNE_COORD_RAS,
+                 'to': FIFF.FIFFV_MNE_COORD_MNI_TAL, 'trans': xfm}
+
+    # now get Norig and Torig
+    # (i.e. vox_ras_t and vox_mri_t, respectively)
+    path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
+    if not op.isfile(path):
+        path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    if not op.isfile(path):
+        raise IOError('mri not found: %s' % path)
+
+    if has_nibabel():
+        use_nibabel = True
+    else:
+        use_nibabel = False
+        if mode == 'nibabel':
+            raise ImportError('Tried to import nibabel but failed, try using '
+                              "mode=None or mode='freesurfer'")
+
+    # note that if mode == None, then we default to using nibabel
+    if use_nibabel is True and mode == 'freesurfer':
+        use_nibabel = False
+    if use_nibabel:
+        import nibabel as nib
+        img = nib.load(path)
+        hdr = img.get_header()
+        # read the MRI_VOXEL to RAS transform
+        n_orig = hdr.get_vox2ras()
+        # read the MRI_VOXEL to MRI transform
+        ds = np.array(hdr.get_zooms())
+        ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
+        t_orig = np.array([[-ds[0], 0, 0, ns[0]],
+                           [0, 0, ds[2], -ns[2]],
+                           [0, -ds[1], 0, ns[1]],
+                           [0, 0, 0, 1]], dtype=float)
+        nt_orig = [n_orig, t_orig]
+    else:
+        nt_orig = list()
+        for conv in ['--vox2ras', '--vox2ras-tkr']:
+            stdout, stderr = run_subprocess(['mri_info', conv, path])
+            stdout = np.fromstring(stdout, sep=' ').astype(float)
+            if not stdout.size == 16:
+                raise ValueError('Could not parse Freesurfer mri_info output')
+            nt_orig.append(stdout.reshape(4, 4))
+    # extract the MRI_VOXEL to RAS transform
+    n_orig = nt_orig[0]
+    vox_ras_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
+                 'to': FIFF.FIFFV_MNE_COORD_RAS,
+                 'trans': n_orig}
+
+    # extract the MRI_VOXEL to MRI transform
+    t_orig = nt_orig[1]
+    vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
+
+    # invert MRI_VOXEL to MRI to get the MRI to MRI_VOXEL transform
+    mri_vox_t = invert_transform(vox_mri_t)
+
+    # construct an MRI to RAS transform
+    mri_ras_t = combine_transforms(mri_vox_t, vox_ras_t, 'mri', 'ras')
+
+    # construct the MRI to MNI transform
+    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
+    return mri_mni_t
+
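+# For reference, the transform chain assembled by _read_talxfm, written out
+# (informal sketch): mri_mni_t = ras_mni_t o mri_ras_t, where
+# mri_ras_t = vox_ras_t o inv(vox_mri_t). That is, surface-RAS ("mri")
+# coordinates go back to voxels via the inverse of Torig, then to scanner
+# RAS via Norig, and finally to MNI Talairach space via talairach.xfm.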
+
+###############################################################################
+# Creation and decimation
+
+@verbose
+def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
+                       overwrite=False, subjects_dir=None, add_dist=True,
+                       n_jobs=1, verbose=None):
+    """Setup a source space with subsampling
+
+    Parameters
+    ----------
+    subject : str
+        Subject to process.
+    fname : str | None | bool
+        Filename to use. If True, a default name will be used. If None,
+        the source space will not be saved (only returned).
+    spacing : str
+        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
+        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
+        or ``'all'`` for all points.
+    surface : str
+        The surface to use.
+    overwrite : bool
+        If True, overwrite output file (if it exists).
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    add_dist : bool
+        Add distance and patch information to the source space. This takes some
+        time so precomputing it is recommended.
+    n_jobs : int
+        Number of jobs to run in parallel. Will use at most 2 jobs
+        (one for each hemisphere).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : list
+        The source space for each hemisphere.
+    """
+    cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
+           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
+           % (subject, fname, spacing, surface, overwrite,
+              subjects_dir, add_dist, verbose))
+    # check to make sure our parameters are good, parse 'spacing'
+    space_err = ('"spacing" must be a string with values '
+                 '"ico#", "oct#", or "all", and "ico" and "oct"'
+                 'numbers must be integers')
+    if not isinstance(spacing, string_types) or len(spacing) < 3:
+        raise ValueError(space_err)
+    if spacing == 'all':
+        stype = 'all'
+        sval = ''
+    elif spacing[:3] == 'ico':
+        stype = 'ico'
+        sval = spacing[3:]
+    elif spacing[:3] == 'oct':
+        stype = 'oct'
+        sval = spacing[3:]
+    else:
+        raise ValueError(space_err)
+    try:
+        if stype in ['ico', 'oct']:
+            sval = int(sval)
+        elif stype == 'spacing':  # spacing
+            sval = float(sval)
+    except ValueError:
+        raise ValueError(space_err)
+    subjects_dir = get_subjects_dir(subjects_dir)
+    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
+             for hemi in ['lh.', 'rh.']]
+    bem_dir = op.join(subjects_dir, subject, 'bem')
+
+    for surf, hemi in zip(surfs, ['LH', 'RH']):
+        if surf is not None and not op.isfile(surf):
+            raise IOError('Could not find the %s surface %s'
+                          % (hemi, surf))
+
+    if not (fname is True or fname is None or isinstance(fname, string_types)):
+        raise ValueError('"fname" must be a string, True, or None')
+    if fname is True:
+        extra = '%s-%s' % (stype, sval) if sval != '' else stype
+        fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))
+    if fname is not None and op.isfile(fname) and overwrite is False:
+        raise IOError('file "%s" exists, use overwrite=True if you want '
+                      'to overwrite the file' % fname)
+
+    logger.info('Setting up the source space with the following parameters:\n')
+    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
+    logger.info('Subject      = %s' % subject)
+    logger.info('Surface      = %s' % surface)
+    if stype == 'ico':
+        src_type_str = 'ico = %s' % sval
+        logger.info('Icosahedron subdivision grade %s\n' % sval)
+    elif stype == 'oct':
+        src_type_str = 'oct = %s' % sval
+        logger.info('Octahedron subdivision grade %s\n' % sval)
+    else:
+        src_type_str = 'all'
+        logger.info('Include all vertices\n')
+
+    # Create the fif file
+    if fname is not None:
+        logger.info('>>> 1. Creating the source space file %s...' % fname)
+    else:
+        logger.info('>>> 1. Creating the source space...\n')
+
+    # mne_make_source_space ... actually make the source spaces
+    src = []
+
+    # pre-load ico/oct surf (once) for speed, if necessary
+    if stype in ['ico', 'oct']:
+        # ### from mne_ico_downsample.c ###
+        if stype == 'ico':
+            logger.info('Doing the icosahedral vertex picking...')
+            ico_surf = _get_ico_surface(sval)
+        else:
+            logger.info('Doing the octahedral vertex picking...')
+            ico_surf = _tessellate_sphere_surf(sval)
+    else:
+        ico_surf = None
+
+    for hemi, surf in zip(['lh', 'rh'], surfs):
+        logger.info('Loading %s...' % surf)
+        # Setup the surface spacing in the MRI coord frame
+        s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
+                                 subjects_dir)
+        logger.info('loaded %s %d/%d selected to source space (%s)'
+                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
+        src.append(s)
+        logger.info('')  # newline after each hemisphere is run
+
+    # Fill in source space info
+    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
+    for s, s_id in zip(src, hemi_ids):
+        # Add missing fields
+        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
+                      nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
+                      coord_frame=np.array((FIFF.FIFFV_COORD_MRI,), np.int32)))
+        s['rr'] /= 1000.0
+        del s['tri_area']
+        del s['tri_cent']
+        del s['tri_nn']
+        del s['neighbor_tri']
+
+    # upconvert to object format from lists
+    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
+
+    if add_dist:
+        add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)
+
+    # write out if requested, then return the data
+    if fname is not None:
+        write_source_spaces(fname, src)
+        logger.info('Wrote %s' % fname)
+    logger.info('You are now one step closer to computing the gain matrix')
+    return src
+
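+# Example (illustrative sketch, not part of the original changeset): an
+# 'oct6' surface source space for a hypothetical subject 'sample', returned
+# without being written to disk (the subject name is an assumption):
+#
+#     >>> from mne import setup_source_space
+#     >>> src = setup_source_space('sample', fname=None, spacing='oct6',
+#     ...                          add_dist=False)
+#     >>> len(src)  # one source space per hemisphere
+#     2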
+
+@verbose
+def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
+                              sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
+                              surface=None, mindist=5.0, exclude=0.0,
+                              overwrite=False, subjects_dir=None,
+                              volume_label=None, add_interpolator=True,
+                              verbose=None):
+    """Setup a volume source space with grid spacing or discrete source space
+
+    Parameters
+    ----------
+    subject : str
+        Subject to process.
+    fname : str | None
+        Filename to use. If None, the source space will not be saved
+        (only returned).
+    pos : float | dict
+        Positions to use for sources. If float, a grid will be constructed
+        with the spacing given by `pos` in mm, generating a volume source
+        space. If dict, pos['rr'] and pos['nn'] will be used as the source
+        space locations (in meters) and normals, respectively, creating a
+        discrete source space. NOTE: For a discrete source space (`pos` is
+        a dict), `mri` must be None.
+    mri : str | None
+        The filename of an MRI volume (mgh or mgz) to create the
+        interpolation matrix over. Source estimates obtained in the
+        volume source space can then be morphed onto the MRI volume
+        using this interpolator. If pos is a dict, this can be None.
+    sphere : array_like (length 4)
+        Define spherical source space bounds using origin and radius given
+        by (ox, oy, oz, rad) in mm. Only used if `bem` and `surface` are
+        both None.
+    bem : str | None
+        Define source space bounds using a BEM file (specifically the inner
+        skull surface).
+    surface : str | dict | None
+        Define source space bounds using a FreeSurfer surface file. Can
+        also be a dictionary with entries `'rr'` and `'tris'`, such as
+        those returned by `read_surface()`.
+    mindist : float
+        Exclude points closer than this distance (mm) to the bounding surface.
+    exclude : float
+        Exclude points closer than this distance (mm) from the center of mass
+        of the bounding surface.
+    overwrite : bool
+        If True, overwrite output file (if it exists).
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    volume_label : str | None
+        Region of interest corresponding with the FreeSurfer lookup table.
+    add_interpolator : bool
+        If True and ``mri`` is not None, then an interpolation matrix
+        will be produced.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : list
+        The source space. Note that this list will have length 1 for
+        compatibility reasons, as most functions expect source spaces
+        to be provided as lists.
+
+    Notes
+    -----
+    To create a discrete source space, `pos` must be a dict, `mri` must be
+    None, and `volume_label` must be None. To create a whole brain volume
+    source space, `pos` must be a float and `mri` must be provided. To create
+    a volume source space from a label, `pos` must be a float, `volume_label`
+    must be provided, and `mri` must refer to a .mgh or .mgz file with values
+    corresponding to the freesurfer lookup-table (typically aseg.mgz).
+    """
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
+    if bem is not None and surface is not None:
+        raise ValueError('Only one of "bem" and "surface" should be '
+                         'specified')
+    if mri is not None:
+        if not op.isfile(mri):
+            raise IOError('mri file "%s" not found' % mri)
+        if isinstance(pos, dict):
+            raise ValueError('Cannot create interpolation matrix for '
+                             'discrete source space, mri must be None if '
+                             'pos is a dict')
+
+    if volume_label is not None:
+        if mri is None:
+            raise RuntimeError('"mri" must be provided if "volume_label" is '
+                               'not None')
+        # Check that volume label is found in .mgz file
+        volume_labels = get_volume_labels_from_aseg(mri)
+        if volume_label not in volume_labels:
+            raise ValueError('Volume %s not found in file %s. Double check '
+                             'freesurfer lookup table.' % (volume_label, mri))
+
+    sphere = np.asarray(sphere)
+    if sphere.size != 4:
+        raise ValueError('"sphere" must be array_like with 4 elements')
+
+    # triage bounding argument
+    if bem is not None:
+        logger.info('BEM file              : %s', bem)
+    elif surface is not None:
+        if isinstance(surface, dict):
+            if not all(key in surface for key in ['rr', 'tris']):
+                raise KeyError('surface, if dict, must have entries "rr" '
+                               'and "tris"')
+            # let's make sure we have geom info
+            surface = _read_surface_geom(surface, verbose=False)
+            surf_extra = 'dict()'
+        elif isinstance(surface, string_types):
+            if not op.isfile(surface):
+                raise IOError('surface file "%s" not found' % surface)
+            surf_extra = surface
+        logger.info('Boundary surface file : %s', surf_extra)
+    else:
+        logger.info('Sphere                : origin at (%.1f %.1f %.1f) mm'
+                    % (sphere[0], sphere[1], sphere[2]))
+        logger.info('              radius  : %.1f mm' % sphere[3])
+
+    # triage pos argument
+    if isinstance(pos, dict):
+        if not all(key in pos for key in ['rr', 'nn']):
+            raise KeyError('pos, if dict, must contain "rr" and "nn"')
+        pos_extra = 'dict()'
+    else:  # pos should be float-like
+        try:
+            pos = float(pos)
+        except (TypeError, ValueError):
+            raise ValueError('pos must be a dict, or something that can be '
+                             'cast to float()')
+    if not isinstance(pos, float):
+        logger.info('Source location file  : %s', pos_extra)
+        logger.info('Assuming input in millimeters')
+        logger.info('Assuming input in MRI coordinates')
+
+    logger.info('Output file           : %s', fname)
+    if isinstance(pos, float):
+        logger.info('grid                  : %.1f mm' % pos)
+        logger.info('mindist               : %.1f mm' % mindist)
+        pos /= 1000.0  # convert pos from mm to m
+    if exclude > 0.0:
+        logger.info('Exclude               : %.1f mm' % exclude)
+    if mri is not None:
+        logger.info('MRI volume            : %s' % mri)
+    exclude /= 1000.0  # convert exclude from mm to m
+    logger.info('')
+
+    # Explicit list of points
+    if not isinstance(pos, float):
+        # Make the grid of sources
+        sp = _make_discrete_source_space(pos)
+    else:
+        # Load the brain surface as a template
+        if bem is not None:
+            # read bem surface in the MRI coordinate frame
+            surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+                                     verbose=False)
+            logger.info('Loaded inner skull from %s (%d nodes)'
+                        % (bem, surf['np']))
+        elif surface is not None:
+            if isinstance(surface, string_types):
+                # read the surface in the MRI coordinate frame
+                surf = _read_surface_geom(surface)
+            else:
+                surf = surface
+            logger.info('Loaded bounding surface from %s (%d nodes)'
+                        % (surface, surf['np']))
+            surf = deepcopy(surf)
+            surf['rr'] *= 1e-3  # must be converted to meters
+        else:  # Load an icosahedron and use that as the surface
+            logger.info('Setting up the sphere...')
+            surf = _get_ico_surface(3)
+
+            # Scale and shift
+
+            # center at origin and make radius 1
+            _normalize_vectors(surf['rr'])
+
+            # normalize to sphere (in MRI coord frame)
+            surf['rr'] *= sphere[3] / 1000.0  # scale by radius
+            surf['rr'] += sphere[:3] / 1000.0  # move by center
+            _complete_surface_info(surf, True)
+        # Make the grid of sources in MRI space
+        sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
+                                       volume_label)
+
+    # Compute an interpolation matrix to show data in MRI_VOXEL coord frame
+    if mri is not None:
+        _add_interpolator(sp, mri, add_interpolator)
+    elif sp['type'] == 'vol':
+        # If there is no interpolator, it's actually a discrete source space
+        sp['type'] = 'discrete'
+
+    if 'vol_dims' in sp:
+        del sp['vol_dims']
+
+    # Save it
+    sp.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
+                   dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
+                   nuse_tri=0, tris=None))
+    sp = SourceSpaces([sp], dict(working_dir=os.getcwd(), command_line='None'))
+    if fname is not None:
+        write_source_spaces(fname, sp, verbose=False)
+    return sp
+
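+# Example (illustrative sketch, not part of the original changeset): a 7-mm
+# volume grid bounded by the default sphere, for a hypothetical subject
+# 'sample'. A discrete source space would instead pass a dict as ``pos``
+# (with 'rr' and 'nn') and keep ``mri=None``:
+#
+#     >>> from mne import setup_volume_source_space
+#     >>> vol_src = setup_volume_source_space('sample', pos=7.0,
+#     ...                                     sphere=(0., 0., 0., 90.))
+#     >>> len(vol_src)  # a length-1 list, for compatibility
+#     1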
+
+def _make_voxel_ras_trans(move, ras, voxel_size):
+    """Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)"""
+    assert voxel_size.ndim == 1
+    assert voxel_size.size == 3
+    rot = ras.T * voxel_size[np.newaxis, :]
+    assert rot.ndim == 2
+    assert rot.shape[0] == 3
+    assert rot.shape[1] == 3
+    trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
+    t = Transform('mri_voxel', 'mri', trans)
+    return t
+
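+# Layout of the affine built by _make_voxel_ras_trans (sketch): with
+# rot = ras.T * voxel_size (3 x 3) and move the grid origin in MRI
+# coordinates (3-vector), the result is the usual homogeneous transform
+#
+#     [ rot  move ]
+#     [  0     1  ]  (4 x 4)
+#
+# so voxel indices map to MRI surface-RAS positions in one matrix product.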
+
+def _make_discrete_source_space(pos):
+    """Use a discrete set of source locs/oris to make src space
+
+    Parameters
+    ----------
+    pos : dict
+        Must have entries "rr" and "nn". Data should be in meters.
+
+    Returns
+    -------
+    src : dict
+        The source space.
+    """
+    # process points
+    rr = pos['rr'].copy()
+    nn = pos['nn'].copy()
+    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
+            rr.shape[1] == nn.shape[1] == 3):
+        raise RuntimeError('"rr" and "nn" must both be 2D arrays with '
+                           'the same number of rows and 3 columns')
+    npts = rr.shape[0]
+    _normalize_vectors(nn)
+    nz = np.sum(np.sum(nn * nn, axis=1) == 0)
+    if nz != 0:
+        raise RuntimeError('%d sources have zero length normal' % nz)
+    logger.info('Positions (in meters) and orientations')
+    logger.info('%d sources' % npts)
+
+    # Ready to make the source space
+    coord_frame = FIFF.FIFFV_COORD_MRI
+    sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
+              inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
+              id=-1)
+    return sp
+
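+# Example input for _make_discrete_source_space (illustrative sketch, units
+# in meters; a single hypothetical source 7 cm above the origin):
+#
+#     >>> import numpy as np
+#     >>> pos = dict(rr=np.array([[0., 0., 0.07]]),  # source location
+#     ...            nn=np.array([[0., 0., 1.]]))    # unit normal
+#     >>> sp = _make_discrete_source_space(pos)
+#     >>> sp['nuse']
+#     1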
+
+def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
+                              volume_label=None, do_neighbors=True, n_jobs=1):
+    """Make a source space which covers the volume bounded by surf"""
+
+    # Figure out the grid size in the MRI coordinate frame
+    mins = np.min(surf['rr'], axis=0)
+    maxs = np.max(surf['rr'], axis=0)
+    cm = np.mean(surf['rr'], axis=0)  # center of mass
+
+    # Define the sphere which fits the surface
+    maxdist = np.sqrt(np.max(np.sum((surf['rr'] - cm) ** 2, axis=1)))
+
+    logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
+                % (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
+    logger.info('Surface fits inside a sphere with radius %6.1f mm'
+                % (1000 * maxdist))
+    logger.info('Surface extent:')
+    for c, mi, ma in zip('xyz', mins, maxs):
+        logger.info('    %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
+    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
+                     np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
+    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
+                     np.floor(np.abs(m) / grid) - 1 for m in mins], int)
+    logger.info('Grid extent:')
+    for c, mi, ma in zip('xyz', minn, maxn):
+        logger.info('    %s = %6.1f ... %6.1f mm'
+                    % (c, 1000 * mi * grid, 1000 * ma * grid))
+
+    # Now make the initial grid
+    ns = maxn - minn + 1
+    npts = np.prod(ns)
+    nrow = ns[0]
+    ncol = ns[1]
+    nplane = nrow * ncol
+    # x varies fastest, then y, then z (can use unravel to do this)
+    rr = meshgrid(np.arange(minn[2], maxn[2] + 1),
+                  np.arange(minn[1], maxn[1] + 1),
+                  np.arange(minn[0], maxn[0] + 1), indexing='ij')
+    x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
+    rr = np.array([x * grid, y * grid, z * grid]).T
+    sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
+              inuse=np.ones(npts, int), type='vol', nuse=npts,
+              coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
+    sp['nn'][:, 2] = 1.0
+    assert sp['rr'].shape[0] == npts
+
+    logger.info('%d sources before omitting any.', sp['nuse'])
+
+    # Exclude infeasible points
+    dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
+    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
+    sp['inuse'][bads] = False
+    sp['nuse'] -= len(bads)
+    logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
+
+    _filter_source_spaces(surf, mindist, None, [sp], n_jobs)
+    logger.info('%d sources remaining after excluding the sources outside '
+                'the surface and less than %6.1f mm inside.'
+                % (sp['nuse'], mindist))
+
+    if not do_neighbors:
+        if volume_label is not None:
+            raise RuntimeError('volume_label must be None unless '
+                               'do_neighbors is True')
+        return sp
+    k = np.arange(npts)
+    neigh = np.empty((26, npts), int)
+    neigh.fill(-1)
+
+    # Figure out each neighborhood:
+    # 6-neighborhood first
+    idxs = [z > minn[2], x < maxn[0], y < maxn[1],
+            x > minn[0], y > minn[1], z < maxn[2]]
+    offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
+    for n, idx, offset in zip(neigh[:6], idxs, offsets):
+        n[idx] = k[idx] + offset
+
+    # Then the rest to complete the 26-neighborhood
+
+    # First the plane below
+    idx1 = z > minn[2]
+
+    idx2 = np.logical_and(idx1, x < maxn[0])
+    neigh[6, idx2] = k[idx2] + 1 - nplane
+    idx3 = np.logical_and(idx2, y < maxn[1])
+    neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
+
+    idx2 = np.logical_and(idx1, y < maxn[1])
+    neigh[8, idx2] = k[idx2] + nrow - nplane
+
+    idx2 = np.logical_and(idx1, x > minn[0])
+    idx3 = np.logical_and(idx2, y < maxn[1])
+    neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
+    neigh[10, idx2] = k[idx2] - 1 - nplane
+    idx3 = np.logical_and(idx2, y > minn[1])
+    neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
+
+    idx2 = np.logical_and(idx1, y > minn[1])
+    neigh[12, idx2] = k[idx2] - nrow - nplane
+    idx3 = np.logical_and(idx2, x < maxn[0])
+    neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
+
+    # Then the same plane
+    idx1 = np.logical_and(x < maxn[0], y < maxn[1])
+    neigh[14, idx1] = k[idx1] + 1 + nrow
+
+    idx1 = x > minn[0]
+    idx2 = np.logical_and(idx1, y < maxn[1])
+    neigh[15, idx2] = k[idx2] - 1 + nrow
+    idx2 = np.logical_and(idx1, y > minn[1])
+    neigh[16, idx2] = k[idx2] - 1 - nrow
+
+    idx1 = np.logical_and(y > minn[1], x < maxn[0])
+    neigh[17, idx1] = k[idx1] + 1 - nrow
+
+    # Finally one plane above
+    idx1 = z < maxn[2]
+
+    idx2 = np.logical_and(idx1, x < maxn[0])
+    neigh[18, idx2] = k[idx2] + 1 + nplane
+    idx3 = np.logical_and(idx2, y < maxn[1])
+    neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
+
+    idx2 = np.logical_and(idx1, y < maxn[1])
+    neigh[20, idx2] = k[idx2] + nrow + nplane
+
+    idx2 = np.logical_and(idx1, x > minn[0])
+    idx3 = np.logical_and(idx2, y < maxn[1])
+    neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
+    neigh[22, idx2] = k[idx2] - 1 + nplane
+    idx3 = np.logical_and(idx2, y > minn[1])
+    neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
+
+    idx2 = np.logical_and(idx1, y > minn[1])
+    neigh[24, idx2] = k[idx2] - nrow + nplane
+    idx3 = np.logical_and(idx2, x < maxn[0])
+    neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
+
+    # Restrict sources to volume of interest
+    if volume_label is not None:
+        try:
+            import nibabel as nib
+        except ImportError:
+            raise ImportError("nibabel is required to read segmentation file.")
+
+        logger.info('Selecting voxels from %s' % volume_label)
+
+        # Read the segmentation data using nibabel
+        mgz = nib.load(mri)
+        mgz_data = mgz.get_data()
+
+        # Get the numeric index for this volume label
+        lut = _get_lut()
+        vol_id = _get_lut_id(lut, volume_label, True)
+
+        # Get indices for this volume label in voxel space
+        vox_bool = mgz_data == vol_id
+
+        # Get the 3 dimensional indices in voxel space
+        vox_xyz = np.array(np.where(vox_bool)).T
+
+        # Transform to RAS coordinates
+        # (use tkr normalization or volume won't align with surface sources)
+        trans = _get_mgz_header(mri)['vox2ras_tkr']
+        # Convert transform from mm to m
+        trans[:3] /= 1000.
+        rr_voi = apply_trans(trans, vox_xyz)  # positions of VOI in RAS space
+        # Filter out points too far from volume region voxels
+        dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
+        # Maximum distance from center of mass of a voxel to any of its corners
+        maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) ** 2).sum())
+        bads = np.where(dists > maxdist)[0]
+
+        # Update source info
+        sp['inuse'][bads] = False
+        sp['vertno'] = np.where(sp['inuse'] > 0)[0]
+        sp['nuse'] = len(sp['vertno'])
+        sp['seg_name'] = volume_label
+        sp['mri_file'] = mri
+
+        # Update log
+        logger.info('%d sources remaining after excluding sources too far '
+                    'from VOI voxels', sp['nuse'])
+
+    # Omit unused vertices from the neighborhoods
+    logger.info('Adjusting the neighborhood info...')
+    # remove non source-space points
+    log_inuse = sp['inuse'] > 0
+    neigh[:, np.logical_not(log_inuse)] = -1
+    # remove these points from neigh
+    vertno = np.where(log_inuse)[0]
+    sp['vertno'] = vertno
+    old_shape = neigh.shape
+    neigh = neigh.ravel()
+    checks = np.where(neigh >= 0)[0]
+    removes = np.logical_not(in1d(neigh[checks], vertno))
+    neigh[checks[removes]] = -1
+    neigh.shape = old_shape
+    neigh = neigh.T
+    # Thought we would need this, but the C code keeps -1 vertices, so we
+    # do too:
+    # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
+    sp['neighbor_vert'] = neigh
+
+    # Set up the volume data (needed for creating the interpolation matrix)
+    r0 = minn * grid
+    voxel_size = grid * np.ones(3)
+    ras = np.eye(3)
+    sp['src_mri_t'] = _make_voxel_ras_trans(r0, ras, voxel_size)
+    sp['vol_dims'] = maxn - minn + 1
+    return sp
+
+
+def _vol_vertex(width, height, jj, kk, pp):
+    return jj + width * kk + pp * (width * height)
+
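+# _vol_vertex maps 3-D grid indices (jj, kk, pp) to the linear vertex number
+# used throughout the volume grid, with jj (width/x) varying fastest. E.g.,
+# for a 2 x 2 x 2 grid, (jj=1, kk=1, pp=1) -> 1 + 2*1 + 1*(2*2) = 7.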
+
+def _get_mgz_header(fname):
+    """Adapted from nibabel to quickly extract header info"""
+    if not fname.endswith('.mgz'):
+        raise IOError('Filename must end with .mgz')
+    header_dtd = [('version', '>i4'), ('dims', '>i4', (4,)),
+                  ('type', '>i4'), ('dof', '>i4'), ('goodRASFlag', '>i2'),
+                  ('delta', '>f4', (3,)), ('Mdc', '>f4', (3, 3)),
+                  ('Pxyz_c', '>f4', (3,))]
+    header_dtype = np.dtype(header_dtd)
+    with gzip_open(fname, 'rb') as fid:
+        hdr_str = fid.read(header_dtype.itemsize)
+    header = np.ndarray(shape=(), dtype=header_dtype,
+                        buffer=hdr_str)
+    # dims
+    dims = header['dims'].astype(int)
+    dims = dims[:3] if len(dims) == 4 else dims
+    # vox2ras_tkr
+    delta = header['delta']
+    ds = np.array(delta, float)
+    ns = np.array(dims * ds) / 2.0
+    v2rtkr = np.array([[-ds[0], 0, 0, ns[0]],
+                       [0, 0, ds[2], -ns[2]],
+                       [0, -ds[1], 0, ns[1]],
+                       [0, 0, 0, 1]], dtype=np.float32)
+    # ras2vox
+    d = np.diag(delta)
+    pcrs_c = dims / 2.0
+    Mdc = header['Mdc'].T
+    pxyz_0 = header['Pxyz_c'] - np.dot(Mdc, np.dot(d, pcrs_c))
+    M = np.eye(4, 4)
+    M[0:3, 0:3] = np.dot(Mdc, d)
+    M[0:3, 3] = pxyz_0.T
+    M = linalg.inv(M)
+    header = dict(dims=dims, vox2ras_tkr=v2rtkr, ras2vox=M)
+    return header
+
+
+def _add_interpolator(s, mri_name, add_interpolator):
+    """Compute a sparse matrix to interpolate the data into an MRI volume"""
+    # extract transformation information from mri
+    logger.info('Reading %s...' % mri_name)
+    header = _get_mgz_header(mri_name)
+    mri_width, mri_height, mri_depth = header['dims']
+
+    s.update(dict(mri_width=mri_width, mri_height=mri_height,
+                  mri_depth=mri_depth))
+    trans = header['vox2ras_tkr'].copy()
+    trans[:3, :] /= 1000.0
+    s['vox_mri_t'] = Transform('mri_voxel', 'mri', trans)  # ras_tkr
+    trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
+    trans[:3, 3] /= 1000.0
+    s['mri_ras_t'] = Transform('mri', 'ras', trans)  # ras
+    s['mri_volume_name'] = mri_name
+    nvox = mri_width * mri_height * mri_depth
+    if not add_interpolator:
+        s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
+        return
+
+    _print_coord_trans(s['src_mri_t'], 'Source space : ')
+    _print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
+    _print_coord_trans(s['mri_ras_t'], 'MRI volume : ')
+
+    #
+    # Convert MRI voxels from destination (MRI volume) to source (volume
+    # source space subset) coordinates
+    #
+    combo_trans = combine_transforms(s['vox_mri_t'],
+                                     invert_transform(s['src_mri_t']),
+                                     'mri_voxel', 'mri_voxel')
+    combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
+
+    logger.info('Setting up interpolation...')
+
+    # Loop over slices to save (lots of) memory
+    # Note that it is the slowest incrementing index
+    # This is equivalent to using mgrid and reshaping, but faster
+    data = []
+    indices = []
+    indptr = np.zeros(nvox + 1, np.int32)
+    for p in range(mri_depth):
+        js = np.arange(mri_width, dtype=np.float32)
+        js = np.tile(js[np.newaxis, :],
+                     (mri_height, 1)).ravel()
+        ks = np.arange(mri_height, dtype=np.float32)
+        ks = np.tile(ks[:, np.newaxis],
+                     (1, mri_width)).ravel()
+        ps = np.empty((mri_height, mri_width), np.float32).ravel()
+        ps.fill(p)
+        r0 = np.c_[js, ks, ps]
+        del js, ks, ps
+
+        # Transform our vertices from their MRI space into our source space's
+        # frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
+        # really a subset of the entire volume!)
+        r0 = apply_trans(combo_trans['trans'], r0)
+        rn = np.floor(r0).astype(int)
+        maxs = (s['vol_dims'] - 1)[np.newaxis, :]
+        good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
+                                       np.all(rn < maxs, axis=1)))[0]
+        rn = rn[good]
+        r0 = r0[good]
+
+        # now we take each MRI voxel *in this space*, and figure out how
+        # to make its value the weighted sum of voxels in the volume source
+        # space. This is a 3D weighting scheme based (presumably) on the
+        # fact that we know we're interpolating from one volumetric grid
+        # into another.
+        jj = rn[:, 0]
+        kk = rn[:, 1]
+        pp = rn[:, 2]
+        vss = np.empty((len(jj), 8), np.int32)
+        width = s['vol_dims'][0]
+        height = s['vol_dims'][1]
+        jjp1 = jj + 1
+        kkp1 = kk + 1
+        ppp1 = pp + 1
+        vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
+        vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
+        vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
+        vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
+        vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
+        vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
+        vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
+        vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
+        del jj, kk, pp, jjp1, kkp1, ppp1
+        uses = np.any(s['inuse'][vss], axis=1)
+        if uses.size == 0:
+            continue
+        vss = vss[uses].ravel()  # vertex (col) numbers in csr matrix
+        indices.append(vss)
+        indptr[good[uses] + p * mri_height * mri_width + 1] = 8
+        del vss
+
+        # figure out weights for each vertex
+        r0 = r0[uses]
+        rn = rn[uses]
+        del uses, good
+        xf = r0[:, 0] - rn[:, 0].astype(np.float32)
+        yf = r0[:, 1] - rn[:, 1].astype(np.float32)
+        zf = r0[:, 2] - rn[:, 2].astype(np.float32)
+        omxf = 1.0 - xf
+        omyf = 1.0 - yf
+        omzf = 1.0 - zf
+        # each entry in the concatenation corresponds to a row of vss
+        data.append(np.array([omxf * omyf * omzf,
+                              xf * omyf * omzf,
+                              xf * yf * omzf,
+                              omxf * yf * omzf,
+                              omxf * omyf * zf,
+                              xf * omyf * zf,
+                              xf * yf * zf,
+                              omxf * yf * zf], order='F').T.ravel())
+        del xf, yf, zf, omxf, omyf, omzf
+
+    # Compose the sparse matrix
+    indptr = np.cumsum(indptr, out=indptr)
+    indices = np.concatenate(indices)
+    data = np.concatenate(data)
+    s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
+                                          shape=(nvox, s['np']))
+    logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
+
+
+@verbose
+def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
+                          verbose=None):
+    """Remove all source space points closer than a given limit (in mm)"""
+    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
+        raise RuntimeError('Source spaces are in head coordinates and no '
+                           'coordinate transform was provided!')
+
+    # How close are the source points to the surface?
+    out_str = 'Source spaces are in '
+
+    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
+        inv_trans = invert_transform(mri_head_t)
+        out_str += 'head coordinates.'
+    elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
+        out_str += 'MRI coordinates.'
+    else:
+        out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
+    logger.info(out_str)
+    out_str = 'Checking that the sources are inside the bounding surface'
+    if limit > 0.0:
+        out_str += ' and at least %6.1f mm away' % (limit)
+    logger.info(out_str + ' (will take a few...)')
+
+    for s in src:
+        vertno = np.where(s['inuse'])[0]  # can't trust s['vertno'] this deep
+        # Convert all points here first to save time
+        r1s = s['rr'][vertno]
+        if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
+            r1s = apply_trans(inv_trans['trans'], r1s)
+
+        # Check that the source is inside surface (often the inner skull)
+        outside = _points_outside_surface(r1s, surf, n_jobs)
+        omit_outside = np.sum(outside)
+
+        # vectorized nearest using BallTree (or cdist)
+        omit = 0
+        if limit > 0.0:
+            dists = _compute_nearest(surf['rr'], r1s, return_dists=True)[1]
+            close = np.logical_and(dists < limit / 1000.0,
+                                   np.logical_not(outside))
+            omit = np.sum(close)
+            outside = np.logical_or(outside, close)
+        s['inuse'][vertno[outside]] = False
+        s['nuse'] -= (omit + omit_outside)
+        s['vertno'] = np.where(s['inuse'])[0]
+
+        if omit_outside > 0:
+            extras = [omit_outside]
+            extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
+            logger.info('%d source space point%s omitted because %s '
+                        'outside the inner skull surface.' % tuple(extras))
+        if omit > 0:
+            extras = [omit]
+            extras += ['s'] if omit > 1 else ['']
+            extras += [limit]
+            logger.info('%d source space point%s omitted because of the '
+                        '%6.1f-mm distance limit.' % tuple(extras))
+    logger.info('Thank you for waiting.')
+
+
+@verbose
+def _points_outside_surface(rr, surf, n_jobs=1, verbose=None):
+    """Check whether points are outside a surface
+
+    Parameters
+    ----------
+    rr : ndarray
+        Nx3 array of points to check.
+    surf : dict
+        Surface with entries "rr" and "tris".
+
+    Returns
+    -------
+    outside : ndarray
+        1D logical array of size N for which points are outside the surface.
+    """
+    rr = np.atleast_2d(rr)
+    assert rr.shape[1] == 3
+    parallel, p_fun, _ = parallel_func(_get_solids, n_jobs)
+    tot_angles = parallel(p_fun(surf['rr'][tris], rr)
+                          for tris in np.array_split(surf['tris'], n_jobs))
+    return np.abs(np.sum(tot_angles, axis=0) / (2 * np.pi) - 1.0) > 1e-5
+
+
+def _get_solids(tri_rrs, fros):
+    """Helper for computing _sum_solids_div total angle in chunks"""
+    # NOTE: This incorporates the division by 4PI that used to be separate
+    # for tri_rr in tri_rrs:
+    #     v1 = fros - tri_rr[0]
+    #     v2 = fros - tri_rr[1]
+    #     v3 = fros - tri_rr[2]
+    #     triple = np.sum(fast_cross_3d(v1, v2) * v3, axis=1)
+    #     l1 = np.sqrt(np.sum(v1 * v1, axis=1))
+    #     l2 = np.sqrt(np.sum(v2 * v2, axis=1))
+    #     l3 = np.sqrt(np.sum(v3 * v3, axis=1))
+    #     s = (l1 * l2 * l3 +
+    #          np.sum(v1 * v2, axis=1) * l3 +
+    #          np.sum(v1 * v3, axis=1) * l2 +
+    #          np.sum(v2 * v3, axis=1) * l1)
+    #     tot_angle -= np.arctan2(triple, s)
+
+    # This is the vectorized version, but with a slicing heuristic to
+    # prevent memory explosion
+    tot_angle = np.zeros((len(fros)))
+    slices = np.r_[np.arange(0, len(fros), 100), [len(fros)]]
+    for i1, i2 in zip(slices[:-1], slices[1:]):
+        v1 = fros[i1:i2] - tri_rrs[:, 0, :][:, np.newaxis]
+        v2 = fros[i1:i2] - tri_rrs[:, 1, :][:, np.newaxis]
+        v3 = fros[i1:i2] - tri_rrs[:, 2, :][:, np.newaxis]
+        triples = _fast_cross_nd_sum(v1, v2, v3)
+        l1 = np.sqrt(np.sum(v1 * v1, axis=2))
+        l2 = np.sqrt(np.sum(v2 * v2, axis=2))
+        l3 = np.sqrt(np.sum(v3 * v3, axis=2))
+        ss = (l1 * l2 * l3 +
+              np.sum(v1 * v2, axis=2) * l3 +
+              np.sum(v1 * v3, axis=2) * l2 +
+              np.sum(v2 * v3, axis=2) * l1)
+        tot_angle[i1:i2] = -np.sum(np.arctan2(triples, ss), axis=0)
+    return tot_angle
+
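+# Why the test in _points_outside_surface works (sketch): for a closed
+# surface with consistent (outward) triangle orientation, the per-triangle
+# arctan terms summed in _get_solids equal half the total solid angle
+# subtended by the surface: 2*pi seen from an interior point, 0 from an
+# exterior one. Hence sum / (2*pi) is ~1 inside and ~0 outside, and the 1e-5
+# tolerance on |sum / (2*pi) - 1| separates the two cases.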
+
+@verbose
+def _ensure_src(src, verbose=None):
+    """Helper to ensure we have a source space"""
+    if isinstance(src, string_types):
+        if not op.isfile(src):
+            raise IOError('Source space file "%s" not found' % src)
+        logger.info('Reading %s...' % src)
+        src = read_source_spaces(src, verbose=False)
+    if not isinstance(src, SourceSpaces):
+        raise ValueError('src must be a string or instance of SourceSpaces')
+    return src
+
+
+def _ensure_src_subject(src, subject):
+    src_subject = src[0].get('subject_his_id', None)
+    if subject is None:
+        subject = src_subject
+        if subject is None:
+            raise ValueError('source space is too old, subject must be '
+                             'provided')
+    elif src_subject is not None and subject != src_subject:
+        raise ValueError('Mismatch between provided subject "%s" and subject '
+                         'name "%s" in the source space'
+                         % (subject, src_subject))
+    return subject
+
+
+@verbose
+def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
+    """Compute inter-source distances along the cortical surface
+
+    This function will also try to add patch info for the source space.
+    This will only occur if the ``dist_limit`` is sufficiently high that all
+    points on the surface are within ``dist_limit`` of a point in the
+    source space.
+
+    Parameters
+    ----------
+    src : instance of SourceSpaces
+        The source spaces to compute distances for.
+    dist_limit : float
+        The upper limit of distances to include (in meters).
+        Note: if dist_limit < np.inf, scipy > 0.13 (bleeding edge as of
+        10/2013) must be installed.
+    n_jobs : int
+        Number of jobs to run in parallel. Will only use (up to) as many
+        cores as there are source spaces.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : instance of SourceSpaces
+        The original source spaces, with distance information added.
+        The distances are stored in src[n]['dist'].
+        Note: this function operates in-place.
+
+    Notes
+    -----
+    Requires scipy >= 0.11 (> 0.13 for `dist_limit < np.inf`).
+
+    This function can be memory- and CPU-intensive. On a high-end machine
+    (2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
+    takes about 10 minutes to compute all distances (`dist_limit = np.inf`).
+    With `dist_limit = 0.007`, computing distances takes about 1 minute.
+
+    We recommend computing distances once per source space and then saving
+    the source space to disk, as the computed distances will automatically be
+    stored along with the source space data for future use.
+    """
+    n_jobs = check_n_jobs(n_jobs)
+    src = _ensure_src(src)
+    if not np.isscalar(dist_limit):
+        raise ValueError('dist_limit must be a scalar, got %s'
+                         % repr(dist_limit))
+    if not check_version('scipy', '0.11'):
+        raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
+                           'if dist_limit < np.inf)')
+
+    if not all(s['type'] == 'surf' for s in src):
+        raise RuntimeError('Currently all source spaces must be of surface '
+                           'type')
+
+    if dist_limit < np.inf:
+        # can't do introspection on dijkstra function because it's Cython,
+        # so we'll just try quickly here
+        try:
+            sparse.csgraph.dijkstra(sparse.csr_matrix(np.zeros((2, 2))),
+                                    limit=1.0)
+        except TypeError:
+            raise RuntimeError('Cannot use "limit < np.inf" unless scipy '
+                               '> 0.13 is installed')
+
+    parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
+    min_dists = list()
+    min_idxs = list()
+    logger.info('Calculating source space distances (limit=%s mm)...'
+                % (1000 * dist_limit))
+    for s in src:
+        connectivity = mesh_dist(s['tris'], s['rr'])
+        d = parallel(p_fun(connectivity, s['vertno'], r, dist_limit)
+                     for r in np.array_split(np.arange(len(s['vertno'])),
+                                             n_jobs))
+        # deal with indexing so we can add patch info
+        min_idx = np.array([dd[1] for dd in d])
+        min_dist = np.array([dd[2] for dd in d])
+        midx = np.argmin(min_dist, axis=0)
+        range_idx = np.arange(len(s['rr']))
+        min_dist = min_dist[midx, range_idx]
+        min_idx = min_idx[midx, range_idx]
+        min_dists.append(min_dist)
+        min_idxs.append(min_idx)
+        # now actually deal with distances, convert to sparse representation
+        d = np.concatenate([dd[0] for dd in d]).ravel()  # already float32
+        idx = d > 0
+        d = d[idx]
+        i, j = np.meshgrid(s['vertno'], s['vertno'])
+        i = i.ravel()[idx]
+        j = j.ravel()[idx]
+        d = sparse.csr_matrix((d, (i, j)),
+                              shape=(s['np'], s['np']), dtype=np.float32)
+        s['dist'] = d
+        s['dist_limit'] = np.array([dist_limit], np.float32)
+
+    # Let's see if our distance was sufficient to allow for patch info
+    if not any(np.any(np.isinf(md)) for md in min_dists):
+        # Patch info can be added!
+        for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
+            s['nearest'] = min_idx
+            s['nearest_dist'] = min_dist
+            _add_patch_info(s)
+    else:
+        logger.info('Not adding patch information, dist_limit too small')
+    return src
+
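+# Example (illustrative sketch, not part of the original changeset): given a
+# surface source space ``src`` (e.g. from setup_source_space), limit the
+# computation to 7 mm and save the result so the distances are reused on the
+# next load (the file name is an assumption for demonstration):
+#
+#     >>> src = add_source_space_distances(src, dist_limit=0.007, n_jobs=2)
+#     >>> write_source_spaces('sample-oct6-dist-src.fif', src)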
+
+def _do_src_distances(con, vertno, run_inds, limit):
+    """Helper to compute source space distances in chunks"""
+    if limit < np.inf:
+        func = partial(sparse.csgraph.dijkstra, limit=limit)
+    else:
+        func = sparse.csgraph.dijkstra
+    chunk_size = 20  # save memory by chunking (only a little slower)
+    lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
+    n_chunks = len(lims) - 1
+    # eventually we want this in float32, so save memory by only storing 32-bit
+    d = np.empty((len(run_inds), len(vertno)), np.float32)
+    min_dist = np.empty((n_chunks, con.shape[0]))
+    min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
+    range_idx = np.arange(con.shape[0])
+    for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
+        idx = vertno[run_inds[l1:l2]]
+        out = func(con, indices=idx)
+        midx = np.argmin(out, axis=0)
+        min_idx[li] = idx[midx]
+        min_dist[li] = out[midx, range_idx]
+        d[l1:l2] = out[:, vertno]
+    midx = np.argmin(min_dist, axis=0)
+    min_dist = min_dist[midx, range_idx]
+    min_idx = min_idx[midx, range_idx]
+    d[d == np.inf] = 0  # scipy will give us np.inf for uncalc. distances
+    return d, min_idx, min_dist
+
+
+def get_volume_labels_from_aseg(mgz_fname):
+    """Returns a list of names of segmented volumes.
+
+    Parameters
+    ----------
+    mgz_fname : str
+        Filename to read. Typically aseg.mgz or some variant in the freesurfer
+        pipeline.
+
+    Returns
+    -------
+    label_names : list of str
+        The names of segmented volumes included in this mgz file.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    import nibabel as nib
+
+    # Read the mgz file using nibabel
+    mgz_data = nib.load(mgz_fname).get_data()
+
+    # Get the unique label names
+    lut = _get_lut()
+    label_names = [lut[lut['id'] == ii]['name'][0].decode('utf-8')
+                   for ii in np.unique(mgz_data)]
+    label_names = sorted(label_names, key=lambda n: n.lower())
+    return label_names
+
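+# Example (illustrative sketch, not part of the original changeset): list
+# the segmented volumes of a hypothetical aseg file before choosing one as
+# ``volume_label`` for setup_volume_source_space:
+#
+#     >>> labels = get_volume_labels_from_aseg('aseg.mgz')
+#     >>> 'Left-Cerebellum-Cortex' in labels  # a standard FreeSurfer label
+#     True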
+
+def _get_hemi(s):
+    """Helper to get a hemisphere from a given source space"""
+    if s['type'] != 'surf':
+        raise RuntimeError('Only surface source spaces supported')
+    if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
+        return 'lh', 0, s['id']
+    elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
+        return 'rh', 1, s['id']
+    else:
+        raise ValueError('unknown surface ID %s' % s['id'])
+
+
+def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
+                       to_neighbor_tri=None):
+    """Helper to get a nearest-neigbor vertex match for a given hemi src
+
+    The to_neighbor_tri can optionally be passed in to avoid recomputation
+    if it's already available.
+    """
+    # adapted from mne_make_source_space.c, knowing accurate=False (i.e.
+    # nearest-neighbor mode should be used)
+    logger.info('Mapping %s %s -> %s (nearest neighbor)...'
+                % (hemi, subject_from, subject_to))
+    regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
+            for s in (subject_from, subject_to)]
+    reg_fro, reg_to = [_read_surface_geom(r, patch_stats=False) for r in regs]
+    if to_neighbor_tri is None:
+        to_neighbor_tri = _triangle_neighbors(reg_to['tris'], reg_to['np'])
+    morph_inuse = np.zeros(len(reg_to['rr']), bool)
+    best = np.zeros(fro_src['np'], int)
+    ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
+    for v, one in zip(fro_src['vertno'], ones):
+        # if it were actually a proper morph map, we would do this, but since
+        # we know it's nearest neighbor list, we don't need to:
+        # this_mm = mm[v]
+        # one = this_mm.indices[this_mm.data.argmax()]
+        if morph_inuse[one]:
+            # Try the nearest neighbors
+            neigh = _get_surf_neighbors(reg_to, one)  # on demand calc
+            was = one
+            one = neigh[np.where(~morph_inuse[neigh])[0]]
+            if len(one) == 0:
+                raise RuntimeError('vertex %d would be used multiple times.'
+                                   % was)
+            one = one[0]
+            logger.info('Source space vertex moved from %d to %d because of '
+                        'double occupation.' % (was, one))
+        best[v] = one
+        morph_inuse[one] = True
+    return best
+
+
+@verbose
+def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
+                        subjects_dir=None, verbose=None):
+    """Morph an existing source space to a different subject
+
+    .. warning:: This can be used in place of morphing source estimates for
+                 multiple subjects, but there may be consequences in terms
+                 of dipole topology.
+
+    Parameters
+    ----------
+    src_from : instance of SourceSpaces
+        Surface source spaces to morph.
+    subject_to : str
+        The destination subject.
+    surf : str
+        The brain surface to use for the new source space.
+    subject_from : str | None
+        The "from" subject. For most source spaces this shouldn't need
+        to be provided, since it is stored in the source space itself.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : instance of SourceSpaces
+        The morphed source spaces.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    # adapted from mne_make_source_space.c
+    src_from = _ensure_src(src_from)
+    subject_from = _ensure_src_subject(src_from, subject_from)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    src_out = list()
+    for fro in src_from:
+        hemi, idx, id_ = _get_hemi(fro)
+        to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
+        logger.info('Reading destination surface %s' % (to,))
+        to = _read_surface_geom(to, patch_stats=False, verbose=False)
+        _complete_surface_info(to)
+        # Now we morph the vertices to the destination
+        # The C code does something like this, but with a nearest-neighbor
+        # mapping instead of the weighted one::
+        #
+        #     >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
+        #
+        # Here we use a direct NN calculation, since picking the max from the
+        # existing morph map (which naively one might expect to be equivalent)
+        # differs for ~3% of vertices.
+        best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
+                                  subjects_dir, to['neighbor_tri'])
+        for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
+                    'use_tris'):
+            del to[key]
+        to['vertno'] = np.sort(best[fro['vertno']])
+        to['inuse'] = np.zeros(len(to['rr']), int)
+        to['inuse'][to['vertno']] = True
+        to['use_tris'] = best[fro['use_tris']]
+        to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
+                  nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
+                  dist=None, id=id_, dist_limit=None, type='surf',
+                  coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
+                  rr=to['rr'] / 1000.)
+        src_out.append(to)
+        logger.info('[done]\n')
+    info = dict(working_dir=os.getcwd(),
+                command_line=_get_call_line(in_verbose=True))
+    return SourceSpaces(src_out, info=info)
+
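+# Editorial usage sketch (hypothetical subject names and source-space
+# setup; not part of this changeset): morphing a source space defined on
+# 'sample' to 'fsaverage':
+#
+#     >>> src = mne.setup_source_space('sample', fname=None, spacing='oct6')
+#     >>> src_fsaverage = morph_source_spaces(src, 'fsaverage')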
+
+@verbose
+def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
+                              subjects_dir=None, verbose=None):
+    """Get the reordering indices for a morphed source space
+
+    Parameters
+    ----------
+    vertices : list
+        The vertices for the left and right hemispheres.
+    src_from : instance of SourceSpaces
+        The original source space.
+    subject_from : str
+        The source subject.
+    subject_to : str
+        The destination subject.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    data_idx : ndarray, shape (n_vertices,)
+        The array used to reshape the data.
+    from_vertices : list
+        The left and right hemisphere vertex numbers for the "from" subject.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    from_vertices = list()
+    data_idxs = list()
+    offset = 0
+    for ii, hemi in enumerate(('lh', 'rh')):
+        # Get the mapping from the original source space to the destination
+        # subject's surface vertex numbers
+        best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
+                                  hemi, subjects_dir)
+        full_mapping = best[src_from[ii]['vertno']]
+        # Tragically, we might not have all of our vertno left (e.g. because
+        # some are omitted during fwd calc), so we must do some indexing magic:
+
+        # From all vertices, a subset could be chosen by fwd calc:
+        used_vertices = in1d(full_mapping, vertices[ii])
+        from_vertices.append(src_from[ii]['vertno'][used_vertices])
+        remaining_mapping = full_mapping[used_vertices]
+        if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
+                not in1d(vertices[ii], full_mapping).all():
+            raise RuntimeError('Could not map vertices, perhaps the wrong '
+                               'subject "%s" was provided?' % subject_from)
+
+        # And our data have been implicitly remapped by the forced ascending
+        # vertno order in source spaces
+        implicit_mapping = np.argsort(remaining_mapping)  # happens to data
+        data_idx = np.argsort(implicit_mapping)  # to reverse the mapping
+        data_idx += offset  # hemisphere offset
+        data_idxs.append(data_idx)
+        offset += len(implicit_mapping)
+    data_idx = np.concatenate(data_idxs)
+    # this one is really just a sanity check for us, should never be violated
+    # by users
+    assert np.array_equal(np.sort(data_idx),
+                          np.arange(sum(len(v) for v in vertices)))
+    return data_idx, from_vertices
+
+
+def _compare_source_spaces(src0, src1, mode='exact', dist_tol=1.5e-3):
+    """Compare two source spaces
+
+    Note: this function is also used by forward/tests/test_make_forward.py
+    """
+    from nose.tools import assert_equal, assert_true
+    from numpy.testing import assert_allclose, assert_array_equal
+    from scipy.spatial.distance import cdist
+    if mode != 'exact' and 'approx' not in mode:  # 'nointerp' can be appended
+        raise RuntimeError('unknown mode %s' % mode)
+
+    for s0, s1 in zip(src0, src1):
+        # first check the keys
+        a, b = set(s0.keys()), set(s1.keys())
+        assert_equal(a, b, str(a ^ b))
+        for name in ['nuse', 'ntri', 'np', 'type', 'id']:
+            assert_equal(s0[name], s1[name], name)
+        for name in ['subject_his_id']:
+            if name in s0 or name in s1:
+                assert_equal(s0[name], s1[name], name)
+        for name in ['interpolator']:
+            if name in s0 or name in s1:
+                diffs = (s0['interpolator'] - s1['interpolator']).data
+                if len(diffs) > 0 and 'nointerp' not in mode:
+                    # RMS of the interpolator difference should be < 10%
+                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
+        for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
+            if s0[name] is None:
+                assert_true(s1[name] is None, name)
+            else:
+                if mode == 'exact':
+                    assert_array_equal(s0[name], s1[name], name)
+                else:  # 'approx' in mode
+                    atol = 1e-3 if name == 'nn' else 1e-4
+                    assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
+                                    err_msg=name)
+        for name in ['seg_name']:
+            if name in s0 or name in s1:
+                assert_equal(s0[name], s1[name], name)
+        if mode == 'exact':
+            for name in ['inuse', 'vertno', 'use_tris']:
+                assert_array_equal(s0[name], s1[name], err_msg=name)
+            # these fields will exist if patch info was added, these are
+            # not tested in mode == 'approx'
+            for name in ['nearest', 'nearest_dist']:
+                if s0[name] is None:
+                    assert_true(s1[name] is None, name)
+                else:
+                    assert_array_equal(s0[name], s1[name])
+            for name in ['dist_limit']:
+                assert_true(s0[name] == s1[name], name)
+            for name in ['dist']:
+                if s0[name] is not None:
+                    assert_equal(s1[name].shape, s0[name].shape)
+                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
+            for name in ['pinfo']:
+                if s0[name] is not None:
+                    assert_true(len(s0[name]) == len(s1[name]))
+                    for p1, p2 in zip(s0[name], s1[name]):
+                        assert_true(all(p1 == p2))
+        else:  # 'approx' in mode:
+            # deal with vertno, inuse, and use_tris carefully
+            assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0],
+                               'src0 inuse/vertno mismatch')
+            assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0],
+                               'src1 inuse/vertno mismatch')
+            assert_equal(len(s0['vertno']), len(s1['vertno']))
+            agreement = np.mean(s0['inuse'] == s1['inuse'])
+            assert_true(agreement >= 0.99, "%s < 0.99" % agreement)
+            if agreement < 1.0:
+                # make sure mismatched vertno are within 1.5mm
+                v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
+                v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
+                dists = cdist(s0['rr'][v0], s1['rr'][v1])
+                assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
+                                atol=dist_tol, err_msg='mismatched vertno')
+            if s0['use_tris'] is not None:  # for "spacing"
+                assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
+            else:
+                assert_true(s1['use_tris'] is None)
+            assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
+    # The above "if s0[name] is not None" can be removed once the sample
+    # dataset is updated to have a source space with distance info
+    for name in ['working_dir', 'command_line']:
+        if mode == 'exact':
+            assert_equal(src0.info[name], src1.info[name])
+        else:  # 'approx' in mode:
+            if name in src0.info:
+                assert_true(name in src1.info, '"%s" missing' % name)
+            else:
+                assert_true(name not in src1.info,
+                            '"%s" should not exist' % name)
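+# Editorial note: typical test-suite use is a sketch like
+# _compare_source_spaces(src_orig, src_read, mode='approx') after a
+# write/read round-trip ('src_orig' and 'src_read' are hypothetical names);
+# mode='exact' additionally checks inuse, vertno, use_tris, and patch info.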
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/__init__.py
new file mode 100644
index 0000000..b45141e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/__init__.py
@@ -0,0 +1,14 @@
+"""Functions for statistical analysis"""
+
+from .parametric import (
+    f_threshold_twoway_rm, f_threshold_mway_rm, f_twoway_rm, f_mway_rm)
+from .permutations import permutation_t_test
+from .cluster_level import (permutation_cluster_test,
+                            permutation_cluster_1samp_test,
+                            spatio_temporal_cluster_1samp_test,
+                            spatio_temporal_cluster_test,
+                            _st_mask_from_s_inds,
+                            ttest_1samp_no_p,
+                            summarize_clusters_stc)
+from .multi_comp import fdr_correction, bonferroni_correction
+from .regression import linear_regression, linear_regression_raw
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/cluster_level.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/cluster_level.py
new file mode 100644
index 0000000..d0b1ec6
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/cluster_level.py
@@ -0,0 +1,1555 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Authors: Thorsten Kranz <thorstenkranz at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+import warnings
+import logging
+from scipy import sparse
+
+from .parametric import f_oneway
+from ..parallel import parallel_func, check_n_jobs
+from ..utils import split_list, logger, verbose, ProgressBar
+from ..fixes import in1d, unravel_index
+from ..source_estimate import SourceEstimate
+
+
+def _get_clusters_spatial(s, neighbors):
+    """Helper function to form spatial clusters using neighbor lists
+
+    This is equivalent to _get_components with n_times = 1, with a properly
+    reconfigured connectivity matrix (formed as "neighbors" list)
+    """
+    # s is a vector of spatial indices that are significant, like:
+    #     s = np.where(x_in)[0]
+    # for x_in representing a single time-instant
+    r = np.ones(s.shape, dtype=bool)
+    clusters = list()
+    next_ind = 0 if s.size > 0 else None
+    while next_ind is not None:
+        # put first point in a cluster, adjust remaining
+        t_inds = [next_ind]
+        r[next_ind] = False
+        icount = 1  # count of nodes in the current cluster
+        while icount <= len(t_inds):
+            ind = t_inds[icount - 1]
+            # look across other vertices
+            buddies = np.where(r)[0]
+            buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
+                                   assume_unique=True)]
+            t_inds += buddies.tolist()
+            r[buddies] = False
+            icount += 1
+        # this is equivalent to np.where(r)[0] for these purposes, but it's
+        # a little bit faster. Unfortunately there's no way to tell numpy
+        # just to find the first instance (to save checking every one):
+        next_ind = np.argmax(r)
+        if next_ind == 0:
+            next_ind = None
+        clusters.append(s[t_inds])
+    return clusters
+
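+# Editorial toy illustration (data invented for exposition): on a 4-vertex
+# path graph 0-1-2-3 with vertices {0, 1, 3} significant, two spatial
+# clusters emerge:
+#
+#     >>> neighbors = [np.array([1]), np.array([0, 2]),
+#     ...              np.array([1, 3]), np.array([2])]
+#     >>> _get_clusters_spatial(np.array([0, 1, 3]), neighbors)
+#     [array([0, 1]), array([3])]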
+
+def _reassign(check, clusters, base, num):
+    """Helper function to reassign cluster numbers"""
+    # reconfigure check matrix
+    check[check == num] = base
+    # concatenate new values into clusters array
+    clusters[base - 1] = np.concatenate((clusters[base - 1],
+                                         clusters[num - 1]))
+    clusters[num - 1] = np.array([], dtype=int)
+
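+# Editorial worked example (toy values): with check = np.array([1, 2]) and
+# clusters = [np.array([0]), np.array([1])], _reassign(check, clusters, 1, 2)
+# relabels the 2 in check to 1 and merges clusters[1] into clusters[0],
+# leaving clusters[1] empty.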
+
+def _get_clusters_st_1step(keepers, neighbors):
+    """Directly calculate connectivity based on knowledge that time points are
+    only connected to adjacent neighbors for data organized as time x space.
+
+    The run time of this algorithm increases linearly with the number of
+    time points, compared to quadratically for the standard (graph)
+    algorithm.
+
+    This algorithm creates clusters for each time point using a method more
+    efficient than the standard graph method (but otherwise equivalent), then
+    combines these clusters across time points in a reasonable way."""
+    n_src = len(neighbors)
+    n_times = len(keepers)
+    # start cluster numbering at 1 for diffing convenience
+    enum_offset = 1
+    check = np.zeros((n_times, n_src), dtype=int)
+    clusters = list()
+    for ii, k in enumerate(keepers):
+        c = _get_clusters_spatial(k, neighbors)
+        for ci, cl in enumerate(c):
+            check[ii, cl] = ci + enum_offset
+        enum_offset += len(c)
+        # give them the correct offsets
+        c = [cl + ii * n_src for cl in c]
+        clusters += c
+
+    # now that each cluster has been assigned a unique number, combine them
+    # by going through each time point
+    for check1, check2, k in zip(check[:-1], check[1:], keepers[:-1]):
+        # go through each one that needs reassignment
+        inds = k[check2[k] - check1[k] > 0]
+        check1_d = check1[inds]
+        n = check2[inds]
+        nexts = np.unique(n)
+        for num in nexts:
+            prevs = check1_d[n == num]
+            base = np.min(prevs)
+            for pr in np.unique(prevs[prevs != base]):
+                _reassign(check1, clusters, base, pr)
+            # reassign values
+            _reassign(check2, clusters, base, num)
+    # clean up clusters
+    clusters = [cl for cl in clusters if len(cl) > 0]
+    return clusters
+
+
+def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
+    """Directly calculate connectivity based on knowledge that time points are
+    only connected to adjacent neighbors for data organized as time x space.
+
+    The run time of this algorithm increases linearly with the number of
+    time points, compared to quadratically for the standard (graph)
+    algorithm.
+    n_src = len(neighbors)
+    n_times = len(keepers)
+    t_border = list()
+    t_border.append(0)
+    for ki, k in enumerate(keepers):
+        keepers[ki] = k + ki * n_src
+        t_border.append(t_border[ki] + len(k))
+    t_border = np.array(t_border)
+    keepers = np.concatenate(keepers)
+    v = keepers
+    t, s = divmod(v, n_src)
+
+    r = np.ones(t.shape, dtype=bool)
+    clusters = list()
+    next_ind = 0
+    inds = np.arange(t_border[0], t_border[n_times])
+    if s.size > 0:
+        while next_ind is not None:
+            # put first point in a cluster, adjust remaining
+            t_inds = [next_ind]
+            r[next_ind] = False
+            icount = 1  # count of nodes in the current cluster
+            # look for significant values at the next time point,
+            # same sensor, not placed yet, and add those
+            while icount <= len(t_inds):
+                ind = t_inds[icount - 1]
+                selves = inds[t_border[max(t[ind] - max_step, 0)]:
+                              t_border[min(t[ind] + max_step + 1, n_times)]]
+                selves = selves[r[selves]]
+                selves = selves[s[ind] == s[selves]]
+
+                # look at current time point across other vertices
+                buddies = inds[t_border[t[ind]]:t_border[t[ind] + 1]]
+                buddies = buddies[r[buddies]]
+                buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
+                                       assume_unique=True)]
+                buddies = np.concatenate((selves, buddies))
+                t_inds += buddies.tolist()
+                r[buddies] = False
+                icount += 1
+            # this is equivalent to np.where(r)[0] for these purposes, but it's
+            # a little bit faster. Unfortunately there's no way to tell numpy
+            # just to find the first instance (to save checking every one):
+            next_ind = np.argmax(r)
+            if next_ind == 0:
+                next_ind = None
+            clusters.append(v[t_inds])
+
+    return clusters
+
+
+def _get_clusters_st(x_in, neighbors, max_step=1):
+    """Helper function to choose the most efficient version"""
+    n_src = len(neighbors)
+    n_times = x_in.size // n_src
+    cl_goods = np.where(x_in)[0]
+    if len(cl_goods) > 0:
+        keepers = [np.array([], dtype=int)] * n_times
+        row, col = unravel_index(cl_goods, (n_times, n_src))
+        if isinstance(row, int):
+            row = [row]
+            col = [col]
+            lims = [0]
+        else:
+            order = np.argsort(row)
+            row = row[order]
+            col = col[order]
+            lims = [0] + (np.where(np.diff(row) > 0)[0] +
+                          1).tolist() + [len(row)]
+
+        for start, end in zip(lims[:-1], lims[1:]):
+            keepers[row[start]] = np.sort(col[start:end])
+        if max_step == 1:
+            return _get_clusters_st_1step(keepers, neighbors)
+        else:
+            return _get_clusters_st_multistep(keepers, neighbors,
+                                              max_step)
+    else:
+        return []
+
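+# Editorial sketch (toy data): 3 vertices on a path graph over 2 time
+# points, with x_in raveled as time x space. Vertices 0-1 at t=0 connect
+# spatially; vertex 2 at t=1 is isolated in both space and time, so it
+# forms its own cluster (indices refer to the raveled array):
+#
+#     >>> neighbors = [np.array([1]), np.array([0, 2]), np.array([1])]
+#     >>> x_in = np.array([True, True, False, False, False, True])
+#     >>> _get_clusters_st(x_in, neighbors)
+#     [array([0, 1]), array([5])]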
+
+def _get_components(x_in, connectivity, return_list=True):
+    """get connected components from a mask and a connectivity matrix"""
+    try:
+        from sklearn.utils._csgraph import cs_graph_components
+    except ImportError:
+        try:
+            from scikits.learn.utils._csgraph import cs_graph_components
+        except ImportError:
+            try:
+                from sklearn.utils.sparsetools import connected_components
+                cs_graph_components = connected_components
+            except ImportError:
+                # in theory we might be able to shoehorn this into using
+                # _get_clusters_spatial if we transform connectivity into
+                # a neighbor list, and it might end up being faster anyway,
+                # but for now:
+                raise ImportError('scikit-learn must be installed')
+
+    mask = np.logical_and(x_in[connectivity.row], x_in[connectivity.col])
+    data = connectivity.data[mask]
+    row = connectivity.row[mask]
+    col = connectivity.col[mask]
+    shape = connectivity.shape
+    idx = np.where(x_in)[0]
+    row = np.concatenate((row, idx))
+    col = np.concatenate((col, idx))
+    data = np.concatenate((data, np.ones(len(idx), dtype=data.dtype)))
+    connectivity = sparse.coo_matrix((data, (row, col)), shape=shape)
+    _, components = cs_graph_components(connectivity)
+    if return_list:
+        start = np.min(components)
+        stop = np.max(components)
+        comp_list = [list() for i in range(start, stop + 1, 1)]
+        mask = np.zeros(len(comp_list), dtype=bool)
+        for ii, comp in enumerate(components):
+            comp_list[comp].append(ii)
+            mask[comp] += x_in[ii]
+        clusters = [np.array(k) for k, m in zip(comp_list, mask) if m]
+        return clusters
+    else:
+        return components
+
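+# Editorial note: on newer SciPy stacks the same decomposition is available
+# without scikit-learn via scipy.sparse.csgraph.connected_components (a
+# sketch, not what this module uses):
+#
+#     >>> from scipy.sparse import coo_matrix
+#     >>> from scipy.sparse.csgraph import connected_components
+#     >>> conn = coo_matrix(([1, 1], ([0, 1], [1, 2])), shape=(4, 4))
+#     >>> n_comp, labels = connected_components(conn, directed=False)
+#     >>> n_comp  # vertex 3 is isolated -> components {0, 1, 2} and {3}
+#     2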
+
+def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
+                   include=None, partitions=None, t_power=1, show_info=False):
+    """For a given 1d-array (test statistic), find all clusters which
+    are above/below a certain threshold. Returns a list of 2-tuples.
+
+    When doing a two-tailed test (tail == 0), only points with the same
+    sign will be clustered together.
+
+    Parameters
+    ----------
+    x : 1D array
+        Data
+    threshold : float | dict
+        Where to threshold the statistic. Should be negative for tail == -1,
+        and positive for tail == 0 or 1. Can also be a dict for
+        threshold-free cluster enhancement.
+    tail : -1 | 0 | 1
+        Type of comparison
+    connectivity : sparse matrix in COO format, None, or list
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        If connectivity is a list, it is assumed that each entry stores the
+        indices of the spatial neighbors in a spatio-temporal dataset x.
+        Default is None, i.e., a regular lattice connectivity.
+    max_step : int
+        If connectivity is a list, this defines the maximal number of steps
+        between vertices along the second dimension (typically time) to be
+        considered connected.
+    include : 1D bool array or None
+        Mask to apply to the data of points to cluster. If None, all points
+        are used.
+    partitions : array of int or None
+        An array (same size as X) of integers indicating which points belong
+        to each partition.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    show_info : bool
+        If True, display information about thresholds used (for TFCE). Should
+        only be done for the standard permutation.
+
+    Returns
+    -------
+    clusters : list of slices or list of arrays (boolean masks)
+        We use slices for 1D signals and boolean masks for multidimensional
+        arrays.
+    sums : array
+        Sum of x values in clusters.
+    """
+    from scipy import ndimage
+    if tail not in [-1, 0, 1]:
+        raise ValueError('invalid tail parameter')
+
+    x = np.asanyarray(x)
+
+    if not np.isscalar(threshold):
+        if not isinstance(threshold, dict):
+            raise TypeError('threshold must be a number, or a dict for '
+                            'threshold-free cluster enhancement')
+        if not all(key in threshold for key in ['start', 'step']):
+            raise KeyError('threshold, if dict, must have at least '
+                           '"start" and "step"')
+        tfce = True
+        if tail == -1:
+            if threshold['start'] > 0:
+                raise ValueError('threshold["start"] must be <= 0 for '
+                                 'tail == -1')
+            if threshold['step'] >= 0:
+                raise ValueError('threshold["step"] must be < 0 for '
+                                 'tail == -1')
+            stop = np.min(x)
+        elif tail == 1:
+            stop = np.max(x)
+        else:  # tail == 0
+            stop = np.max(np.abs(x))
+        thresholds = np.arange(threshold['start'], stop,
+                               threshold['step'], float)
+        h_power = threshold.get('h_power', 2)
+        e_power = threshold.get('e_power', 0.5)
+        if show_info is True:
+            if len(thresholds) == 0:
+                txt = ('threshold["start"] (%s) is more extreme than '
+                       'data statistics with most extreme value %s'
+                       % (threshold['start'], stop))
+                logger.warning(txt)
+                warnings.warn(txt)
+            else:
+                logger.info('Using %d thresholds from %0.2f to %0.2f for TFCE '
+                            'computation (h_power=%0.2f, e_power=%0.2f)'
+                            % (len(thresholds), thresholds[0], thresholds[-1],
+                               h_power, e_power))
+        scores = np.zeros(x.size)
+    else:
+        thresholds = [threshold]
+        tfce = False
+
+    # include all points by default
+    if include is None:
+        include = np.ones(x.shape, dtype=bool)
+
+    if not np.all(np.diff(thresholds) > 0):
+        raise RuntimeError('Threshold misconfiguration, must be monotonically'
+                           ' increasing')
+
+    # set these here just in case thresholds == []
+    clusters = list()
+    sums = np.empty(0)
+    for ti, thresh in enumerate(thresholds):
+        # these need to be reset on each run
+        clusters = list()
+        sums = np.empty(0)
+        if tail == 0:
+            x_ins = [np.logical_and(x > thresh, include),
+                     np.logical_and(x < -thresh, include)]
+        elif tail == -1:
+            x_ins = [np.logical_and(x < thresh, include)]
+        else:  # tail == 1
+            x_ins = [np.logical_and(x > thresh, include)]
+        # loop over tails
+        for x_in in x_ins:
+            if np.any(x_in):
+                out = _find_clusters_1dir_parts(x, x_in, connectivity,
+                                                max_step, partitions, t_power,
+                                                ndimage)
+                clusters += out[0]
+                sums = np.concatenate((sums, out[1]))
+        if tfce is True:
+            # the score of each point is the sum of the h^H * e^E for each
+            # supporting section "rectangle" h x e.
+            if ti == 0:
+                h = abs(thresh)
+            else:
+                h = abs(thresh - thresholds[ti - 1])
+            h = h ** h_power
+            for c in clusters:
+                # triage based on cluster storage type
+                if isinstance(c, slice):
+                    len_c = c.stop - c.start
+                elif isinstance(c, tuple):
+                    len_c = len(c)
+                elif c.dtype == bool:
+                    len_c = np.sum(c)
+                else:
+                    len_c = len(c)
+                scores[c] += h * (len_c ** e_power)
+    if tfce is True:
+        # each point gets treated independently
+        clusters = np.arange(x.size)
+        if connectivity is None:
+            if x.ndim == 1:
+                # slices
+                clusters = [slice(c, c + 1) for c in clusters]
+            else:
+                # boolean masks (raveled)
+                clusters = [(clusters == ii).ravel()
+                            for ii in range(len(clusters))]
+        else:
+            clusters = [np.array([c]) for c in clusters]
+        sums = scores
+    return clusters, sums
+
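+# Editorial sketch of the two accepted threshold forms (toy values): a
+# fixed scalar, and a TFCE dict with the required 'start' and 'step' keys
+# (optional 'h_power' and 'e_power' default to 2 and 0.5):
+#
+#     >>> x = np.array([0.1, 2.5, 2.7, 0.2, -3.1, -2.9])
+#     >>> clusters, sums = _find_clusters(x, threshold=2.0, tail=0)
+#     >>> clusters, sums = _find_clusters(
+#     ...     x, threshold=dict(start=0.5, step=0.5), tail=0)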
+
+def _find_clusters_1dir_parts(x, x_in, connectivity, max_step, partitions,
+                              t_power, ndimage):
+    """Deal with partitions, and pass the work to _find_clusters_1dir
+    """
+    if partitions is None:
+        clusters, sums = _find_clusters_1dir(x, x_in, connectivity, max_step,
+                                             t_power, ndimage)
+    else:
+        # cluster each partition separately
+        clusters = list()
+        sums = list()
+        for p in range(np.max(partitions) + 1):
+            x_i = np.logical_and(x_in, partitions == p)
+            out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power,
+                                      ndimage)
+            clusters += out[0]
+            sums.append(out[1])
+        sums = np.concatenate(sums)
+    return clusters, sums
+
+
+def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power, ndimage):
+    """Actually call the clustering algorithm"""
+    if connectivity is None:
+        labels, n_labels = ndimage.label(x_in)
+
+        if x.ndim == 1:
+            # slices
+            clusters = ndimage.find_objects(labels, n_labels)
+            if len(clusters) == 0:
+                sums = list()
+            else:
+                index = list(range(1, n_labels + 1))
+                if t_power == 1:
+                    sums = ndimage.measurements.sum(x, labels, index=index)
+                else:
+                    sums = ndimage.measurements.sum(np.sign(x) *
+                                                    np.abs(x) ** t_power,
+                                                    labels, index=index)
+        else:
+            # boolean masks (raveled)
+            clusters = list()
+            sums = np.empty(n_labels)
+            for l in range(1, n_labels + 1):
+                c = labels == l
+                clusters.append(c.ravel())
+                if t_power == 1:
+                    sums[l - 1] = np.sum(x[c])
+                else:
+                    sums[l - 1] = np.sum(np.sign(x[c]) *
+                                         np.abs(x[c]) ** t_power)
+    else:
+        if x.ndim > 1:
+            raise Exception("Data should be 1D when using a connectivity "
+                            "to define clusters.")
+        if isinstance(connectivity, sparse.spmatrix):
+            clusters = _get_components(x_in, connectivity)
+        elif isinstance(connectivity, list):  # use temporal adjacency
+            clusters = _get_clusters_st(x_in, connectivity, max_step)
+        else:
+            raise ValueError('Connectivity must be a sparse matrix or list')
+        if t_power == 1:
+            sums = np.array([np.sum(x[c]) for c in clusters])
+        else:
+            sums = np.array([np.sum(np.sign(x[c]) * np.abs(x[c]) ** t_power)
+                            for c in clusters])
+
+    return clusters, np.atleast_1d(sums)
+
+
+def _cluster_indices_to_mask(components, n_tot):
+    """Convert to the old format of clusters, which were bool arrays"""
+    for ci, c in enumerate(components):
+        components[ci] = np.zeros((n_tot), dtype=bool)
+        components[ci][c] = True
+    return components
+
+
+def _cluster_mask_to_indices(components):
+    """Convert to the old format of clusters, which were bool arrays"""
+    for ci, c in enumerate(components):
+        if not isinstance(c, slice):
+            components[ci] = np.where(c)[0]
+    return components
+
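+# Editorial illustration (toy values): for n_tot = 4,
+# _cluster_indices_to_mask([np.array([1, 2])], 4) yields one boolean mask
+# [False, True, True, False], and _cluster_mask_to_indices inverts it back
+# to [array([1, 2])]; slice-based clusters are passed through unchanged.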
+
+def _pval_from_histogram(T, H0, tail):
+    """Get p-values from stats values given an H0 distribution
+
+    For each stat, compute a p-value as the percentile of its value within
+    all the statistics from the surrogate data.
+    """
+    if tail not in [-1, 0, 1]:
+        raise ValueError('invalid tail parameter')
+
+    # from pct to fraction
+    if tail == -1:  # low tail
+        pval = np.array([np.sum(H0 <= t) for t in T])
+    elif tail == 1:  # up tail
+        pval = np.array([np.sum(H0 >= t) for t in T])
+    else:  # both tails
+        pval = np.array([np.sum(abs(H0) >= abs(t)) for t in T])
+
+    pval = (pval + 1.0) / (H0.size + 1.0)  # the init data is one resampling
+    return pval
+
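+# Editorial worked example (toy numbers): with 99 surrogate maxima
+# H0 = 1, 2, ..., 99 and an observed statistic of 96, the two-tailed
+# p-value is (#{|H0| >= 96} + 1) / (99 + 1) = (4 + 1) / 100 = 0.05; the
+# "+1" counts the observed data as one resampling.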
+
+def _setup_connectivity(connectivity, n_vertices, n_times):
+    if connectivity.shape[0] == n_vertices:  # use global algorithm
+        connectivity = connectivity.tocoo()
+        n_times = None
+    else:  # use temporal adjacency algorithm
+        if not round(n_vertices / float(connectivity.shape[0])) == n_times:
+            raise ValueError('connectivity must be of the correct size')
+        # we claim to only use upper triangular part... not true here
+        connectivity = (connectivity + connectivity.transpose()).tocsr()
+        connectivity = [connectivity.indices[connectivity.indptr[i]:
+                        connectivity.indptr[i + 1]] for i in
+                        range(len(connectivity.indptr) - 1)]
+    return connectivity
+
+
+def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
+                     max_step, include, partitions, t_power, seeds,
+                     sample_shape, buffer_size, progress_bar):
+
+    n_samp, n_vars = X_full.shape
+
+    if buffer_size is not None and n_vars <= buffer_size:
+        buffer_size = None  # don't use buffer for few variables
+
+    # allocate space for output
+    max_cluster_sums = np.empty(len(seeds), dtype=np.double)
+
+    if buffer_size is not None:
+        # allocate buffer, so we don't need to allocate memory during loop
+        X_buffer = [np.empty((len(X_full[s]), buffer_size), dtype=X_full.dtype)
+                    for s in slices]
+
+    for seed_idx, seed in enumerate(seeds):
+        if progress_bar is not None:
+            if (not (seed_idx + 1) % 32) or (seed_idx == 0):
+                progress_bar.update(seed_idx + 1)
+
+        # shuffle sample indices
+        rng = np.random.RandomState(seed)
+        idx_shuffled = np.arange(n_samp)
+        rng.shuffle(idx_shuffled)
+        idx_shuffle_list = [idx_shuffled[s] for s in slices]
+
+        if buffer_size is None:
+            # shuffle all data at once
+            X_shuffle_list = [X_full[idx, :] for idx in idx_shuffle_list]
+            T_obs_surr = stat_fun(*X_shuffle_list)
+        else:
+            # only shuffle a small data buffer, so we need less memory
+            T_obs_surr = np.empty(n_vars, dtype=X_full.dtype)
+
+            for pos in range(0, n_vars, buffer_size):
+                # number of variables for this loop
+                n_var_loop = min(pos + buffer_size, n_vars) - pos
+
+                # fill buffer
+                for i, idx in enumerate(idx_shuffle_list):
+                    X_buffer[i][:, :n_var_loop] =\
+                        X_full[idx, pos: pos + n_var_loop]
+
+                # apply stat_fun and store result
+                tmp = stat_fun(*X_buffer)
+                T_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
+
+        # The stat should have the same shape as the samples for no conn.
+        if connectivity is None:
+            T_obs_surr.shape = sample_shape
+
+        # Find cluster on randomized stats
+        out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
+                             max_step=max_step, connectivity=connectivity,
+                             partitions=partitions, include=include,
+                             t_power=t_power)
+        perm_clusters_sums = out[1]
+
+        if len(perm_clusters_sums) > 0:
+            max_cluster_sums[seed_idx] = np.max(perm_clusters_sums)
+        else:
+            max_cluster_sums[seed_idx] = 0
+
+    return max_cluster_sums
+
+
+def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
+                           max_step, include, partitions, t_power, seeds,
+                           sample_shape, buffer_size, progress_bar):
+    n_samp, n_vars = X.shape
+    assert slices is None  # should be None for the 1 sample case
+
+    if buffer_size is not None and n_vars <= buffer_size:
+        buffer_size = None  # don't use buffer for few variables
+
+    # allocate space for output
+    max_cluster_sums = np.empty(len(seeds), dtype=np.double)
+
+    if buffer_size is not None:
+        # allocate a buffer so we don't need to allocate memory in loop
+        X_flip_buffer = np.empty((n_samp, buffer_size), dtype=X.dtype)
+
+    for seed_idx, seed in enumerate(seeds):
+        if progress_bar is not None:
+            if not (seed_idx + 1) % 32 or seed_idx == 0:
+                progress_bar.update(seed_idx + 1)
+
+        if isinstance(seed, np.ndarray):
+            # new surrogate data with specified sign flip
+            if not seed.size == n_samp:
+                raise ValueError('seed array must be n_samples long')
+            signs = 2 * seed[:, None].astype(int) - 1
+            if not np.all(np.equal(np.abs(signs), 1)):
+                raise ValueError('signs from rng must be +/- 1')
+        else:
+            rng = np.random.RandomState(seed)
+            # new surrogate data with random sign flip
+            signs = np.sign(0.5 - rng.rand(n_samp))
+            signs = signs[:, np.newaxis]
+
+        if buffer_size is None:
+            # be careful about non-writable memmap (GH#1507)
+            if X.flags.writeable:
+                X *= signs
+                # Recompute statistic on randomized data
+                T_obs_surr = stat_fun(X)
+                # Set X back to previous state (trade memory eff. for CPU use)
+                X *= signs
+            else:
+                T_obs_surr = stat_fun(X * signs)
+        else:
+            # only sign-flip a small data buffer, so we need less memory
+            T_obs_surr = np.empty(n_vars, dtype=X.dtype)
+
+            for pos in range(0, n_vars, buffer_size):
+                # number of variables for this loop
+                n_var_loop = min(pos + buffer_size, n_vars) - pos
+
+                X_flip_buffer[:, :n_var_loop] =\
+                    signs * X[:, pos: pos + n_var_loop]
+
+                # apply stat_fun and store result
+                tmp = stat_fun(X_flip_buffer)
+                T_obs_surr[pos: pos + n_var_loop] = tmp[:n_var_loop]
+
+        # The stat should have the same shape as the samples for no conn.
+        if connectivity is None:
+            T_obs_surr.shape = sample_shape
+
+        # Find cluster on randomized stats
+        out = _find_clusters(T_obs_surr, threshold=threshold, tail=tail,
+                             max_step=max_step, connectivity=connectivity,
+                             partitions=partitions, include=include,
+                             t_power=t_power)
+        perm_clusters_sums = out[1]
+        if len(perm_clusters_sums) > 0:
+            # get max with sign info
+            idx_max = np.argmax(np.abs(perm_clusters_sums))
+            max_cluster_sums[seed_idx] = perm_clusters_sums[idx_max]
+        else:
+            max_cluster_sums[seed_idx] = 0
+
+    return max_cluster_sums
+
+
+@verbose
+def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
+                              connectivity, verbose, n_jobs, seed, max_step,
+                              exclude, step_down_p, t_power, out_type,
+                              check_disjoint, buffer_size):
+    """Aux function.
+
+    Note. X is required to be a list. Depending on the length of X,
+    either a 1-sample t-test or an F-test / multi-sample permutation
+    scheme is elicited.
+    """
+    n_jobs = check_n_jobs(n_jobs)
+    if out_type not in ['mask', 'indices']:
+        raise ValueError('out_type must be either \'mask\' or \'indices\'')
+
+    # check dimensions for each group in X (a list at this stage).
+    X = [x[:, np.newaxis] if x.ndim == 1 else x for x in X]
+    n_samples = X[0].shape[0]
+    n_times = X[0].shape[1]
+
+    sample_shape = X[0].shape[1:]
+    for x in X:
+        if x.shape[1:] != sample_shape:
+            raise ValueError('All samples must have the same size')
+
+    # flatten the last dimensions in case the data is high dimensional
+    X = [np.reshape(x, (x.shape[0], -1)) for x in X]
+    n_tests = X[0].shape[1]
+
+    if connectivity is not None:
+        connectivity = _setup_connectivity(connectivity, n_tests, n_times)
+
+    if (exclude is not None) and not exclude.size == n_tests:
+        raise ValueError('exclude must be the same shape as X[0]')
+
+    # Step 1: Calculate T-stat for original data
+    # -------------------------------------------------------------
+    T_obs = stat_fun(*X)
+    logger.info('stat_fun(H1): min=%f max=%f' % (np.min(T_obs), np.max(T_obs)))
+
+    # test if stat_fun treats variables independently
+    if buffer_size is not None:
+        T_obs_buffer = np.zeros_like(T_obs)
+        for pos in range(0, n_tests, buffer_size):
+            T_obs_buffer[pos: pos + buffer_size] =\
+                stat_fun(*[x[:, pos: pos + buffer_size] for x in X])
+
+        if not np.alltrue(T_obs == T_obs_buffer):
+            logger.warning('Provided stat_fun does not treat variables '
+                           'independently. Setting buffer_size to None.')
+            buffer_size = None
+
+    # The stat should have the same shape as the samples for no conn.
+    if connectivity is None:
+        T_obs.shape = sample_shape
+
+    if exclude is not None:
+        include = np.logical_not(exclude)
+    else:
+        include = None
+
+    # determine if connectivity itself can be separated into disjoint sets
+    if check_disjoint is True and connectivity is not None:
+        partitions = _get_partitions_from_connectivity(connectivity, n_times)
+    else:
+        partitions = None
+    logger.info('Running initial clustering')
+    out = _find_clusters(T_obs, threshold, tail, connectivity,
+                         max_step=max_step, include=include,
+                         partitions=partitions, t_power=t_power,
+                         show_info=True)
+    clusters, cluster_stats = out
+    # For TFCE, return the "adjusted" statistic instead of raw scores
+    if isinstance(threshold, dict):
+        T_obs = cluster_stats.copy()
+
+    logger.info('Found %d clusters' % len(clusters))
+
+    # convert clusters to old format
+    if connectivity is not None:
+        # our algorithms output lists of indices by default
+        if out_type == 'mask':
+            clusters = _cluster_indices_to_mask(clusters, n_tests)
+    else:
+        # ndimage outputs slices or boolean masks by default
+        if out_type == 'indices':
+            clusters = _cluster_mask_to_indices(clusters)
+
+    # The stat should have the same shape as the samples
+    T_obs.shape = sample_shape
+
+    if len(X) == 1:  # 1 sample test
+        do_perm_func = _do_1samp_permutations
+        X_full = X[0]
+        slices = None
+    else:
+        do_perm_func = _do_permutations
+        X_full = np.concatenate(X, axis=0)
+        n_samples_per_condition = [x.shape[0] for x in X]
+        splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
+        slices = [slice(splits_idx[k], splits_idx[k + 1])
+                  for k in range(len(X))]
+    parallel, my_do_perm_func, _ = parallel_func(do_perm_func, n_jobs)
+
+    # Step 2: If we have some clusters, repeat process on permuted data
+    # -------------------------------------------------------------------
+
+    def get_progress_bar(seeds):
+        # make sure the progress bar adds to up 100% across n jobs
+        return (ProgressBar(len(seeds), spinner=True) if
+                logger.level <= logging.INFO else None)
+
+    if len(clusters) > 0:
+        # check to see if we can do an exact test
+        # note for a two-tailed test, we can exploit symmetry to just do half
+        seeds = None
+        if len(X) == 1:
+            max_perms = 2 ** (n_samples - (tail == 0))
+            if max_perms <= n_permutations:
+                # omit first perm b/c accounted for in _pval_from_histogram,
+                # convert to binary array representation
+                seeds = [np.fromiter(np.binary_repr(s, n_samples), dtype=int)
+                         for s in range(1, max_perms)]
+
+        if seeds is None:
+            if seed is None:
+                seeds = [None] * n_permutations
+            else:
+                seeds = list(seed + np.arange(n_permutations))
+
+        # Step 3: repeat permutations for step-down-in-jumps procedure
+        n_removed = 1  # number of new clusters added
+        total_removed = 0
+        step_down_include = None  # start out including all points
+        n_step_downs = 0
+
+        while n_removed > 0:
+            # actually do the clustering for each partition
+            if include is not None:
+                if step_down_include is not None:
+                    this_include = np.logical_and(include, step_down_include)
+                else:
+                    this_include = include
+            else:
+                this_include = step_down_include
+            logger.info('Permuting ...')
+            H0 = parallel(my_do_perm_func(X_full, slices, threshold, tail,
+                          connectivity, stat_fun, max_step, this_include,
+                          partitions, t_power, s, sample_shape, buffer_size,
+                          get_progress_bar(s))
+                          for s in split_list(seeds, n_jobs))
+            H0 = np.concatenate(H0)
+            logger.info('Computing cluster p-values')
+            cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
+
+            # figure out how many new ones will be removed for step-down
+            to_remove = np.where(cluster_pv < step_down_p)[0]
+            n_removed = to_remove.size - total_removed
+            total_removed = to_remove.size
+            step_down_include = np.ones(n_tests, dtype=bool)
+            for ti in to_remove:
+                step_down_include[clusters[ti]] = False
+            if connectivity is None:
+                step_down_include.shape = sample_shape
+            n_step_downs += 1
+            if step_down_p > 0:
+                a_text = 'additional ' if n_step_downs > 1 else ''
+                pl = '' if n_removed == 1 else 's'
+                logger.info('Step-down-in-jumps iteration #%i found %i %s'
+                            'cluster%s to exclude from subsequent iterations'
+                            % (n_step_downs, n_removed, a_text, pl))
+        logger.info('Done.')
+        # The clusters should have the same shape as the samples
+        clusters = _reshape_clusters(clusters, sample_shape)
+        return T_obs, clusters, cluster_pv, H0
+    else:
+        return T_obs, np.array([]), np.array([]), np.array([])
+
+
+def ttest_1samp_no_p(X, sigma=0, method='relative'):
+    """t-test with variance adjustment and no p-value calculation
+
+    Parameters
+    ----------
+    X : array
+        Array to return t-values for.
+    sigma : float
+        The variance estimate will be given by "var + sigma * max(var)" or
+        "var + sigma", depending on "method". By default this is 0 (no
+        adjustment). See Notes for details.
+    method : str
+        If 'relative', the minimum variance estimate will be sigma * max(var),
+        if 'absolute' the minimum variance estimate will be sigma.
+
+    Returns
+    -------
+    t : array
+        t-values, potentially adjusted using the hat method.
+
+    Notes
+    -----
+    One can use the conversion:
+
+        threshold = -scipy.stats.distributions.t.ppf(p_thresh, n_samples - 1)
+
+    to convert a desired p-value threshold to t-value threshold. Don't forget
+    that for two-tailed tests, p_thresh in the above should be divided by 2.
+
+    To use the "hat" adjustment method, a value of sigma=1e-3 may be a
+    reasonable choice. See Ridgway et al. 2012 "The problem of low variance
+    voxels in statistical parametric mapping; a new hat avoids a 'haircut'",
+    NeuroImage. 2012 Feb 1;59(3):2131-41.
+    """
+    if method not in ['absolute', 'relative']:
+        raise ValueError('method must be "absolute" or "relative", not %s'
+                         % method)
+    var = np.var(X, axis=0, ddof=1)
+    if sigma > 0:
+        limit = sigma * np.max(var) if method == 'relative' else sigma
+        var += limit
+    return np.mean(X, axis=0) / np.sqrt(var / X.shape[0])
+
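+# Editorial usage sketch (hypothetical data): the docstring's conversion
+# from a two-tailed p-value threshold to a t-threshold, plus the "hat"
+# variance adjustment:
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> X = np.random.RandomState(0).randn(20, 10)  # 20 samples, 10 tests
+#     >>> threshold = -stats.distributions.t.ppf(0.05 / 2., len(X) - 1)
+#     >>> t_vals = ttest_1samp_no_p(X, sigma=1e-3)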
+
+@verbose
+def permutation_cluster_test(X, threshold=None, n_permutations=1024,
+                             tail=0, stat_fun=f_oneway,
+                             connectivity=None, verbose=None, n_jobs=1,
+                             seed=None, max_step=1, exclude=None,
+                             step_down_p=0, t_power=1, out_type='mask',
+                             check_disjoint=False, buffer_size=1000):
+    """Cluster-level statistical permutation test
+
+    For a list of nd-arrays of data, e.g. 2d for time series or 3d for
+    time-frequency power values, calculate some statistics corrected for
+    multiple comparisons using permutations and cluster level correction.
+    Each element of the list X contains the data for one group of
+    observations. Randomized data are generated with random partitions
+    of the data.
+
+    Parameters
+    ----------
+    X : list
+        List of nd-arrays containing the data. Each element of X contains
+        the samples for one group. First dimension of each element is the
+        number of samples/observations in this group. The other dimensions
+        are for the size of the observations. For example if X = [X1, X2]
+        with X1.shape = (20, 50, 4) and X2.shape = (17, 50, 4) one has
+        2 groups with respectively 20 and 17 observations in each.
+        Each data point is of shape (50, 4).
+    threshold : float | dict | None
+        If threshold is None, it will choose an F-threshold equivalent to
+        p < 0.05 for the given numbers of observations per group (only
+        valid with the default f_oneway stat_fun).
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : callable
+        function called to calculate statistics, must accept 1d-arrays as
+        arguments (default: scipy.stats.f_oneway).
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        Default is None, i.e., a regular lattice connectivity.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    exclude : boolean array or None
+        Mask to apply to the data to exclude certain points from clustering
+        (e.g., medial wall vertices). Should be the same shape as X. If None,
+        no points are excluded.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually f-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+    buffer_size : int or None
+        The statistics will be computed for blocks of variables of size
+        "buffer_size" at a time. This option significantly reduces the
+        memory requirements when n_jobs > 1 and memory sharing between
+        processes is enabled (see set_cache_dir()), as X will be shared
+        between processes and each process only needs to allocate space
+        for a small block of variables.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables.
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
+    """
+    from scipy import stats
+    ppf = stats.f.ppf
+    if threshold is None:
+        p_thresh = 0.05 / (1 + (tail == 0))
+        n_samples_per_group = [len(x) for x in X]
+        threshold = ppf(1. - p_thresh, *n_samples_per_group)
+        if np.sign(tail) < 0:
+            threshold = -threshold
+
+    return _permutation_cluster_test(X=X, threshold=threshold,
+                                     n_permutations=n_permutations,
+                                     tail=tail, stat_fun=stat_fun,
+                                     connectivity=connectivity,
+                                     verbose=verbose,
+                                     n_jobs=n_jobs, seed=seed,
+                                     max_step=max_step,
+                                     exclude=exclude, step_down_p=step_down_p,
+                                     t_power=t_power, out_type=out_type,
+                                     check_disjoint=check_disjoint,
+                                     buffer_size=buffer_size)
+
+
+permutation_cluster_test.__test__ = False
+
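+# Editorial usage sketch (hypothetical data; shapes follow the docstring):
+# two groups of time series, cluster-corrected with the default F-test:
+#
+#     >>> rng = np.random.RandomState(0)
+#     >>> X1 = rng.randn(20, 50)        # 20 observations, 50 time points
+#     >>> X2 = rng.randn(17, 50) + 0.5  # 17 observations, mean offset
+#     >>> T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
+#     ...     [X1, X2], n_permutations=100)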
+
+@verbose
+def permutation_cluster_1samp_test(X, threshold=None, n_permutations=1024,
+                                   tail=0, stat_fun=ttest_1samp_no_p,
+                                   connectivity=None, verbose=None, n_jobs=1,
+                                   seed=None, max_step=1, exclude=None,
+                                   step_down_p=0, t_power=1, out_type='mask',
+                                   check_disjoint=False, buffer_size=1000):
+    """Non-parametric cluster-level 1 sample T-test
+
+    From an array of observations, e.g. signal amplitudes or power spectrum
+    estimates etc., calculate if the observed mean significantly deviates
+    from 0. The procedure uses a cluster analysis with permutation test
+    for calculating corrected p-values. Randomized data are generated with
+    random sign flips.
+
+    Parameters
+    ----------
+    X : array, shape=(n_samples, p, q) or (n_samples, p)
+        Array where the first dimension corresponds to the
+        samples (observations). X[k] can be a 1D or 2D array (time series
+        or TF image) associated to the kth observation.
+    threshold : float | dict | None
+        If threshold is None, it will choose a t-threshold equivalent to
+        p < 0.05 for the given number of (within-subject) observations.
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : function
+        Function used to compute the statistical map.
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        This matrix must be square with dimension (n_vertices * n_times) or
+        (n_vertices). Default is None, i.e., a regular lattice connectivity.
+        Use square n_vertices matrix for datasets with a large temporal
+        extent to save on memory and computation time.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+        Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
+        two-tailed tests], this value will be ignored since an exact test
+        (full permutation test) will be performed.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    exclude : boolean array or None
+        Mask to apply to the data to exclude certain points from clustering
+        (e.g., medial wall vertices). Should be the same shape as X. If None,
+        no points are excluded.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+    buffer_size : int or None
+        The statistics will be computed for blocks of variables of size
+        "buffer_size" at a time. This option significantly reduces the
+        memory requirements when n_jobs > 1 and memory sharing between
+        processes is enabled (see set_cache_dir()), as X will be shared
+        between processes and each process only needs to allocate space
+        for a small block of variables.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
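+
+    Examples
+    --------
+    A minimal sketch, assuming 20 observations of a 50-point 1D signal
+    with a small positive shift (simulated data; sizes are illustrative):
+
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.randn(20, 50) + 0.3  # observations x time points
+    >>> T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
+    ...     X, n_permutations=128, seed=0)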
+    """
+    from scipy import stats
+    ppf = stats.t.ppf
+    if threshold is None:
+        p_thresh = 0.05 / (1 + (tail == 0))
+        n_samples = len(X)
+        threshold = -ppf(p_thresh, n_samples - 1)
+        if np.sign(tail) < 0:
+            threshold = -threshold
+
+    X = [X]  # for one sample only one data array
+    return _permutation_cluster_test(X=X,
+                                     threshold=threshold,
+                                     n_permutations=n_permutations,
+                                     tail=tail, stat_fun=stat_fun,
+                                     connectivity=connectivity,
+                                     verbose=verbose,
+                                     n_jobs=n_jobs, seed=seed,
+                                     max_step=max_step,
+                                     exclude=exclude, step_down_p=step_down_p,
+                                     t_power=t_power, out_type=out_type,
+                                     check_disjoint=check_disjoint,
+                                     buffer_size=buffer_size)
+
+
+permutation_cluster_1samp_test.__test__ = False
+
+
+@verbose
+def spatio_temporal_cluster_1samp_test(X, threshold=None,
+                                       n_permutations=1024, tail=0,
+                                       stat_fun=ttest_1samp_no_p,
+                                       connectivity=None, verbose=None,
+                                       n_jobs=1, seed=None, max_step=1,
+                                       spatial_exclude=None, step_down_p=0,
+                                       t_power=1, out_type='indices',
+                                       check_disjoint=False, buffer_size=1000):
+    """Non-parametric cluster-level 1 sample T-test for spatio-temporal data
+
+    This function provides a convenient wrapper for data organized in the form
+    (observations x time x space) to use permutation_cluster_1samp_test.
+
+    Parameters
+    ----------
+    X : array
+        Array of shape observations x time x vertices.
+    threshold : float | dict | None
+        If threshold is None, it will choose a t-threshold equivalent to
+        p < 0.05 for the given number of (within-subject) observations.
+        If a dict is used, then threshold-free cluster enhancement (TFCE)
+        will be used.
+    n_permutations : int
+        The number of permutations to compute.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the statistic is thresholded above threshold.
+        If tail is -1, the statistic is thresholded below threshold.
+        If tail is 0, the statistic is thresholded on both sides of
+        the distribution.
+    stat_fun : function
+        Function used to compute the statistical map.
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        This matrix must be square with dimension (n_vertices * n_times) or
+        (n_vertices). Default is None, i.e, a regular lattice connectivity.
+        Use square n_vertices matrix for datasets with a large temporal
+        extent to save on memory and computation time.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+        Note that if n_permutations >= 2^(n_samples) [or (2^(n_samples-1)) for
+        two-tailed tests], this value will be ignored since an exact test
+        (full permutation test) will be performed.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    spatial_exclude : list of int or None
+        List of spatial indices to exclude from clustering.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually t-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+    buffer_size : int or None
+        The statistics will be computed for blocks of variables of size
+        "buffer_size" at a time. This option significantly reduces the
+        memory requirements when n_jobs > 1 and memory sharing between
+        processes is enabled (see set_cache_dir()), as X will be shared
+        between processes and each process only needs to allocate space
+        for a small block of variables.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables.
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
+
+    TFCE originally described in Smith/Nichols (2009),
+    "Threshold-free cluster enhancement: Addressing problems of
+    smoothing, threshold dependence, and localisation in cluster
+    inference", NeuroImage 44 (2009) 83-98.
+    """
+    n_samples, n_times, n_vertices = X.shape
+
+    # convert spatial_exclude before passing on if necessary
+    if spatial_exclude is not None:
+        exclude = _st_mask_from_s_inds(n_times, n_vertices,
+                                       spatial_exclude, True)
+    else:
+        exclude = None
+
+    # do the heavy lifting
+    out = permutation_cluster_1samp_test(X, threshold=threshold,
+                                         stat_fun=stat_fun, tail=tail,
+                                         n_permutations=n_permutations,
+                                         connectivity=connectivity,
+                                         n_jobs=n_jobs, seed=seed,
+                                         max_step=max_step, exclude=exclude,
+                                         step_down_p=step_down_p,
+                                         t_power=t_power, out_type=out_type,
+                                         check_disjoint=check_disjoint,
+                                         buffer_size=buffer_size)
+    return out
+
+
+spatio_temporal_cluster_1samp_test.__test__ = False
+
+
+@verbose
+def spatio_temporal_cluster_test(X, threshold=1.67, n_permutations=1024,
+                                 tail=0, stat_fun=f_oneway,
+                                 connectivity=None, verbose=None, n_jobs=1,
+                                 seed=None, max_step=1, spatial_exclude=None,
+                                 step_down_p=0, t_power=1, out_type='indices',
+                                 check_disjoint=False, buffer_size=1000):
+    """Non-parametric cluster-level test for spatio-temporal data
+
+    This function provides a convenient wrapper for data organized in the form
+    (observations x time x space) to use permutation_cluster_test.
+
+    Parameters
+    ----------
+    X : list of arrays
+        Array of shape (observations, time, vertices) in each group.
+    threshold : float
+        The threshold for the statistic.
+    n_permutations : int
+        See permutation_cluster_test.
+    tail : -1 or 0 or 1 (default = 0)
+        See permutation_cluster_test.
+    stat_fun : function
+        Function called to calculate statistics; must accept 1d-arrays as
+        arguments (default: scipy.stats.f_oneway).
+    connectivity : sparse matrix or None
+        Defines connectivity between features. The matrix is assumed to
+        be symmetric and only the upper triangular half is used.
+        Default is None, i.e, a regular lattice connectivity.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    n_jobs : int
+        Number of permutations to run in parallel (requires joblib package).
+    seed : int or None
+        Seed the random number generator for results reproducibility.
+    max_step : int
+        When connectivity is a n_vertices x n_vertices matrix, specify the
+        maximum number of steps between vertices along the second dimension
+        (typically time) to be considered connected. This is not used for full
+        or None connectivity matrices.
+    spatial_exclude : list of int or None
+        List of spatial indices to exclude from clustering.
+    step_down_p : float
+        To perform a step-down-in-jumps test, pass a p-value for clusters to
+        exclude from each successive iteration. Default is zero, perform no
+        step-down test (since no clusters will be smaller than this value).
+        Setting this to a reasonable value, e.g. 0.05, can increase sensitivity
+        but costs computation time.
+    t_power : float
+        Power to raise the statistical values (usually f-values) by before
+        summing (sign will be retained). Note that t_power == 0 will give a
+        count of nodes in each cluster, t_power == 1 will weight each node by
+        its statistical score.
+    out_type : str
+        For arrays with connectivity, this sets the output format for clusters.
+        If 'mask', it will pass back a list of boolean mask arrays.
+        If 'indices', it will pass back a list of lists, where each list is the
+        set of vertices in a given cluster. Note that the latter may use far
+        less memory for large datasets.
+    check_disjoint : bool
+        If True, the connectivity matrix (or list) will be examined to
+        determine if it can be separated into disjoint sets. In some cases
+        (usually with connectivity as a list and many "time" points), this
+        can lead to faster clustering, but results should be identical.
+    buffer_size : int or None
+        The statistics will be computed for blocks of variables of size
+        "buffer_size" at a time. This option significantly reduces the
+        memory requirements when n_jobs > 1 and memory sharing between
+        processes is enabled (see set_cache_dir()), as X will be shared
+        between processes and each process only needs to allocate space
+        for a small block of variables.
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+    clusters : list
+        List type defined by out_type above.
+    cluster_pv : array
+        P-value for each cluster
+    H0 : array of shape [n_permutations]
+        Max cluster level stats observed under permutation.
+
+    Notes
+    -----
+    Reference:
+    Cluster permutation algorithm as described in
+    Maris/Oostenveld (2007),
+    "Nonparametric statistical testing of EEG- and MEG-data"
+    Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+    doi:10.1016/j.jneumeth.2007.03.024
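+
+    Examples
+    --------
+    A minimal sketch comparing two simulated groups (sizes are
+    illustrative):
+
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(0)
+    >>> X = [rng.randn(12, 10, 30), rng.randn(14, 10, 30)]  # two groups
+    >>> T_obs, clusters, cluster_pv, H0 = spatio_temporal_cluster_test(
+    ...     X, n_permutations=64, seed=0)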
+    """
+    n_samples, n_times, n_vertices = X[0].shape
+
+    # convert spatial_exclude before passing on if necessary
+    if spatial_exclude is not None:
+        exclude = _st_mask_from_s_inds(n_times, n_vertices,
+                                       spatial_exclude, True)
+    else:
+        exclude = None
+
+    # do the heavy lifting
+    out = permutation_cluster_test(X, threshold=threshold,
+                                   stat_fun=stat_fun, tail=tail,
+                                   n_permutations=n_permutations,
+                                   connectivity=connectivity, n_jobs=n_jobs,
+                                   seed=seed, max_step=max_step,
+                                   exclude=exclude, step_down_p=step_down_p,
+                                   t_power=t_power, out_type=out_type,
+                                   check_disjoint=check_disjoint,
+                                   buffer_size=buffer_size)
+    return out
+
+
+spatio_temporal_cluster_test.__test__ = False
+
+
+def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True):
+    """This function returns a boolean mask vector to apply to a spatio-
+    temporal connectivity matrix (n_times * n_vertices square) to include (or
+    exclude) certain spatial coordinates. This is useful for excluding certain
+    regions from analysis (e.g., medial wall vertices).
+
+    Parameters
+    ----------
+    n_times : int
+        Number of time points.
+    n_vertices : int
+        Number of spatial points.
+    vertices : list or array of int
+        Vertex numbers to set.
+    set_as : bool
+        If True, all points except "vertices" are set to False (inclusion).
+        If False, all points except "vertices" are set to True (exclusion).
+
+    Returns
+    -------
+    mask : array of bool
+        A (n_times * n_vertices) array of boolean values for masking
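+
+    Examples
+    --------
+    A small sketch: mask excluding vertices 0 and 2 (of 4) at both of 2
+    time points:
+
+    >>> mask = _st_mask_from_s_inds(2, 4, [0, 2], False)
+    >>> mask.shape, int(mask.sum())
+    ((8,), 4)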
+    """
+    mask = np.zeros((n_times, n_vertices), dtype=bool)
+    mask[:, vertices] = True
+    mask = mask.ravel()
+    if set_as is False:
+        mask = np.logical_not(mask)
+    return mask
+
+
+@verbose
+def _get_partitions_from_connectivity(connectivity, n_times, verbose=None):
+    """Use indices to specify disjoint subsets (e.g., hemispheres) based on
+    connectivity"""
+    if isinstance(connectivity, list):
+        test = np.ones(len(connectivity))
+        test_conn = np.zeros((len(connectivity), len(connectivity)),
+                             dtype='bool')
+        for vi in range(len(connectivity)):
+            test_conn[connectivity[vi], vi] = True
+        test_conn = sparse.coo_matrix(test_conn, dtype='float')
+    else:
+        test = np.ones(connectivity.shape[0])
+        test_conn = connectivity
+
+    part_clusts = _find_clusters(test, 0, 1, test_conn)[0]
+    if len(part_clusts) > 1:
+        logger.info('%i disjoint connectivity sets found'
+                    % len(part_clusts))
+        partitions = np.zeros(len(test), dtype='int')
+        for ii, pc in enumerate(part_clusts):
+            partitions[pc] = ii
+        if isinstance(connectivity, list):
+            partitions = np.tile(partitions, n_times)
+    else:
+        logger.info('No disjoint connectivity sets found')
+        partitions = None
+
+    return partitions
+
+
+def _reshape_clusters(clusters, sample_shape):
+    """Reshape cluster masks or indices to be of the correct shape"""
+    # format of the bool mask and indices are ndarrays
+    if len(clusters) > 0 and isinstance(clusters[0], np.ndarray):
+        if clusters[0].dtype == bool:  # format of mask
+            clusters = [c.reshape(sample_shape) for c in clusters]
+        else:  # format of indices
+            clusters = [unravel_index(c, sample_shape) for c in clusters]
+    return clusters
+
+
+def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
+                           subject='fsaverage', vertices=None):
+    """ Assemble summary SourceEstimate from spatiotemporal cluster results
+
+    This helps visualizing results from spatio-temporal-clustering
+    permutation tests
+
+    Parameters
+    ----------
+    clu : tuple
+        The output from clustering permutation tests.
+    p_thresh : float
+        The significance threshold for inclusion of clusters.
+    tstep : float
+        The temporal difference between two time samples.
+    tmin : float | int
+        The time of the first sample.
+    subject : str
+        The name of the subject.
+    vertices : list of arrays | None
+        The vertex numbers associated with the source space locations. Defaults
+        to None. If None, equals ``[np.arange(10242), np.arange(10242)]``.
+
+    Returns
+    -------
+    out : instance of SourceEstimate
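+
+    Examples
+    --------
+    A minimal sketch, assuming ``clu`` holds the tuple returned by a
+    spatio-temporal clustering test (inputs are hypothetical)::
+
+        stc_all = summarize_clusters_stc(clu, p_thresh=0.05,
+                                         subject='fsaverage')
+        # column 0 of stc_all.data sums across all significant clusters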
+    """
+    if vertices is None:
+        vertices = [np.arange(10242), np.arange(10242)]
+
+    T_obs, clusters, clu_pvals, _ = clu
+    n_times, n_vertices = T_obs.shape
+    good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
+
+    #  Build a convenient representation of each cluster, where each
+    #  cluster becomes a "time point" in the SourceEstimate
+    if len(good_cluster_inds) > 0:
+        data = np.zeros((n_vertices, n_times))
+        data_summary = np.zeros((n_vertices, len(good_cluster_inds) + 1))
+        for ii, cluster_ind in enumerate(good_cluster_inds):
+            data.fill(0)
+            v_inds = clusters[cluster_ind][1]
+            t_inds = clusters[cluster_ind][0]
+            data[v_inds, t_inds] = T_obs[t_inds, v_inds]
+            # Store a nice visualization of the cluster by summing across time
+            data = np.sign(data) * np.logical_not(data == 0) * tstep
+            data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)
+            # Make the first "time point" a sum across all clusters for easy
+            # visualization
+        data_summary[:, 0] = np.sum(data_summary, axis=1)
+
+        return SourceEstimate(data_summary, vertices, tmin=tmin, tstep=tstep,
+                              subject=subject)
+    else:
+        raise RuntimeError('No significant clusters available. Please adjust '
+                           'your threshold or check your statistical '
+                           'analysis.')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/multi_comp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/multi_comp.py
new file mode 100644
index 0000000..a26b4a7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/multi_comp.py
@@ -0,0 +1,102 @@
+# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# Code borrowed from statsmodels
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+
+def _ecdf(x):
+    '''No-frills empirical cdf used in fdr_correction
+    '''
+    nobs = len(x)
+    return np.arange(1, nobs + 1) / float(nobs)
+
+
+def fdr_correction(pvals, alpha=0.05, method='indep'):
+    """P-value correction with False Discovery Rate (FDR)
+
+    Correction for multiple comparison using FDR.
+
+    This covers Benjamini/Hochberg for independent or positively correlated
+    tests and Benjamini/Yekutieli for general or negatively correlated tests.
+
+    Parameters
+    ----------
+    pvals : array_like
+        set of p-values of the individual tests.
+    alpha : float
+        error rate
+    method : 'indep' | 'negcorr'
+        If 'indep' it implements Benjamini/Hochberg for independent or if
+        'negcorr' it corresponds to Benjamini/Yekutieli.
+
+    Returns
+    -------
+    reject : array, bool
+        True if a hypothesis is rejected, False if not
+    pval_corrected : array
+        pvalues adjusted for multiple hypothesis testing to limit FDR
+
+    Notes
+    -----
+    Reference:
+    Genovese CR, Lazar NA, Nichols T.
+    Thresholding of statistical maps in functional neuroimaging using the false
+    discovery rate. Neuroimage. 2002 Apr;15(4):870-8.
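+
+    Examples
+    --------
+    A small sketch on made-up p-values:
+
+    >>> reject, pvals_corrected = fdr_correction([0.001, 0.02, 0.2, 0.5])
+    >>> bool(reject[0]), bool(reject[-1])
+    (True, False)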
+    """
+    pvals = np.asarray(pvals)
+    shape_init = pvals.shape
+    pvals = pvals.ravel()
+
+    pvals_sortind = np.argsort(pvals)
+    pvals_sorted = pvals[pvals_sortind]
+    sortrevind = pvals_sortind.argsort()
+
+    if method in ['i', 'indep', 'p', 'poscorr']:
+        ecdffactor = _ecdf(pvals_sorted)
+    elif method in ['n', 'negcorr']:
+        cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
+        ecdffactor = _ecdf(pvals_sorted) / cm
+    else:
+        raise ValueError("Method should be 'indep' and 'negcorr'")
+
+    reject = pvals_sorted < (ecdffactor * alpha)
+    if reject.any():
+        rejectmax = max(np.nonzero(reject)[0])
+    else:
+        rejectmax = 0
+    reject[:rejectmax] = True
+
+    pvals_corrected_raw = pvals_sorted / ecdffactor
+    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
+    pvals_corrected[pvals_corrected > 1.0] = 1.0
+    pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
+    reject = reject[sortrevind].reshape(shape_init)
+    return reject, pvals_corrected
+
+
+def bonferroni_correction(pval, alpha=0.05):
+    """P-value correction with Bonferroni method
+
+    Parameters
+    ----------
+    pval : array_like
+        set of p-values of the individual tests.
+    alpha : float
+        error rate
+
+    Returns
+    -------
+    reject : array, bool
+        True if a hypothesis is rejected, False if not
+    pval_corrected : array
+        p-values adjusted for multiple hypothesis testing to control the
+        family-wise error rate
+
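+    Examples
+    --------
+    A small sketch on made-up p-values:
+
+    >>> reject, pval_corrected = bonferroni_correction([0.01, 0.2])
+    >>> bool(reject[0]), float(pval_corrected[0])
+    (True, 0.02)
+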
+    """
+    pval = np.asarray(pval)
+    pval_corrected = pval * float(pval.size)
+    reject = pval_corrected < alpha
+    return reject, pval_corrected
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/parametric.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/parametric.py
new file mode 100644
index 0000000..ed7fbe3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/parametric.py
@@ -0,0 +1,357 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+from functools import reduce
+from string import ascii_uppercase
+
+from ..externals.six import string_types
+from ..utils import deprecated
+from ..fixes import matrix_rank
+
+# The following function is a rewriting of scipy.stats.f_oneway.
+# Contrary to the scipy.stats.f_oneway implementation, it does not
+# copy the data, while still keeping the inputs unchanged.
+
+
+def _f_oneway(*args):
+    """
+    Performs a 1-way ANOVA.
+
+    The one-way ANOVA tests the null hypothesis that 2 or more groups have
+    the same population mean. The test is applied to samples from two or
+    more groups, possibly with differing sizes.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample measurements should be given as arguments.
+
+    Returns
+    -------
+    F-value : float
+        The computed F-value of the test
+    p-value : float
+        The associated p-value from the F-distribution
+
+    Notes
+    -----
+    The ANOVA test has important assumptions that must be satisfied in order
+    for the associated p-value to be valid.
+
+    1. The samples are independent
+    2. Each sample is from a normally distributed population
+    3. The population standard deviations of the groups are all equal.  This
+       property is known as homoscedasticity.
+
+    If these assumptions are not true for a given set of data, it may still
+    be possible to use the Kruskal-Wallis H-test (``scipy.stats.kruskal``),
+    although with some loss of power.
+
+    The algorithm is from Heiman [2], pp. 394-7.
+
+    See scipy.stats.f_oneway, which should give the same results while
+    being less efficient.
+
+    References
+    ----------
+    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
+           Statistics". Chapter 14.
+           http://faculty.vassar.edu/lowry/ch14pt1.html
+
+    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
+
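+    Examples
+    --------
+    A minimal sketch with three small groups (values are illustrative):
+
+    >>> import numpy as np
+    >>> f_val, p_val = _f_oneway(np.array([1., 2., 3.]),
+    ...                          np.array([2., 3., 4.]),
+    ...                          np.array([5., 6., 7.]))
+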
+    """
+    from scipy import stats
+    sf = stats.f.sf
+    n_classes = len(args)
+    n_samples_per_class = np.array([len(a) for a in args])
+    n_samples = np.sum(n_samples_per_class)
+    ss_alldata = reduce(lambda x, y: x + y,
+                        [np.sum(a ** 2, axis=0) for a in args])
+    sums_args = [np.sum(a, axis=0) for a in args]
+    square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
+    square_of_sums_args = [s ** 2 for s in sums_args]
+    sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
+    ssbn = 0
+    for k, _ in enumerate(args):
+        ssbn += square_of_sums_args[k] / n_samples_per_class[k]
+    ssbn -= square_of_sums_alldata / float(n_samples)
+    sswn = sstot - ssbn
+    dfbn = n_classes - 1
+    dfwn = n_samples - n_classes
+    msb = ssbn / float(dfbn)
+    msw = sswn / float(dfwn)
+    f = msb / msw
+    prob = sf(f, dfbn, dfwn)  # stats.f.sf takes (x, dfn, dfd)
+    return f, prob
+
+
+def f_oneway(*args):
+    """Call scipy.stats.f_oneway, but return only f-value"""
+    return _f_oneway(*args)[0]
+
+
+def _map_effects(n_factors, effects):
+    """Map effects to indices"""
+    if n_factors > len(ascii_uppercase):
+        raise ValueError('Maximum number of factors supported is 26')
+
+    factor_names = list(ascii_uppercase[:n_factors])
+
+    if isinstance(effects, string_types):
+        if '*' in effects and ':' in effects:
+            raise ValueError('Not "*" and ":" permitted in effects')
+        elif '+' in effects and ':' in effects:
+            raise ValueError('Not "+" and ":" permitted in effects')
+        elif effects == 'all':
+            effects = None
+        elif len(effects) == 1 or ':' in effects:
+            effects = [effects]
+        elif '+' in effects:
+            # all main effects
+            effects = effects.split('+')
+        elif '*' in effects:
+            pass  # handle later
+        else:
+            raise ValueError('"{0}" is not a valid option for "effects"'
+                             .format(effects))
+    if isinstance(effects, list):
+        bad_names = [e for e in effects if e not in factor_names]
+        if len(bad_names) > 0:
+            raise ValueError('Effect names: {0} are not valid. They must be '
+                             'among the first `n_factors` ({1}) letters of '
+                             'the alphabet'.format(bad_names, n_factors))
+
+    indices = list(np.arange(2 ** n_factors - 1))
+    names = list()
+    for this_effect in indices:
+        contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
+        this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
+        this_name = [factor_names[e] for e in this_code]
+        this_name.sort()
+        names.append(':'.join(this_name))
+
+    if effects is None or isinstance(effects, string_types):
+        effects_ = names
+    else:
+        effects_ = effects
+
+    selection = [names.index(sel) for sel in effects_]
+    names = [names[sel] for sel in selection]
+
+    if isinstance(effects, string_types):
+        if '*' in effects:
+            # hierarchical order of effects
+            # the * based effect can be used as stop index
+            sel_ind = names.index(effects.replace('*', ':')) + 1
+            names = names[:sel_ind]
+            selection = selection[:sel_ind]
+
+    return selection, names
+
+
+def _get_contrast_indices(effect_idx, n_factors):
+    """Henson's factor coding, see num2binvec"""
+    binrepr = np.binary_repr(effect_idx, n_factors)
+    return np.array([int(i) for i in binrepr], dtype=int)
+
+
+def _iter_contrasts(n_subjects, factor_levels, effect_picks):
+    """ Aux Function: Setup contrasts """
+    from scipy.signal import detrend
+    sc = []
+    n_factors = len(factor_levels)
+    # prepare computation of Kronecker products
+    for n_levels in factor_levels:
+        # for each factor append
+        # 1) column vector of length == number of levels,
+        # 2) square matrix with diagonal == number of levels
+
+        # main + interaction effects for contrasts
+        sc.append([np.ones([n_levels, 1]),
+                   detrend(np.eye(n_levels), type='constant')])
+
+    for this_effect in effect_picks:
+        contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
+        c_ = sc[0][contrast_idx[n_factors - 1]]
+        for i_contrast in range(1, n_factors):
+            this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
+            c_ = np.kron(c_, sc[i_contrast][this_contrast])
+        df1 = matrix_rank(c_)
+        df2 = df1 * (n_subjects - 1)
+        yield c_, df1, df2
+
+
+ at deprecated('"f_threshold_twoway_rm" is deprecated and will be removed in'
+            'MNE-0.11. Please use f_threshold_mway_rm instead')
+def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
+                          pvalue=0.05):
+    return f_threshold_mway_rm(
+        n_subjects=n_subjects, factor_levels=factor_levels,
+        effects=effects, pvalue=pvalue)
+
+
+def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
+                        pvalue=0.05):
+    """ Compute f-value thesholds for a two-way ANOVA
+
+    Parameters
+    ----------
+    n_subjects : int
+        The number of subjects to be analyzed.
+    factor_levels : list-like
+        The number of levels per factor.
+    effects : str
+        A string denoting the effect to be returned. The following
+        mapping is currently supported:
+            'A': main effect of A
+            'B': main effect of B
+            'A:B': interaction effect
+            'A+B': both main effects
+            'A*B': all three effects
+    pvalue : float
+        The p-value to be thresholded.
+
+    Returns
+    -------
+    f_threshold : list | float
+        List of F-values for each effect if the number of effects
+        requested > 1, else float.
+
+    See Also
+    --------
+    f_oneway
+    f_mway_rm
+
+    Notes
+    -----
+    .. versionadded:: 0.10
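+
+    Examples
+    --------
+    A minimal sketch for a 2 x 3 within-subject design with 16 subjects
+    (numbers are illustrative):
+
+    >>> thresh = f_threshold_mway_rm(16, [2, 3], effects='A')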
+    """
+    from scipy.stats import f
+    effect_picks, _ = _map_effects(len(factor_levels), effects)
+
+    f_threshold = []
+    for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
+                                       effect_picks):
+        f_threshold.append(f(df1, df2).isf(pvalue))
+
+    return f_threshold if len(f_threshold) > 1 else f_threshold[0]
+
+
+# The following functions are based on MATLAB code by Rik Henson
+# and Python code from the pyvttbl toolbox by Roger Lew.
+@deprecated('"f_twoway_rm" is deprecated and will be removed in MNE 0.11. '
+            'Please use f_mway_rm instead')
+def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
+                correction=False, return_pvals=True):
+    """This function is deprecated, use `f_mway_rm` instead"""
+    return f_mway_rm(data=data, factor_levels=factor_levels, effects=effects,
+                     alpha=alpha, correction=correction,
+                     return_pvals=return_pvals)
+
+
+def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
+              correction=False, return_pvals=True):
+    """M-way repeated measures ANOVA for fully balanced designs
+
+    Parameters
+    ----------
+    data : ndarray
+        3D array where the first two dimensions are compliant
+        with a subjects X conditions scheme where the first
+        factor repeats slowest::
+
+                        A1B1 A1B2 A2B1 A2B2
+            subject 1   1.34 2.53 0.97 1.74
+            subject ... .... .... .... ....
+            subject k   2.45 7.90 3.09 4.76
+
+        The last dimension is thought to carry the observations
+        for mass univariate analysis.
+    factor_levels : list-like
+        The number of levels per factor.
+    effects : str | list
+        A string denoting the effect to be returned. The following
+        mapping is currently supported (example with 2 factors):
+
+            * ``'A'``: main effect of A
+            * ``'B'``: main effect of B
+            * ``'A:B'``: interaction effect
+            * ``'A+B'``: both main effects
+            * ``'A*B'``: all three effects
+            * ``'all'``: all effects (equals 'A*B' in a 2 way design)
+
+        If list, effect names are used: ``['A', 'B', 'A:B']``.
+    alpha : float
+        The significance threshold.
+    correction : bool
+        The correction method to be employed if one factor has more than two
+        levels. If True, sphericity correction using the Greenhouse-Geisser
+        method will be applied.
+    return_pvals : bool
+        If True, return p values corresponding to f values.
+
+    Returns
+    -------
+    f_vals : ndarray
+        An array of F-values; its leading dimension corresponds to the
+        number of effects estimated (singleton dimensions are squeezed).
+    p_vals : ndarray
+        If not requested via return_pvals, defaults to an empty array.
+
+    See Also
+    --------
+    f_oneway
+    f_threshold_mway_rm
+
+    Notes
+    -----
+    .. versionadded:: 0.10
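+
+    Examples
+    --------
+    A minimal sketch for a fully balanced 2 x 2 design with 10 subjects
+    and 5 mass-univariate observations (data are simulated):
+
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(0)
+    >>> data = rng.randn(10, 4, 5)  # subjects x (A x B cells) x tests
+    >>> f_vals, p_vals = f_mway_rm(data, factor_levels=[2, 2])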
+    """
+    from scipy.stats import f
+    if data.ndim == 2:  # general purpose support, e.g. behavioural data
+        data = data[:, :, np.newaxis]
+    elif data.ndim > 3:  # let's allow for some magic here.
+        data = data.reshape(
+            data.shape[0], data.shape[1], np.prod(data.shape[2:]))
+
+    effect_picks, _ = _map_effects(len(factor_levels), effects)
+    n_obs = data.shape[2]
+    n_replications = data.shape[0]
+
+    # put last axis in front to 'iterate' over mass univariate instances.
+    data = np.rollaxis(data, 2)
+    fvalues, pvalues = [], []
+    for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
+                                        effect_picks):
+        y = np.dot(data, c_)
+        b = np.mean(y, axis=1)[:, np.newaxis, :]
+        ss = np.sum(np.sum(y * b, axis=2), axis=1)
+        mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
+        fvals = ss / mse
+        fvalues.append(fvals)
+        if correction:
+            # sample covariances, leave off "/ (y.shape[1] - 1)" norm because
+            # it falls out.
+            v = np.array([np.dot(y_.T, y_) for y_ in y])
+            v = (np.array([np.trace(vv) for vv in v]) ** 2 /
+                 (df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
+            eps = v
+
+        df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
+        if correction:
+            df1, df2 = [d[None, :] * eps for d in (df1, df2)]
+
+        if return_pvals:
+            pvals = f(df1, df2).sf(fvals)
+        else:
+            pvals = np.empty(0)
+        pvalues.append(pvals)
+
+    # handle single effect returns
+    return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/permutations.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/permutations.py
new file mode 100644
index 0000000..a20892a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/permutations.py
@@ -0,0 +1,152 @@
+"""T-test with permutations
+"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Fernando Perez (bin_perm_rep function)
+#
+# License: Simplified BSD
+
+from math import sqrt
+import numpy as np
+
+from ..parallel import parallel_func
+from .. import verbose
+
+
+def bin_perm_rep(ndim, a=0, b=1):
+    """bin_perm_rep(ndim) -> ndim permutations with repetitions of (a,b).
+
+    Returns an array with all the possible permutations with repetitions of
+    (0,1) in ndim dimensions.  The array is shaped as (2**ndim, ndim), and is
+    ordered with the last index changing fastest.  For example, for ndim=3:
+
+    Examples:
+
+    >>> bin_perm_rep(3)
+    array([[0, 0, 0],
+           [0, 0, 1],
+           [0, 1, 0],
+           [0, 1, 1],
+           [1, 0, 0],
+           [1, 0, 1],
+           [1, 1, 0],
+           [1, 1, 1]])
+    """
+
+    # Create the leftmost column as 0,0,...,1,1,...
+    nperms = 2 ** ndim
+    perms = np.empty((nperms, ndim), type(a))
+    perms.fill(a)
+    half_point = nperms // 2
+    perms[half_point:, 0] = b
+    # Fill the rest of the table by sampling the previous column every 2 items
+    for j in range(1, ndim):
+        half_col = perms[::2, j - 1]
+        perms[:half_point, j] = half_col
+        perms[half_point:, j] = half_col
+
+    return perms
+
+
+def _max_stat(X, X2, perms, dof_scaling):
+    """Aux function for permutation_t_test (for parallel comp)"""
+    n_samples = len(X)
+    mus = np.dot(perms, X) / float(n_samples)
+    stds = np.sqrt(X2[None, :] - mus ** 2) * dof_scaling  # std with splitting
+    max_abs = np.max(np.abs(mus) / (stds / sqrt(n_samples)), axis=1)  # t-max
+    return max_abs
+
+
+@verbose
+def permutation_t_test(X, n_permutations=10000, tail=0, n_jobs=1,
+                       verbose=None):
+    """One sample/paired sample permutation test based on a t-statistic.
+
+    This function can perform the test on one variable or
+    simultaneously on multiple variables. When applying the test to multiple
+    variables, the "tmax" method is used for adjusting the p-values of each
+    variable for multiple comparisons. Like Bonferroni correction, this method
+    adjusts p-values in a way that controls the family-wise error rate.
+    However, the permutation method will be more
+    powerful than Bonferroni correction when different variables in the test
+    are correlated.
+
+    Parameters
+    ----------
+    X : array of shape [n_samples x n_tests]
+        Data of size number of samples (aka number of observations) times
+        number of tests (aka number of variables).
+    n_permutations : int or 'all'
+        Number of permutations. If n_permutations is 'all', all possible
+        permutations are tested (2**n_samples). This is the exact test,
+        which can be intractable when the number of samples is large
+        (e.g. > 20). If n_permutations >= 2**n_samples, the exact test is
+        performed.
+    tail : -1 or 0 or 1 (default = 0)
+        If tail is 1, the alternative hypothesis is that the
+        mean of the data is greater than 0 (upper tailed test).  If tail is 0,
+        the alternative hypothesis is that the mean of the data is different
+        than 0 (two tailed test).  If tail is -1, the alternative hypothesis
+        is that the mean of the data is less than 0 (lower tailed test).
+    n_jobs : int
+        Number of CPUs to use for computation.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    T_obs : array of shape [n_tests]
+        T-statistic observed for all variables
+
+    p_values : array of shape [n_tests]
+        P-values for all the tests (aka variables)
+
+    H0 : array of shape [n_permutations]
+        T-statistic obtained by permutations and t-max trick for multiple
+        comparison.
+
+    Notes
+    -----
+    A reference (among many) in field of neuroimaging:
+    Nichols, T. E. & Holmes, A. P. (2002). Nonparametric permutation tests
+    for functional neuroimaging: a primer with examples.
+    Human Brain Mapping, 15, 1-25.
+    Overview of standard nonparametric randomization and permutation
+    testing applied to neuroimaging data (e.g. fMRI)
+    DOI: http://dx.doi.org/10.1002/hbm.1058
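+
+    Examples
+    --------
+    A minimal sketch on simulated data (shapes are illustrative):
+
+    >>> import numpy as np
+    >>> rng = np.random.RandomState(0)
+    >>> X = rng.randn(12, 5) + 0.5  # 12 samples, 5 variables
+    >>> T_obs, p_values, H0 = permutation_t_test(X, n_permutations=500)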
+    """
+    n_samples, n_tests = X.shape
+
+    do_exact = False
+    if (n_permutations == 'all') or (n_permutations >= 2 ** n_samples - 1):
+        do_exact = True
+        n_permutations = 2 ** n_samples - 1
+
+    X2 = np.mean(X ** 2, axis=0)  # precompute moments
+    mu0 = np.mean(X, axis=0)
+    dof_scaling = sqrt(n_samples / (n_samples - 1.0))
+    std0 = np.sqrt(X2 - mu0 ** 2) * dof_scaling  # get std with var splitting
+    T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples))
+
+    if do_exact:
+        perms = bin_perm_rep(n_samples, a=1, b=-1)[1:, :]
+    else:
+        perms = np.sign(0.5 - np.random.rand(n_permutations, n_samples))
+
+    parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs)
+
+    max_abs = np.concatenate(parallel(my_max_stat(X, X2, p, dof_scaling)
+                                      for p in np.array_split(perms, n_jobs)))
+    H0 = np.sort(max_abs)
+
+    scaling = float(n_permutations + 1)
+
+    if tail == 0:
+        p_values = 1.0 - np.searchsorted(H0, np.abs(T_obs)) / scaling
+    elif tail == 1:
+        p_values = 1.0 - np.searchsorted(H0, T_obs) / scaling
+    elif tail == -1:
+        p_values = 1.0 - np.searchsorted(H0, -T_obs) / scaling
+
+    return T_obs, p_values, H0
+
+permutation_t_test.__test__ = False  # for nosetests
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/regression.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/regression.py
new file mode 100644
index 0000000..b5fb7d7
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/regression.py
@@ -0,0 +1,337 @@
+# Authors: Tal Linzen <linzen at nyu.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#
+# License: BSD (3-clause)
+
+from collections import namedtuple
+from inspect import isgenerator
+import warnings
+from ..externals.six import string_types
+
+import numpy as np
+from scipy import linalg, sparse
+
+from ..source_estimate import SourceEstimate
+from ..epochs import _BaseEpochs
+from ..evoked import Evoked, EvokedArray
+from ..utils import logger, _reject_data_segments, _get_fast_dot
+from ..io.pick import pick_types, pick_info
+from ..fixes import in1d
+
+
+def linear_regression(inst, design_matrix, names=None):
+    """Fit Ordinary Least Squares regression (OLS)
+
+    Parameters
+    ----------
+    inst : instance of Epochs | iterable of SourceEstimate
+        The data to be regressed. Contains all the trials, sensors, and time
+        points for the regression. For Source Estimates, accepts either a list
+        or a generator object.
+    design_matrix : ndarray, shape (n_observations, n_regressors)
+        The regressors to be used. Must be a 2d array with as many rows as
+        the first dimension of `data`. The first column of this matrix will
+        typically consist of ones (intercept column).
+    names : list-like | None
+        Optional parameter to name the regressors. If provided, the length
+        must correspond to the number of columns in design_matrix
+        (including the intercept, if present). Otherwise the default names
+        are x0, x1, x2...xn for n regressors.
+
+    Returns
+    -------
+    results : dict of namedtuple
+        For each regressor (key) a namedtuple is provided with the
+        following attributes:
+
+            beta : regression coefficients
+            stderr : standard error of regression coefficients
+            t_val : t statistics (beta / stderr)
+            p_val : two-sided p-value of t statistic under the t distribution
+            mlog10_p_val : -log10 transformed p-value.
+
+        The tuple members are numpy arrays. The shape of each numpy array is
+        the shape of the data minus the first dimension; e.g., if the shape of
+        the original data was (n_observations, n_channels, n_timepoints),
+        then the shape of each of the arrays will be
+        (n_channels, n_timepoints).
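+
+    Examples
+    --------
+    A minimal sketch, assuming ``epochs`` is an existing Epochs instance
+    (hypothetical data; the intercept column is included explicitly)::
+
+        import numpy as np
+        design = np.ones((len(epochs), 2))      # column 0: intercept
+        design[:, 1] = np.arange(len(epochs))   # column 1: trial order
+        lm = linear_regression(epochs, design,
+                               names=['intercept', 'trial'])
+        beta_evoked = lm['trial'].beta  # Evoked of regression coefficients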
+    """
+    if names is None:
+        names = ['x%i' % i for i in range(design_matrix.shape[1])]
+
+    if isinstance(inst, _BaseEpochs):
+        picks = pick_types(inst.info, meg=True, eeg=True, ref_meg=True,
+                           stim=False, eog=False, ecg=False,
+                           emg=False, exclude=['bads'])
+        if [inst.ch_names[p] for p in picks] != inst.ch_names:
+            warnings.warn('Fitting linear model to non-data or bad '
+                          'channels. Check picking', UserWarning)
+        msg = 'Fitting linear model to epochs'
+        data = inst.get_data()
+        out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
+    elif isgenerator(inst):
+        msg = 'Fitting linear model to source estimates (generator input)'
+        out = next(inst)
+        data = np.array([out.data] + [i.data for i in inst])
+    elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
+        msg = 'Fitting linear model to source estimates (list input)'
+        out = inst[0]
+        data = np.array([i.data for i in inst])
+    else:
+        raise ValueError('Input must be epochs or iterable of source '
+                         'estimates')
+    logger.info(msg + ', (%s targets, %s regressors)' %
+                (np.product(data.shape[1:]), len(names)))
+    lm_params = _fit_lm(data, design_matrix, names)
+    lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
+    lm_fits = {}
+    for name in names:
+        parameters = [p[name] for p in lm_params]
+        for ii, value in enumerate(parameters):
+            out_ = out.copy()
+            if isinstance(out_, SourceEstimate):
+                out_._data[:] = value
+            elif isinstance(out_, Evoked):
+                out_.data[:] = value
+            else:
+                raise RuntimeError('Invalid container.')
+            parameters[ii] = out_
+        lm_fits[name] = lm(*parameters)
+    logger.info('Done')
+    return lm_fits
+
+
+def _fit_lm(data, design_matrix, names):
+    """Aux function"""
+    from scipy import stats
+    n_samples = len(data)
+    n_features = np.product(data.shape[1:])
+    if design_matrix.ndim != 2:
+        raise ValueError('Design matrix must be a 2d array')
+    n_rows, n_predictors = design_matrix.shape
+
+    if n_samples != n_rows:
+        raise ValueError('Number of rows in design matrix must be equal '
+                         'to number of observations')
+    if n_predictors != len(names):
+        raise ValueError('Number of regressor names must be equal to '
+                         'number of column in design matrix')
+
+    y = np.reshape(data, (n_samples, n_features))
+    betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
+
+    df = n_rows - n_predictors
+    sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
+    design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
+    unscaled_stderrs = np.sqrt(np.diag(design_invcov))
+
+    beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
+    for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
+        beta[predictor] = x.reshape(data.shape[1:])
+        stderr[predictor] = sqrt_noise_var * unscaled_stderr
+        t_val[predictor] = beta[predictor] / stderr[predictor]
+        cdf = stats.t.cdf(np.abs(t_val[predictor]), df)
+        p_val[predictor] = (1. - cdf) * 2.
+        mlog10_p_val[predictor] = -np.log10(p_val[predictor])
+
+    return beta, stderr, t_val, p_val, mlog10_p_val
+
+
+def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
+                          covariates=None, reject=None, flat=None, tstep=1.,
+                          decim=1, picks=None, solver='pinv'):
+    """Estimate regression-based evoked potentials/fields by linear modelling
+
+    This models the full M/EEG time course, including correction for
+    overlapping potentials and allowing for continuous/scalar predictors.
+    Internally, this constructs a predictor matrix X of size
+    n_samples * (n_conds * window length), solving the linear system
+    ``Y = bX`` and returning ``b`` as evoked-like time series split by
+    condition. See [1]_.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object. Note: be very careful about data that is not
+        downsampled, as the resulting matrices can be enormous and easily
+        overload your computer. Typically, 100 Hz sampling rate is
+        appropriate - or using the decim keyword (see below).
+    events : ndarray of int, shape (n_events, 3)
+        An array where the first column corresponds to samples in raw
+        and the last to integer codes in event_id.
+    event_id : dict
+        As in Epochs; a dictionary where the values may be integers or
+        iterables of integers, corresponding to the 3rd column of
+        events, and the keys are condition names.
+    tmin : float | dict
+        If float, gives the lower limit (in seconds) for the time window for
+        which all event types' effects are estimated. If a dict, can be used to
+        specify time windows for specific event types: keys correspond to keys
+        in event_id and/or covariates; for missing values, the default (-.1) is
+        used.
+    tmax : float | dict
+        If float, gives the upper limit (in seconds) for the time window for
+        which all event types' effects are estimated. If a dict, can be used to
+        specify time windows for specific event types: keys correspond to keys
+        in event_id and/or covariates; for missing values, the default (1.) is
+        used.
+    covariates : dict-like | None
+        If dict-like (e.g., a pandas DataFrame), values have to be array-like
+        and of the same length as the columns in ``events``. Keys correspond
+        to additional event types/conditions to be estimated and are matched
+        with the time points given by the first column of ``events``. If
+        None, only binary events (from event_id) are used.
+    reject : None | dict
+        For cleaning raw data before the regression is performed: set up
+        rejection parameters based on peak-to-peak amplitude in continuously
+        selected subepochs. If None, no rejection is done.
+        If dict, keys are types ('grad' | 'mag' | 'eeg' | 'eog' | 'ecg')
+        and values are the maximal peak-to-peak values to select rejected
+        epochs, e.g.::
+
+            reject = dict(grad=4000e-12,  # T / m (gradiometers)
+                          mag=4e-11,  # T (magnetometers)
+                          eeg=40e-5,  # V (EEG channels)
+                          eog=250e-5)  # V (EOG channels)
+
+    flat : None | dict
+        For cleaning raw data before the regression is performed: set up
+        rejection parameters based on flatness of the signal. If None, no
+        rejection is done. If a dict, keys are ('grad' | 'mag' |
+        'eeg' | 'eog' | 'ecg') and values are minimal peak-to-peak values to
+        select rejected epochs.
+    tstep : float
+        Length of windows for peak-to-peak detection for raw data cleaning.
+    decim : int
+        Decimate by choosing only a subsample of data points. Highly
+        recommended for data recorded at high sampling frequencies, as
+        otherwise huge intermediate matrices have to be created and inverted.
+    picks : None | list
+        List of indices of channels to be included. If None, defaults to all
+        MEG and EEG channels.
+    solver : str | function
+        Either a function which takes as its inputs the sparse predictor
+        matrix X and the observation matrix Y, and returns the coefficient
+        matrix b; or a string (for now, only 'pinv'), in which case the
+        solver used is dot(scipy.linalg.pinv(dot(X.T, X)), dot(X.T, Y.T)).T.
+
+    Returns
+    -------
+    evokeds : dict
+        A dict where the keys correspond to conditions and the values are
+        Evoked objects with the ER[F/P]s. These can be used exactly like any
+        other Evoked object, including e.g. plotting or statistics.
+
+    References
+    ----------
+    .. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
+           waveforms: II. Non-linear effects, overlap correction, and practical
+           considerations. Psychophysiology, 52(2), 169-189.
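+
+    Examples
+    --------
+    A minimal sketch, assuming ``raw`` and ``events`` already exist and
+    the event codes below are hypothetical::
+
+        evokeds = linear_regression_raw(raw, events,
+                                        event_id=dict(aud=1, vis=2),
+                                        tmin=-0.1, tmax=0.5, decim=4)
+        # each value is an overlap-corrected Evoked, e.g. evokeds['aud']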
+    """
+
+    if isinstance(solver, string_types):
+        if solver == 'pinv':
+            fast_dot = _get_fast_dot()
+
+            # inv is slightly (~10%) faster, but pinv seemingly more stable
+            def solver(X, Y):
+                return fast_dot(linalg.pinv(X.T.dot(X).todense()),
+                                X.T.dot(Y.T)).T
+        else:
+            raise ValueError("No such solver: {0}".format(solver))
+
+    # prepare raw and events
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=True)
+    info = pick_info(raw.info, picks, copy=True)
+    decim = int(decim)
+    info["sfreq"] /= decim
+    data, times = raw[:]
+    data = data[picks, ::decim]
+    times = times[::decim]
+    events = events.copy()
+    events[:, 0] -= raw.first_samp
+    events[:, 0] //= decim
+
+    conds = list(event_id)
+    if covariates is not None:
+        conds += list(covariates)
+
+    # time windows (per event type) are converted to sample points from times
+    if isinstance(tmin, (float, int)):
+        tmin_s = dict((cond, int(tmin * info["sfreq"])) for cond in conds)
+    else:
+        tmin_s = dict((cond, int(tmin.get(cond, -.1) * info["sfreq"]))
+                      for cond in conds)
+    if isinstance(tmax, (float, int)):
+        tmax_s = dict(
+            (cond, int((tmax * info["sfreq"]) + 1.)) for cond in conds)
+    else:
+        tmax_s = dict((cond, int((tmax.get(cond, 1.) * info["sfreq"]) + 1))
+                      for cond in conds)
+
+    # Construct predictor matrix
+    # We do this by creating one array per event type, shape (lags, samples)
+    # (where lags depends on tmin/tmax and can be different for different
+    # event types). Columns correspond to predictors, predictors correspond to
+    # time lags. Thus, each array is mostly sparse, with one diagonal of 1s
+    # per event (for binary predictors).
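+    #
+    # For illustration (not executed): a single event at sample 2 with
+    # tmin=0 and 3 lags gives onset -2, i.e. a shifted diagonal of ones:
+    #
+    #     >>> sparse.dia_matrix((np.ones((1, 3)), [-2]),
+    #     ...                   shape=(6, 3)).toarray()
+    #     array([[ 0.,  0.,  0.],
+    #            [ 0.,  0.,  0.],
+    #            [ 1.,  0.,  0.],
+    #            [ 0.,  1.,  0.],
+    #            [ 0.,  0.,  1.],
+    #            [ 0.,  0.,  0.]])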
+
+    cond_length = dict()
+    xs = []
+    for cond in conds:
+        tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
+        n_lags = int(tmax_ - tmin_)  # width of matrix
+        if cond in event_id:  # for binary predictors
+            ids = ([event_id[cond]]
+                   if isinstance(event_id[cond], int)
+                   else event_id[cond])
+            onsets = -(events[in1d(events[:, 2], ids), 0] + tmin_)
+            values = np.ones((len(onsets), n_lags))
+
+        else:  # for predictors from covariates, e.g. continuous ones
+            covs = covariates[cond]
+            if len(covs) != len(events):
+                error = ("Condition {0} from ```covariates``` is "
+                         "not the same length as ```events```").format(cond)
+                raise ValueError(error)
+            onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
+            v = np.asarray(covs)[np.nonzero(covs)].astype(float)
+            values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]
+
+        cond_length[cond] = len(onsets)
+        xs.append(sparse.dia_matrix((values, onsets),
+                                    shape=(data.shape[1], n_lags)))
+
+    X = sparse.hstack(xs)
+
+    # find only those positions where at least one predictor isn't 0
+    has_val = np.unique(X.nonzero()[0])
+
+    # additionally, reject positions based on extreme steps in the data
+    if reject is not None:
+        _, inds = _reject_data_segments(data, reject, flat, decim=None,
+                                        info=info, tstep=tstep)
+        for t0, t1 in inds:
+            has_val = np.setdiff1d(has_val, range(t0, t1))
+
+    # solve linear system
+    X, data = X.tocsr()[has_val], data[:, has_val]
+    coefs = solver(X, data)
+
+    # construct Evoked objects to be returned from output
+    evokeds = dict()
+    cum = 0
+    for cond in conds:
+        tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
+        evokeds[cond] = EvokedArray(coefs[:, cum:cum + tmax_ - tmin_],
+                                    info=info, comment=cond,
+                                    tmin=tmin_ / float(info["sfreq"]),
+                                    nave=cond_length[cond],
+                                    kind='mean')  # note that nave and kind are
+        cum += tmax_ - tmin_                      # technically not correct
+
+    return evokeds
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_cluster_level.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_cluster_level.py
new file mode 100644
index 0000000..3f00cc9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_cluster_level.py
@@ -0,0 +1,479 @@
+import os
+import os.path as op
+import numpy as np
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_array_almost_equal)
+from nose.tools import assert_true, assert_raises
+from scipy import sparse, linalg, stats
+from mne.fixes import partial
+import warnings
+from mne.parallel import _force_serial
+from mne.stats.cluster_level import (permutation_cluster_test,
+                                     permutation_cluster_1samp_test,
+                                     spatio_temporal_cluster_test,
+                                     spatio_temporal_cluster_1samp_test,
+                                     ttest_1samp_no_p, summarize_clusters_stc)
+from mne.utils import run_tests_if_main, slow_test, _TempDir, set_log_file
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+n_space = 50
+
+
+def _get_conditions():
+    noise_level = 20
+    n_time_1 = 20
+    n_time_2 = 13
+    normfactor = np.hanning(20).sum()
+    rng = np.random.RandomState(42)
+    condition1_1d = rng.randn(n_time_1, n_space) * noise_level
+    for c in condition1_1d:
+        c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
+
+    condition2_1d = rng.randn(n_time_2, n_space) * noise_level
+    for c in condition2_1d:
+        c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
+
+    pseudoekp = 10 * np.hanning(25)[None, :]
+    condition1_1d[:, 25:] += pseudoekp
+    condition2_1d[:, 25:] -= pseudoekp
+
+    condition1_2d = condition1_1d[:, :, np.newaxis]
+    condition2_2d = condition2_1d[:, :, np.newaxis]
+    return condition1_1d, condition2_1d, condition1_2d, condition2_2d
+
+
+def test_cache_dir():
+    """Test use of cache dir
+    """
+    tempdir = _TempDir()
+    orig_dir = os.getenv('MNE_CACHE_DIR', None)
+    orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
+    rng = np.random.RandomState(0)
+    X = rng.randn(9, 2, 10)
+    log_file = op.join(tempdir, 'log.txt')
+    try:
+        os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
+        os.environ['MNE_CACHE_DIR'] = tempdir
+        # Fix error for #1507: in-place when memmapping
+        permutation_cluster_1samp_test(
+            X, buffer_size=None, n_jobs=2, n_permutations=1,
+            seed=0, stat_fun=ttest_1samp_no_p, verbose=False)
+        # ensure that non-independence yields warning
+        stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
+        set_log_file(log_file)
+        permutation_cluster_1samp_test(
+            X, buffer_size=10, n_jobs=2, n_permutations=1,
+            seed=0, stat_fun=stat_fun, verbose=False)
+        with open(log_file, 'r') as fid:
+            assert_true('independently' in ''.join(fid.readlines()))
+    finally:
+        if orig_dir is not None:
+            os.environ['MNE_CACHE_DIR'] = orig_dir
+        else:
+            del os.environ['MNE_CACHE_DIR']
+        if orig_size is not None:
+            os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
+        else:
+            del os.environ['MNE_MEMMAP_MIN_SIZE']
+        set_log_file(None)
+
+
+def test_permutation_step_down_p():
+    """Test cluster level permutations with step_down_p
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph  # noqa
+    except ImportError:
+        return
+    rng = np.random.RandomState(0)
+    # subjects, time points, spatial points
+    X = rng.randn(9, 2, 10)
+    # add some significant points
+    X[:, 0:2, 0:2] += 2  # span two time points and two spatial points
+    X[:, 1, 5:9] += 0.5  # span four time points with 4x smaller amplitude
+    thresh = 2
+    # make sure it works when we use ALL points in step-down
+    t, clusters, p, H0 = \
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=1.0)
+    # make sure using step-down will actually yield improvements sometimes
+    t, clusters, p_old, H0 = \
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=0.0)
+    assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster
+    t, clusters, p_new, H0 = \
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=0.05)
+    assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued
+    assert_true(np.all(p_old >= p_new))
+
+
+def test_cluster_permutation_test():
+    """Test cluster level permutations tests
+    """
+    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
+        _get_conditions()
+    for condition1, condition2 in zip((condition1_1d, condition1_2d),
+                                      (condition2_1d, condition2_2d)):
+        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
+            [condition1, condition2], n_permutations=100, tail=1, seed=1,
+            buffer_size=None)
+        assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
+            [condition1, condition2], n_permutations=100, tail=0, seed=1,
+            buffer_size=None)
+        assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+        # test with 2 jobs and buffer_size enabled
+        buffer_size = condition1.shape[1] // 10
+        T_obs, clusters, cluster_p_values_buff, hist =\
+            permutation_cluster_test([condition1, condition2],
+                                     n_permutations=100, tail=0, seed=1,
+                                     n_jobs=2, buffer_size=buffer_size)
+        assert_array_equal(cluster_p_values, cluster_p_values_buff)
+
+
+@slow_test
+def test_cluster_permutation_t_test():
+    """Test cluster level permutations T-test
+    """
+    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
+        _get_conditions()
+
+    # use a very large sigma to make sure Ts are not independent
+    stat_funs = [ttest_1samp_no_p,
+                 partial(ttest_1samp_no_p, sigma=1e-1)]
+
+    for stat_fun in stat_funs:
+        for condition1 in (condition1_1d, condition1_2d):
+            # these are so significant we can get away with fewer perms
+            T_obs, clusters, cluster_p_values, hist =\
+                permutation_cluster_1samp_test(condition1, n_permutations=100,
+                                               tail=0, seed=1,
+                                               buffer_size=None)
+            assert_equal(np.sum(cluster_p_values < 0.05), 1)
+
+            T_obs_pos, c_1, cluster_p_values_pos, _ =\
+                permutation_cluster_1samp_test(condition1, n_permutations=100,
+                                               tail=1, threshold=1.67, seed=1,
+                                               stat_fun=stat_fun,
+                                               buffer_size=None)
+
+            T_obs_neg, _, cluster_p_values_neg, _ =\
+                permutation_cluster_1samp_test(-condition1, n_permutations=100,
+                                               tail=-1, threshold=-1.67,
+                                               seed=1, stat_fun=stat_fun,
+                                               buffer_size=None)
+            assert_array_equal(T_obs_pos, -T_obs_neg)
+            assert_array_equal(cluster_p_values_pos < 0.05,
+                               cluster_p_values_neg < 0.05)
+
+            # test with 2 jobs and buffer_size enabled
+            buffer_size = condition1.shape[1] // 10
+            T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \
+                permutation_cluster_1samp_test(-condition1, n_permutations=100,
+                                               tail=-1, threshold=-1.67,
+                                               seed=1, n_jobs=2,
+                                               stat_fun=stat_fun,
+                                               buffer_size=buffer_size)
+
+            assert_array_equal(T_obs_neg, T_obs_neg_buff)
+            assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)
+
+
+def test_cluster_permutation_with_connectivity():
+    """Test cluster level permutations with connectivity matrix
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
+        _get_conditions()
+
+    n_pts = condition1_1d.shape[1]
+    # we don't care about p-values in any of these, so do fewer permutations
+    args = dict(seed=None, max_step=1, exclude=None,
+                step_down_p=0, t_power=1, threshold=1.67,
+                check_disjoint=False, n_permutations=50)
+
+    did_warn = False
+    for X1d, X2d, func, spatio_temporal_func in \
+            [(condition1_1d, condition1_2d,
+              permutation_cluster_1samp_test,
+              spatio_temporal_cluster_1samp_test),
+             ([condition1_1d, condition2_1d],
+              [condition1_2d, condition2_2d],
+              permutation_cluster_test,
+              spatio_temporal_cluster_test)]:
+        out = func(X1d, **args)
+        connectivity = grid_to_graph(1, n_pts)
+        out_connectivity = func(X1d, connectivity=connectivity, **args)
+        assert_array_equal(out[0], out_connectivity[0])
+        for a, b in zip(out_connectivity[1], out[1]):
+            assert_array_equal(out[0][a], out[0][b])
+            assert_true(np.all(a[b]))
+
+        # test spatio-temporal w/o time connectivity (repeat spatial pattern)
+        connectivity_2 = sparse.coo_matrix(
+            linalg.block_diag(connectivity.asfptype().todense(),
+                              connectivity.asfptype().todense()))
+
+        if isinstance(X1d, list):
+            X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
+        else:
+            X1d_2 = np.concatenate((X1d, X1d), axis=1)
+
+        out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)
+        # make sure we were operating on the same values
+        split = len(out[0])
+        assert_array_equal(out[0], out_connectivity_2[0][:split])
+        assert_array_equal(out[0], out_connectivity_2[0][split:])
+
+        # make sure we really got 2x the number of original clusters
+        n_clust_orig = len(out[1])
+        assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)
+
+        # Make sure that we got the old ones back
+        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
+        data_2 = set([np.sum(out_connectivity_2[0][a]) for a in
+                     out_connectivity_2[1][:]])
+        assert_true(len(data_1.intersection(data_2)) == len(data_1))
+
+        # now use the other algorithm
+        if isinstance(X1d, list):
+            X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
+        else:
+            X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))
+
+        out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,
+                                                  connectivity=connectivity,
+                                                  max_step=0, threshold=1.67,
+                                                  check_disjoint=True)
+        # make sure we were operating on the same values
+        split = len(out[0])
+        assert_array_equal(out[0], out_connectivity_3[0][0])
+        assert_array_equal(out[0], out_connectivity_3[0][1])
+
+        # make sure we really got 2x the number of original clusters
+        assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)
+
+        # Make sure that we got the old ones back
+        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
+        data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
+                     out_connectivity_3[1]])
+        assert_true(len(data_1.intersection(data_2)) == len(data_1))
+
+        # test new versus old method
+        out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50,
+                                                  connectivity=connectivity,
+                                                  max_step=2, threshold=1.67)
+        out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50,
+                                                  connectivity=connectivity,
+                                                  max_step=1, threshold=1.67)
+
+        # clusters could be in a different order
+        sums_4 = [np.sum(out_connectivity_4[0][a])
+                  for a in out_connectivity_4[1]]
+        sums_5 = [np.sum(out_connectivity_4[0][a])
+                  for a in out_connectivity_5[1]]
+        sums_4 = np.sort(sums_4)
+        sums_5 = np.sort(sums_5)
+        assert_array_almost_equal(sums_4, sums_5)
+
+        if not _force_serial:
+            assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                          n_permutations=1, connectivity=connectivity,
+                          max_step=1, threshold=1.67, n_jobs=-1000)
+
+        # not enough TFCE params
+        assert_raises(KeyError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, threshold=dict(me='hello'))
+
+        # too extreme a start threshold
+        with warnings.catch_warnings(record=True) as w:
+            spatio_temporal_func(X1d_3, connectivity=connectivity,
+                                 threshold=dict(start=10, step=1))
+        if not did_warn:
+            assert_true(len(w) == 1)
+            did_warn = True
+
+        # too extreme a start threshold
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=-1,
+                      threshold=dict(start=1, step=-1))
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=-1,
+                      threshold=dict(start=-1, step=1))
+
+        # wrong type for threshold
+        assert_raises(TypeError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, threshold=[])
+
+        # wrong value for tail
+        assert_raises(ValueError, spatio_temporal_func, X1d_3,
+                      connectivity=connectivity, tail=2)
+
+        # make sure it actually found a significant point
+        out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50,
+                                                  connectivity=connectivity,
+                                                  max_step=1,
+                                                  threshold=dict(start=1,
+                                                                 step=1))
+        assert_true(np.min(out_connectivity_6[2]) < 0.05)
+
+
+@slow_test
+def test_permutation_connectivity_equiv():
+    """Test cluster level permutations with and without connectivity
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+    rng = np.random.RandomState(0)
+    # subjects, time points, spatial points
+    n_time = 2
+    n_space = 4
+    X = rng.randn(6, n_time, n_space)
+    # add some significant points
+    X[:, :, 0:2] += 10  # span two time points and two spatial points
+    X[:, 1, 3] += 20  # span one time point
+    max_steps = [1, 1, 1, 2]
+    # This will run the full algorithm in two ways, then the ST-algorithm
+    # in two ways; all of these should give the same results
+    conns = [None, grid_to_graph(n_time, n_space),
+             grid_to_graph(1, n_space), grid_to_graph(1, n_space)]
+    stat_map = None
+    thresholds = [2, dict(start=1.5, step=1.0)]
+    sig_counts = [2, 5]
+    sdps = [0, 0.05, 0.05]
+    ots = ['mask', 'mask', 'indices']
+    stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
+    for thresh, count in zip(thresholds, sig_counts):
+        cs = None
+        ps = None
+        for max_step, conn in zip(max_steps, conns):
+            for sdp, ot in zip(sdps, ots):
+                t, clusters, p, H0 = \
+                    permutation_cluster_1samp_test(
+                        X, threshold=thresh, connectivity=conn, n_jobs=2,
+                        max_step=max_step, stat_fun=stat_fun,
+                        step_down_p=sdp, out_type=ot)
+                # make sure our output datatype is correct
+                if ot == 'mask':
+                    assert_true(isinstance(clusters[0], np.ndarray))
+                    assert_true(clusters[0].dtype == bool)
+                    assert_array_equal(clusters[0].shape, X.shape[1:])
+                else:  # ot == 'indices'
+                    assert_true(isinstance(clusters[0], tuple))
+
+                # make sure all comparisons were done; for TFCE, no perm
+                # should come up empty
+                if count == 8:
+                    assert_true(not np.any(H0 == 0))
+                inds = np.where(p < 0.05)[0]
+                assert_true(len(inds) == count)
+                this_cs = [clusters[ii] for ii in inds]
+                this_ps = p[inds]
+                this_stat_map = np.zeros((n_time, n_space), dtype=bool)
+                for ci, c in enumerate(this_cs):
+                    if isinstance(c, tuple):
+                        this_c = np.zeros((n_time, n_space), bool)
+                        for x, y in zip(c[0], c[1]):
+                            this_stat_map[x, y] = True
+                            this_c[x, y] = True
+                        this_cs[ci] = this_c
+                        c = this_c
+                    this_stat_map[c] = True
+                if cs is None:
+                    ps = this_ps
+                    cs = this_cs
+                if stat_map is None:
+                    stat_map = this_stat_map
+                assert_array_equal(ps, this_ps)
+                assert_true(len(cs) == len(this_cs))
+                for c1, c2 in zip(cs, this_cs):
+                    assert_array_equal(c1, c2)
+                assert_array_equal(stat_map, this_stat_map)
+
+
+@slow_test
+def test_spatio_temporal_cluster_connectivity():
+    """Test spatio-temporal cluster permutations
+    """
+    try:
+        try:
+            from sklearn.feature_extraction.image import grid_to_graph
+        except ImportError:
+            from scikits.learn.feature_extraction.image import grid_to_graph
+    except ImportError:
+        return
+    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
+        _get_conditions()
+
+    rng = np.random.RandomState(0)
+    noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
+    data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])
+
+    noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
+    data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])
+
+    conn = grid_to_graph(data1_2d.shape[-1], 1)
+
+    threshold = dict(start=4.0, step=2)
+    T_obs, clusters, p_values_conn, hist = \
+        spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn,
+                                     n_permutations=50, tail=1, seed=1,
+                                     threshold=threshold, buffer_size=None)
+
+    buffer_size = data1_2d.size // 10
+    T_obs, clusters, p_values_no_conn, hist = \
+        spatio_temporal_cluster_test([data1_2d, data2_2d],
+                                     n_permutations=50, tail=1, seed=1,
+                                     threshold=threshold, n_jobs=2,
+                                     buffer_size=buffer_size)
+
+    assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05))
+
+    # make sure results are the same without buffer_size
+    T_obs, clusters, p_values2, hist2 = \
+        spatio_temporal_cluster_test([data1_2d, data2_2d],
+                                     n_permutations=50, tail=1, seed=1,
+                                     threshold=threshold, n_jobs=2,
+                                     buffer_size=None)
+    assert_array_equal(p_values_no_conn, p_values2)
+
+
+def ttest_1samp(X):
+    """Returns T-values
+    """
+    return stats.ttest_1samp(X, 0)[0]
+
+
+def test_summarize_clusters():
+    """Test cluster summary stcs
+    """
+    clu = (np.random.random([1, 20484]),
+           [(np.array([0]), np.array([0, 2, 4]))],
+           np.array([0.02, 0.1]),
+           np.array([12, -14, 30]))
+    stc_sum = summarize_clusters_stc(clu)
+    assert_true(stc_sum.data.shape[1] == 2)
+    clu[2][0] = 0.3
+    assert_raises(RuntimeError, summarize_clusters_stc, clu)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_multi_comp.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_multi_comp.py
new file mode 100644
index 0000000..76b2c99
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_multi_comp.py
@@ -0,0 +1,47 @@
+import numpy as np
+from numpy.testing import (
+    assert_almost_equal, assert_allclose, assert_raises, assert_array_equal)
+from nose.tools import assert_true
+from scipy import stats
+
+from mne.stats import fdr_correction, bonferroni_correction
+
+
+def test_multi_pval_correction():
+    """Test pval correction for multi comparison (FDR and Bonferroni)
+    """
+    rng = np.random.RandomState(0)
+    X = rng.randn(10, 1000, 10)
+    X[:, :50, 0] += 4.0  # 50 significant tests
+    alpha = 0.05
+
+    T, pval = stats.ttest_1samp(X, 0)
+
+    n_samples = X.shape[0]
+    n_tests = X.size / n_samples
+    thresh_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
+
+    reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha)
+    thresh_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
+    assert_true(pval_bonferroni.ndim == 2)
+    assert_true(reject_bonferroni.ndim == 2)
+    assert_allclose(pval_bonferroni / 10000, pval)
+    reject_expected = pval_bonferroni < alpha
+    assert_array_equal(reject_bonferroni, reject_expected)
+
+    fwer = np.mean(reject_bonferroni)
+    assert_almost_equal(fwer, alpha, 1)
+
+    reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
+    assert_true(pval_fdr.ndim == 2)
+    assert_true(reject_fdr.ndim == 2)
+    thresh_fdr = np.min(np.abs(T)[reject_fdr])
+    assert_true(0 <= (reject_fdr.sum() - 50) <= 50 * 1.05)
+    assert_true(thresh_uncorrected <= thresh_fdr <= thresh_bonferroni)
+    assert_raises(ValueError, fdr_correction, pval, alpha, method='blah')
+    assert_true(np.all(fdr_correction(pval, alpha=0)[0] == 0))
+
+    reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='negcorr')
+    thresh_fdr = np.min(np.abs(T)[reject_fdr])
+    assert_true(0 <= (reject_fdr.sum() - 50) <= 50 * 1.05)
+    assert_true(thresh_uncorrected <= thresh_fdr <= thresh_bonferroni)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_parametric.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_parametric.py
new file mode 100644
index 0000000..57f184d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_parametric.py
@@ -0,0 +1,111 @@
+from itertools import product
+from mne.stats.parametric import (f_mway_rm, f_threshold_mway_rm,
+                                  _map_effects)
+from nose.tools import assert_raises, assert_true, assert_equal
+from numpy.testing import assert_array_almost_equal
+
+import numpy as np
+
+# hardcoded external test results, manually transferred
+test_external = {
+    # SPSS, manually conducted analysis
+    'spss_fvals': np.array([2.568, 0.240, 1.756]),
+    'spss_pvals_uncorrected': np.array([0.126, 0.788, 0.186]),
+    'spss_pvals_corrected': np.array([0.126, 0.784, 0.192]),
+    # R 15.2
+    # data generated using this code http://goo.gl/7UcKb
+    'r_fvals': np.array([2.567619, 0.24006, 1.756380]),
+    'r_pvals_uncorrected': np.array([0.12557, 0.78776, 0.1864]),
+    # and https://gist.github.com/dengemann/5539403
+    'r_fvals_3way': np.array([
+        0.74783999999999995,   # A
+        0.20895,               # B
+        0.21378,               # A:B
+        0.99404000000000003,   # C
+        0.094039999999999999,  # A:C
+        0.11685,               # B:C
+        2.78749]),              # A:B:C
+    'r_fvals_1way': np.array([0.67571999999999999])
+}
+
+
+def generate_data(n_subjects, n_conditions):
+    """generate testing data"""
+    rng = np.random.RandomState(42)
+    data = rng.randn(n_subjects * n_conditions).reshape(
+        n_subjects, n_conditions)
+    return data
+
+
+def test_map_effects():
+    """ Test ANOVA effects parsing"""
+    selection, names = _map_effects(n_factors=2, effects='A')
+    assert_equal(names, ['A'])
+
+    selection, names = _map_effects(n_factors=2, effects=['A', 'A:B'])
+    assert_equal(names, ['A', 'A:B'])
+
+    selection, names = _map_effects(n_factors=3, effects='A*B')
+    assert_equal(names, ['A', 'B', 'A:B'])
+
+    selection, names = _map_effects(n_factors=3, effects='A*C')
+    assert_equal(names, ['A', 'B', 'A:B', 'C', 'A:C', 'B:C', 'A:B:C'])
+
+    assert_raises(ValueError, _map_effects, n_factors=2, effects='C')
+
+    assert_raises(ValueError, _map_effects, n_factors=27, effects='all')
+
+
+def test_f_twoway_rm():
+    """ Test 2-way anova """
+    iter_params = product([4, 10], [2, 15], [4, 6, 8],
+                          ['A', 'B', 'A:B'],
+                          [False, True])
+    _effects = {
+        4: [2, 2],
+        6: [2, 3],
+        8: [2, 4]
+    }
+    for params in iter_params:
+        n_subj, n_obs, n_levels, effects, correction = params
+        data = np.random.random([n_subj, n_levels, n_obs])
+        fvals, pvals = f_mway_rm(data, _effects[n_levels], effects,
+                                 correction=correction)
+        assert_true((fvals >= 0).all())
+        if pvals.any():
+            assert_true(((0 <= pvals) & (1 >= pvals)).all())
+        n_effects = len(_map_effects(n_subj, effects)[0])
+        assert_true(fvals.size == n_obs * n_effects)
+        if n_effects == 1:  # test for principle of least surprise ...
+            assert_true(fvals.ndim == 1)
+
+        fvals_ = f_threshold_mway_rm(n_subj, _effects[n_levels], effects)
+        assert_true((fvals_ >= 0).all())
+        assert_true(fvals_.size == n_effects)
+
+    data = np.random.random([n_subj, n_levels, 1])
+    assert_raises(ValueError, f_mway_rm, data, _effects[n_levels],
+                  effects='C', correction=correction)
+    data = np.random.random([n_subj, n_levels, n_obs, 3])
+    # check for dimension handling
+    f_mway_rm(data, _effects[n_levels], effects, correction=correction)
+
+    # now check against external software results
+    test_data = generate_data(n_subjects=20, n_conditions=6)
+    fvals, pvals = f_mway_rm(test_data, [2, 3])
+
+    assert_array_almost_equal(fvals, test_external['spss_fvals'], 3)
+    assert_array_almost_equal(pvals, test_external['spss_pvals_uncorrected'],
+                              3)
+    assert_array_almost_equal(fvals, test_external['r_fvals'], 4)
+    assert_array_almost_equal(pvals, test_external['r_pvals_uncorrected'], 3)
+
+    _, pvals = f_mway_rm(test_data, [2, 3], correction=True)
+    assert_array_almost_equal(pvals, test_external['spss_pvals_corrected'], 3)
+
+    test_data = generate_data(n_subjects=20, n_conditions=8)
+    fvals, _ = f_mway_rm(test_data, [2, 2, 2])
+    assert_array_almost_equal(fvals, test_external['r_fvals_3way'], 5)
+
+    fvals, _ = f_mway_rm(test_data, [8], 'A')
+    assert_array_almost_equal(fvals, test_external['r_fvals_1way'], 5)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_permutations.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_permutations.py
new file mode 100644
index 0000000..8ac0bac
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_permutations.py
@@ -0,0 +1,33 @@
+import numpy as np
+from numpy.testing import assert_array_equal, assert_almost_equal
+from scipy import stats
+
+from mne.stats.permutations import permutation_t_test
+
+
+def test_permutation_t_test():
+    """Test T-test based on permutations
+    """
+    # 1 sample t-test
+    np.random.seed(10)
+    n_samples, n_tests = 30, 5
+    X = np.random.randn(n_samples, n_tests)
+    X[:, :2] += 1
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=0)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [True, True, False, False, False])
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=1)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [True, True, False, False, False])
+
+    T_obs, p_values, H0 = permutation_t_test(X, n_permutations=999, tail=-1)
+    is_significant = p_values < 0.05
+    assert_array_equal(is_significant, [False, False, False, False, False])
+
+    X = np.random.randn(18, 1)
+    T_obs, p_values, H0 = permutation_t_test(X[:, [0]], n_permutations='all')
+    T_obs_scipy, p_values_scipy = stats.ttest_1samp(X[:, 0], 0)
+    assert_almost_equal(T_obs[0], T_obs_scipy, 8)
+    assert_almost_equal(p_values[0], p_values_scipy, 2)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_regression.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_regression.py
new file mode 100644
index 0000000..0dccf0f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/stats/tests/test_regression.py
@@ -0,0 +1,110 @@
+# Authors: Teon Brooks <teon.brooks at gmail.com>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_allclose
+
+from scipy.signal import hann
+
+from nose.tools import assert_raises, assert_true, assert_equal
+
+import mne
+from mne import read_source_estimate
+from mne.datasets import testing
+from mne.stats.regression import linear_regression, linear_regression_raw
+from mne.io import RawArray
+
+data_path = testing.data_path(download=False)
+stc_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-lh.stc')
+raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw-eve.fif'
+
+
+@testing.requires_testing_data
+def test_regression():
+    """Test Ordinary Least Squares Regression
+    """
+    tmin, tmax = -0.2, 0.5
+    event_id = dict(aud_l=1, aud_r=2)
+
+    # Setup for reading the raw data
+    raw = mne.io.Raw(raw_fname)
+    events = mne.read_events(event_fname)[:10]
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        baseline=(None, 0))
+    picks = np.arange(len(epochs.ch_names))
+    evoked = epochs.average(picks=picks)
+    design_matrix = epochs.events[:, 1:].astype(np.float64)
+    # makes the intercept
+    design_matrix[:, 0] = 1
+    # creates contrast: aud_l=0, aud_r=1
+    design_matrix[:, 1] -= 1
+    with warnings.catch_warnings(record=True) as w:
+        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
+        assert_true(w[0].category == UserWarning)
+        assert_true('non-data' in '%s' % w[0].message)
+
+    for predictor, parameters in lm.items():
+        for value in parameters:
+            assert_equal(value.data.shape, evoked.data.shape)
+
+    assert_raises(ValueError, linear_regression, [epochs, epochs],
+                  design_matrix)
+
+    stc = read_source_estimate(stc_fname).crop(0, 0.02)
+    stc_list = [stc, stc, stc]
+    stc_gen = (s for s in stc_list)
+    with warnings.catch_warnings(record=True):  # divide by zero
+        lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
+    lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])
+
+    for k in lm1:
+        for v1, v2 in zip(lm1[k], lm2[k]):
+            assert_array_equal(v1.data, v2.data)
+
+
+@testing.requires_testing_data
+def test_continuous_regression_no_overlap():
+    """Test regression without overlap correction, on real data"""
+    tmin, tmax = -.1, .5
+
+    raw = mne.io.Raw(raw_fname, preload=True)
+    events = mne.read_events(event_fname)
+    event_id = dict(audio_l=1, audio_r=2)
+
+    raw = raw.pick_channels(raw.ch_names[:2])
+
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
+                        baseline=None, reject=None)
+
+    revokeds = linear_regression_raw(raw, events, event_id,
+                                     tmin=tmin, tmax=tmax,
+                                     reject=None)
+
+    for cond in event_id.keys():
+        assert_allclose(revokeds[cond].data,
+                        epochs[cond].average().data)
+
+
+def test_continuous_regression_with_overlap():
+    """Test regression with overlap correction"""
+    signal = np.zeros(100000)
+    times = [1000, 2500, 3000, 5000, 5250, 7000, 7250, 8000]
+    events = np.zeros((len(times), 3), int)
+    events[:, 2] = 1
+    events[:, 0] = times
+    signal[events[:, 0]] = 1.
+    effect = hann(101)
+    signal = np.convolve(signal, effect)[:len(signal)]
+    raw = RawArray(signal[np.newaxis, :], mne.create_info(1, 100, 'eeg'))
+
+    assert_allclose(effect,
+                    linear_regression_raw(raw, events, {1: 1}, tmin=0)[1]
+                    .data.flatten())
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/surface.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/surface.py
new file mode 100644
index 0000000..8013042
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/surface.py
@@ -0,0 +1,1113 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+import sys
+from struct import pack
+from glob import glob
+
+import numpy as np
+from scipy.sparse import coo_matrix, csr_matrix, eye as speye
+
+from .bem import read_bem_surfaces
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tree import dir_tree_find
+from .io.tag import find_tag
+from .io.write import (write_int, start_file, end_block,
+                       start_block, end_file, write_string,
+                       write_float_sparse_rcs)
+from .channels.channels import _get_meg_system
+from .transforms import transform_surface_to
+from .utils import logger, verbose, get_subjects_dir
+from .externals.six import string_types
+
+
+###############################################################################
+# AUTOMATED SURFACE FINDING
+
+@verbose
+def get_head_surf(subject, source=('bem', 'head'), subjects_dir=None,
+                  verbose=None):
+    """Load the subject head surface
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    source : str | list of str
+        Type to load. Common choices would be `'bem'` or `'head'`. We first
+        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
+        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory by going
+        through all files matching the pattern. The head surface will be read
+        from the first file containing a head surface. Can also be a list
+        to try multiple strings.
+    subjects_dir : str, or None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surf : dict
+        The head surface.
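+
+    Examples
+    --------
+    A minimal sketch (the subject name is hypothetical)::
+
+        >>> surf = get_head_surf('sample')  # doctest: +SKIP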
+    """
+    # Load the head surface from the BEM
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    # use realpath to allow for linked surfaces (cf. MNE manual 196-197)
+    if isinstance(source, string_types):
+        source = [source]
+    surf = None
+    for this_source in source:
+        this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
+                                        '%s-%s.fif' % (subject, this_source)))
+        if op.exists(this_head):
+            surf = read_bem_surfaces(this_head, True,
+                                     FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                                     verbose=False)
+        else:
+            # let's do a more sophisticated search
+            path = op.join(subjects_dir, subject, 'bem')
+            if not op.isdir(path):
+                raise IOError('Subject bem directory "%s" does not exist'
+                              % path)
+            files = sorted(glob(op.join(path, '%s*%s.fif'
+                                        % (subject, this_source))))
+            for this_head in files:
+                try:
+                    surf = read_bem_surfaces(this_head, True,
+                                             FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                                             verbose=False)
+                except ValueError:
+                    pass
+                else:
+                    break
+        if surf is not None:
+            break
+
+    if surf is None:
+        raise IOError('No file matching "%s*%s" and containing a head '
+                      'surface found' % (subject, this_source))
+    logger.info('Using surface from %s' % this_head)
+    return surf
+
+
+@verbose
+def get_meg_helmet_surf(info, trans=None, verbose=None):
+    """Load the MEG helmet associated with the MEG sensors
+
+    Parameters
+    ----------
+    info : instance of io.meas_info.Info
+        Measurement info.
+    trans : dict
+        The head<->MRI transformation, usually obtained using
+        read_trans(). Can be None, in which case the surface will
+        be in head coordinates instead of MRI coordinates.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surf : dict
+        The MEG helmet as a surface.
+    """
+    system = _get_meg_system(info)
+    logger.info('Getting helmet for system %s' % system)
+    fname = op.join(op.split(__file__)[0], 'data', 'helmets',
+                    system + '.fif.gz')
+    surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET,
+                             verbose=False)
+
+    # Ignore what the file says, it's in device coords and we want MRI coords
+    surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+    transform_surface_to(surf, 'head', info['dev_head_t'])
+    if trans is not None:
+        transform_surface_to(surf, 'mri', trans)
+    return surf
+
+
+###############################################################################
+# EFFICIENCY UTILITIES
+
+def fast_cross_3d(x, y):
+    """Compute cross product between list of 3D vectors
+
+    Much faster than np.cross() when the number of cross products
+    becomes large (>500). This is because np.cross() methods become
+    less memory efficient at this stage.
+
+    Parameters
+    ----------
+    x : array
+        Input array 1.
+    y : array
+        Input array 2.
+
+    Returns
+    -------
+    z : array
+        Cross product of x and y.
+
+    Notes
+    -----
+    x and y must both be 2D row vectors. One must have length 1, or both
+    lengths must match.
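+
+    Examples
+    --------
+    Illustrative check against np.cross (not part of the original
+    docstring)::
+
+        >>> import numpy as np
+        >>> x = np.array([[1., 0., 0.]])
+        >>> y = np.array([[0., 1., 0.]])
+        >>> fast_cross_3d(x, y)
+        array([[ 0.,  0.,  1.]])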
+    """
+    assert x.ndim == 2
+    assert y.ndim == 2
+    assert x.shape[1] == 3
+    assert y.shape[1] == 3
+    assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
+    if max([x.shape[0], y.shape[0]]) >= 500:
+        return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
+                     x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
+                     x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
+    else:
+        return np.cross(x, y)
+
+
+def _fast_cross_nd_sum(a, b, c):
+    """Fast cross and sum"""
+    return ((a[..., 1] * b[..., 2] - a[..., 2] * b[..., 1]) * c[..., 0] +
+            (a[..., 2] * b[..., 0] - a[..., 0] * b[..., 2]) * c[..., 1] +
+            (a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]) * c[..., 2])
+
+
+def _accumulate_normals(tris, tri_nn, npts):
+    """Efficiently accumulate triangle normals"""
+    # this code replaces the following, but is faster (vectorized):
+    #
+    # this['nn'] = np.zeros((this['np'], 3))
+    # for p in xrange(this['ntri']):
+    #     verts = this['tris'][p]
+    #     this['nn'][verts, :] += this['tri_nn'][p, :]
+    #
+    nn = np.zeros((npts, 3))
+    for verts in tris.T:  # note this only loops 3x (number of verts per tri)
+        for idx in range(3):  # x, y, z
+            nn[:, idx] += np.bincount(verts, weights=tri_nn[:, idx],
+                                      minlength=npts)
+    return nn
+
+
+def _triangle_neighbors(tris, npts):
+    """Efficiently compute vertex neighboring triangles"""
+    # this code replaces the following, but is faster (vectorized):
+    #
+    # this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
+    # for p in xrange(this['ntri']):
+    #     verts = this['tris'][p]
+    #     this['neighbor_tri'][verts[0]].append(p)
+    #     this['neighbor_tri'][verts[1]].append(p)
+    #     this['neighbor_tri'][verts[2]].append(p)
+    # this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
+    #
+    verts = tris.ravel()
+    counts = np.bincount(verts, minlength=npts)
+    reord = np.argsort(verts)
+    tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
+    idx = np.cumsum(np.r_[0, counts])
+    # the sort below slows it down a bit, but is needed for equivalence
+    neighbor_tri = [np.sort(tri_idx[v1:v2])
+                    for v1, v2 in zip(idx[:-1], idx[1:])]
+    return neighbor_tri
+
+
+def _triangle_coords(r, geom, best):
+    """Get coordinates of a vertex projected to a triangle"""
+    r1 = geom['r1'][best]
+    tri_nn = geom['nn'][best]
+    r12 = geom['r12'][best]
+    r13 = geom['r13'][best]
+    a = geom['a'][best]
+    b = geom['b'][best]
+    c = geom['c'][best]
+    rr = r - r1
+    z = np.sum(rr * tri_nn)
+    v1 = np.sum(rr * r12)
+    v2 = np.sum(rr * r13)
+    det = a * b - c * c
+    x = (b * v1 - c * v2) / det
+    y = (a * v2 - c * v1) / det
+    return x, y, z
+
+
+@verbose
+def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
+    """Complete surface info"""
+    # based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
+
+    #   Main triangulation [mne_add_triangle_data()]
+    this['tri_area'] = np.zeros(this['ntri'])
+    r1 = this['rr'][this['tris'][:, 0], :]
+    r2 = this['rr'][this['tris'][:, 1], :]
+    r3 = this['rr'][this['tris'][:, 2], :]
+    this['tri_cent'] = (r1 + r2 + r3) / 3.0
+    this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
+
+    #   Triangle normals and areas
+    size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
+    this['tri_area'] = size / 2.0
+    zidx = np.where(size == 0)[0]
+    for idx in zidx:
+        logger.info('    Warning: zero size triangle # %s' % idx)
+    size[zidx] = 1.0  # prevent ugly divide-by-zero
+    this['tri_nn'] /= size[:, None]
+
+    #    Find neighboring triangles, accumulate vertex normals, normalize
+    logger.info('    Triangle neighbors and vertex normals...')
+    this['neighbor_tri'] = _triangle_neighbors(this['tris'], this['np'])
+    this['nn'] = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
+    _normalize_vectors(this['nn'])
+
+    #   Check for topological defects
+    idx = np.where([len(n) == 0 for n in this['neighbor_tri']])[0]
+    if len(idx) > 0:
+        logger.info('    Vertices [%s] do not have any neighboring '
+                    'triangles!' % ','.join([str(ii) for ii in idx]))
+    idx = np.where([len(n) < 3 for n in this['neighbor_tri']])[0]
+    if len(idx) > 0:
+        logger.info('    Vertices [%s] have fewer than three neighboring '
+                    'tris, omitted' % ','.join([str(ii) for ii in idx]))
+    for k in idx:
+        this['neighbor_tri'][k] = np.array([], int)
+
+    #   Determine the neighboring vertices and fix errors
+    if do_neighbor_vert is True:
+        logger.info('    Vertex neighbors...')
+        this['neighbor_vert'] = [_get_surf_neighbors(this, k)
+                                 for k in range(this['np'])]
+
+    return this
+
+
+def _get_surf_neighbors(surf, k):
+    """Calculate the surface neighbors based on triangulation"""
+    verts = surf['tris'][surf['neighbor_tri'][k]]
+    verts = np.setdiff1d(verts, [k], assume_unique=False)
+    assert np.all(verts < surf['np'])
+    nneighbors = len(verts)
+    nneigh_max = len(surf['neighbor_tri'][k])
+    if nneighbors > nneigh_max:
+        raise RuntimeError('Too many neighbors for vertex %d' % k)
+    elif nneighbors != nneigh_max:
+        logger.info('    Incorrect number of distinct neighbors for vertex'
+                    ' %d (%d instead of %d) [fixed].' % (k, nneighbors,
+                                                         nneigh_max))
+    return verts
+
+
+def _normalize_vectors(rr):
+    """Normalize surface vertices"""
+    size = np.sqrt(np.sum(rr * rr, axis=1))
+    size[size == 0] = 1.0  # avoid divide-by-zero
+    rr /= size[:, np.newaxis]  # operate in-place
+
+
+def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
+    """Find nearest neighbors
+
+    Note: The rows in xhs and rr must all be unit-length vectors, otherwise
+    the result will be incorrect.
+
+    Parameters
+    ----------
+    xhs : array, shape=(n_samples, n_dim)
+        Points of data set.
+    rr : array, shape=(n_query, n_dim)
+        Points to find nearest neighbors for.
+    use_balltree : bool
+        Use fast BallTree based search from scikit-learn. If scikit-learn
+        is not installed it will fall back to the slow brute force search.
+    return_dists : bool
+        If True, return associated distances.
+
+    Returns
+    -------
+    nearest : array, shape=(n_query,)
+        Index of nearest neighbor in xhs for every point in rr.
+    distances : array, shape=(n_query,)
+        The distances. Only returned if return_dists is True.
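+
+    Examples
+    --------
+    Illustrative only; inputs must be unit length (values are hypothetical)::
+
+        >>> xhs = np.array([[1., 0., 0.], [0., 1., 0.]])
+        >>> rr = np.array([[0.9, 0.1, 0.]])
+        >>> _compute_nearest(xhs, rr / np.linalg.norm(rr))
+        array([0])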
+    """
+    if use_balltree:
+        try:
+            from sklearn.neighbors import BallTree
+        except ImportError:
+            logger.info('Nearest-neighbor searches will be significantly '
+                        'faster if scikit-learn is installed.')
+            use_balltree = False
+
+    if xhs.size == 0 or rr.size == 0:
+        if return_dists:
+            return np.array([], int), np.array([])
+        return np.array([], int)
+    if use_balltree is True:
+        ball_tree = BallTree(xhs)
+        if return_dists:
+            out = ball_tree.query(rr, k=1, return_distance=True)
+            return out[1][:, 0], out[0][:, 0]
+        else:
+            nearest = ball_tree.query(rr, k=1, return_distance=False)[:, 0]
+            return nearest
+    else:
+        from scipy.spatial.distance import cdist
+        if return_dists:
+            nearest = list()
+            dists = list()
+            for r in rr:
+                d = cdist(r[np.newaxis, :], xhs)
+                idx = np.argmin(d)
+                nearest.append(idx)
+                dists.append(d[0, idx])
+            return (np.array(nearest), np.array(dists))
+        else:
+            nearest = np.array([np.argmin(cdist(r[np.newaxis, :], xhs))
+                                for r in rr])
+            return nearest
+
+
+###############################################################################
+# Handle freesurfer
+
+def _fread3(fobj):
+    """Docstring"""
+    b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
+    return (b1 << 16) + (b2 << 8) + b3
+
+
+def _fread3_many(fobj, n):
+    """Read 3-byte ints from an open binary file object."""
+    b1, b2, b3 = np.fromfile(fobj, ">u1",
+                             3 * n).reshape(-1, 3).astype(np.int).T
+    return (b1 << 16) + (b2 << 8) + b3
+
+
+def read_curvature(filepath):
+    """Load in curavature values from the ?h.curv file."""
+    with open(filepath, "rb") as fobj:
+        magic = _fread3(fobj)
+        if magic == 16777215:
+            vnum = np.fromfile(fobj, ">i4", 3)[0]
+            curv = np.fromfile(fobj, ">f4", vnum)
+        else:
+            vnum = magic
+            _fread3(fobj)
+            curv = np.fromfile(fobj, ">i2", vnum) / 100
+        bin_curv = 1 - np.array(curv != 0, np.int)
+    return bin_curv
+
+
+@verbose
+def read_surface(fname, verbose=None):
+    """Load a Freesurfer surface mesh in triangular format
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file containing the surface.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    rr : array, shape=(n_vertices, 3)
+        Coordinate points.
+    tris : int array, shape=(n_faces, 3)
+        Triangulation (each line contains indexes for three points which
+        together form a face).
+
+    See Also
+    --------
+    write_surface
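+
+    Examples
+    --------
+    A minimal sketch (the file name is hypothetical)::
+
+        >>> rr, tris = read_surface('lh.white')  # doctest: +SKIP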
+    """
+    TRIANGLE_MAGIC = 16777214
+    QUAD_MAGIC = 16777215
+    NEW_QUAD_MAGIC = 16777213
+    with open(fname, "rb", buffering=0) as fobj:  # buffering=0 for np bug
+        magic = _fread3(fobj)
+        # Quad file or new quad
+        if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):
+            create_stamp = ''
+            nvert = _fread3(fobj)
+            nquad = _fread3(fobj)
+            (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
+            coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div
+            coords = coords.reshape(-1, 3)
+            quads = _fread3_many(fobj, nquad * 4)
+            quads = quads.reshape(nquad, 4)
+
+            # Face splitting follows
+            faces = np.zeros((2 * nquad, 3), dtype=np.int)
+            nface = 0
+            for quad in quads:
+                if (quad[0] % 2) == 0:
+                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[3]],
+                                              [quad[2], quad[3], quad[1]]]
+                else:
+                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[2]],
+                                              [quad[0], quad[2], quad[3]]]
+                nface += 2
+        elif magic == TRIANGLE_MAGIC:  # Triangle file
+            create_stamp = fobj.readline()
+            fobj.readline()
+            vnum = np.fromfile(fobj, ">i4", 1)[0]
+            fnum = np.fromfile(fobj, ">i4", 1)[0]
+            coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
+            faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
+        else:
+            raise ValueError("%s does not appear to be a Freesurfer surface"
+                             % fname)
+        logger.info('Triangle file: %s nvert = %s ntri = %s'
+                    % (create_stamp.strip(), len(coords), len(faces)))
+
+    coords = coords.astype(np.float)  # XXX: due to mayavi bug on mac 32bits
+    return coords, faces
+
+
+@verbose
+def _read_surface_geom(fname, patch_stats=True, norm_rr=False, verbose=None):
+    """Load the surface as dict, optionally add the geometry information"""
+    # based on mne_load_surface_geom() in mne_surface_io.c
+    if isinstance(fname, string_types):
+        rr, tris = read_surface(fname)  # mne_read_triangle_file()
+        nvert = len(rr)
+        ntri = len(tris)
+        s = dict(rr=rr, tris=tris, use_tris=tris, ntri=ntri,
+                 np=nvert)
+    elif isinstance(fname, dict):
+        s = fname
+    else:
+        raise RuntimeError('fname cannot be understood as str or dict')
+    if patch_stats is True:
+        s = _complete_surface_info(s)
+    if norm_rr is True:
+        _normalize_vectors(s['rr'])
+    return s
+
+
+##############################################################################
+# SURFACE CREATION
+
+def _get_ico_surface(grade, patch_stats=False):
+    """Return an icosahedral surface of the desired grade"""
+    # always use verbose=False since users don't need to know we're pulling
+    # these from a file
+    ico_file_name = op.join(op.dirname(__file__), 'data',
+                            'icos.fif.gz')
+    ico = read_bem_surfaces(ico_file_name, patch_stats, s_id=9000 + grade,
+                            verbose=False)
+    return ico
+
+
+def _tessellate_sphere_surf(level, rad=1.0):
+    """Return a surface structure instead of the details"""
+    rr, tris = _tessellate_sphere(level)
+    npt = len(rr)  # called "npt" instead of "np" because of numpy...
+    ntri = len(tris)
+    nn = rr.copy()
+    rr *= rad
+    s = dict(rr=rr, np=npt, tris=tris, use_tris=tris, ntri=ntri, nuse=npt,
+             nn=nn, inuse=np.ones(npt, int))
+    return s
+
+
+def _norm_midpt(ai, bi, rr):
+    a = np.array([rr[aii] for aii in ai])
+    b = np.array([rr[bii] for bii in bi])
+    c = (a + b) / 2.
+    return c / np.sqrt(np.sum(c ** 2, 1))[:, np.newaxis]
+
+
+def _tessellate_sphere(mylevel):
+    """Create a tessellation of a unit sphere"""
+    # Vertices of a unit octahedron
+    rr = np.array([[1, 0, 0], [-1, 0, 0],  # xplus, xminus
+                   [0, 1, 0], [0, -1, 0],  # yplus, yminus
+                   [0, 0, 1], [0, 0, -1]], float)  # zplus, zminus
+    tris = np.array([[0, 4, 2], [2, 4, 1], [1, 4, 3], [3, 4, 0],
+                     [0, 2, 5], [2, 1, 5], [1, 3, 5], [3, 0, 5]], int)
+
+    # A unit octahedron
+    if mylevel < 1:
+        raise ValueError('# of levels must be >= 1')
+
+    # Reverse order of points in each triangle
+    # for counter-clockwise ordering
+    tris = tris[:, [2, 1, 0]]
+
+    # Subdivide each starting triangle (mylevel - 1) times
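+    # (illustrative note: this produces 8 * 4 ** (mylevel - 1) triangles
+    # and, after the deduplication pass below, 4 * 4 ** (mylevel - 1) + 2
+    # unique vertices; e.g. mylevel=1 -> 6 vertices / 8 triangles,
+    # mylevel=2 -> 18 / 32)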
+    for _ in range(1, mylevel):
+        """
+        Subdivide each triangle in the old approximation and normalize
+        the new points thus generated to lie on the surface of the unit
+        sphere.
+
+        Each input triangle with vertices labelled [0,1,2] as shown
+        below will be turned into four new triangles:
+
+                             Make new points
+                             a = (0+2)/2
+                             b = (0+1)/2
+                             c = (1+2)/2
+                 1
+                /\           Normalize a, b, c
+               /  \
+             b/____\c        Construct new triangles
+             /\    /\        [0,b,a]
+            /  \  /  \       [b,1,c]
+           /____\/____\      [a,b,c]
+          0     a      2     [a,c,2]
+
+        """
+        # use new method: first make new points (rr)
+        a = _norm_midpt(tris[:, 0], tris[:, 2], rr)
+        b = _norm_midpt(tris[:, 0], tris[:, 1], rr)
+        c = _norm_midpt(tris[:, 1], tris[:, 2], rr)
+        lims = np.cumsum([len(rr), len(a), len(b), len(c)])
+        aidx = np.arange(lims[0], lims[1])
+        bidx = np.arange(lims[1], lims[2])
+        cidx = np.arange(lims[2], lims[3])
+        rr = np.concatenate((rr, a, b, c))
+
+        # now that we have our points, make new triangle definitions
+        tris = np.array((np.c_[tris[:, 0], bidx, aidx],
+                         np.c_[bidx, tris[:, 1], cidx],
+                         np.c_[aidx, bidx, cidx],
+                         np.c_[aidx, cidx, tris[:, 2]]), int).swapaxes(0, 1)
+        tris = np.reshape(tris, (np.prod(tris.shape[:2]), 3))
+
+    # Copy the resulting approximation into standard table
+    rr_orig = rr
+    rr = np.empty_like(rr)
+    nnode = 0
+    for k, tri in enumerate(tris):
+        for j in range(3):
+            coord = rr_orig[tri[j]]
+            # this is faster than cdist (no need for sqrt)
+            similarity = np.dot(rr[:nnode], coord)
+            idx = np.where(similarity > 0.99999)[0]
+            if len(idx) > 0:
+                tris[k, j] = idx[0]
+            else:
+                rr[nnode] = coord
+                tris[k, j] = nnode
+                nnode += 1
+    rr = rr[:nnode].copy()
+    return rr, tris
+
+
+def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
+                         subjects_dir):
+    """Load a surf and use the subdivided icosahedron to get points"""
+    # Based on load_source_space_surf_spacing() in load_source_space.c
+    surf = _read_surface_geom(surf)
+
+    if stype in ['ico', 'oct']:
+        # ## from mne_ico_downsample.c ## #
+        surf_name = op.join(subjects_dir, subject, 'surf', hemi + '.sphere')
+        logger.info('Loading geometry from %s...' % surf_name)
+        from_surf = _read_surface_geom(surf_name, norm_rr=True,
+                                       patch_stats=False)
+        if not len(from_surf['rr']) == surf['np']:
+            raise RuntimeError('Mismatch between number of surface vertices, '
+                               'possible parcellation error?')
+        _normalize_vectors(ico_surf['rr'])
+
+        # Make the maps
+        logger.info('Mapping %s %s -> %s (%d) ...'
+                    % (hemi, subject, stype, sval))
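+        # for each vertex of the subdivided icosahedron, find the index of
+        # the nearest vertex on the subject's spherical surface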
+        mmap = _compute_nearest(from_surf['rr'], ico_surf['rr'])
+        nmap = len(mmap)
+        surf['inuse'] = np.zeros(surf['np'], int)
+        for k in range(nmap):
+            if surf['inuse'][mmap[k]]:
+                # Try the nearest neighbors
+                neigh = _get_surf_neighbors(surf, mmap[k])
+                was = mmap[k]
+                inds = np.where(np.logical_not(surf['inuse'][neigh]))[0]
+                if len(inds) == 0:
+                    raise RuntimeError('Could not find neighbor for vertex '
+                                       '%d / %d' % (k, nmap))
+                else:
+                    mmap[k] = neigh[inds[-1]]
+                logger.info('    Source space vertex moved from %d to %d '
+                            'because of double occupation', was, mmap[k])
+            elif mmap[k] < 0 or mmap[k] > surf['np']:
+                raise RuntimeError('Map number out of range (%d), this is '
+                                   'probably due to inconsistent surfaces. '
+                                   'Parts of the FreeSurfer reconstruction '
+                                   'need to be redone.' % mmap[k])
+            surf['inuse'][mmap[k]] = True
+
+        logger.info('Setting up the triangulation for the decimated '
+                    'surface...')
+        surf['use_tris'] = np.array([mmap[ist] for ist in ico_surf['tris']],
+                                    np.int32)
+    else:  # use_all is True
+        surf['inuse'] = np.ones(surf['np'], int)
+        surf['use_tris'] = None
+    if surf['use_tris'] is not None:
+        surf['nuse_tri'] = len(surf['use_tris'])
+    else:
+        surf['nuse_tri'] = 0
+    surf['nuse'] = np.sum(surf['inuse'])
+    surf['vertno'] = np.where(surf['inuse'])[0]
+
+    # set some final params
+    inds = np.arange(surf['np'])
+    sizes = np.sqrt(np.sum(surf['nn'] ** 2, axis=1))
+    surf['nn'][inds] = surf['nn'][inds] / sizes[:, np.newaxis]
+    surf['inuse'][sizes <= 0] = False
+    surf['nuse'] = np.sum(surf['inuse'])
+    surf['subject_his_id'] = subject
+    return surf
+
+
+def write_surface(fname, coords, faces, create_stamp=''):
+    """Write a triangular Freesurfer surface mesh
+
+    Accepts the same data format as is returned by read_surface().
+
+    Parameters
+    ----------
+    fname : str
+        File to write.
+    coords : array, shape=(n_vertices, 3)
+        Coordinate points.
+    faces : int array, shape=(n_faces, 3)
+        Triangulation (each row contains indices of the three points that
+        together form a face).
+    create_stamp : str
+        Comment that is written to the beginning of the file. Cannot
+        contain line breaks.
+
+    See Also
+    --------
+    read_surface
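+
+    Examples
+    --------
+    A minimal round-trip sketch (the file names here are hypothetical)::
+
+        rr, tris = read_surface('lh.pial')
+        write_surface('lh.pial.copy', rr, tris, create_stamp='copied')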
+    """
+    if len(create_stamp.splitlines()) > 1:
+        raise ValueError("create_stamp can only contain one line")
+
+    with open(fname, 'wb') as fid:
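+        # FreeSurfer triangle-file magic number (0xFFFFFE) as three bytes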
+        fid.write(pack('>3B', 255, 255, 254))
+        strs = ['%s\n' % create_stamp, '\n']
+        strs = [s.encode('utf-8') for s in strs]
+        fid.writelines(strs)
+        vnum = len(coords)
+        fnum = len(faces)
+        fid.write(pack('>2i', vnum, fnum))
+        fid.write(np.array(coords, dtype='>f4').tostring())
+        fid.write(np.array(faces, dtype='>i4').tostring())
+
+
+###############################################################################
+# Decimation
+
+def _decimate_surface(points, triangles, reduction):
+    """Aux function"""
+    if 'DISPLAY' not in os.environ and sys.platform != 'win32':
+        os.environ['ETS_TOOLKIT'] = 'null'
+    try:
+        from tvtk.api import tvtk
+    except ImportError:
+        raise ValueError('This function requires the TVTK package to be '
+                         'installed')
+    if triangles.max() > len(points) - 1:
+        raise ValueError('The triangles refer to undefined points. '
+                         'Please check your mesh.')
+    src = tvtk.PolyData(points=points, polys=triangles)
+    decimate = tvtk.QuadricDecimation(input=src, target_reduction=reduction)
+    decimate.update()
+    out = decimate.output
+    tris = out.polys.to_array()
+    # VTK stores polys as [n, v0, v1, v2, n, ...]; reshape into rows of 4
+    # and drop the leading vertex-count column to get plain triangles
+    return out.points.to_array(), tris.reshape(tris.size / 4, 4)[:, 1:]
+
+
+def decimate_surface(points, triangles, n_triangles):
+    """ Decimate surface data
+
+    Note. Requires TVTK to be installed for this to function.
+
+    Note. If an odd target number is requested, the ``quadric decimation``
+    algorithm used here results in the next even number of triangles. For
+    example, a request for 30001 triangles will result in 30000 triangles.
+
+    Parameters
+    ----------
+    points : ndarray
+        The surface points to decimate, an array of shape (n_points, 3).
+    triangles : ndarray
+        The surface triangles to decimate, an array of shape
+        (n_triangles, 3).
+    n_triangles : int
+        The desired number of triangles.
+
+    Returns
+    -------
+    points : ndarray
+        The decimated points.
+    triangles : ndarray
+        The decimated triangles.
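+
+    Examples
+    --------
+    An illustrative sketch (assumes TVTK is available and that ``points``
+    and ``triangles`` already describe a mesh)::
+
+        points_dec, tris_dec = decimate_surface(points, triangles,
+                                                n_triangles=10000)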
+    """
+
+    reduction = 1 - (float(n_triangles) / len(triangles))
+    return _decimate_surface(points, triangles, reduction)
+
+
+###############################################################################
+# Morph maps
+
+@verbose
+def read_morph_map(subject_from, subject_to, subjects_dir=None,
+                   verbose=None):
+    """Read morph map
+
+    Morph maps can be generated with mne_make_morph_maps. If one isn't
+    available, it will be generated automatically and saved to the
+    ``subjects_dir/morph-maps`` directory.
+
+    Parameters
+    ----------
+    subject_from : string
+        Name of the original subject as named in the SUBJECTS_DIR.
+    subject_to : string
+        Name of the subject on which to morph as named in the SUBJECTS_DIR.
+    subjects_dir : string
+        Path to SUBJECTS_DIR if not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    left_map, right_map : sparse matrix
+        The morph maps for the 2 hemispheres.
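+
+    Examples
+    --------
+    An illustrative sketch (subject names are hypothetical). Each map is a
+    sparse matrix that morphs per-vertex data from subject_from to
+    subject_to::
+
+        left_map, right_map = read_morph_map('sample', 'fsaverage')
+        data_fsavg_lh = left_map * data_sample_lh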
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    # First check for morph-map dir existence
+    mmap_dir = op.join(subjects_dir, 'morph-maps')
+    if not op.isdir(mmap_dir):
+        try:
+            os.mkdir(mmap_dir)
+        except Exception:
+            logger.warning('Could not find or make morph map directory "%s"'
+                           % mmap_dir)
+
+    # Does the file exist
+    fname = op.join(mmap_dir, '%s-%s-morph.fif' % (subject_from, subject_to))
+    if not op.exists(fname):
+        fname = op.join(mmap_dir, '%s-%s-morph.fif'
+                        % (subject_to, subject_from))
+        if not op.exists(fname):
+            logger.warning('Morph map "%s" does not exist, '
+                           'creating it and saving it to disk (this may take '
+                           'a few minutes)' % fname)
+            logger.info('Creating morph map %s -> %s'
+                        % (subject_from, subject_to))
+            mmap_1 = _make_morph_map(subject_from, subject_to, subjects_dir)
+            logger.info('Creating morph map %s -> %s'
+                        % (subject_to, subject_from))
+            mmap_2 = _make_morph_map(subject_to, subject_from, subjects_dir)
+            try:
+                _write_morph_map(fname, subject_from, subject_to,
+                                 mmap_1, mmap_2)
+            except Exception as exp:
+                logger.warning('Could not write morph-map file "%s" '
+                               '(error: %s)' % (fname, exp))
+            return mmap_1
+
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        # Locate all maps
+        maps = dir_tree_find(tree, FIFF.FIFFB_MNE_MORPH_MAP)
+        if len(maps) == 0:
+            raise ValueError('Morphing map data not found')
+
+        # Find the correct ones
+        left_map = None
+        right_map = None
+        for m in maps:
+            tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_FROM)
+            if tag.data == subject_from:
+                tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_TO)
+                if tag.data == subject_to:
+                    #  Names match: which hemisphere is this?
+                    tag = find_tag(fid, m, FIFF.FIFF_MNE_HEMI)
+                    if tag.data == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
+                        tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
+                        left_map = tag.data
+                        logger.info('    Left-hemisphere map read.')
+                    elif tag.data == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
+                        tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
+                        right_map = tag.data
+                        logger.info('    Right-hemisphere map read.')
+
+    if left_map is None or right_map is None:
+        raise ValueError('Could not find both hemispheres in %s' % fname)
+
+    return left_map, right_map
+
+
+def _write_morph_map(fname, subject_from, subject_to, mmap_1, mmap_2):
+    """Write a morph map to disk"""
+    fid = start_file(fname)
+    assert len(mmap_1) == 2
+    assert len(mmap_2) == 2
+    hemis = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
+    for m, hemi in zip(mmap_1, hemis):
+        start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
+        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_from)
+        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_to)
+        write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
+        end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
+    for m, hemi in zip(mmap_2, hemis):
+        start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
+        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_to)
+        write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_from)
+        write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
+        write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
+        end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
+    end_file(fid)
+
+
+def _get_tri_dist(p, q, p0, q0, a, b, c, dist):
+    """Auxiliary function for getting the distance to a triangle edge"""
+    return np.sqrt((p - p0) * (p - p0) * a +
+                   (q - q0) * (q - q0) * b +
+                   (p - p0) * (q - q0) * c +
+                   dist * dist)
+
+
+def _get_tri_supp_geom(tris, rr):
+    """Create supplementary geometry information using tris and rrs"""
+    r1 = rr[tris[:, 0], :]
+    r12 = rr[tris[:, 1], :] - r1
+    r13 = rr[tris[:, 2], :] - r1
+    r1213 = np.array([r12, r13]).swapaxes(0, 1)
+    a = np.sum(r12 * r12, axis=1)
+    b = np.sum(r13 * r13, axis=1)
+    c = np.sum(r12 * r13, axis=1)
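+    # mat is the inverse of the 2x2 Gram matrix [[a, c], [c, b]]; applied
+    # to the projections onto (r12, r13), it yields the in-plane (p, q)
+    # coordinates of a point relative to the triangle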
+    mat = np.rollaxis(np.array([[b, -c], [-c, a]]), 2)
+    mat /= (a * b - c * c)[:, np.newaxis, np.newaxis]
+    nn = fast_cross_3d(r12, r13)
+    _normalize_vectors(nn)
+    return dict(r1=r1, r12=r12, r13=r13, r1213=r1213,
+                a=a, b=b, c=c, mat=mat, nn=nn)
+
+
+@verbose
+def _make_morph_map(subject_from, subject_to, subjects_dir=None):
+    """Construct morph map from one subject to another
+
+    Note that this is close, but not exactly like the C version.
+    For example, parts are more accurate due to double precision,
+    so expect some small morph-map differences!
+
+    Note: This seems easily parallelizable, but the overhead
+    of pickling all the data structures makes it less efficient
+    than just running on a single core :(
+    """
+    subjects_dir = get_subjects_dir(subjects_dir)
+    morph_maps = list()
+
+    # add speedy short-circuit for self-maps
+    if subject_from == subject_to:
+        for hemi in ['lh', 'rh']:
+            fname = op.join(subjects_dir, subject_from, 'surf',
+                            '%s.sphere.reg' % hemi)
+            from_pts = read_surface(fname, verbose=False)[0]
+            n_pts = len(from_pts)
+            morph_maps.append(speye(n_pts, n_pts, format='csr'))
+        return morph_maps
+
+    for hemi in ['lh', 'rh']:
+        # load surfaces and normalize points to be on unit sphere
+        fname = op.join(subjects_dir, subject_from, 'surf',
+                        '%s.sphere.reg' % hemi)
+        from_pts, from_tris = read_surface(fname, verbose=False)
+        n_from_pts = len(from_pts)
+        _normalize_vectors(from_pts)
+        tri_geom = _get_tri_supp_geom(from_tris, from_pts)
+
+        fname = op.join(subjects_dir, subject_to, 'surf',
+                        '%s.sphere.reg' % hemi)
+        to_pts = read_surface(fname, verbose=False)[0]
+        n_to_pts = len(to_pts)
+        _normalize_vectors(to_pts)
+
+        # from surface: get nearest neighbors, find triangles for each vertex
+        nn_pts_idx = _compute_nearest(from_pts, to_pts)
+        from_pt_tris = _triangle_neighbors(from_tris, len(from_pts))
+        from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]
+
+        # find triangle in which point lies and assoc. weights
+        nn_tri_inds = []
+        nn_tris_weights = []
+        for pt_tris, to_pt in zip(from_pt_tris, to_pts):
+            p, q, idx, dist = _find_nearest_tri_pt(pt_tris, to_pt, tri_geom)
+            nn_tri_inds.append(idx)
+            nn_tris_weights.extend([1. - (p + q), p, q])
+
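+        # each destination vertex receives barycentric weights
+        # (1 - p - q, p, q) over the three vertices of its enclosing source
+        # triangle, so every row of the resulting sparse map sums to one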
+        nn_tris = from_tris[nn_tri_inds]
+        row_ind = np.repeat(np.arange(n_to_pts), 3)
+        this_map = csr_matrix((nn_tris_weights, (row_ind, nn_tris.ravel())),
+                              shape=(n_to_pts, n_from_pts))
+        morph_maps.append(this_map)
+
+    return morph_maps
+
+
+def _find_nearest_tri_pt(pt_tris, to_pt, tri_geom, run_all=False):
+    """Find nearest point mapping to a set of triangles
+
+    If run_all is False, if the point lies within a triangle, it stops.
+    If run_all is True, edges of other triangles are checked in case
+    those (somehow) are closer.
+    """
+    # The dense einsum code below is equivalent to this commented version:
+    #   rr = r1[pt_tris] - to_pts[ii]
+    #   v1s = np.sum(rr * r12[pt_tris], axis=1)
+    #   v2s = np.sum(rr * r13[pt_tris], axis=1)
+    #   aas = a[pt_tris]
+    #   bbs = b[pt_tris]
+    #   ccs = c[pt_tris]
+    #   dets = aas * bbs - ccs * ccs
+    #   pp = (bbs * v1s - ccs * v2s) / dets
+    #   qq = (aas * v2s - ccs * v1s) / dets
+    #   pqs = np.array(pp, qq)
+
+    # This einsum is equivalent to doing:
+    # pqs = np.array([np.dot(x, y) for x, y in zip(r1213, r1-to_pt)])
+    r1 = tri_geom['r1'][pt_tris]
+    rrs = to_pt - r1
+    tri_nn = tri_geom['nn'][pt_tris]
+    vect = np.einsum('ijk,ik->ij', tri_geom['r1213'][pt_tris], rrs)
+    mats = tri_geom['mat'][pt_tris]
+    # This einsum is equivalent to doing:
+    # pqs = np.array([np.dot(m, v) for m, v in zip(mats, vect)]).T
+    pqs = np.einsum('ijk,ik->ji', mats, vect)
+    found = False
+    dists = np.sum(rrs * tri_nn, axis=1)
+
+    # There can be multiple (sadness), find closest
+    idx = np.where(np.all(pqs >= 0., axis=0))[0]
+    idx = idx[np.where(np.all(pqs[:, idx] <= 1., axis=0))[0]]
+    idx = idx[np.where(np.sum(pqs[:, idx], axis=0) < 1.)[0]]
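+    # i.e. keep triangles whose (p, q) lie in the simplex p >= 0, q >= 0,
+    # p + q < 1, meaning the point projects inside the triangle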
+    dist = np.inf
+    if len(idx) > 0:
+        found = True
+        pt = idx[np.argmin(np.abs(dists[idx]))]
+        p, q = pqs[:, pt]
+        dist = dists[pt]
+        # re-reference back to original numbers
+        pt = pt_tris[pt]
+
+    if found is False or run_all is True:
+        # don't include ones that we might have found before
+        s = np.setdiff1d(np.arange(len(pt_tris)), idx)  # ones to check sides
+        # Tough: must investigate the sides
+        pp, qq, ptt, distt = _nearest_tri_edge(pt_tris[s], to_pt, pqs[:, s],
+                                               dists[s], tri_geom)
+        if np.abs(distt) < np.abs(dist):
+            p, q, pt, dist = pp, qq, ptt, distt
+    return p, q, pt, dist
+
+
+def _nearest_tri_edge(pt_tris, to_pt, pqs, dist, tri_geom):
+    """Get nearest location from a point to the edge of a set of triangles"""
+    # We might do something intelligent here. However, for now
+    # it is ok to do it the hard way
+    aa = tri_geom['a'][pt_tris]
+    bb = tri_geom['b'][pt_tris]
+    cc = tri_geom['c'][pt_tris]
+    pp = pqs[0]
+    qq = pqs[1]
+    # Find the nearest point from a triangle:
+    #   Side 1 -> 2
+    p0 = np.minimum(np.maximum(pp + 0.5 * (qq * cc) / aa,
+                               0.0), 1.0)
+    q0 = np.zeros_like(p0)
+    #   Side 2 -> 3
+    t1 = (0.5 * ((2.0 * aa - cc) * (1.0 - pp) +
+                 (2.0 * bb - cc) * qq) / (aa + bb - cc))
+    t1 = np.minimum(np.maximum(t1, 0.0), 1.0)
+    p1 = 1.0 - t1
+    q1 = t1
+    #   Side 1 -> 3
+    q2 = np.minimum(np.maximum(qq + 0.5 * (pp * cc) / bb, 0.0), 1.0)
+    p2 = np.zeros_like(q2)
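+    # each (p0, q0)/(p1, q1)/(p2, q2) above minimizes the quadratic edge
+    # distance used in _get_tri_dist along its side (derivative set to
+    # zero, then clamped to the [0, 1] parameter range)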
+
+    # figure out which one had the lowest distance
+    dist0 = _get_tri_dist(pp, qq, p0, q0, aa, bb, cc, dist)
+    dist1 = _get_tri_dist(pp, qq, p1, q1, aa, bb, cc, dist)
+    dist2 = _get_tri_dist(pp, qq, p2, q2, aa, bb, cc, dist)
+    pp = np.r_[p0, p1, p2]
+    qq = np.r_[q0, q1, q2]
+    dists = np.r_[dist0, dist1, dist2]
+    ii = np.argmin(np.abs(dists))
+    p, q, pt, dist = pp[ii], qq[ii], pt_tris[ii % len(pt_tris)], dists[ii]
+    return p, q, pt, dist
+
+
+def mesh_edges(tris):
+    """Returns sparse matrix with edges as an adjacency matrix
+
+    Parameters
+    ----------
+    tris : array of shape [n_triangles x 3]
+        The triangles.
+
+    Returns
+    -------
+    edges : sparse matrix
+        The adjacency matrix.
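+
+    Examples
+    --------
+    A minimal sketch: two triangles sharing an edge. The shared edge (1, 2)
+    gets weight 2, the boundary edges weight 1::
+
+        tris = np.array([[0, 1, 2], [1, 3, 2]])
+        adj = mesh_edges(tris)  # 4 x 4 sparse CSR matrix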
+    """
+    if np.max(tris) > len(np.unique(tris)):
+        raise ValueError('Cannot compute connectivity on a selection of '
+                         'triangles.')
+
+    npoints = np.max(tris) + 1
+    ones_ntris = np.ones(3 * len(tris))
+
+    a, b, c = tris.T
+    x = np.concatenate((a, b, c))
+    y = np.concatenate((b, c, a))
+    edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
+    edges = edges.tocsr()
+    edges = edges + edges.T
+    return edges
+
+
+def mesh_dist(tris, vert):
+    """Compute adjacency matrix weighted by distances
+
+    It generates an adjacency matrix where the entries are the distances
+    between neighboring vertices.
+
+    Parameters
+    ----------
+    tris : array (n_tris x 3)
+        Mesh triangulation
+    vert : array (n_vert x 3)
+        Vertex locations
+
+    Returns
+    -------
+    dist_matrix : scipy.sparse.csr_matrix
+        Sparse matrix with distances between adjacent vertices
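+
+    Examples
+    --------
+    A minimal sketch: a unit right triangle yields edge lengths 1, 1 and
+    sqrt(2) as the nonzero entries::
+
+        tris = np.array([[0, 1, 2]])
+        vert = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
+        dmat = mesh_dist(tris, vert)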
+    """
+    edges = mesh_edges(tris).tocoo()
+
+    # Euclidean distances between neighboring vertices
+    dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
+                          axis=1))
+    dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
+    return dist_matrix
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_bem.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_bem.py
new file mode 100644
index 0000000..dee1b83
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_bem.py
@@ -0,0 +1,264 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#
+# License: BSD 3 clause
+
+import os.path as op
+import numpy as np
+from nose.tools import assert_raises, assert_true
+from numpy.testing import assert_equal, assert_allclose
+
+from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces,
+                 make_bem_solution, read_bem_solution, write_bem_solution,
+                 make_sphere_model, Transform)
+from mne.preprocessing.maxfilter import fit_sphere_to_headshape
+from mne.io.constants import FIFF
+from mne.transforms import translation
+from mne.datasets import testing
+from mne.utils import run_tests_if_main, _TempDir, slow_test
+from mne.bem import (_ico_downsample, _get_ico_map, _order_surfaces,
+                     _assert_complete_surface, _assert_inside,
+                     _check_surface_size, _bem_find_surface)
+from mne.io import read_info
+
+fname_raw = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+subjects_dir = op.join(testing.data_path(download=False), 'subjects')
+fname_bem_3 = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-320-320-320-bem.fif')
+fname_bem_1 = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-320-bem.fif')
+fname_bem_sol_3 = op.join(subjects_dir, 'sample', 'bem',
+                          'sample-320-320-320-bem-sol.fif')
+fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem',
+                          'sample-320-bem-sol.fif')
+
+
+def _compare_bem_surfaces(surfs_1, surfs_2):
+    """Helper to compare BEM surfaces"""
+    names = ['id', 'nn', 'rr', 'coord_frame', 'tris', 'sigma', 'ntri', 'np']
+    ignores = ['tri_cent', 'tri_nn', 'tri_area', 'neighbor_tri']
+    for s0, s1 in zip(surfs_1, surfs_2):
+        assert_equal(set(names), set(s0.keys()) - set(ignores))
+        assert_equal(set(names), set(s1.keys()) - set(ignores))
+        for name in names:
+            assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-6,
+                            err_msg='Mismatch: "%s"' % name)
+
+
+def _compare_bem_solutions(sol_a, sol_b):
+    """Helper to compare BEM solutions"""
+    # compare the surfaces we used
+    _compare_bem_surfaces(sol_a['surfs'], sol_b['surfs'])
+    # compare the actual solutions
+    names = ['bem_method', 'field_mult', 'gamma', 'is_sphere',
+             'nsol', 'sigma', 'source_mult', 'solution']
+    assert_equal(set(sol_a.keys()), set(sol_b.keys()))
+    assert_equal(set(names + ['surfs']), set(sol_b.keys()))
+    for key in names:
+        assert_allclose(sol_a[key], sol_b[key], rtol=1e-3, atol=1e-5,
+                        err_msg='Mismatch: %s' % key)
+
+
+@testing.requires_testing_data
+def test_io_bem():
+    """Test reading and writing of bem surfaces and solutions
+    """
+    tempdir = _TempDir()
+    temp_bem = op.join(tempdir, 'temp-bem.fif')
+    assert_raises(ValueError, read_bem_surfaces, fname_raw)
+    assert_raises(ValueError, read_bem_surfaces, fname_bem_3, s_id=10)
+    surf = read_bem_surfaces(fname_bem_3, patch_stats=True)
+    surf = read_bem_surfaces(fname_bem_3, patch_stats=False)
+    write_bem_surfaces(temp_bem, surf[0])
+    surf_read = read_bem_surfaces(temp_bem, patch_stats=False)
+    _compare_bem_surfaces(surf, surf_read)
+
+    assert_raises(RuntimeError, read_bem_solution, fname_bem_3)
+    temp_sol = op.join(tempdir, 'temp-sol.fif')
+    sol = read_bem_solution(fname_bem_sol_3)
+    assert_true('BEM' in repr(sol))
+    write_bem_solution(temp_sol, sol)
+    sol_read = read_bem_solution(temp_sol)
+    _compare_bem_solutions(sol, sol_read)
+    sol = read_bem_solution(fname_bem_sol_1)
+    assert_raises(RuntimeError, _bem_find_surface, sol, 3)
+
+
+def test_make_sphere_model():
+    """Test making a sphere model"""
+    info = read_info(fname_raw)
+    assert_raises(ValueError, make_sphere_model, 'foo', 'auto', info)
+    assert_raises(ValueError, make_sphere_model, 'auto', 'auto', None)
+    # here we just make sure it works -- the functionality is actually
+    # tested more extensively e.g. in the forward and dipole code
+    bem = make_sphere_model('auto', 'auto', info)
+    assert_true('3 layers' in repr(bem))
+    assert_true('Sphere ' in repr(bem))
+    assert_true(' mm' in repr(bem))
+    bem = make_sphere_model('auto', None, info)
+    assert_true('no layers' in repr(bem))
+    assert_true('Sphere ' in repr(bem))
+
+
+@testing.requires_testing_data
+def test_bem_model():
+    """Test BEM model creation from Python with I/O"""
+    tempdir = _TempDir()
+    fname_temp = op.join(tempdir, 'temp-bem.fif')
+    for kwargs, fname in zip((dict(), dict(conductivity=[0.3])),
+                             [fname_bem_3, fname_bem_1]):
+        model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir,
+                               **kwargs)
+        model_c = read_bem_surfaces(fname)
+        _compare_bem_surfaces(model, model_c)
+        write_bem_surfaces(fname_temp, model)
+        model_read = read_bem_surfaces(fname_temp)
+        _compare_bem_surfaces(model, model_c)
+        _compare_bem_surfaces(model_read, model_c)
+    assert_raises(ValueError, make_bem_model, 'sample',  # bad conductivity
+                  conductivity=[0.3, 0.006], subjects_dir=subjects_dir)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_bem_solution():
+    """Test making a BEM solution from Python with I/O"""
+    # test degenerate conditions
+    surf = read_bem_surfaces(fname_bem_1)[0]
+    assert_raises(RuntimeError, _ico_downsample, surf, 10)  # bad dec grade
+    s_bad = dict(tris=surf['tris'][1:], ntri=surf['ntri'] - 1, rr=surf['rr'])
+    assert_raises(RuntimeError, _ico_downsample, s_bad, 1)  # not isomorphic
+    s_bad = dict(tris=surf['tris'].copy(), ntri=surf['ntri'],
+                 rr=surf['rr'])  # bad triangulation
+    s_bad['tris'][0] = [0, 0, 0]
+    assert_raises(RuntimeError, _ico_downsample, s_bad, 1)
+    s_bad['id'] = 1
+    assert_raises(RuntimeError, _assert_complete_surface, s_bad)
+    s_bad = dict(tris=surf['tris'], ntri=surf['ntri'], rr=surf['rr'].copy())
+    s_bad['rr'][0] = 0.
+    assert_raises(RuntimeError, _get_ico_map, surf, s_bad)
+
+    surfs = read_bem_surfaces(fname_bem_3)
+    assert_raises(RuntimeError, _assert_inside, surfs[0], surfs[1])  # outside
+    surfs[0]['id'] = 100  # bad surfs
+    assert_raises(RuntimeError, _order_surfaces, surfs)
+    surfs[1]['rr'] /= 1000.
+    assert_raises(RuntimeError, _check_surface_size, surfs[1])
+
+    # actually test functionality
+    tempdir = _TempDir()
+    fname_temp = op.join(tempdir, 'temp-bem-sol.fif')
+    # use a model and solution made in Python
+    conductivities = [(0.3,), (0.3, 0.006, 0.3)]
+    fnames = [fname_bem_sol_1, fname_bem_sol_3]
+    for cond, fname in zip(conductivities, fnames):
+        for model_type in ('python', 'c'):
+            if model_type == 'python':
+                model = make_bem_model('sample', conductivity=cond, ico=2,
+                                       subjects_dir=subjects_dir)
+            else:
+                model = fname_bem_1 if len(cond) == 1 else fname_bem_3
+            solution = make_bem_solution(model)
+            solution_c = read_bem_solution(fname)
+            _compare_bem_solutions(solution, solution_c)
+            write_bem_solution(fname_temp, solution)
+            solution_read = read_bem_solution(fname_temp)
+            _compare_bem_solutions(solution, solution_c)
+            _compare_bem_solutions(solution_read, solution_c)
+
+
+def test_fit_sphere_to_headshape():
+    """Test fitting a sphere to digitization points"""
+    # Create points of various kinds
+    rad = 90.  # mm
+    center = np.array([0.5, -10., 40.])  # mm
+    dev_trans = np.array([0., -0.005, -10.])
+    dev_center = center - dev_trans
+    dig = [
+        # Left auricular
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_LPA,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([-1.0, 0.0, 0.0])},
+        # Nasion
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_NASION,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([0.0, 1.0, 0.0])},
+        # Right auricular
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_RPA,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([1.0, 0.0, 0.0])},
+
+        # Top of the head (extra point)
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EXTRA,
+         'r': np.array([0.0, 0.0, 1.0])},
+
+        # EEG points
+        # Fz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0, .72, .69])},
+        # F3
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([-.55, .67, .50])},
+        # F4
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([.55, .67, .50])},
+        # Cz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0.0, 0.0, 1.0])},
+        # Pz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0, -.72, .69])},
+    ]
+    for d in dig:
+        d['r'] *= rad / 1000.
+        d['r'] += center / 1000.
+
+    # Device to head transformation (translation only)
+    dev_head_t = Transform('meg', 'head', translation(*(dev_trans / 1000.)))
+
+    info = {'dig': dig, 'dev_head_t': dev_head_t}
+
+    # Degenerate conditions
+    assert_raises(ValueError, fit_sphere_to_headshape, info,
+                  dig_kinds=(FIFF.FIFFV_POINT_HPI,))
+    info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+    assert_raises(RuntimeError, fit_sphere_to_headshape, info)
+    info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+
+    # Test with 4 points that match a perfect sphere
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL, FIFF.FIFFV_POINT_EXTRA)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    kwargs = dict(rtol=1e-3, atol=1e-2)  # in mm
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, dev_center, **kwargs)
+
+    # Test with all points
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL, FIFF.FIFFV_POINT_EXTRA,
+                 FIFF.FIFFV_POINT_EEG)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, dev_center, **kwargs)
+
+    # Test with some noisy EEG points only.
+    dig_kinds = (FIFF.FIFFV_POINT_EEG,)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    kwargs = dict(rtol=1e-3, atol=10.)  # in mm
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, center, **kwargs)
+
+    dig = [dict(coord_frame=FIFF.FIFFV_COORD_DEVICE, )]
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_chpi.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_chpi.py
new file mode 100644
index 0000000..8d837bf
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_chpi.py
@@ -0,0 +1,168 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import numpy as np
+from numpy.testing import assert_allclose
+from nose.tools import assert_raises, assert_equal, assert_true
+import warnings
+
+from mne.io import read_info, Raw
+from mne.io.constants import FIFF
+from mne.chpi import (_rot_to_quat, _quat_to_rot, get_chpi_positions,
+                      _calculate_chpi_positions, _angle_between_quats)
+from mne.utils import (run_tests_if_main, _TempDir, slow_test, set_log_file,
+                       requires_version)
+from mne.datasets import testing
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+test_fif_fname = op.join(base_dir, 'test_raw.fif')
+ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
+hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
+
+data_path = testing.data_path(download=False)
+raw_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
+sss_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_sss.fif')
+
+warnings.simplefilter('always')
+
+
+def test_quaternions():
+    """Test quaternion calculations
+    """
+    rots = [np.eye(3)]
+    for fname in [test_fif_fname, ctf_fname, hp_fif_fname]:
+        rots += [read_info(fname)['dev_head_t']['trans'][:3, :3]]
+    for rot in rots:
+        assert_allclose(rot, _quat_to_rot(_rot_to_quat(rot)),
+                        rtol=1e-5, atol=1e-5)
+        rot = rot[np.newaxis, np.newaxis, :, :]
+        assert_allclose(rot, _quat_to_rot(_rot_to_quat(rot)),
+                        rtol=1e-5, atol=1e-5)
+
+    # let's make sure our angle function works in some reasonable way
+    for ii in range(3):
+        for jj in range(3):
+            a = np.zeros(3)
+            b = np.zeros(3)
+            a[ii] = 1.
+            b[jj] = 1.
+            expected = np.pi if ii != jj else 0.
+            assert_allclose(_angle_between_quats(a, b), expected, atol=1e-5)
+
+
+def test_get_chpi():
+    """Test CHPI position computation
+    """
+    trans0, rot0 = get_chpi_positions(hp_fname)[:2]
+    trans0, rot0 = trans0[:-1], rot0[:-1]
+    raw = Raw(hp_fif_fname)
+    out = get_chpi_positions(raw)
+    trans1, rot1, t1 = out
+    trans1, rot1 = trans1[2:], rot1[2:]
+    # these will not be exact because they don't use equiv. time points
+    assert_allclose(trans0, trans1, atol=1e-5, rtol=1e-1)
+    assert_allclose(rot0, rot1, atol=1e-6, rtol=1e-1)
+    # run through input checking
+    assert_raises(TypeError, get_chpi_positions, 1)
+    assert_raises(ValueError, get_chpi_positions, hp_fname, [1])
+    raw_no_chpi = Raw(test_fif_fname)
+    assert_raises(RuntimeError, get_chpi_positions, raw_no_chpi)
+    assert_raises(ValueError, get_chpi_positions, raw, t_step='foo')
+    assert_raises(IOError, get_chpi_positions, 'foo')
+
+
+@testing.requires_testing_data
+def test_hpi_info():
+    """Test getting HPI info
+    """
+    tempdir = _TempDir()
+    temp_name = op.join(tempdir, 'temp_raw.fif')
+    for fname in (raw_fif_fname, sss_fif_fname):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            raw = Raw(fname, allow_maxshield=True)
+        assert_true(len(raw.info['hpi_subsystem']) > 0)
+        raw.save(temp_name, overwrite=True)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            raw_2 = Raw(temp_name, allow_maxshield=True)
+        assert_equal(len(raw_2.info['hpi_subsystem']),
+                     len(raw.info['hpi_subsystem']))
+
+
+def _compare_positions(a, b, max_dist=0.003, max_angle=5.):
+    """Compare estimated cHPI positions"""
+    from scipy.interpolate import interp1d
+    trans, rot, t = a
+    trans_est, rot_est, t_est = b
+    quats_est = _rot_to_quat(rot_est)
+
+    # maxfilter produces some times that are implausibly large (weird)
+    use_mask = (t >= t_est[0]) & (t <= t_est[-1])
+    t = t[use_mask]
+    trans = trans[use_mask]
+    quats = _rot_to_quat(rot)
+    quats = quats[use_mask]
+
+    # double-check our angle function
+    for q in (quats, quats_est):
+        angles = _angle_between_quats(q, q)
+        assert_allclose(angles, 0., atol=1e-5)
+
+    # < 3 mm translation difference between MF and our estimation
+    trans_est_interp = interp1d(t_est, trans_est, axis=0)(t)
+    worst = np.sqrt(np.sum((trans - trans_est_interp) ** 2, axis=1)).max()
+    assert_true(worst <= max_dist, '%0.1f > %0.1f mm'
+                % (1000 * worst, 1000 * max_dist))
+
+    # < 5 degrees rotation difference between MF and our estimation
+    # (note that the interpolation will make this slightly worse)
+    quats_est_interp = interp1d(t_est, quats_est, axis=0)(t)
+    worst = 180 * _angle_between_quats(quats_est_interp, quats).max() / np.pi
+    assert_true(worst <= max_angle, '%0.1f > %0.1f deg' % (worst, max_angle,))
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_version('scipy', '0.11')
+@requires_version('numpy', '1.7')
+def test_calculate_chpi_positions():
+    """Test calculation of cHPI positions
+    """
+    trans, rot, t = get_chpi_positions(pos_fname)
+    with warnings.catch_warnings(record=True):
+        raw = Raw(raw_fif_fname, allow_maxshield=True, preload=True)
+    t -= raw.first_samp / raw.info['sfreq']
+    trans_est, rot_est, t_est = _calculate_chpi_positions(raw, verbose='debug')
+    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est))
+
+    # degenerate conditions
+    raw_no_chpi = Raw(test_fif_fname)
+    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
+    raw_bad = raw.copy()
+    for d in raw_bad.info['dig']:
+        if d['kind'] == FIFF.FIFFV_POINT_HPI:
+            d['coord_frame'] = 999
+            break
+    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
+    raw_bad = raw.copy()
+    for d in raw_bad.info['dig']:
+        if d['kind'] == FIFF.FIFFV_POINT_HPI:
+            d['r'] = np.ones(3)
+    raw_bad.crop(0, 1., copy=False)
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    set_log_file(log_file, overwrite=True)
+    try:
+        _calculate_chpi_positions(raw_bad)
+    finally:
+        set_log_file()
+    with open(log_file, 'r') as fid:
+        for line in fid:
+            assert_true('0/5 acceptable' in line)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_coreg.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_coreg.py
new file mode 100644
index 0000000..0735f8e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_coreg.py
@@ -0,0 +1,174 @@
+from glob import glob
+import os
+
+from nose.tools import assert_raises, assert_true
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_less
+
+import mne
+from mne.transforms import apply_trans, rotation, translation, scaling
+from mne.coreg import (fit_matched_points, fit_point_cloud,
+                       _point_cloud_error, _decimate_points,
+                       create_default_subject, scale_mri,
+                       _is_mri_subject, scale_labels, scale_source_space)
+from mne.utils import (requires_mne, requires_freesurfer, _TempDir,
+                       run_tests_if_main, requires_version)
+from functools import reduce
+
+
+@requires_mne
+@requires_freesurfer
+@requires_version('scipy', '0.11')
+def test_scale_mri():
+    """Test creating fsaverage and scaling it"""
+    # create fsaverage
+    tempdir = _TempDir()
+    create_default_subject(subjects_dir=tempdir)
+    is_mri = _is_mri_subject('fsaverage', tempdir)
+    assert_true(is_mri, "Creating fsaverage failed")
+
+    fid_path = os.path.join(tempdir, 'fsaverage', 'bem',
+                            'fsaverage-fiducials.fif')
+    os.remove(fid_path)
+    create_default_subject(update=True, subjects_dir=tempdir)
+    assert_true(os.path.exists(fid_path), "Updating fsaverage")
+
+    # remove redundant label files
+    label_temp = os.path.join(tempdir, 'fsaverage', 'label', '*.label')
+    label_paths = glob(label_temp)
+    for label_path in label_paths[1:]:
+        os.remove(label_path)
+
+    # create source space
+    path = os.path.join(tempdir, 'fsaverage', 'bem', 'fsaverage-ico-0-src.fif')
+    mne.setup_source_space('fsaverage', path, 'ico0', overwrite=True,
+                           subjects_dir=tempdir, add_dist=False)
+
+    # scale fsaverage
+    os.environ['_MNE_FEW_SURFACES'] = 'true'
+    scale_mri('fsaverage', 'flachkopf', [1, .2, .8], True,
+              subjects_dir=tempdir)
+    del os.environ['_MNE_FEW_SURFACES']
+    is_mri = _is_mri_subject('flachkopf', tempdir)
+    assert_true(is_mri, "Scaling fsaverage failed")
+    src_path = os.path.join(tempdir, 'flachkopf', 'bem',
+                            'flachkopf-ico-0-src.fif')
+    assert_true(os.path.exists(src_path), "Source space was not scaled")
+    scale_labels('flachkopf', subjects_dir=tempdir)
+
+    # scale source space separately
+    os.remove(src_path)
+    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
+    assert_true(os.path.exists(src_path), "Source space was not scaled")
+
+    # add distances to source space
+    src = mne.read_source_spaces(path)
+    mne.add_source_space_distances(src)
+    src.save(path)
+
+    # scale with distances
+    os.remove(src_path)
+    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
+
+
+def test_fit_matched_points():
+    """Test fit_matched_points: fitting two matching sets of points"""
+    tgt_pts = np.random.uniform(size=(6, 3))
+
+    # rotation only
+    trans = rotation(2, 6, 3)
+    src_pts = apply_trans(trans, tgt_pts)
+    trans_est = fit_matched_points(src_pts, tgt_pts, translate=False,
+                                   out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
+                              "rotation")
+
+    # rotation & scaling
+    trans = np.dot(rotation(2, 6, 3), scaling(.5, .5, .5))
+    src_pts = apply_trans(trans, tgt_pts)
+    trans_est = fit_matched_points(src_pts, tgt_pts, translate=False, scale=1,
+                                   out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
+                              "rotation and scaling.")
+
+    # rotation & translation
+    trans = np.dot(translation(2, -6, 3), rotation(2, 6, 3))
+    src_pts = apply_trans(trans, tgt_pts)
+    trans_est = fit_matched_points(src_pts, tgt_pts, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
+                              "rotation and translation.")
+
+    # rotation & translation & scaling
+    trans = reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
+                            scaling(.5, .5, .5)))
+    src_pts = apply_trans(trans, tgt_pts)
+    trans_est = fit_matched_points(src_pts, tgt_pts, scale=1, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
+                              "rotation, translation and scaling.")
+
+    # test exceeding tolerance
+    tgt_pts[0, :] += 20
+    assert_raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
+
+
+def test_fit_point_cloud():
+    """Test fit_point_cloud: fitting a set of points to a point cloud"""
+    # evenly spaced target points on a sphere
+    u = np.linspace(0, np.pi, 150)
+    v = np.linspace(0, np.pi, 150)
+
+    x = np.outer(np.cos(u), np.sin(v)).reshape((-1, 1))
+    y = np.outer(np.sin(u), np.sin(v)).reshape((-1, 1))
+    z = np.outer(np.ones(np.size(u)), np.cos(v)).reshape((-1, 1)) * 3
+
+    tgt_pts = np.hstack((x, y, z))
+    tgt_pts = _decimate_points(tgt_pts, .05)
+
+    # pick some points to fit
+    some_tgt_pts = tgt_pts[::362]
+
+    # rotation only
+    trans = rotation(1.5, .3, -0.4)
+    src_pts = apply_trans(trans, some_tgt_pts)
+    trans_est = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
+                                scale=0, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    err = _point_cloud_error(est_pts, tgt_pts)
+    assert_array_less(err, .1, "fit_point_cloud with rotation.")
+
+    # rotation and translation
+    trans = np.dot(rotation(0.5, .3, -0.4), translation(.3, .2, -.2))
+    src_pts = apply_trans(trans, some_tgt_pts)
+    trans_est = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
+                                scale=0, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    err = _point_cloud_error(est_pts, tgt_pts)
+    assert_array_less(err, .1, "fit_point_cloud with rotation and "
+                      "translation.")
+
+    # rotation and 1 scale parameter
+    trans = np.dot(rotation(0.5, .3, -0.4), scaling(1.5, 1.5, 1.5))
+    src_pts = apply_trans(trans, some_tgt_pts)
+    trans_est = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
+                                scale=1, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    err = _point_cloud_error(est_pts, tgt_pts)
+    assert_array_less(err, .1, "fit_point_cloud with rotation and 1 scaling "
+                      "parameter.")
+
+    # rotation and 3 scale parameters
+    trans = np.dot(rotation(0.5, .3, -0.4), scaling(1.5, 1.7, 1.1))
+    src_pts = apply_trans(trans, some_tgt_pts)
+    trans_est = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
+                                scale=3, out='trans')
+    est_pts = apply_trans(trans_est, src_pts)
+    err = _point_cloud_error(est_pts, tgt_pts)
+    assert_array_less(err, .1, "fit_point_cloud with rotation and 3 scaling "
+                      "parameters.")
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_cov.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_cov.py
new file mode 100644
index 0000000..6619b04
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_cov.py
@@ -0,0 +1,464 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_true, assert_equal
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_raises
+import numpy as np
+from scipy import linalg
+import warnings
+import itertools as itt
+
+from mne.cov import (regularize, whiten_evoked, _estimate_rank_meeg_cov,
+                     _auto_low_rank_model, _apply_scaling_cov,
+                     _undo_scaling_cov)
+
+from mne import (read_cov, write_cov, Epochs, merge_events,
+                 find_events, compute_raw_covariance,
+                 compute_covariance, read_evokeds, compute_proj_raw,
+                 pick_channels_cov, pick_channels, pick_types, pick_info,
+                 make_ad_hoc_cov)
+from mne.io import Raw
+from mne.utils import (_TempDir, slow_test, requires_sklearn_0_15,
+                       run_tests_if_main)
+from mne.io.proc_history import _get_sss_rank
+from mne.io.pick import channel_type, _picks_by_type
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
+cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+ave_fname = op.join(base_dir, 'test-ave.fif')
+erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
+hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+
+
+def test_ad_hoc_cov():
+    """Test ad hoc cov creation and I/O"""
+    tempdir = _TempDir()
+    out_fname = op.join(tempdir, 'test-cov.fif')
+    evoked = read_evokeds(ave_fname)[0]
+    cov = make_ad_hoc_cov(evoked.info)
+    cov.save(out_fname)
+    assert_true('Covariance' in repr(cov))
+    cov2 = read_cov(out_fname)
+    assert_array_almost_equal(cov['data'], cov2['data'])
+
+
+def test_io_cov():
+    """Test IO for noise covariance matrices
+    """
+    tempdir = _TempDir()
+    cov = read_cov(cov_fname)
+    cov['method'] = 'empirical'
+    cov['loglik'] = -np.inf
+    cov.save(op.join(tempdir, 'test-cov.fif'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
+    assert_array_almost_equal(cov.data, cov2.data)
+    assert_equal(cov['method'], cov2['method'])
+    assert_equal(cov['loglik'], cov2['loglik'])
+    assert_true('Covariance' in repr(cov))
+
+    cov2 = read_cov(cov_gz_fname)
+    assert_array_almost_equal(cov.data, cov2.data)
+    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
+    assert_array_almost_equal(cov.data, cov2.data)
+
+    cov['bads'] = ['EEG 039']
+    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
+    assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
+    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
+    cov_sel.save(op.join(tempdir, 'test-cov.fif'))
+
+    cov2 = read_cov(cov_gz_fname)
+    assert_array_almost_equal(cov.data, cov2.data)
+    cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
+    cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
+    assert_array_almost_equal(cov.data, cov2.data)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_cov(cov_badname, cov)
+        read_cov(cov_badname)
+    assert_true(len(w) == 2)
+
+
+def test_cov_estimation_on_raw_segment():
+    """Test estimation from raw on continuous recordings (typically empty room)
+    """
+    tempdir = _TempDir()
+    raw = Raw(raw_fname, preload=False)
+    cov = compute_raw_covariance(raw)
+    cov_mne = read_cov(erm_cov_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                linalg.norm(cov.data, ord='fro') < 1e-4)
+
+    # test IO when computation done in Python
+    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
+    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
+    assert_true(cov_read.ch_names == cov.ch_names)
+    assert_true(cov_read.nfree == cov.nfree)
+    assert_array_almost_equal(cov.data, cov_read.data)
+
+    # test with a subset of channels
+    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
+    cov = compute_raw_covariance(raw, picks=picks)
+    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
+    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
+                ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
+    # make sure we get a warning with too short a segment
+    raw_2 = raw.crop(0, 1)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        cov = compute_raw_covariance(raw_2)
+    assert_true(len(w) == 1)
+
+
+@slow_test
+def test_cov_estimation_with_triggers():
+    """Test estimation from raw with triggers
+    """
+    tempdir = _TempDir()
+    raw = Raw(raw_fname, preload=False)
+    events = find_events(raw, stim_channel='STI 014')
+    event_ids = [1, 2, 3, 4]
+    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
+
+    # cov with merged events and keep_sample_mean=True
+    events_merged = merge_events(events, event_ids, 1234)
+    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
+                    baseline=(-0.2, -0.1), proj=True,
+                    reject=reject, preload=True)
+
+    cov = compute_covariance(epochs, keep_sample_mean=True)
+    cov_mne = read_cov(cov_km_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                linalg.norm(cov.data, ord='fro')) < 0.005)
+
+    # Test with tmin and tmax (different but not too much)
+    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
+    assert_true(np.all(cov.data != cov_tmin_tmax.data))
+    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
+                 linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
+
+    # cov using a list of epochs and keep_sample_mean=True
+    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
+              baseline=(-0.2, -0.1), proj=True, reject=reject)
+              for ev_id in event_ids]
+
+    cov2 = compute_covariance(epochs, keep_sample_mean=True)
+    assert_array_almost_equal(cov.data, cov2.data)
+    assert_true(cov.ch_names == cov2.ch_names)
+
+    # cov with keep_sample_mean=False using a list of epochs
+    cov = compute_covariance(epochs, keep_sample_mean=False)
+    cov_mne = read_cov(cov_fname)
+    assert_true(cov_mne.ch_names == cov.ch_names)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                 linalg.norm(cov.data, ord='fro')) < 0.005)
+
+    method_params = {'empirical': {'assume_centered': False}}
+    assert_raises(ValueError, compute_covariance, epochs,
+                  keep_sample_mean=False, method_params=method_params)
+
+    assert_raises(ValueError, compute_covariance, epochs,
+                  keep_sample_mean=False, method='factor_analysis')
+
+    # test IO when computation done in Python
+    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
+    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
+    assert_true(cov_read.ch_names == cov.ch_names)
+    assert_true(cov_read.nfree == cov.nfree)
+    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
+                 linalg.norm(cov.data, ord='fro')) < 1e-5)
+
+    # cov with list of epochs with different projectors
+    epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
+                     baseline=(-0.2, -0.1), proj=True, reject=reject),
+              Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
+                     baseline=(-0.2, -0.1), proj=False, reject=reject)]
+    # these should fail
+    assert_raises(ValueError, compute_covariance, epochs)
+    assert_raises(ValueError, compute_covariance, epochs, projs=None)
+    # these should work, but won't be equal to above
+    with warnings.catch_warnings(record=True) as w:  # too few samples warning
+        warnings.simplefilter('always')
+        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
+        cov = compute_covariance(epochs, projs=[])
+    assert_true(len(w) == 2)
+
+    # test new dict support
+    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.2, tmax=0,
+                    baseline=(-0.2, -0.1), proj=True, reject=reject)
+    compute_covariance(epochs)
+
+
+def test_arithmetic_cov():
+    """Test arithmetic with noise covariance matrices
+    """
+    cov = read_cov(cov_fname)
+    cov_sum = cov + cov
+    assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
+    assert_array_almost_equal(2 * cov.data, cov_sum.data)
+    assert_true(cov.ch_names == cov_sum.ch_names)
+
+    cov += cov
+    assert_array_almost_equal(cov_sum.nfree, cov.nfree)
+    assert_array_almost_equal(cov_sum.data, cov.data)
+    assert_true(cov_sum.ch_names == cov.ch_names)
+
+
+def test_regularize_cov():
+    """Test cov regularization
+    """
+    raw = Raw(raw_fname, preload=False)
+    raw.info['bads'].append(raw.ch_names[0])  # test with bad channels
+    noise_cov = read_cov(cov_fname)
+    # Regularize noise cov
+    reg_noise_cov = regularize(noise_cov, raw.info,
+                               mag=0.1, grad=0.1, eeg=0.1, proj=True,
+                               exclude='bads')
+    assert_true(noise_cov['dim'] == reg_noise_cov['dim'])
+    assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape)
+    assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08)
+
+
+def test_evoked_whiten():
+    """Test whitening of evoked data"""
+    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
+                          proj=True)
+    cov = read_cov(cov_fname)
+
+    # Whiten the evoked data and check the baseline amplitudes
+    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
+                       exclude='bads')
+
+    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
+                           exclude='bads')
+
+    evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
+    whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
+    mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
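+    # whitened baseline noise should be roughly unit scale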
+    assert_true(np.all(mean_baseline < 1.))
+    assert_true(np.all(mean_baseline > 0.2))
+
+
+@slow_test
+def test_rank():
+    """Test cov rank estimation"""
+    raw_sample = Raw(raw_fname)
+
+    raw_sss = Raw(hp_fif_fname)
+    raw_sss.add_proj(compute_proj_raw(raw_sss))
+
+    cov_sample = compute_raw_covariance(raw_sample)
+    cov_sample_proj = compute_raw_covariance(
+        raw_sample.copy().apply_proj())
+
+    cov_sss = compute_raw_covariance(raw_sss)
+    cov_sss_proj = compute_raw_covariance(
+        raw_sss.copy().apply_proj())
+
+    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
+    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
+
+    info_sample = pick_info(raw_sample.info, picks_all_sample)
+    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
+                                             eeg=True))]
+    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
+    picks_stack_sample += [('all',
+                            pick_types(info_sample, meg=True, eeg=True))]
+
+    info_sss = pick_info(raw_sss.info, picks_all_sss)
+    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
+    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
+    picks_stack_somato += [('all',
+                            pick_types(info_sss, meg=True, eeg=True))]
+
+    iter_tests = list(itt.product(
+        [(cov_sample, picks_stack_sample, info_sample),
+         (cov_sample_proj, picks_stack_sample, info_sample),
+         (cov_sss, picks_stack_somato, info_sss),
+         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
+        [dict(mag=1e15, grad=1e13, eeg=1e6)]
+    ))
+
+    for (cov, picks_list, this_info), scalings in iter_tests:
+        for ch_type, picks in picks_list:
+
+            this_very_info = pick_info(this_info, picks)
+
+            # compute subset of projs
+            this_projs = [c['active'] and
+                          len(set(c['data']['col_names'])
+                              .intersection(set(this_very_info['ch_names']))) >
+                          0 for c in cov['projs']]
+            n_projs = sum(this_projs)
+
+            # count channel types
+            ch_types = [channel_type(this_very_info, idx)
+                        for idx in range(len(picks))]
+            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
+                                    ['eeg', 'mag', 'grad']]
+            n_meg = n_mag + n_grad
+            if ch_type in ('all', 'eeg'):
+                n_projs_eeg = 1
+            else:
+                n_projs_eeg = 0
+
+            # check sss
+            if 'proc_history' in this_very_info:
+                mf = this_very_info['proc_history'][0]['max_info']
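+                # rank of Maxwell-filtered MEG is capped by the SSS expansion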
+                n_free = _get_sss_rank(mf)
+                if 'mag' not in ch_types and 'grad' not in ch_types:
+                    n_free = 0
+                # - n_projs XXX clarify
+                expected_rank = n_free + n_eeg
+                if n_projs > 0 and ch_type in ('all', 'eeg'):
+                    expected_rank -= n_projs_eeg
+            else:
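+                # without SSS, each active projector removes one dimension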
+                expected_rank = n_meg + n_eeg - n_projs
+
+            C = cov['data'][np.ix_(picks, picks)]
+            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
+                                               scalings=scalings)
+
+            assert_equal(expected_rank, est_rank)
+
+
+def test_cov_scaling():
+    """Test rescaling covs"""
+    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
+                          proj=True)
+    cov = read_cov(cov_fname)['data']
+    cov2 = read_cov(cov_fname)['data']
+
+    assert_array_equal(cov, cov2)
+    evoked.pick_channels([evoked.ch_names[k] for k in pick_types(
+        evoked.info, meg=True, eeg=True
+    )])
+    picks_list = _picks_by_type(evoked.info)
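+    # typical scalings bring channel types to comparable magnitudes
+    # (mag: T -> fT, grad: T/m -> fT/cm, eeg: V -> uV)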
+    scalings = dict(mag=1e15, grad=1e13, eeg=1e6)
+
+    _apply_scaling_cov(cov2, picks_list, scalings=scalings)
+    _apply_scaling_cov(cov, picks_list, scalings=scalings)
+    assert_array_equal(cov, cov2)
+    assert_true(cov.max() > 1)
+
+    _undo_scaling_cov(cov2, picks_list, scalings=scalings)
+    _undo_scaling_cov(cov, picks_list, scalings=scalings)
+    assert_array_equal(cov, cov2)
+    assert_true(cov.max() < 1)
+
+
+@requires_sklearn_0_15
+def test_auto_low_rank():
+    """Test probabilistic low rank estimators"""
+
+    n_samples, n_features, rank = 400, 20, 10
+    sigma = 0.1
+
+    def get_data(n_samples, n_features, rank, sigma):
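+        # rank-deficient data: random scores projected onto `rank`
+        # orthonormal directions, plus heteroscedastic noise per feature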
+        rng = np.random.RandomState(42)
+        W = rng.randn(n_features, n_features)
+        X = rng.randn(n_samples, rank)
+        U, _, _ = linalg.svd(W.copy())
+        X = np.dot(X, U[:, :rank].T)
+
+        sigmas = sigma * rng.rand(n_features) + sigma / 2.
+        X += rng.randn(n_samples, n_features) * sigmas
+        return X
+
+    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
+                 sigma=sigma)
+    method_params = {'iter_n_components': [9, 10, 11]}
+    cv = 3
+    n_jobs = 1
+    mode = 'factor_analysis'
+    rescale = 1e8
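+    # heavy rescaling should not change the selected number of components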
+    X *= rescale
+    est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
+                                     method_params=method_params,
+                                     cv=cv)
+    assert_equal(info['best'], rank)
+
+    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
+                 sigma=sigma)
+    method_params = {'iter_n_components': [n_features + 5]}
+    msg = ('You are trying to estimate %i components on matrix '
+           'with %i features.')
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
+                             method_params=method_params, cv=cv)
+        assert_equal(len(w), 1)
+        assert_equal(msg % (n_features + 5, n_features), '%s' % w[0].message)
+
+    method_params = {'iter_n_components': [n_features + 5]}
+    assert_raises(ValueError, _auto_low_rank_model, X, mode='foo',
+                  n_jobs=n_jobs, method_params=method_params, cv=cv)
+
+
+@slow_test
+@requires_sklearn_0_15
+def test_compute_covariance_auto_reg():
+    """Test automated regularization"""
+
+    raw = Raw(raw_fname, preload=False)
+    events = find_events(raw, stim_channel='STI 014')
+    event_ids = [1, 2, 3, 4]
+    reject = dict(mag=4e-12)
+
+    # cov with merged events and keep_sample_mean=True
+    events_merged = merge_events(events, event_ids, 1234)
+    picks = pick_types(raw.info, meg='mag', eeg=False)
+    epochs = Epochs(
+        raw, events_merged, 1234, tmin=-0.2, tmax=0,
+        picks=picks[:10],  # a few channels suffice and keep PCA/FA
+        # numerically tractable
+        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
+    epochs = epochs.crop(None, 0)[:10]
+
+    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
+                         pca=dict(iter_n_components=[3]))
+
+    covs = compute_covariance(epochs, method='auto',
+                              method_params=method_params,
+                              projs=True,
+                              return_estimators=True)
+
+    logliks = [c['loglik'] for c in covs]
+    assert_true(np.diff(logliks).max() <= 0)  # descending order
+
+    methods = ['empirical',
+               'factor_analysis',
+               'ledoit_wolf',
+               'pca']
+    cov3 = compute_covariance(epochs, method=methods,
+                              method_params=method_params, projs=None,
+                              return_estimators=True)
+
+    assert_equal(set([c['method'] for c in cov3]),
+                 set(methods))
+
+    # invalid prespecified method
+    assert_raises(ValueError, compute_covariance, epochs, method='pizza')
+
+    # invalid scalings
+    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
+                  scalings=dict(misc=123))
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_defaults.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_defaults.py
new file mode 100644
index 0000000..807a693
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_defaults.py
@@ -0,0 +1,22 @@
+from nose.tools import assert_equal, assert_true
+from copy import deepcopy
+
+from mne.defaults import _handle_default
+
+
+def test_handle_default():
+    """Test mutable default
+    """
+    x = deepcopy(_handle_default('scalings'))
+    y = _handle_default('scalings')
+    z = _handle_default('scalings', dict(mag=1, grad=2))
+    w = _handle_default('scalings', {})
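+    # every variant returns the full key set; passed entries only override
+    # their own defaults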
+    assert_equal(set(x.keys()), set(y.keys()))
+    assert_equal(set(x.keys()), set(z.keys()))
+    for key in x.keys():
+        assert_equal(x[key], y[key])
+        assert_equal(x[key], w[key])
+        if key in ('mag', 'grad'):
+            assert_true(x[key] != z[key])
+        else:
+            assert_equal(x[key], z[key])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_dipole.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_dipole.py
new file mode 100644
index 0000000..4819578
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_dipole.py
@@ -0,0 +1,256 @@
+import os.path as op
+import numpy as np
+from nose.tools import assert_true, assert_equal, assert_raises
+from numpy.testing import assert_allclose
+import warnings
+
+from mne import (read_dipole, read_forward_solution,
+                 convert_forward_solution, read_evokeds, read_cov,
+                 SourceEstimate, write_evokeds, fit_dipole,
+                 transform_surface_to, make_sphere_model, pick_types,
+                 pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
+                 make_forward_solution)
+from mne.simulation import simulate_evoked
+from mne.datasets import testing
+from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
+                       run_subprocess)
+from mne.proj import make_eeg_average_ref_proj
+
+from mne.io import Raw
+
+from mne.surface import _compute_nearest
+from mne.bem import _bem_find_surface, read_bem_solution
+from mne.transforms import (read_trans, apply_trans, _get_mri_head_t)
+
+warnings.simplefilter('always')
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
+fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
+fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-1280-1280-1280-bem-sol.fif')
+fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-oct-2-src.fif')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+subjects_dir = op.join(data_path, 'subjects')
+
+
+def _compare_dipoles(orig, new):
+    """Compare dipole results for equivalence"""
+    assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
+    assert_allclose(orig.pos, new.pos, err_msg='pos')
+    assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
+    assert_allclose(orig.gof, new.gof, err_msg='gof')
+    assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
+    assert_equal(orig.name, new.name)
+
+
+def _check_dipole(dip, n_dipoles):
+    assert_equal(len(dip), n_dipoles)
+    assert_equal(dip.pos.shape, (n_dipoles, 3))
+    assert_equal(dip.ori.shape, (n_dipoles, 3))
+    assert_equal(dip.gof.shape, (n_dipoles,))
+    assert_equal(dip.amplitude.shape, (n_dipoles,))
+
+
+@testing.requires_testing_data
+def test_io_dipoles():
+    """Test IO for .dip files
+    """
+    tempdir = _TempDir()
+    dipole = read_dipole(fname_dip)
+    print(dipole)  # test repr
+    out_fname = op.join(tempdir, 'temp.dip')
+    dipole.save(out_fname)
+    dipole_new = read_dipole(out_fname)
+    _compare_dipoles(dipole, dipole_new)
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_mne
+def test_dipole_fitting():
+    """Test dipole fitting"""
+    amp = 10e-9
+    tempdir = _TempDir()
+    rng = np.random.RandomState(0)
+    fname_dtemp = op.join(tempdir, 'test.dip')
+    fname_sim = op.join(tempdir, 'test-ave.fif')
+    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
+                                   surf_ori=False, force_fixed=True)
+    evoked = read_evokeds(fname_evo)[0]
+    cov = read_cov(fname_cov)
+    n_per_hemi = 5
+    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
+                for s in fwd['src']]
+    nv = sum(len(v) for v in vertices)
+    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
+    with warnings.catch_warnings(record=True):  # semi-def cov
+        evoked = simulate_evoked(fwd, stc, evoked, cov, snr=20,
+                                 random_state=rng)
+    # For speed, let's use a subset of channels (strange but works)
+    picks = np.sort(np.concatenate([
+        pick_types(evoked.info, meg=True, eeg=False)[::2],
+        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
+    evoked.pick_channels([evoked.ch_names[p] for p in picks])
+    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
+    write_evokeds(fname_sim, evoked)
+
+    # Run MNE-C version
+    run_subprocess([
+        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
+        '--noise', fname_cov, '--dip', fname_dtemp,
+        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
+    ])
+    dip_c = read_dipole(fname_dtemp)
+
+    # Run mne-python version
+    sphere = make_sphere_model(head_radius=0.1)
+    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
+
+    # Sanity check: do our residuals have less power than orig data?
+    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
+    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
+    assert_true((data_rms > resi_rms).all())
+
+    # Compare to original points
+    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
+    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
+    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
+                            axis=0)
+    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
+                            axis=0)
+
+    # MNE-C skips the last "time" point :(
+    dip.crop(dip_c.times[0], dip_c.times[-1])
+    src_rr, src_nn = src_rr[:-1], src_nn[:-1]
+
+    # check that we did at least as well
+    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
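+    # collected for MNE-C then mne-python: position correlation, RMS
+    # location error, mean orientation error (deg), amplitude RMSE, mean GOF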
+    for d in (dip_c, dip):
+        new = d.pos
+        diffs = new - src_rr
+        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
+        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
+        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
+                                                     axis=1)))]
+        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
+        gofs += [np.mean(d.gof)]
+    assert_true(dists[0] >= dists[1], 'dists: %s' % dists)
+    assert_true(corrs[0] <= corrs[1], 'corrs: %s' % corrs)
+    assert_true(gc_dists[0] >= gc_dists[1], 'gc-dists (ori): %s' % gc_dists)
+    assert_true(amp_errs[0] >= amp_errs[1], 'amplitude errors: %s' % amp_errs)
+    assert_true(gofs[0] <= gofs[1], 'gof: %s' % gofs)
+
+
+@testing.requires_testing_data
+def test_len_index_dipoles():
+    """Test len and indexing of Dipole objects
+    """
+    dipole = read_dipole(fname_dip)
+    d0 = dipole[0]
+    d1 = dipole[:1]
+    _check_dipole(d0, 1)
+    _check_dipole(d1, 1)
+    _compare_dipoles(d0, d1)
+    mask = dipole.gof > 15
+    idx = np.where(mask)[0]
+    d_mask = dipole[mask]
+    _check_dipole(d_mask, 4)
+    _compare_dipoles(d_mask, dipole[idx])
+
+
+@testing.requires_testing_data
+def test_min_distance_fit_dipole():
+    """Test dipole min_dist to inner_skull"""
+    subject = 'sample'
+    raw = Raw(fname_raw, preload=True)
+
+    # select eeg data
+    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    info = pick_info(raw.info, picks)
+
+    # Let's use cov = Identity
+    cov = read_cov(fname_cov)
+    cov['data'] = np.eye(cov['data'].shape[0])
+
+    # Simulated scalp map
+    simulated_scalp_map = np.zeros(picks.shape[0])
+    simulated_scalp_map[27:34] = 1
+
+    simulated_scalp_map = simulated_scalp_map[:, None]
+
+    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
+
+    min_dist = 5.  # distance in mm
+
+    dip, residual = fit_dipole(evoked, cov, fname_bem, fname_trans,
+                               min_dist=min_dist)
+
+    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
+
+    # Constraints are not exact, so bump the minimum slightly
+    assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
+
+    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
+                  -1.)
+
+
+def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
+    """Compute dipole depth"""
+    trans = read_trans(fname_trans)
+    trans = _get_mri_head_t(trans)[0]
+    bem = read_bem_solution(fname_bem)
+    surf = _bem_find_surface(bem, 'inner_skull')
+    points = surf['rr']
+    points = apply_trans(trans['trans'], points)
+    depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
+    return np.ravel(depth)
+
+
+@testing.requires_testing_data
+def test_accuracy():
+    """Test dipole fitting to sub-mm accuracy
+    """
+    evoked = read_evokeds(fname_evo)[0].crop(0., 0.)
+    evoked.pick_types(meg=True, eeg=False)
+    evoked.pick_channels([c for c in evoked.ch_names[::4]])
+    bem = make_sphere_model('auto', 0.09, evoked.info,
+                            relative_radii=(0.999, 0.998, 0.997, 0.995))
+    src = read_source_spaces(fname_src)
+
+    fwd = make_forward_solution(evoked.info, None, src, bem)
+    fwd = convert_forward_solution(fwd, force_fixed=True)
+    vertices = [src[0]['vertno'], src[1]['vertno']]
+    n_vertices = sum(len(v) for v in vertices)
+    amp = 10e-9
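+    # identity design: each time sample activates a single source at 10 nAm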
+    data = np.eye(n_vertices + 1)[:n_vertices]
+    data[-1, -1] = 1.
+    data *= amp
+    stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
+    sim = simulate_evoked(fwd, stc, evoked.info, cov=None, snr=np.inf)
+
+    cov = make_ad_hoc_cov(evoked.info)
+    dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
+
+    ds = []
+    for vi in range(n_vertices):
+        if vi < len(vertices[0]):
+            hi = 0
+            vertno = vi
+        else:
+            hi = 1
+            vertno = vi - len(vertices[0])
+        vertno = src[hi]['vertno'][vertno]
+        rr = src[hi]['rr'][vertno]
+        d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
+        ds.append(d)
+    # make sure that our median is sub-mm and the large majority are very close
+    # (we expect some to be off by a bit e.g. because they are radial)
+    assert_true((np.percentile(ds, [50, 90]) < [0.0005, 0.002]).all())
+
+run_tests_if_main(False)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_docstring_parameters.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_docstring_parameters.py
new file mode 100644
index 0000000..0d4654f
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_docstring_parameters.py
@@ -0,0 +1,160 @@
+# TODO inspect for Cython (see sagenb.misc.sageinspect)
+from __future__ import print_function
+
+from nose.plugins.skip import SkipTest
+from nose.tools import assert_true
+from os import path as op
+import sys
+import inspect
+import warnings
+import imp
+
+from pkgutil import walk_packages
+from inspect import getsource
+
+import mne
+from mne.utils import run_tests_if_main
+
+public_modules = [
+    # the list of modules users need to access for all functionality
+    'mne',
+    'mne.beamformer',
+    'mne.connectivity',
+    'mne.datasets',
+    'mne.datasets.megsim',
+    'mne.datasets.sample',
+    'mne.datasets.spm_face',
+    'mne.decoding',
+    'mne.filter',
+    'mne.gui',
+    'mne.inverse_sparse',
+    'mne.io',
+    'mne.io.kit',
+    'mne.minimum_norm',
+    'mne.preprocessing',
+    'mne.realtime',
+    'mne.report',
+    'mne.simulation',
+    'mne.source_estimate',
+    'mne.source_space',
+    'mne.stats',
+    'mne.time_frequency',
+    'mne.viz',
+]
+
+docscrape_path = op.join(op.dirname(__file__), '..', '..', 'doc', 'sphinxext',
+                         'numpy_ext', 'docscrape.py')
+if op.isfile(docscrape_path):
+    docscrape = imp.load_source('docscrape', docscrape_path)
+else:
+    docscrape = None
+
+
+def get_name(func):
+    parts = []
+    module = inspect.getmodule(func)
+    if module:
+        parts.append(module.__name__)
+    if hasattr(func, 'im_class'):
+        parts.append(func.im_class.__name__)
+    parts.append(func.__name__)
+    return '.'.join(parts)
+
+
+# functions to ignore args / docstring of
+_docstring_ignores = [
+    'mne.io.write',  # always ignore these
+    'mne.fixes._in1d',  # fix function
+    'mne.gui.coregistration',  # deprecated single argument w/None
+]
+
+_tab_ignores = [
+    'mne.channels.tests.test_montage',  # demo data has a tab
+]
+
+
+def check_parameters_match(func, doc=None):
+    """Helper to check docstring, returns list of incorrect results"""
+    incorrect = []
+    name_ = get_name(func)
+    if not name_.startswith('mne.') or name_.startswith('mne.externals'):
+        return incorrect
+    if inspect.isdatadescriptor(func):
+        return incorrect
+    args, varargs, varkw, defaults = inspect.getargspec(func)
+    # drop self
+    if len(args) > 0 and args[0] == 'self':
+        args = args[1:]
+
+    if doc is None:
+        with warnings.catch_warnings(record=True) as w:
+            doc = docscrape.FunctionDoc(func)
+        if len(w):
+            raise RuntimeError('Error for %s:\n%s' % (name_, w[0]))
+    # check set
+    param_names = [name for name, _, _ in doc['Parameters']]
+    # clean up some docscrape output:
+    param_names = [name.split(':')[0].strip('` ') for name in param_names]
+    param_names = [name for name in param_names if '*' not in name]
+    if len(param_names) != len(args):
+        bad = str(sorted(list(set(param_names) - set(args)) +
+                         list(set(args) - set(param_names))))
+        if not any(d in name_ for d in _docstring_ignores) and \
+                'deprecation_wrapped' not in func.__code__.co_name:
+            incorrect += [name_ + ' arg mismatch: ' + bad]
+    else:
+        for n1, n2 in zip(param_names, args):
+            if n1 != n2:
+                incorrect += [name_ + ' ' + n1 + ' != ' + n2]
+    return incorrect
+
+
+def test_docstring_parameters():
+    """Test module docsting formatting"""
+    if docscrape is None:
+        raise SkipTest('This must be run from the mne-python source directory')
+    incorrect = []
+    for name in public_modules:
+        module = __import__(name, globals())
+        for submod in name.split('.')[1:]:
+            module = getattr(module, submod)
+        classes = inspect.getmembers(module, inspect.isclass)
+        for cname, cls in classes:
+            if cname.startswith('_'):
+                continue
+            with warnings.catch_warnings(record=True) as w:
+                cdoc = docscrape.ClassDoc(cls)
+            if len(w):
+                raise RuntimeError('Error for __init__ of %s in %s:\n%s'
+                                   % (cls, name, w[0]))
+            if hasattr(cls, '__init__'):
+                incorrect += check_parameters_match(cls.__init__, cdoc)
+            for method_name in cdoc.methods:
+                method = getattr(cls, method_name)
+                incorrect += check_parameters_match(method)
+            if hasattr(cls, '__call__'):
+                incorrect += check_parameters_match(cls.__call__)
+        functions = inspect.getmembers(module, inspect.isfunction)
+        for fname, func in functions:
+            if fname.startswith('_'):
+                continue
+            incorrect += check_parameters_match(func)
+    msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
+    if len(incorrect) > 0:
+        raise AssertionError(msg)
+
+
+def test_tabs():
+    """Test that there are no tabs in our source files"""
+    for importer, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
+        if not ispkg and modname not in _tab_ignores:
+            # mod = importlib.import_module(modname)  # not py26 compatible!
+            __import__(modname)  # because we don't import e.g. mne.tests w/mne
+            mod = sys.modules[modname]
+            source = getsource(mod)
+            assert_true('\t' not in source,
+                        '"%s" has tabs, please remove them or add it to the'
+                        'ignore list' % modname)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_epochs.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_epochs.py
new file mode 100644
index 0000000..34e76aa
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_epochs.py
@@ -0,0 +1,1793 @@
+# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#         Denis Engemann <denis.engemann@gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from copy import deepcopy
+
+from nose.tools import (assert_true, assert_equal, assert_raises,
+                        assert_not_equal)
+
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+                           assert_allclose)
+import numpy as np
+import copy as cp
+import warnings
+from scipy import fftpack
+import matplotlib
+
+from mne import (io, Epochs, read_events, pick_events, read_epochs,
+                 equalize_channels, pick_types, pick_channels, read_evokeds,
+                 write_evokeds)
+from mne.epochs import (
+    bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
+    EpochsArray, concatenate_epochs, _BaseEpochs)
+from mne.utils import (_TempDir, requires_pandas, slow_test,
+                       clean_warning_registry, run_tests_if_main,
+                       requires_version)
+
+from mne.io.meas_info import create_info
+from mne.io.proj import _has_eeg_average_ref_proj
+from mne.event import merge_events
+from mne.io.constants import FIFF
+from mne.externals.six import text_type
+from mne.externals.six.moves import zip, cPickle as pickle
+
+matplotlib.use('Agg')  # for testing, don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+
+
+def _get_data():
+    raw = io.Raw(raw_fname, add_eeg_ref=False, proj=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                       ecg=True, eog=True, include=['STI 014'],
+                       exclude='bads')
+    return raw, events, picks
+
+reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+flat = dict(grad=1e-15, mag=1e-15)
+
+clean_warning_registry()  # really clean warning stack
+
+
+def test_reject():
+    """Test epochs rejection
+    """
+    raw, events, picks = _get_data()
+    # cull the list just to contain the relevant event
+    events = events[events[:, 2] == event_id, :]
+    selection = np.arange(3)
+    drop_log = [[]] * 3 + [['MEG 2443']] * 4
+    assert_raises(TypeError, pick_types, raw)
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+    assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks, preload=False, reject='foo')
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks_meg, preload=False, reject=dict(eeg=1.))
+    assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks, preload=False, reject=dict(foo=1.))
+
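+    # cache the no-rejection data for each proj setting so the rejection
+    # cases below can be compared against it for both preload modes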
+    data_7 = dict()
+    keep_idx = [0, 1, 2]
+    for preload in (True, False):
+        for proj in (True, False, 'delayed'):
+            # no rejection
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            preload=preload)
+            assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events))
+            assert_array_equal(epochs.selection, np.arange(len(events)))
+            assert_array_equal(epochs.drop_log, [[]] * 7)
+            if proj not in data_7:
+                data_7[proj] = epochs.get_data()
+            assert_array_equal(epochs.get_data(), data_7[proj])
+
+            # with rejection
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=reject, preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # rejection post-hoc
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events))
+            assert_array_equal(epochs.get_data(), data_7[proj])
+            epochs.drop_bad_epochs(reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_equal(len(epochs), len(epochs.get_data()))
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # rejection twice
+            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=reject_part, preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events) - 1)
+            epochs.drop_bad_epochs(reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # ensure that thresholds must become more stringent, not less
+            assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+            epochs.drop_bad_epochs(flat=dict(mag=1.))
+            assert_equal(len(epochs), 0)
+            assert_raises(ValueError, epochs.drop_bad_epochs,
+                          flat=dict(mag=0.))
+
+            # rejection of subset of trials (ensure array ownership)
+            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=None, preload=preload)
+            epochs = epochs[:-1]
+            epochs.drop_bad_epochs(reject=reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+
+def test_decim():
+    """Test epochs decimation
+    """
+    # First with EpochsArray
+    n_epochs, n_channels, n_times = 5, 10, 20
+    dec_1, dec_2 = 2, 3
+    decim = dec_1 * dec_2
+    sfreq = 1000.
+    sfreq_new = sfreq / decim
+    data = np.random.randn(n_epochs, n_channels, n_times)
+    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
+    info = create_info(n_channels, sfreq, 'eeg')
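+    # keep the nominal lowpass low enough that decimation does not warn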
+    info['lowpass'] = sfreq_new / float(decim)
+    epochs = EpochsArray(data, info, events)
+    data_epochs = epochs.decimate(decim, copy=True).get_data()
+    data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
+    assert_array_equal(data_epochs, data[:, :, ::decim])
+    assert_array_equal(data_epochs, data_epochs_2)
+
+    # Now let's do it with some real data
+    raw, events, picks = _get_data()
+    sfreq_new = raw.info['sfreq'] / decim
+    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=False)
+    assert_raises(ValueError, epochs.decimate, -1)
+    expected_data = epochs.get_data()[:, :, ::decim]
+    expected_times = epochs.times[::decim]
+    for preload in (True, False):
+        # at init
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
+                        preload=preload)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # split between init and afterward
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
+                        preload=preload).decimate(dec_2)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
+                        preload=preload).decimate(dec_1)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # split between init and afterward, with preload in between
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
+                        preload=preload)
+        epochs.load_data()
+        epochs = epochs.decimate(dec_2)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
+                        preload=preload)
+        epochs.load_data()
+        epochs = epochs.decimate(dec_1)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # decimate afterward
+        epochs = Epochs(raw, events, event_id, tmin, tmax,
+                        preload=preload).decimate(decim)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # decimate afterward, with preload in between
+        epochs = Epochs(raw, events, event_id, tmin, tmax,
+                        preload=preload)
+        epochs.load_data()
+        epochs.decimate(decim)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+
+def test_base_epochs():
+    """Test base epochs class
+    """
+    raw = _get_data()[0]
+    epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
+                         event_id, tmin, tmax)
+    assert_raises(NotImplementedError, epochs.get_data)
+    # events with non integers
+    assert_raises(ValueError, _BaseEpochs, raw.info, None,
+                  np.ones((1, 3), float), event_id, tmin, tmax)
+    assert_raises(ValueError, _BaseEpochs, raw.info, None,
+                  np.ones((1, 3, 2), int), event_id, tmin, tmax)
+
+
+@requires_version('scipy', '0.14')
+def test_savgol_filter():
+    """Test savgol filtering
+    """
+    h_freq = 10.
+    raw, events = _get_data()[:2]
+    epochs = Epochs(raw, events, event_id, tmin, tmax)
+    assert_raises(RuntimeError, epochs.savgol_filter, 10.)
+    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
+    data = np.abs(fftpack.fft(epochs.get_data()))
+    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
+    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
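+    # compare average spectra well inside the pass-band and the stop-band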
+    epochs.savgol_filter(h_freq)
+    data_filt = np.abs(fftpack.fft(epochs.get_data()))
+    # decent in pass-band
+    assert_allclose(np.mean(data[:, :, match_mask], 0),
+                    np.mean(data_filt[:, :, match_mask], 0),
+                    rtol=1e-4, atol=1e-2)
+    # suppression in stop-band
+    assert_true(np.mean(data[:, :, mismatch_mask]) >
+                np.mean(data_filt[:, :, mismatch_mask]) * 5)
+
+
+def test_epochs_hash():
+    """Test epoch hashing
+    """
+    raw, events = _get_data()[:2]
+    epochs = Epochs(raw, events, event_id, tmin, tmax)
+    assert_raises(RuntimeError, epochs.__hash__)
+    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    assert_equal(hash(epochs), hash(epochs))
+    epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    assert_equal(hash(epochs), hash(epochs_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
+
+    epochs_2._data[0, 0, 0] -= 1
+    assert_not_equal(hash(epochs), hash(epochs_2))
+
+
+def test_event_ordering():
+    """Test event order"""
+    raw, events = _get_data()[:2]
+    events2 = events.copy()
+    np.random.shuffle(events2)
+    for ii, eve in enumerate([events, events2]):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            Epochs(raw, eve, event_id, tmin, tmax,
+                   baseline=(None, 0), reject=reject, flat=flat)
+            assert_equal(len(w), ii)
+            if ii > 0:
+                assert_true('chronologically' in '%s' % w[-1].message)
+
+
+def test_epochs_bad_baseline():
+    """Test Epochs initialization with bad baseline parameters
+    """
+    raw, events = _get_data()[:2]
+    assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
+    assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
+
+
+def test_epoch_combine_ids():
+    """Test combining event ids in epochs compared to events
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
+                                  'd': 4, 'e': 5, 'f': 32},
+                    tmin, tmax, picks=picks, preload=False)
+    events_new = merge_events(events, [1, 2], 12)
+    epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
+    assert_equal(epochs_new['ab'].name, 'ab')
+    assert_array_equal(events_new, epochs_new.events)
+    # should probably add test + functionality for non-replacement XXX
+
+
+def test_epoch_multi_ids():
+    """Test epoch selection via multiple/partial keys
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
+                                  'b/d': 4, 'a_b': 5},
+                    tmin, tmax, picks=picks, preload=False)
+    epochs_regular = epochs[['a', 'b']]
+    epochs_multi = epochs[['a/b/a', 'a/b/b']]
+    assert_array_equal(epochs_regular.events, epochs_multi.events)
+
+
+def test_read_epochs_bad_events():
+    """Test epochs when events are at the beginning or the end of the file
+    """
+    raw, events, picks = _get_data()
+    # Event at the beginning
+    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
+
+    epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    assert_true(repr(epochs))  # test repr
+    epochs.drop_bad_epochs()
+    assert_true(repr(epochs))
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
+
+    # Event at the end
+    epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
+                    event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+
+    with warnings.catch_warnings(record=True):
+        evoked = epochs.average()
+        assert evoked
+    warnings.resetwarnings()
+
+
+@slow_test
+def test_read_write_epochs():
+    """Test epochs from raw files with IO as fif file
+    """
+    raw, events, picks = _get_data()
+    tempdir = _TempDir()
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
+    baseline = (None, 0)
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=baseline, preload=True)
+    epochs_orig = epochs.copy()
+    epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                          baseline=None, preload=True)
+    assert_true(epochs_no_bl.baseline is None)
+    evoked = epochs.average()
+    data = epochs.get_data()
+
+    # Bad tmin/tmax parameters
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
+                  baseline=None)
+
+    epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
+                          None, tmin, tmax, picks=picks,
+                          baseline=(None, 0))
+    assert_array_equal(data, epochs_no_id.get_data())
+
+    eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
+                           eog=True, exclude='bads')
+    eog_ch_names = [raw.ch_names[k] for k in eog_picks]
+    epochs.drop_channels(eog_ch_names)
+    assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
+                epochs.get_data().shape[1])
+    data_no_eog = epochs.get_data()
+    assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
+
+    # test decim kwarg
+    with warnings.catch_warnings(record=True) as w:
+        # decim with lowpass
+        warnings.simplefilter('always')
+        epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            baseline=(None, 0), decim=4)
+        assert_equal(len(w), 1)
+
+        # decim without lowpass
+        lowpass = raw.info['lowpass']
+        raw.info['lowpass'] = None
+        epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            baseline=(None, 0), decim=4)
+        assert_equal(len(w), 2)
+        raw.info['lowpass'] = lowpass
+
+    data_dec = epochs_dec.get_data()
+    assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
+                    atol=1e-12)
+
+    evoked_dec = epochs_dec.average()
+    assert_allclose(evoked.data[:, epochs_dec._decim_slice],
+                    evoked_dec.data, rtol=1e-12)
+
+    n = evoked.data.shape[1]
+    n_dec = evoked_dec.data.shape[1]
+    n_dec_min = n // 4
+    assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
+    assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
+
+    # Test event access on non-preloaded data (#2345)
+
+    # due to reapplication of the proj matrix, this is our quality limit
+    # for some tests
+    tols = dict(atol=1e-3, rtol=1e-20)
+
+    raw, events, picks = _get_data()
+    events[::2, 1] = 1
+    events[1::2, 2] = 2
+    event_ids = dict(a=1, b=2)
+    for proj in (True, 'delayed', False):
+        epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj=proj, reject=reject,
+                        add_eeg_ref=True)
+        data1 = epochs.get_data()
+        data2 = epochs.apply_proj().get_data()
+        assert_allclose(data1, data2, **tols)
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=False)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_allclose(epochs['a'].get_data(),
+                        epochs_read['a'].get_data(), **tols)
+        assert_allclose(epochs['b'].get_data(),
+                        epochs_read['b'].get_data(), **tols)
+
+    # ensure we don't leak file descriptors
+    epochs_read = read_epochs(temp_fname, preload=False)
+    epochs_copy = epochs_read.copy()
+    del epochs_read
+    epochs_copy.get_data()
+    with warnings.catch_warnings(record=True) as w:
+        del epochs_copy
+    assert_equal(len(w), 0)
+
+    # test IO
+    for preload in (False, True):
+        epochs = epochs_orig.copy()
+        epochs.save(temp_fname)
+        epochs_no_bl.save(temp_fname_no_bl)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        epochs_no_bl_read = read_epochs(temp_fname_no_bl)
+        assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
+        epochs_no_bl_read.apply_baseline(baseline)
+        assert_true(epochs_no_bl_read.baseline == baseline)
+        assert_true(str(epochs_read).startswith('<Epochs'))
+
+        assert_array_equal(epochs_no_bl_read.times, epochs.times)
+        assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
+        assert_array_almost_equal(epochs.get_data(),
+                                  epochs_no_bl_read.get_data())
+        assert_array_equal(epochs_read.times, epochs.times)
+        assert_array_almost_equal(epochs_read.average().data, evoked.data)
+        assert_equal(epochs_read.proj, epochs.proj)
+        bmin, bmax = epochs.baseline
+        if bmin is None:
+            bmin = epochs.times[0]
+        if bmax is None:
+            bmax = epochs.times[-1]
+        baseline = (bmin, bmax)
+        assert_array_almost_equal(epochs_read.baseline, baseline)
+        assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
+        assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
+        assert_equal(epochs_read.event_id, epochs.event_id)
+
+        epochs.event_id.pop('1')
+        epochs.event_id.update({'a:a': 1})  # test allow for ':' in key
+        epochs.save(op.join(tempdir, 'foo-epo.fif'))
+        epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'),
+                                   preload=preload)
+        assert_equal(epochs_read2.event_id, epochs.event_id)
+
+        # add reject here so some of the epochs get dropped
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), reject=reject)
+        epochs.save(temp_fname)
+        # ensure bad events are not saved
+        epochs_read3 = read_epochs(temp_fname, preload=preload)
+        assert_array_equal(epochs_read3.events, epochs.events)
+        data = epochs.get_data()
+        assert_true(epochs_read3.events.shape[0] == data.shape[0])
+
+        # test copying loaded one (raw property)
+        epochs_read4 = epochs_read3.copy()
+        assert_array_almost_equal(epochs_read4.get_data(), data)
+        # test equalizing loaded one (drop_log property)
+        epochs_read4.equalize_event_counts(epochs.event_id)
+
+        epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
+        epochs.save(temp_fname)
+        epochs_read5 = read_epochs(temp_fname, preload=preload)
+        assert_array_equal(epochs_read5.selection, epochs.selection)
+        assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
+        assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
+
+        if preload:
+            # Test that one can drop channels on read file
+            epochs_read5.drop_channels(epochs_read5.ch_names[:1])
+
+        # test warnings on bad filenames
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+            epochs.save(epochs_badname)
+            read_epochs(epochs_badname, preload=preload)
+        assert_true(len(w) == 2)
+
+        # test loading epochs with missing events
+        epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax,
+                        picks=picks, on_missing='ignore')
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_array_equal(epochs.events, epochs_read.events)
+        assert_equal(set(epochs.event_id.keys()),
+                     set(text_type(x) for x in epochs_read.event_id.keys()))
+
+        # test saving split epoch files
+        epochs.save(temp_fname, split_size='7MB')
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_array_equal(epochs.events, epochs_read.events)
+        assert_array_equal(epochs.selection, epochs_read.selection)
+        assert_equal(epochs.drop_log, epochs_read.drop_log)
+
+        # Test that having a single time point works
+        epochs.load_data()
+        epochs.crop(0, 0, copy=False)
+        assert_equal(len(epochs.times), 1)
+        assert_equal(epochs.get_data().shape[-1], 1)
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_equal(len(epochs_read.times), 1)
+        assert_equal(epochs.get_data().shape[-1], 1)
+
+
+def test_epochs_proj():
+    """Test handling projection (apply proj in Raw or in Epochs)
+    """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+    this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
+                            eog=True, exclude=exclude)
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True)
+    assert_true(all(p['active'] is True for p in epochs.info['projs']))
+    evoked = epochs.average()
+    assert_true(all(p['active'] is True for p in evoked.info['projs']))
+    data = epochs.get_data()
+
+    raw_proj = io.Raw(raw_fname, proj=True)
+    epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
+                            picks=this_picks, baseline=(None, 0), proj=False)
+
+    data_no_proj = epochs_no_proj.get_data()
+    assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
+    evoked_no_proj = epochs_no_proj.average()
+    assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
+    assert_true(epochs_no_proj.proj is True)  # as projs are active from Raw
+
+    assert_array_almost_equal(data, data_no_proj, decimal=8)
+
+    # make sure we can exclude avg ref
+    this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                            eog=True, exclude=exclude)
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True, add_eeg_ref=True)
+    assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True, add_eeg_ref=False)
+    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
+
+    # make sure we don't add avg ref when a custom ref has been applied
+    raw.info['custom_ref_applied'] = True
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True)
+    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
+
+    # From GH#2200:
+    # This has no problem
+    proj = raw.info['projs']
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=False)
+    epochs.info['projs'] = []
+    data = epochs.copy().add_proj(proj).apply_proj().get_data()
+    # save and reload data
+    fname_epo = op.join(tempdir, 'temp-epo.fif')
+    epochs.save(fname_epo)  # Save without proj added
+    epochs_read = read_epochs(fname_epo)
+    epochs_read.add_proj(proj)
+    epochs_read.apply_proj()  # This used to bomb
+    data_2 = epochs_read.get_data()  # Let's check the result
+    assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
+
+
+def test_evoked_arithmetic():
+    """Test arithmetic of evoked data
+    """
+    raw, events, picks = _get_data()
+    epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0))
+    evoked1 = epochs1.average()
+    epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0))
+    evoked2 = epochs2.average()
+    epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    evoked = epochs.average()
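+    # adding Evokeds combines their data weighted by nave, so summing the
+    # two half-averages should reproduce the average over all eight events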
+    evoked_sum = evoked1 + evoked2
+    assert_array_equal(evoked.data, evoked_sum.data)
+    assert_array_equal(evoked.times, evoked_sum.times)
+    assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
+    evoked_diff = evoked1 - evoked1
+    assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
+
+
+def test_evoked_io_from_epochs():
+    """Test IO of evoked data made from epochs
+    """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
+    # offset our tmin so we don't get exactly a zero value when decimating
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
+                        picks=picks, baseline=(None, 0), decim=5)
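+    # the single expected warning is presumably the aliasing warning
+    # (decim=5 lowers the sampling rate relative to the low-pass frequency)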
+    assert_true(len(w) == 1)
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
+                    atol=1 / evoked.info['sfreq'])
+
+    # now let's do one with no negative times (tmin > 0)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
+                        picks=picks, baseline=(0.1, 0.2), decim=5)
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'evoked-ave.fif'))
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
+
+    # should be equivalent to a cropped original
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
+                        picks=picks, baseline=(0.1, 0.2), decim=5)
+    evoked = epochs.average()
+    evoked.crop(0.099, None)
+    assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
+    assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
+
+
+def test_evoked_standard_error():
+    """Test calculation and read/write of standard error
+    """
+    raw, events, picks = _get_data()
+    tempdir = _TempDir()
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    evoked = [epochs.average(), epochs.standard_error()]
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
+    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
+    evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
+               read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
+                            kind='standard_error')]
+    for evoked_new in [evoked2, evoked3]:
+        assert_true(evoked_new[0]._aspect_kind ==
+                    FIFF.FIFFV_ASPECT_AVERAGE)
+        assert_true(evoked_new[0].kind == 'average')
+        assert_true(evoked_new[1]._aspect_kind ==
+                    FIFF.FIFFV_ASPECT_STD_ERR)
+        assert_true(evoked_new[1].kind == 'standard_error')
+        for ave, ave2 in zip(evoked, evoked_new):
+            assert_array_almost_equal(ave.data, ave2.data)
+            assert_array_almost_equal(ave.times, ave2.times)
+            assert_equal(ave.nave, ave2.nave)
+            assert_equal(ave._aspect_kind, ave2._aspect_kind)
+            assert_equal(ave.kind, ave2.kind)
+            assert_equal(ave.last, ave2.last)
+            assert_equal(ave.first, ave2.first)
+
+
+def test_reject_epochs():
+    """Test of epochs rejection
+    """
+    raw, events, picks = _get_data()
+    events1 = events[events[:, 2] == event_id]
+    epochs = Epochs(raw, events1,
+                    event_id, tmin, tmax, baseline=(None, 0),
+                    reject=reject, flat=flat)
+    assert_raises(RuntimeError, len, epochs)
+    n_events = len(epochs.events)
+    data = epochs.get_data()
+    n_clean_epochs = len(data)
+    # Should match
+    # mne_process_raw --raw test_raw.fif --projoff \
+    #   --saveavetag -ave --ave test.ave --filteroff
+    assert_true(n_events > n_clean_epochs)
+    assert_true(n_clean_epochs == 3)
+    assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
+                                    ['MEG 2443'], ['MEG 2443']])
+
+    # Ensure epochs are not dropped based on a bad channel
+    raw_2 = raw.copy()
+    raw_2.info['bads'] = ['MEG 2443']
+    reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
+    epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
+                    reject=reject_crazy, flat=flat)
+    epochs.drop_bad_epochs()
+
+    assert_true(all('MEG 2442' in e for e in epochs.drop_log))
+    assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
+
+    # Invalid reject_tmin/reject_tmax/detrend
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=1., reject_tmax=0)
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=tmin - 1, reject_tmax=1.)
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=0., reject_tmax=tmax + 1)
+
+    epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, flat=flat,
+                    reject_tmin=0., reject_tmax=.1)
+    data = epochs.get_data()
+    n_clean_epochs = len(data)
+    assert_true(n_clean_epochs == 7)
+    assert_true(len(epochs) == 7)
+    assert_true(epochs.times[epochs._reject_time][0] >= 0.)
+    assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
+
+    # Invalid data for _is_good_epoch function
+    epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
+    assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
+    assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
+                 (False, ['TOO_SHORT']))
+    data = epochs[0].get_data()[0]
+    assert_equal(epochs._is_good_epoch(data), (True, None))
+
+
+def test_preload_epochs():
+    """Test preload of epochs
+    """
+    raw, events, picks = _get_data()
+    epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
+                            picks=picks, baseline=(None, 0), preload=True,
+                            reject=reject, flat=flat)
+    data_preload = epochs_preload.get_data()
+
+    epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    data = epochs.get_data()
+    assert_array_equal(data_preload, data)
+    assert_array_almost_equal(epochs_preload.average().data,
+                              epochs.average().data, 18)
+
+
+def test_indexing_slicing():
+    """Test of indexing and slicing operations
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+
+    data_normal = epochs.get_data()
+
+    n_good_events = data_normal.shape[0]
+
+    # indices for slicing
+    start_index = 1
+    end_index = n_good_events - 1
+
+    assert((end_index - start_index) > 0)
+
+    for preload in [True, False]:
+        epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
+                         picks=picks, baseline=(None, 0), preload=preload,
+                         reject=reject, flat=flat)
+
+        if not preload:
+            epochs2.drop_bad_epochs()
+
+        # using slicing
+        epochs2_sliced = epochs2[start_index:end_index]
+
+        data_epochs2_sliced = epochs2_sliced.get_data()
+        assert_array_equal(data_epochs2_sliced,
+                           data_normal[start_index:end_index])
+
+        # using indexing
+        pos = 0
+        for idx in range(start_index, end_index):
+            data = epochs2_sliced[pos].get_data()
+            assert_array_equal(data[0], data_normal[idx])
+            pos += 1
+
+        # using indexing with an int
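+        # (idx is left over from the loop above, == end_index - 1, which
+        # equals data_epochs2_sliced.shape[0] since start_index == 1)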
+        data = epochs2[data_epochs2_sliced.shape[0]].get_data()
+        assert_array_equal(data, data_normal[[idx]])
+
+        # using indexing with an array
+        idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+
+        # using indexing with a list of indices
+        idx = [0]
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+        idx = [0, 1]
+        data = epochs2[idx].get_data()
+        assert_array_equal(data, data_normal[idx])
+
+
+def test_comparison_with_c():
+    """Test of average obtained vs C code
+    """
+    raw, events = _get_data()[:2]
+    c_evoked = read_evokeds(evoked_nf_name, condition=0)
+    epochs = Epochs(raw, events, event_id, tmin, tmax,
+                    baseline=None, preload=True,
+                    reject=None, flat=None)
+    evoked = epochs.average()
+    sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
+    evoked_data = evoked.data
+    c_evoked_data = c_evoked.data[sel]
+
+    assert_true(evoked.nave == c_evoked.nave)
+    assert_array_almost_equal(evoked_data, c_evoked_data, 10)
+    assert_array_almost_equal(evoked.times, c_evoked.times, 12)
+
+
+def test_crop():
+    """Test of crop of epochs
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    assert_raises(RuntimeError, epochs.crop, None, 0.2)  # not preloaded
+    data_normal = epochs.get_data()
+
+    epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
+                     picks=picks, baseline=(None, 0), preload=True,
+                     reject=reject, flat=flat)
+    with warnings.catch_warnings(record=True) as w:
+        epochs2.crop(-20, 200)
+    assert_true(len(w) == 2)
+
+    # indices for slicing
+    tmin_window = tmin + 0.1
+    tmax_window = tmax - 0.1
+    tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
+    assert_true(tmin_window > tmin)
+    assert_true(tmax_window < tmax)
+    epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
+    data3 = epochs3.get_data()
+    epochs2.crop(tmin_window, tmax_window)
+    data2 = epochs2.get_data()
+    assert_array_equal(data2, data_normal[:, :, tmask])
+    assert_array_equal(data3, data_normal[:, :, tmask])
+
+    # test time info is correct
+    epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
+                         np.ones((1, 3), int), tmin=-0.2)
+    epochs.crop(-.200, .700)
+    last_time = epochs.times[-1]
+    with warnings.catch_warnings(record=True):  # not LP filtered
+        epochs.decimate(10)
+    assert_allclose(last_time, epochs.times[-1])
+
+
+def test_resample():
+    """Test of resample of epochs
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    assert_raises(RuntimeError, epochs.resample, 100)
+
+    epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                      baseline=(None, 0), preload=True,
+                      reject=reject, flat=flat)
+    epochs = epochs_o.copy()
+
+    data_normal = cp.deepcopy(epochs.get_data())
+    times_normal = cp.deepcopy(epochs.times)
+    sfreq_normal = epochs.info['sfreq']
+    # upsample by 2
+    epochs = epochs_o.copy()
+    epochs.resample(sfreq_normal * 2, npad=0)
+    data_up = cp.deepcopy(epochs.get_data())
+    times_up = cp.deepcopy(epochs.times)
+    sfreq_up = epochs.info['sfreq']
+    # downsample by 2, which should match
+    epochs.resample(sfreq_normal, npad=0)
+    data_new = cp.deepcopy(epochs.get_data())
+    times_new = cp.deepcopy(epochs.times)
+    sfreq_new = epochs.info['sfreq']
+    assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
+    assert_true(sfreq_up == 2 * sfreq_normal)
+    assert_true(sfreq_new == sfreq_normal)
+    assert_true(len(times_up) == 2 * len(times_normal))
+    assert_array_almost_equal(times_new, times_normal, 10)
+    assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
+    assert_array_almost_equal(data_new, data_normal, 5)
+
+    # use parallel
+    epochs = epochs_o.copy()
+    epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
+    assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
+
+    # test copy flag
+    epochs = epochs_o.copy()
+    epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
+    assert_true(epochs_resampled is not epochs)
+    epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
+    assert_true(epochs_resampled is epochs)
+
+
+def test_detrend():
+    """Test detrending of epochs
+    """
+    raw, events, picks = _get_data()
+
+    # test first-order
+    epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                      baseline=None, detrend=1)
+    epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                      baseline=None, detrend=None)
+    data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
+                            exclude='bads')
+    evoked_1 = epochs_1.average()
+    evoked_2 = epochs_2.average()
+    evoked_2.detrend(1)
+    # Due to roundoff these won't be exactly equal, but they should be close
+    assert_true(np.allclose(evoked_1.data, evoked_2.data,
+                            rtol=1e-8, atol=1e-20))
+
+    # test zeroth-order case
+    for preload in [True, False]:
+        epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                          baseline=(None, None), preload=preload)
+        epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                          baseline=None, preload=preload, detrend=0)
+        a = epochs_1.get_data()
+        b = epochs_2.get_data()
+        # All data channels should be almost equal
+        assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
+                                rtol=1e-16, atol=1e-20))
+        # There are non-M/EEG channels that should not be equal:
+        assert_true(not np.allclose(a, b))
+
+    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
+                  detrend=2)
+
+
+def test_bootstrap():
+    """Test of bootstrapping of epochs
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
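+    # bootstrap() resamples epochs with replacement, so the epoch count and
+    # data shape are preserved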
+    epochs2 = bootstrap(epochs, random_state=0)
+    assert_true(len(epochs2.events) == len(epochs.events))
+    assert_true(epochs._data.shape == epochs2._data.shape)
+
+
+def test_epochs_copy():
+    """Test copy epochs
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True,
+                    reject=reject, flat=flat)
+    copied = epochs.copy()
+    assert_array_equal(epochs._data, copied._data)
+
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=False,
+                    reject=reject, flat=flat)
+    copied = epochs.copy()
+    data = epochs.get_data()
+    copied_data = copied.get_data()
+    assert_array_equal(data, copied_data)
+
+
+def test_iter_evoked():
+    """Test the iterator for epochs -> evoked
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+
+    for ii, ev in enumerate(epochs.iter_evoked()):
+        x = ev.data
+        y = epochs.get_data()[ii, :, :]
+        assert_array_equal(x, y)
+
+
+def test_subtract_evoked():
+    """Test subtraction of Evoked from Epochs
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+
+    # make sure subtraction fails if data channels are missing
+    assert_raises(ValueError, epochs.subtract_evoked,
+                  epochs.average(picks[:5]))
+
+    # do the subtraction using the default argument
+    epochs.subtract_evoked()
+
+    # apply SSP now
+    epochs.apply_proj()
+
+    # use preloading and SSP from the start
+    epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), preload=True, proj=True)
+
+    evoked = epochs2.average()
+    epochs2.subtract_evoked(evoked)
+
+    # this gives the same result
+    assert_allclose(epochs.get_data(), epochs2.get_data())
+
+    # if we compute the evoked response after subtracting it we get zero
+    zero_evoked = epochs.average()
+    data = zero_evoked.data
+    assert_allclose(data, np.zeros_like(data), atol=1e-15)
+
+
+def test_epoch_eq():
+    """Test epoch count equalization and condition combining
+    """
+    raw, events, picks = _get_data()
+    # equalizing epochs objects
+    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
+    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
+    epochs_1.drop_bad_epochs()  # make sure drops are logged
+    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
+                len(epochs_1.events))
+    drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
+    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
+                 epochs_1.drop_log]
+    assert_true(drop_log1 == drop_log2)
+    assert_true(len([l for l in epochs_1.drop_log if not l]) ==
+                len(epochs_1.events))
+    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
+    equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
+    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
+    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
+    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
+    equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
+    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
+    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
+
+    # equalizing conditions
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
+                    tmin, tmax, picks=picks, reject=reject)
+    epochs.drop_bad_epochs()  # make sure drops are logged
+    assert_true(len([l for l in epochs.drop_log if not l]) ==
+                len(epochs.events))
+    drop_log1 = deepcopy(epochs.drop_log)
+    old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    epochs.equalize_event_counts(['a', 'b'], copy=False)
+    # undo the eq logging
+    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
+                 epochs.drop_log]
+    assert_true(drop_log1 == drop_log2)
+
+    assert_true(len([l for l in epochs.drop_log if not l]) ==
+                len(epochs.events))
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(new_shapes[0] == new_shapes[1])
+    assert_true(new_shapes[2] == old_shapes[2])
+    assert_true(new_shapes[3] == old_shapes[3])
+    # now with two conditions collapsed
+    old_shapes = new_shapes
+    epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
+    assert_true(new_shapes[3] == old_shapes[3])
+    assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
+
+    # now let's combine conditions
+    old_shapes = new_shapes
+    epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
+    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
+    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
+    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
+    assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
+                  {'ab': 1})
+
+    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
+    caught = 0
+    for key in ['a', 'b']:
+        try:
+            epochs[key]
+        except KeyError:
+            caught += 1
+    assert_equal(caught, 2)
+    assert_true(not np.any(epochs.events[:, 2] == 1))
+    assert_true(not np.any(epochs.events[:, 2] == 2))
+    epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
+    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
+                                     epochs.events[:, 2] == 34)))
+    assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
+    assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
+
+    # equalizing with hierarchical tags
+    epochs = Epochs(raw, events, {'a/x': 1, 'b/x': 2, 'a/y': 3, 'b/y': 4},
+                    tmin, tmax, picks=picks, reject=reject)
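+    # 'a' expands to ['a/x', 'a/y'] via hierarchical tag matching, so each
+    # pair below spells the same two conditions in different ways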
+    cond1, cond2 = ['a', ['b/x', 'b/y']], [['a/x', 'a/y'], 'b']
+    es = [epochs.equalize_event_counts(c)[0] for c in (cond1, cond2)]
+    assert_array_equal(es[0].events[:, 0], es[1].events[:, 0])
+    cond1, cond2 = ['a', ['b', 'b/y']], [['a/x', 'a/y'], 'x']
+    for c in (cond1, cond2):  # error b/c tag and id mix/non-orthogonal tags
+        assert_raises(ValueError, epochs.equalize_event_counts, c)
+
+
+def test_access_by_name():
+    """Test accessing epochs by event name and on_missing for rare events
+    """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
+
+    # Test various invalid inputs
+    assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
+                  tmax, picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
+                  tmin, tmax, picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
+                  picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
+                  picks=picks)
+
+    # Test accessing non-existent events (assumes 12345678 does not exist)
+    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
+    assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
+                  tmin, tmax)
+    # Test on_missing
+    assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
+                  on_missing='foo')
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
+        nw = len(w)
+        assert_true(1 <= nw <= 2)
+        Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
+        assert_equal(len(w), nw)
+
+    # Test constructing epochs with a list of ints as events
+    epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
+    for k, v in epochs.event_id.items():
+        assert_equal(int(k), v)
+
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
+    assert_raises(KeyError, epochs.__getitem__, 'bar')
+
+    data = epochs['a'].get_data()
+    event_a = events[events[:, 2] == 1]
+    assert_true(len(data) == len(event_a))
+
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
+                    preload=True)
+    assert_raises(KeyError, epochs.__getitem__, 'bar')
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    epochs.save(temp_fname)
+    epochs2 = read_epochs(temp_fname)
+
+    for ep in [epochs, epochs2]:
+        data = ep['a'].get_data()
+        event_a = events[events[:, 2] == 1]
+        assert_true(len(data) == len(event_a))
+
+    assert_array_equal(epochs2['a'].events, epochs['a'].events)
+
+    epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
+                     tmin, tmax, picks=picks, preload=True)
+    assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
+                 [1, 2])
+    epochs4 = epochs['a']
+    epochs5 = epochs3['a']
+    assert_array_equal(epochs4.events, epochs5.events)
+    # 20 is our tolerance because epochs are written out as floats
+    assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
+    epochs6 = epochs3[['a', 'b']]
+    assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
+                                  epochs6.events[:, 2] == 2)))
+    assert_array_equal(epochs.events, epochs6.events)
+    assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
+
+    # Make sure we preserve names
+    assert_equal(epochs['a'].name, 'a')
+    assert_equal(epochs[['a', 'b']]['a'].name, 'a')
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test epochs Pandas exporter"""
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
+    assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
+    assert_raises(ValueError, epochs.to_data_frame, index='qux')
+    assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
+
+    df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
+                              picks=list(range(epochs.info['nchan'])))
+
+    # Default index and picks
+    df2 = epochs.to_data_frame()
+    assert_equal(df.index.names, df2.index.names)
+    assert_array_equal(df.columns.values, epochs.ch_names)
+
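+    # the exported frame uses the default scalings: gradiometer channels
+    # are multiplied by 1e13, magnetometers by 1e15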
+    data = np.hstack(epochs.get_data())
+    assert_true((df.columns == epochs.ch_names).all())
+    assert_array_equal(df.values[:, 0], data[0] * 1e13)
+    assert_array_equal(df.values[:, 2], data[2] * 1e15)
+    for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
+        df = epochs.to_data_frame(index=ind)
+        assert_true(df.index.names == (ind if isinstance(ind, list)
+                                       else [ind]))
+        # test that non-indexed data are present as categorical variables
+        assert_array_equal(sorted(df.reset_index().columns[:3]),
+                           sorted(['time', 'condition', 'epoch']))
+
+
+def test_epochs_proj_mixin():
+    """Test SSP proj methods from ProjMixin class
+    """
+    raw, events, picks = _get_data()
+    for proj in [True, False]:
+        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj=proj)
+
+        assert_true(all(p['active'] == proj for p in epochs.info['projs']))
+
+        # test adding / deleting proj
+        if proj:
+            epochs.get_data()
+            assert_true(all(p['active'] == proj for p in epochs.info['projs']))
+            assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
+                          {'remove_existing': True})
+            assert_raises(ValueError, epochs.add_proj, 'spam')
+            assert_raises(ValueError, epochs.del_proj, 0)
+        else:
+            projs = deepcopy(epochs.info['projs'])
+            n_proj = len(epochs.info['projs'])
+            epochs.del_proj(0)
+            assert_true(len(epochs.info['projs']) == n_proj - 1)
+            epochs.add_proj(projs, remove_existing=False)
+            assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
+            epochs.add_proj(projs, remove_existing=True)
+            assert_true(len(epochs.info['projs']) == n_proj)
+
+    # catch no-gos.
+    # wrong proj argument
+    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
+                  picks=picks, baseline=(None, 0), proj='crazy')
+
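+    # proj='delayed' keeps the projectors unapplied until apply_proj() (or
+    # averaging); once applied, the data should match the proj=True stream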
+    for preload in [True, False]:
+        epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj='delayed', preload=preload,
+                        add_eeg_ref=True, reject=reject)
+        epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                         baseline=(None, 0), proj=True, preload=preload,
+                         add_eeg_ref=True, reject=reject)
+
+        assert_allclose(epochs.copy().apply_proj().get_data()[0],
+                        epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
+
+        # make sure data output is constant across repeated calls
+        # e.g. drop bads
+        assert_array_equal(epochs.get_data(), epochs.get_data())
+        assert_array_equal(epochs2.get_data(), epochs2.get_data())
+
+    # test epochs.next calls
+    data = epochs.get_data().copy()
+    data2 = np.array([e for e in epochs])
+    assert_array_equal(data, data2)
+
+    # cross application from processing stream 1 to 2
+    epochs.apply_proj()
+    assert_array_equal(epochs._projector, epochs2._projector)
+    assert_allclose(epochs._data, epochs2.get_data())
+
+    # test mixin against manual application
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
+                    baseline=None, proj=False, add_eeg_ref=True)
+    data = epochs.get_data().copy()
+    epochs.apply_proj()
+    assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
+
+
+def test_delayed_epochs():
+    """Test delayed projection
+    """
+    raw, events, picks = _get_data()
+    events = events[:10]
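+    # use a sparse channel subset (every 22nd data channel plus ECG/EOG),
+    # presumably to keep this nested-loop test fast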
+    picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
+                            pick_types(raw.info, meg=False, eeg=False,
+                                       ecg=True, eog=True)])
+    picks = np.sort(picks)
+    raw.info['lowpass'] = 40.  # fake the LP info so no warnings
+    for preload in (True, False):
+        for proj in (True, False, 'delayed'):
+            for decim in (1, 3):
+                for ii in range(2):
+                    epochs = Epochs(raw, events, event_id, tmin, tmax,
+                                    picks=picks, proj=proj, reject=reject,
+                                    preload=preload, decim=decim)
+                    if ii == 1:
+                        epochs.load_data()
+                    picks_data = pick_types(epochs.info, meg=True, eeg=True)
+                    evoked = epochs.average(picks=picks_data)
+                    if proj is True:
+                        evoked.apply_proj()
+                    epochs_data = epochs.get_data().mean(axis=0)[picks_data]
+                    assert_array_equal(evoked.ch_names,
+                                       np.array(epochs.ch_names)[picks_data])
+                    assert_allclose(evoked.times, epochs.times)
+                    assert_allclose(evoked.data, epochs_data,
+                                    rtol=1e-5, atol=1e-15)
+
+
+def test_drop_epochs():
+    """Test dropping of epochs.
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    events1 = events[events[:, 2] == event_id]
+
+    # Bound checks
+    assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
+    assert_raises(IndexError, epochs.drop_epochs, [-1])
+    assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
+
+    # Test selection attribute
+    assert_array_equal(epochs.selection,
+                       np.where(events[:, 2] == event_id)[0])
+    assert_equal(len(epochs.drop_log), len(events))
+    assert_true(all(epochs.drop_log[k] == ['IGNORED']
+                for k in set(range(len(events))) - set(epochs.selection)))
+
+    selection = epochs.selection.copy()
+    n_events = len(epochs.events)
+    epochs.drop_epochs([2, 4], reason='d')
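+    # drop_log_stats() reports the percentage of epochs dropped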
+    assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
+    assert_equal(len(epochs.drop_log), len(events))
+    assert_equal([epochs.drop_log[k]
+                  for k in selection[[2, 4]]], [['d'], ['d']])
+    assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
+    assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
+    assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
+
+
+def test_drop_epochs_mult():
+    """Test that subselecting epochs or making less epochs is equivalent"""
+    raw, events, picks = _get_data()
+    for preload in [True, False]:
+        epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
+                         tmin, tmax, picks=picks, reject=reject,
+                         preload=preload)['a']
+        epochs2 = Epochs(raw, events, {'a': 1},
+                         tmin, tmax, picks=picks, reject=reject,
+                         preload=preload)
+
+        if preload:
+            # With preload, the bad-channel reason cannot be known for
+            # epochs that were already ignored
+            assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
+            for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
+                if d1 == ['IGNORED']:
+                    assert_true(d2 == ['IGNORED'])
+                if d1 != ['IGNORED'] and d1 != []:
+                    assert_true((d2 == d1) or (d2 == ['IGNORED']))
+                if d1 == []:
+                    assert_true(d2 == [])
+            assert_array_equal(epochs1.events, epochs2.events)
+            assert_array_equal(epochs1.selection, epochs2.selection)
+        else:
+            # In the non-preload case it should be exactly the same
+            assert_equal(epochs1.drop_log, epochs2.drop_log)
+            assert_array_equal(epochs1.events, epochs2.events)
+            assert_array_equal(epochs1.selection, epochs2.selection)
+
+
+def test_contains():
+    """Test membership API"""
+    raw, events = _get_data()[:2]
+
+    tests = [(('mag', False), ('grad', 'eeg')),
+             (('grad', False), ('mag', 'eeg')),
+             ((False, True), ('grad', 'mag'))]
+
+    for (meg, eeg), others in tests:
+        picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
+        epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
+                        picks=picks_contains, reject=None,
+                        preload=False)
+        test = 'eeg' if eeg is True else meg
+        assert_true(test in epochs)
+        assert_true(not any(o in epochs for o in others))
+
+    assert_raises(ValueError, epochs.__contains__, 'foo')
+    assert_raises(ValueError, epochs.__contains__, 1)
+
+
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    raw, events = _get_data()[:2]
+    # here without picks to get additional coverage
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
+                    baseline=(None, 0), preload=True)
+    drop_ch = epochs.ch_names[:3]
+    ch_names = epochs.ch_names[3:]
+
+    ch_names_orig = epochs.ch_names
+    dummy = epochs.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, epochs.ch_names)
+    assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
+
+    epochs.drop_channels(drop_ch)
+    assert_equal(ch_names, epochs.ch_names)
+    assert_equal(len(ch_names), epochs.get_data().shape[1])
+
+
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), preload=True)
+    ch_names = epochs.ch_names[:3]
+    epochs.preload = False
+    assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
+    epochs.preload = True
+    ch_names_orig = epochs.ch_names
+    dummy = epochs.pick_channels(ch_names, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, epochs.ch_names)
+    assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
+
+    epochs.pick_channels(ch_names)
+    assert_equal(ch_names, epochs.ch_names)
+    assert_equal(len(ch_names), epochs.get_data().shape[1])
+
+    # Invalid picks
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=[])
+
+
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    raw, events, picks = _get_data()
+    epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                     baseline=(None, 0), proj=False, preload=True)
+    epochs2 = epochs1.copy()
+    ch_names = epochs1.ch_names[2:]
+    epochs1.drop_channels(epochs1.ch_names[:1])
+    epochs2.drop_channels(epochs2.ch_names[1:2])
+    my_comparison = [epochs1, epochs2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
+
+
+def test_illegal_event_id():
+    """Test handling of invalid events ids"""
+    raw, events, picks = _get_data()
+    event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
+
+    assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
+                  tmax, picks=picks, baseline=(None, 0), proj=False)
+
+
+def test_add_channels_epochs():
+    """Test adding channels"""
+    raw, events, picks = _get_data()
+
+    def make_epochs(picks, proj):
+        return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                      reject=None, preload=True, proj=proj, picks=picks)
+
+    picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
+    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+
+    for proj in (False, True):
+        epochs = make_epochs(picks=picks, proj=proj)
+        epochs_meg = make_epochs(picks=picks_meg, proj=proj)
+        epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
+        epochs.info._check_consistency()
+        epochs_meg.info._check_consistency()
+        epochs_eeg.info._check_consistency()
+
+        epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
+
+        assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
+        assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
+
+        data1 = epochs.get_data()
+        data2 = epochs2.get_data()
+        data3 = np.concatenate([e.get_data() for e in
+                                [epochs_meg, epochs_eeg]], axis=1)
+        assert_array_equal(data1.shape, data2.shape)
+        assert_allclose(data1, data3, atol=1e-25)
+        assert_allclose(data1, data2, atol=1e-25)
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['meas_date'] += 10
+    add_channels_epochs([epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['filename'] = epochs_meg2.info['filename'].upper()
+    epochs2 = add_channels_epochs([epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.events[3, 2] -= 1
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg, epochs_eeg[:2]])
+
+    epochs_meg.info['chs'].pop(0)
+    epochs_meg.info['ch_names'].pop(0)
+    epochs_meg.info['nchan'] -= 1
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['sfreq'] = None
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['sfreq'] += 10
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
+    epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['dev_head_t']['to'] += 1
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.info['experimenter'] = 'foo'
+    assert_raises(RuntimeError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.preload = False
+    assert_raises(ValueError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.tmin += 0.4
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.tmin += 0.5
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.baseline = None
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+    epochs_meg2 = epochs_meg.copy()
+    epochs_meg2.event_id['b'] = 2
+    assert_raises(NotImplementedError, add_channels_epochs,
+                  [epochs_meg2, epochs_eeg])
+
+
+def test_array_epochs():
+    """Test creating epochs from array
+    """
+    import matplotlib.pyplot as plt
+    tempdir = _TempDir()
+
+    # creating
+    rng = np.random.RandomState(42)
+    data = rng.random_sample((10, 20, 300))
+    sfreq = 1e3
+    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
+    types = ['eeg'] * 20
+    info = create_info(ch_names, sfreq, types)
+    events = np.c_[np.arange(1, 600, 60),
+                   np.zeros(10, int),
+                   [1, 2] * 5]
+    event_id = {'a': 1, 'b': 2}
+    epochs = EpochsArray(data, info, events, tmin, event_id)
+    assert_true(str(epochs).startswith('<EpochsArray'))
+    # From GH#1963
+    assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
+                  event_id)
+    assert_raises(ValueError, EpochsArray, data, info, events, tmin,
+                  dict(a=1))
+
+    # saving
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    epochs.save(temp_fname)
+    epochs2 = read_epochs(temp_fname)
+    data2 = epochs2.get_data()
+    assert_allclose(data, data2)
+    assert_allclose(epochs.times, epochs2.times)
+    assert_equal(epochs.event_id, epochs2.event_id)
+    assert_array_equal(epochs.events, epochs2.events)
+
+    # plotting
+    epochs[0].plot()
+    plt.close('all')
+
+    # indexing
+    assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
+    assert_equal(len(epochs[:2]), 2)
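+    # inject artifacts: spikes in epochs 0 and 2, a flat epoch 1, and a
+    # zeroed sample in epoch 3; only those falling inside the reject window
+    # (0.1-0.2 s) should cause drops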
+    data[0, 5, 150] = 3000
+    data[1, :, :] = 0
+    data[2, 5, 210] = 3000
+    data[3, 5, 260] = 0
+    epochs = EpochsArray(data, info, events=events, event_id=event_id,
+                         tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
+                         reject_tmin=0.1, reject_tmax=0.2)
+    assert_equal(len(epochs), len(events) - 2)
+    assert_equal(epochs.drop_log[0], ['EEG 006'])
+    assert_equal(len(epochs.drop_log), 10)
+    assert_equal(len(epochs.events), len(epochs.selection))
+
+    # baseline
+    data = np.ones((10, 20, 300))
+    epochs = EpochsArray(data, info, events=events, event_id=event_id,
+                         tmin=-.2, baseline=(None, 0))
+    ep_data = epochs.get_data()
+    assert_array_equal(np.zeros_like(ep_data), ep_data)
+
+    # one time point
+    epochs = EpochsArray(data[:, :, :1], info, events=events,
+                         event_id=event_id, tmin=0., baseline=None)
+    assert_allclose(epochs.times, [0.])
+    assert_allclose(epochs.get_data(), data[:, :, :1])
+    epochs.save(temp_fname)
+    epochs_read = read_epochs(temp_fname)
+    assert_allclose(epochs_read.times, [0.])
+    assert_allclose(epochs_read.get_data(), data[:, :, :1])
+
+    # event as integer (#2435)
+    mask = (events[:, 2] == 1)
+    data_1 = data[mask]
+    events_1 = events[mask]
+    epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
+                         tmin=-0.2, baseline=(None, 0))
+
+
+def test_concatenate_epochs():
+    """Test concatenate epochs"""
+    raw, events, picks = _get_data()
+    epochs = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks)
+    epochs2 = epochs.copy()
+    epochs_list = [epochs, epochs2]
+    epochs_conc = concatenate_epochs(epochs_list)
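+    # concatenation shifts event sample numbers so they remain strictly
+    # increasing across the concatenated epochs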
+    assert_array_equal(
+        epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
+
+    expected_shape = list(epochs.get_data().shape)
+    expected_shape[0] *= 2
+    expected_shape = tuple(expected_shape)
+
+    assert_equal(epochs_conc.get_data().shape, expected_shape)
+    assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
+
+    epochs2 = epochs.copy()
+    epochs2._data = epochs2.get_data()
+    epochs2.preload = True
+    assert_raises(
+        ValueError, concatenate_epochs,
+        [epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
+
+    epochs2.times = np.delete(epochs2.times, 1)
+    assert_raises(
+        ValueError,
+        concatenate_epochs, [epochs, epochs2])
+
+    assert_equal(epochs_conc._raw, None)
+
+    # check if baseline is same for all epochs
+    epochs2.baseline = (-0.1, None)
+    assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
+
+
+def test_add_channels():
+    """Test epoch splitting / re-appending channel types
+    """
+    raw, events, picks = _get_data()
+    epoch_nopre = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks)
+    epoch = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks, preload=True)
+    epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
+    epoch_meg = epoch.pick_types(meg=True, copy=True)
+    epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
+    epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
+    epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
+    assert_true(all(ch in epoch_new.ch_names
+                    for ch in epoch_stim.ch_names + epoch_meg.ch_names))
+    epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
+
+    assert_true(all(ch in epoch_new.ch_names
+                    for ch in epoch_eeg_meg.ch_names))
+    assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
+    assert_true(all(ch not in epoch_new.ch_names
+                    for ch in epoch_stim.ch_names))
+
+    # Now test errors
+    epoch_badsf = epoch_eeg.copy()
+    epoch_badsf.info['sfreq'] = 3.1415927
+    epoch_eeg = epoch_eeg.crop(-.1, .1)
+
+    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
+    assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
+    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
+    assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
+    assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_event.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_event.py
new file mode 100644
index 0000000..2c5dd99
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_event.py
@@ -0,0 +1,339 @@
+import os.path as op
+import os
+
+from nose.tools import assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+import warnings
+
+from mne import (read_events, write_events, make_fixed_length_events,
+                 find_events, pick_events, find_stim_steps, io, pick_channels)
+from mne.utils import _TempDir, run_tests_if_main
+from mne.event import define_target_events, merge_events
+
+warnings.simplefilter('always')
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+fname = op.join(base_dir, 'test-eve.fif')
+fname_gz = op.join(base_dir, 'test-eve.fif.gz')
+fname_1 = op.join(base_dir, 'test-1-eve.fif')
+fname_txt = op.join(base_dir, 'test-eve.eve')
+fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
+
+# using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve:
+fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
+fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+
+def test_add_events():
+    """Test adding events to a Raw file"""
+    # need preload
+    raw = io.Raw(raw_fname, preload=False)
+    events = np.array([[raw.first_samp, 0, 1]])
+    assert_raises(RuntimeError, raw.add_events, events, 'STI 014')
+    raw = io.Raw(raw_fname, preload=True)
+    orig_events = find_events(raw, 'STI 014')
+    # add some events
+    events = np.array([raw.first_samp, 0, 1])
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad shape
+    events[0] = raw.first_samp + raw.n_times + 1
+    events = events[np.newaxis, :]
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad time
+    events[0, 0] = raw.first_samp - 1
+    assert_raises(ValueError, raw.add_events, events, 'STI 014')  # bad time
+    events[0, 0] = raw.first_samp + 1  # can't actually be first_samp
+    assert_raises(ValueError, raw.add_events, events, 'STI FOO')
+    raw.add_events(events, 'STI 014')
+    new_events = find_events(raw, 'STI 014')
+    assert_array_equal(new_events, np.concatenate((events, orig_events)))
+
+
+def test_merge_events():
+    """Test event merging
+    """
+    events = read_events(fname)  # Use as the gold standard
+    merges = [1, 2, 3, 4]
+    events_out = merge_events(events, merges, 1234)
+    events_out2 = events.copy()
+    for m in merges:
+        assert_true(not np.any(events_out[:, 2] == m))
+        events_out2[events[:, 2] == m, 2] = 1234
+    assert_array_equal(events_out, events_out2)
+    # test non-replacement functionality, should be sorted union of orig & new
+    events_out2 = merge_events(events, merges, 1234, False)
+    events_out = np.concatenate((events_out, events))
+    events_out = events_out[np.argsort(events_out[:, 0])]
+    assert_array_equal(events_out, events_out2)
+
+
+def test_io_events():
+    """Test IO for events
+    """
+    tempdir = _TempDir()
+    # Test binary fif IO
+    events = read_events(fname)  # Use as the gold standard
+    write_events(op.join(tempdir, 'events-eve.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
+    assert_array_almost_equal(events, events2)
+
+    # Test binary fif.gz IO
+    events2 = read_events(fname_gz)  # Use as the gold standard
+    assert_array_almost_equal(events, events2)
+    write_events(op.join(tempdir, 'events-eve.fif.gz'), events2)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif.gz'))
+    assert_array_almost_equal(events, events2)
+
+    # Test new format text file IO
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+    events2 = read_events(fname_txt_mpr)
+    assert_array_almost_equal(events, events2)
+
+    # Test old format text file IO
+    events2 = read_events(fname_old_txt)
+    assert_array_almost_equal(events, events2)
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+
+    # Test event selection
+    a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
+    b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
+    c = read_events(op.join(tempdir, 'events-eve.fif'),
+                    exclude=[2, 3, 4, 5, 32])
+    d = read_events(op.join(tempdir, 'events-eve.fif'), include=1,
+                    exclude=[2, 3])
+    assert_array_equal(a, b)
+    assert_array_equal(a, c)
+    assert_array_equal(a, d)
+
+    # Test binary file IO for 1 event
+    events = read_events(fname_1)  # Use as the new gold standard
+    write_events(op.join(tempdir, 'events-eve.fif'), events)
+    events2 = read_events(op.join(tempdir, 'events-eve.fif'))
+    assert_array_almost_equal(events, events2)
+
+    # Test text file IO for 1 event
+    write_events(op.join(tempdir, 'events.eve'), events)
+    events2 = read_events(op.join(tempdir, 'events.eve'))
+    assert_array_almost_equal(events, events2)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fname2 = op.join(tempdir, 'test-bad-name.fif')
+        write_events(fname2, events)
+        read_events(fname2)
+    assert_true(len(w) == 2)
+
+
+def test_find_events():
+    """Test find events in raw file
+    """
+    events = read_events(fname)
+    raw = io.Raw(raw_fname, preload=True)
+    # let's test the defaulting behavior while we're at it
+    extra_ends = ['', '_1']
+    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
+    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
+    if 'MNE_STIM_CHANNEL_1' in os.environ:
+        del os.environ['MNE_STIM_CHANNEL_1']
+    events2 = find_events(raw)
+    assert_array_almost_equal(events, events2)
+    # now test with mask
+    events11 = find_events(raw, mask=3)
+    events22 = read_events(fname, mask=3)
+    assert_array_equal(events11, events22)
+
+    # Reset some data for ease of comparison
+    raw._first_samps[0] = 0
+    raw.info['sfreq'] = 1000
+    raw._update_times()
+
+    stim_channel = 'STI 014'
+    stim_channel_idx = pick_channels(raw.info['ch_names'],
+                                     include=[stim_channel])
+
+    # test digital masking
+    raw._data[stim_channel_idx, :5] = np.arange(5)
+    raw._data[stim_channel_idx, 5:] = 0
+    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
+
+    assert_raises(TypeError, find_events, raw, mask="0")
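+    # the mask zeroes out the masked bits: e.g. mask=1 maps 1 -> 0 and
+    # 3 -> 2, so only the transitions at samples 2 and 4 remain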
+    assert_array_equal(find_events(raw, shortest_event=1, mask=1),
+                       [[2,    0,    2], [4,    2,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=2),
+                       [[1,    0,    1], [3,    0,    1], [4,    1,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=3),
+                       [[4,    0,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=4),
+                       [[1,    0,    1], [2,    1,    2], [3,    2,    3]])
+
+    # test empty events channel
+    raw._data[stim_channel_idx, :] = 0
+    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
+
+    raw._data[stim_channel_idx, :4] = 1
+    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
+
+    raw._data[stim_channel_idx, -1:] = 9
+    assert_array_equal(find_events(raw), [[14399, 0, 9]])
+
+    # Test that we can handle consecutive events with no gap
+    raw._data[stim_channel_idx, 10:20] = 5
+    raw._data[stim_channel_idx, 20:30] = 6
+    raw._data[stim_channel_idx, 30:32] = 5
+    raw._data[stim_channel_idx, 40] = 6
+
+    assert_array_equal(find_events(raw, consecutive=False),
+                       [[10, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, consecutive=True),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=False),
+                       [[31, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=True),
+                       [[19, 6, 5],
+                        [29, 5, 6],
+                        [31, 0, 5],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
+    assert_raises(ValueError, find_events, raw, output='step',
+                  consecutive=True)
+    assert_array_equal(find_events(raw, output='step', consecutive=True,
+                                   shortest_event=1),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5],
+                        [32, 5, 0],
+                        [40, 0, 6],
+                        [41, 6, 0],
+                        [14399, 0, 9],
+                        [14400, 9, 0]])
+    assert_array_equal(find_events(raw, output='offset'),
+                       [[19, 6, 5],
+                        [31, 0, 6],
+                        [40, 0, 6],
+                        [14399, 0, 9]])
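+    # min_duration is in seconds; with sfreq=1000, 0.002 s means an event
+    # must last at least 2 samples, dropping the 1-sample events at 40
+    # and 14399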
+    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
+                       [[10, 0, 5]])
+    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
+                       [[10, 0, 5],
+                        [20, 5, 6],
+                        [30, 6, 5]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=False,
+                                   min_duration=0.002),
+                       [[31, 0, 5]])
+    assert_array_equal(find_events(raw, output='offset', consecutive=True,
+                                   min_duration=0.002),
+                       [[19, 6, 5],
+                        [29, 5, 6],
+                        [31, 0, 5]])
+    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
+                       [[10, 0, 5],
+                        [20, 5, 6]])
+
+    # test find_stim_steps merge parameter
+    raw._data[stim_channel_idx, :] = 0
+    raw._data[stim_channel_idx, 0] = 1
+    raw._data[stim_channel_idx, 10] = 4
+    raw._data[stim_channel_idx, 11:20] = 5
+    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
+                                       stim_channel=stim_channel),
+                       [[0, 0, 1],
+                        [1, 1, 0],
+                        [10, 0, 4],
+                        [11, 4, 5],
+                        [20, 5, 0]])
+    assert_array_equal(find_stim_steps(raw, merge=-1,
+                                       stim_channel=stim_channel),
+                       [[1, 1, 0],
+                        [10, 0, 5],
+                        [20, 5, 0]])
+    assert_array_equal(find_stim_steps(raw, merge=1,
+                                       stim_channel=stim_channel),
+                       [[1, 1, 0],
+                        [11, 0, 5],
+                        [20, 5, 0]])
+
+    # put back the env vars we trampled on
+    for s, o in zip(extra_ends, orig_envs):
+        if o is not None:
+            os.environ['MNE_STIM_CHANNEL%s' % s] = o
+
+
+def test_pick_events():
+    """Test pick events in a events ndarray
+    """
+    events = np.array([[1, 0, 1],
+                       [2, 1, 0],
+                       [3, 0, 4],
+                       [4, 4, 2],
+                       [5, 2, 0]])
+    assert_array_equal(pick_events(events, include=[1, 4], exclude=4),
+                       [[1, 0, 1],
+                        [3, 0, 4]])
+    assert_array_equal(pick_events(events, exclude=[0, 2]),
+                       [[1, 0, 1],
+                        [3, 0, 4]])
+    assert_array_equal(pick_events(events, include=[1, 2], step=True),
+                       [[1, 0, 1],
+                        [2, 1, 0],
+                        [4, 4, 2],
+                        [5, 2, 0]])
+
+
+def test_make_fixed_length_events():
+    """Test making events of a fixed length
+    """
+    raw = io.Raw(raw_fname)
+    events = make_fixed_length_events(raw, id=1)
+    assert_true(events.shape[1] == 3)
+
+
+def test_define_events():
+    """Test defining response events
+    """
+    events = read_events(fname)
+    raw = io.Raw(raw_fname)
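+    # a reference id 5 followed by a target id 32 within 0.2-0.7 s becomes
+    # a "hit" (recoded as 42); unmatched references are filled with id 99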
+    events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
+                                      .2, 0.7, 42, 99)
+    n_target = events[events[:, 2] == 5].shape[0]
+    n_miss = events_[events_[:, 2] == 99].shape[0]
+    n_target_ = events_[events_[:, 2] == 42].shape[0]
+
+    assert_true(n_target_ == (n_target - n_miss))
+
+    events = np.array([[0, 0, 1],
+                       [375, 0, 2],
+                       [500, 0, 1],
+                       [875, 0, 3],
+                       [1000, 0, 1],
+                       [1375, 0, 3],
+                       [1100, 0, 1],
+                       [1475, 0, 2],
+                       [1500, 0, 1],
+                       [1875, 0, 2]])
+    true_lag_nofill = [1500., 1500., 1500.]
+    true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]
+    n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
+    n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)
+
+    assert_array_equal(true_lag_fill, lag_fill)
+    assert_array_equal(true_lag_nofill, lag_nofill)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_evoked.py
new file mode 100644
index 0000000..7918378
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_evoked.py
@@ -0,0 +1,483 @@
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
+#         Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#         Mads Jensen <mje.mads at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from copy import deepcopy
+import warnings
+
+import numpy as np
+from scipy import fftpack
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_array_equal, assert_allclose)
+from nose.tools import assert_true, assert_raises, assert_not_equal
+
+from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
+                 grand_average, combine_evoked)
+from mne.evoked import _get_peak, EvokedArray
+from mne.epochs import EpochsArray
+
+from mne.utils import _TempDir, requires_pandas, slow_test, requires_version
+
+from mne.io.meas_info import create_info
+from mne.externals.six.moves import cPickle as pickle
+
+warnings.simplefilter('always')
+
+fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                'test-ave.fif')
+fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                   'test-ave.fif.gz')
+
+
+@requires_version('scipy', '0.14')
+def test_savgol_filter():
+    """Test savgol filtering
+    """
+    h_freq = 10.
+    evoked = read_evokeds(fname, 0)
+    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
+    data = np.abs(fftpack.fft(evoked.data))
+    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
+    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
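+    # after low-passing at h_freq, power below h_freq / 2 should be intact
+    # while power above 2 * h_freq should be strongly attenuated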
+    assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
+    evoked.savgol_filter(h_freq)
+    data_filt = np.abs(fftpack.fft(evoked.data))
+    # decent in pass-band
+    assert_allclose(np.mean(data[:, match_mask], 0),
+                    np.mean(data_filt[:, match_mask], 0),
+                    rtol=1e-4, atol=1e-2)
+    # suppression in stop-band
+    assert_true(np.mean(data[:, mismatch_mask]) >
+                np.mean(data_filt[:, mismatch_mask]) * 5)
+
+
+def test_hash_evoked():
+    """Test evoked hashing
+    """
+    ave = read_evokeds(fname, 0)
+    ave_2 = read_evokeds(fname, 0)
+    assert_equal(hash(ave), hash(ave_2))
+    # do NOT use assert_equal here, failing output is terrible
+    assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
+
+    ave_2.data[0, 0] -= 1
+    assert_not_equal(hash(ave), hash(ave_2))
+
+
+@slow_test
+def test_io_evoked():
+    """Test IO for evoked data (fif + gz) with integer and str args
+    """
+    tempdir = _TempDir()
+    ave = read_evokeds(fname, 0)
+
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
+
+    # Not using assert_array_equal due to Windows rounding
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave.times, ave2.times)
+    assert_equal(ave.nave, ave2.nave)
+    assert_equal(ave._aspect_kind, ave2._aspect_kind)
+    assert_equal(ave.kind, ave2.kind)
+    assert_equal(ave.last, ave2.last)
+    assert_equal(ave.first, ave2.first)
+    assert_true(repr(ave))
+
+    # test compressed i/o
+    ave2 = read_evokeds(fname_gz, 0)
+    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
+
+    # test str access
+    condition = 'Left Auditory'
+    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
+    assert_raises(ValueError, read_evokeds, fname, condition,
+                  kind='standard_error')
+    ave3 = read_evokeds(fname, condition)
+    assert_array_almost_equal(ave.data, ave3.data, 19)
+
+    # test read_evokeds and write_evokeds
+    types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
+    aves1 = read_evokeds(fname)
+    aves2 = read_evokeds(fname, [0, 1, 2, 3])
+    aves3 = read_evokeds(fname, types)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
+    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
+    for aves in [aves2, aves3, aves4]:
+        for [av1, av2] in zip(aves1, aves):
+            assert_array_almost_equal(av1.data, av2.data)
+            assert_array_almost_equal(av1.times, av2.times)
+            assert_equal(av1.nave, av2.nave)
+            assert_equal(av1.kind, av2.kind)
+            assert_equal(av1._aspect_kind, av2._aspect_kind)
+            assert_equal(av1.last, av2.last)
+            assert_equal(av1.first, av2.first)
+            assert_equal(av1.comment, av2.comment)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        fname2 = op.join(tempdir, 'test-bad-name.fif')
+        write_evokeds(fname2, ave)
+        read_evokeds(fname2)
+    assert_true(len(w) == 2)
+
+
+def test_shift_time_evoked():
+    """ Test for shifting of time scale
+    """
+    tempdir = _TempDir()
+    # Shift backward
+    ave = read_evokeds(fname, 0)
+    ave.shift_time(-0.1, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+
+    # Shift forward twice the amount
+    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_bshift.shift_time(0.2, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
+
+    # Shift backward again
+    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_fshift.shift_time(-0.1, relative=True)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
+
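+    # the net shift is -0.1 + 0.2 - 0.1 = 0, so the round-tripped data
+    # should match the original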
+    ave_normal = read_evokeds(fname, 0)
+    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_relative.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
+
+    assert_equal(ave_normal.last, ave_relative.last)
+    assert_equal(ave_normal.first, ave_relative.first)
+
+    # Absolute time shift
+    ave = read_evokeds(fname, 0)
+    ave.shift_time(-0.3, relative=False)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+
+    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
+                            atol=1e-16, rtol=1e-3))
+    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
+
+
+def test_evoked_resample():
+    """Test for resampling of evoked data
+    """
+    tempdir = _TempDir()
+    # upsample, write it out, read it in
+    ave = read_evokeds(fname, 0)
+    sfreq_normal = ave.info['sfreq']
+    ave.resample(2 * sfreq_normal)
+    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
+    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+
+    # compare it to the original
+    ave_normal = read_evokeds(fname, 0)
+
+    # and compare the original to the downsampled upsampled version
+    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
+    ave_new.resample(sfreq_normal)
+
+    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
+    assert_array_almost_equal(ave_normal.times, ave_new.times)
+    assert_equal(ave_normal.nave, ave_new.nave)
+    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
+    assert_equal(ave_normal.kind, ave_new.kind)
+    assert_equal(ave_normal.last, ave_new.last)
+    assert_equal(ave_normal.first, ave_new.first)
+
+    # for the above to work, the upsampling must have worked as well,
+    # but we'll add a couple of extra checks anyway
+    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
+    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
+
+
+def test_evoked_detrend():
+    """Test for detrending evoked data
+    """
+    ave = read_evokeds(fname, 0)
+    ave_normal = read_evokeds(fname, 0)
+    ave.detrend(0)
+    ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
+    picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
+    assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
+                            rtol=1e-8, atol=1e-16))
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test evoked Pandas exporter"""
+    ave = read_evokeds(fname, 0)
+    assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
+    df = ave.to_data_frame()
+    assert_true((df.columns == ave.ch_names).all())
+    df = ave.to_data_frame(index=None).reset_index('time')
+    assert_true('time' in df.columns)
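+    # default unit scalings in the DataFrame: grad 1e13 (fT/cm), mag 1e15 (fT)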
+    assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
+    assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
+
+
+def test_evoked_proj():
+    """Test SSP proj operations
+    """
+    for proj in [True, False]:
+        ave = read_evokeds(fname, condition=0, proj=proj)
+        assert_true(all(p['active'] == proj for p in ave.info['projs']))
+
+        # test adding / deleting proj
+        if proj:
+            assert_raises(ValueError, ave.add_proj, [],
+                          {'remove_existing': True})
+            assert_raises(ValueError, ave.del_proj, 0)
+        else:
+            projs = deepcopy(ave.info['projs'])
+            n_proj = len(ave.info['projs'])
+            ave.del_proj(0)
+            assert_true(len(ave.info['projs']) == n_proj - 1)
+            ave.add_proj(projs, remove_existing=False)
+            assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
+            ave.add_proj(projs, remove_existing=True)
+            assert_true(len(ave.info['projs']) == n_proj)
+
+    ave = read_evokeds(fname, condition=0, proj=False)
+    data = ave.data.copy()
+    ave.apply_proj()
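+    # applying SSP is equivalent to left-multiplying the data by the
+    # projection matrix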
+    assert_allclose(np.dot(ave._projector, data), ave.data)
+
+
+def test_get_peak():
+    """Test peak getter
+    """
+
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
+                  tmax=0.01)
+    assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
+    assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
+    assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
+
+    ch_idx, time_idx = evoked.get_peak(ch_type='mag')
+    assert_true(ch_idx in evoked.ch_names)
+    assert_true(time_idx in evoked.times)
+
+    ch_idx, time_idx = evoked.get_peak(ch_type='mag',
+                                       time_as_index=True)
+    assert_true(time_idx < len(evoked.times))
+
+    data = np.array([[0., 1., 2.],
+                     [0., -3., 0.]])
+
+    times = np.array([.1, .2, .3])
+
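+    # mode='abs' finds the largest |value| (here -3); 'pos' and 'neg'
+    # restrict the search to positive or negative deflections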
+    ch_idx, time_idx = _get_peak(data, times, mode='abs')
+    assert_equal(ch_idx, 1)
+    assert_equal(time_idx, 1)
+
+    ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
+    assert_equal(ch_idx, 0)
+    assert_equal(time_idx, 2)
+
+    ch_idx, time_idx = _get_peak(data, times, mode='pos')
+    assert_equal(ch_idx, 0)
+    assert_equal(time_idx, 2)
+
+    assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
+    assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
+
+
+def test_drop_channels_mixin():
+    """Test channels-dropping functionality
+    """
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    drop_ch = evoked.ch_names[:3]
+    ch_names = evoked.ch_names[3:]
+
+    ch_names_orig = evoked.ch_names
+    dummy = evoked.drop_channels(drop_ch, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, evoked.ch_names)
+    assert_equal(len(ch_names_orig), len(evoked.data))
+
+    evoked.drop_channels(drop_ch)
+    assert_equal(ch_names, evoked.ch_names)
+    assert_equal(len(ch_names), len(evoked.data))
+
+
+def test_pick_channels_mixin():
+    """Test channel-picking functionality
+    """
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    ch_names = evoked.ch_names[:3]
+
+    ch_names_orig = evoked.ch_names
+    dummy = evoked.pick_channels(ch_names, copy=True)
+    assert_equal(ch_names, dummy.ch_names)
+    assert_equal(ch_names_orig, evoked.ch_names)
+    assert_equal(len(ch_names_orig), len(evoked.data))
+
+    evoked.pick_channels(ch_names)
+    assert_equal(ch_names, evoked.ch_names)
+    assert_equal(len(ch_names), len(evoked.data))
+
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    assert_true('meg' in evoked)
+    assert_true('eeg' in evoked)
+    evoked.pick_types(meg=False, eeg=True)
+    assert_true('meg' not in evoked)
+    assert_true('eeg' in evoked)
+    assert_true(len(evoked.ch_names) == 60)
+
+
+def test_equalize_channels():
+    """Test equalization of channels
+    """
+    evoked1 = read_evokeds(fname, condition=0, proj=True)
+    evoked2 = evoked1.copy()
+    ch_names = evoked1.ch_names[2:]
+    evoked1.drop_channels(evoked1.ch_names[:1])
+    evoked2.drop_channels(evoked2.ch_names[1:2])
+    my_comparison = [evoked1, evoked2]
+    equalize_channels(my_comparison)
+    for e in my_comparison:
+        assert_equal(ch_names, e.ch_names)
+
+
+def test_evoked_arithmetic():
+    """Test evoked arithmetic
+    """
+    ev = read_evokeds(fname, condition=0)
+    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
+    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
+
+    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
+    # data should be added according to their `nave` weights
+    # nave = ev1.nave + ev2.nave
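+    # here: (20 * +1 + 10 * -1) / (20 + 10) = 1 / 3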
+    ev = ev1 + ev2
+    assert_equal(ev.nave, ev1.nave + ev2.nave)
+    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
+    ev = ev1 - ev2
+    assert_equal(ev.nave, ev1.nave + ev2.nave)
+    assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
+    assert_allclose(ev.data, np.ones_like(ev1.data))
+
+    # default comment behavior if evoked.comment is None
+    old_comment1 = ev1.comment
+    old_comment2 = ev2.comment
+    ev1.comment = None
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        ev = ev1 - ev2
+        assert_equal(ev.comment, 'unknown')
+    ev1.comment = old_comment1
+    ev2.comment = old_comment2
+
+    # equal weighting
+    ev = combine_evoked([ev1, ev2], weights='equal')
+    assert_allclose(ev.data, np.zeros_like(ev1.data))
+
+    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
+    ev = combine_evoked([ev1, ev2], weights=[1, 0])
+    assert_equal(ev.nave, ev1.nave)
+    assert_allclose(ev.data, ev1.data)
+
+    # simple subtraction (like in oddball)
+    ev = combine_evoked([ev1, ev2], weights=[1, -1])
+    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
+
+    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
+    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
+
+    # grand average
+    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
+    ch_names = evoked1.ch_names[2:]
+    evoked1.info['bads'] = ['EEG 008']  # test interpolation
+    evoked1.drop_channels(evoked1.ch_names[:1])
+    evoked2.drop_channels(evoked2.ch_names[1:2])
+    gave = grand_average([evoked1, evoked2])
+    assert_equal(gave.data.shape, (len(ch_names), evoked1.data.shape[1]))
+    assert_equal(ch_names, gave.ch_names)
+    assert_equal(gave.nave, 2)
+
+
+def test_array_epochs():
+    """Test creating evoked from array
+    """
+    tempdir = _TempDir()
+
+    # creating
+    rng = np.random.RandomState(42)
+    data1 = rng.randn(20, 60)
+    sfreq = 1e3
+    ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
+    types = ['eeg'] * 20
+    info = create_info(ch_names, sfreq, types)
+    evoked1 = EvokedArray(data1, info, tmin=-0.01)
+
+    # save, read, and compare evokeds
+    tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
+    evoked1.save(tmp_fname)
+    evoked2 = read_evokeds(tmp_fname)[0]
+    data2 = evoked2.data
+    assert_allclose(data1, data2)
+    assert_allclose(evoked1.times, evoked2.times)
+    assert_equal(evoked1.first, evoked2.first)
+    assert_equal(evoked1.last, evoked2.last)
+    assert_equal(evoked1.kind, evoked2.kind)
+    assert_equal(evoked1.nave, evoked2.nave)
+
+    # now compare with EpochsArray (with single epoch)
+    data3 = data1[np.newaxis, :, :]
+    events = np.c_[10, 0, 1]
+    evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
+    assert_allclose(evoked1.data, evoked3.data)
+    assert_allclose(evoked1.times, evoked3.times)
+    assert_equal(evoked1.first, evoked3.first)
+    assert_equal(evoked1.last, evoked3.last)
+    assert_equal(evoked1.kind, evoked3.kind)
+    assert_equal(evoked1.nave, evoked3.nave)
+
+    # test match between channels info and data
+    ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
+    types = ['eeg'] * 19
+    info = create_info(ch_names, sfreq, types)
+    assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
+
+
+def test_add_channels():
+    """Test evoked splitting / re-appending channel types
+    """
+    evoked = read_evokeds(fname, condition=0)
+    evoked.info['buffer_size_sec'] = None
+    evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
+    evoked_meg = evoked.pick_types(meg=True, copy=True)
+    evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
+    evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
+    evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
+    assert_true(all(ch in evoked_new.ch_names
+                    for ch in evoked_stim.ch_names + evoked_meg.ch_names))
+    evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
+
+    assert_true(all(ch in evoked_new.ch_names for ch in evoked.ch_names))
+    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
+    assert_true(all(ch not in evoked_new.ch_names
+                    for ch in evoked_stim.ch_names))
+
+    # Now test errors
+    evoked_badsf = evoked_eeg.copy()
+    evoked_badsf.info['sfreq'] = 3.1415927
+    evoked_eeg = evoked_eeg.crop(-.1, .1)
+
+    assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
+    assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
+    assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
+    assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_filter.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_filter.py
new file mode 100644
index 0000000..cd67ab9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_filter.py
@@ -0,0 +1,379 @@
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
+                           assert_array_equal, assert_allclose)
+from nose.tools import assert_equal, assert_true, assert_raises
+import os.path as op
+import warnings
+from scipy.signal import resample as sp_resample
+
+from mne.filter import (band_pass_filter, high_pass_filter, low_pass_filter,
+                        band_stop_filter, resample, _resample_stim_channels,
+                        construct_iir_filter, notch_filter, detrend,
+                        _overlap_add_filter, _smart_pad)
+
+from mne import set_log_file
+from mne.utils import _TempDir, sum_squared, run_tests_if_main, slow_test
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def test_1d_filter():
+    """Test our private overlap-add filtering function"""
+    rng = np.random.RandomState(0)
+    # make some random signals and filters
+    for n_signal in (1, 2, 5, 10, 20, 40, 100, 200, 400, 1000, 2000):
+        x = rng.randn(n_signal)
+        for n_filter in (2, 5, 10, 20, 40, 100, 200, 400, 1000, 2000):
+            # Don't test n_filter == 1 because scipy can't handle it.
+            if n_filter > n_signal:
+                continue  # only equal or lesser lengths supported
+            for filter_type in ('identity', 'random'):
+                if filter_type == 'random':
+                    h = rng.randn(n_filter)
+                else:  # filter_type == 'identity'
+                    h = np.concatenate([[1.], np.zeros(n_filter - 1)])
+                # ensure we pad the signal the same way for both filters
+                n_pad = max(min(n_filter, n_signal - 1), 0)
+                x_pad = _smart_pad(x, n_pad)
+                for zero_phase in (True, False):
+                    # compute our expected result the slow way
+                    if zero_phase:
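+                        # filtfilt-style zero phase: filter, reverse,
+                        # filter again, reverse, then trim the filter
+                        # delay from both ends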
+                        x_expected = np.convolve(x_pad, h)[::-1]
+                        x_expected = np.convolve(x_expected, h)[::-1]
+                        x_expected = x_expected[len(h) - 1:-(len(h) - 1)]
+                    else:
+                        x_expected = np.convolve(x_pad, h)
+                        x_expected = x_expected[:-(len(h) - 1)]
+                    # remove padding
+                    if n_pad > 0:
+                        x_expected = x_expected[n_pad:-n_pad]
+                    # make sure we actually set things up reasonably
+                    if filter_type == 'identity':
+                        assert_allclose(x_expected, x)
+                    # compute our version
+                    for n_fft in (None, 32, 128, 129, 1023, 1024, 1025, 2048):
+                        # need to use .copy() b/c signal gets modified inplace
+                        x_copy = x[np.newaxis, :].copy()
+                        if (n_fft is not None and n_fft < 2 * n_filter - 1 and
+                                zero_phase):
+                            assert_raises(ValueError, _overlap_add_filter,
+                                          x_copy, h, n_fft, zero_phase)
+                        elif (n_fft is not None and n_fft < n_filter and not
+                                zero_phase):
+                            assert_raises(ValueError, _overlap_add_filter,
+                                          x_copy, h, n_fft, zero_phase)
+                        else:
+                            # bad len warning
+                            with warnings.catch_warnings(record=True):
+                                x_filtered = _overlap_add_filter(
+                                    x_copy, h, n_fft, zero_phase)[0]
+                            assert_allclose(x_expected, x_filtered)
+
+
+def test_iir_stability():
+    """Test IIR filter stability check
+    """
+    sig = np.empty(1000)
+    sfreq = 1000
+    # This will make an unstable filter, should throw RuntimeError
+    assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
+                  method='iir', iir_params=dict(ftype='butter', order=8))
+    # can't pass iir_params if method='fir'
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
+                  method='fir', iir_params=dict(ftype='butter', order=2))
+    # method must be string
+    assert_raises(TypeError, high_pass_filter, sig, sfreq, 0.1,
+                  method=1)
+    # unknown method
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
+                  method='blah')
+    # bad iir_params
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
+                  method='fir', iir_params='blah')
+
+    # should pass because the default trans_bandwidth is not relevant
+    high_pass_filter(sig, 250, 0.5, method='iir',
+                     iir_params=dict(ftype='butter', order=6))
+
+
+def test_notch_filters():
+    """Test notch filters
+    """
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    # let's use an ugly, prime sfreq for fun
+    sfreq = 487.0
+    sig_len_secs = 20
+    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
+    freqs = np.arange(60, 241, 60)
+
+    # make a "signal"
+    rng = np.random.RandomState(0)
+    a = rng.randn(int(sig_len_secs * sfreq))
+    orig_power = np.sqrt(np.mean(a ** 2))
+    # make line noise
+    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)
+
+    # only allow None line_freqs with 'spectrum_fit' mode
+    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
+    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
+    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
+    filter_lengths = [None, None, None, 8192, None]
+    line_freqs = [None, freqs, freqs, freqs, freqs]
+    tols = [2, 1, 1, 1, 1]
+    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
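+        # with line_freqs=None, 'spectrum_fit' has to detect the line
+        # frequencies itself, so capture the log to check the detection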
+        if lf is None:
+            set_log_file(log_file, overwrite=True)
+
+        b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth,
+                         verbose='INFO')
+
+        if lf is None:
+            set_log_file()
+            with open(log_file) as fid:
+                out = fid.readlines()
+            if len(out) != 2 and len(out) != 3:  # force_serial: len(out) == 3
+                raise ValueError('Detected frequencies not logged properly')
+            out = np.fromstring(out[-1], sep=', ')
+            assert_array_almost_equal(out, freqs)
+        new_power = np.sqrt(sum_squared(b) / b.size)
+        assert_almost_equal(new_power, orig_power, tol)
+
+
+def test_resample():
+    """Test resampling"""
+    x = np.random.normal(0, 1, (10, 10, 10))
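+    # resample(x, up, down, npad) scales the last axis by up / down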
+    x_rs = resample(x, 1, 2, 10)
+    assert_equal(x.shape, (10, 10, 10))
+    assert_equal(x_rs.shape, (10, 10, 5))
+
+    x_2 = x.swapaxes(0, 1)
+    x_2_rs = resample(x_2, 1, 2, 10)
+    assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs)
+
+    x_3 = x.swapaxes(0, 2)
+    x_3_rs = resample(x_3, 1, 2, 10, 0)
+    assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
+
+    # make sure we cast to array if necessary
+    assert_array_equal(resample([0, 0], 2, 1), [0., 0., 0., 0.])
+
+
+def test_resample_stim_channel():
+    """Test resampling of stim channels"""
+
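+    # stim channels must keep their integer trigger values, so they are
+    # resampled without interpolation (no intermediate values appear)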
+    # Downsampling
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 2),
+        [[1, 0, 2, 0]])
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 1.5),
+        [[1, 0, 0, 2, 0]])
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 1, 2, 0, 0, 1], 1, 2),
+        [[1, 1, 2, 1]])
+
+    # Upsampling
+    assert_array_equal(
+        _resample_stim_channels([1, 2, 3], 2, 1), [[1, 1, 2, 2, 3, 3]])
+    assert_array_equal(
+        _resample_stim_channels([1, 2, 3], 2.5, 1), [[1, 1, 1, 2, 2, 3, 3, 3]])
+
+    # Proper number of samples in stim channel resampling from io/base.py
+    data_chunk = np.zeros((1, 315600))
+    for new_data_len in (52598, 52599, 52600, 52601, 315599, 315600):
+        new_data = _resample_stim_channels(data_chunk, new_data_len,
+                                           data_chunk.shape[1])
+        assert_equal(new_data.shape[1], new_data_len)
+
+
+@slow_test
+def test_filters():
+    """Test low-, band-, high-pass, and band-stop filters plus resampling
+    """
+    sfreq = 500
+    sig_len_secs = 30
+
+    a = np.random.randn(2, sig_len_secs * sfreq)
+
+    # let's test our catchers
+    for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
+        assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8,
+                      filter_length=fl)
+    for nj in ['blah', 0.5]:
+        assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, n_jobs=nj)
+    # > Nyq/2
+    assert_raises(ValueError, band_pass_filter, a, sfreq, 4, sfreq / 2.)
+    assert_raises(ValueError, low_pass_filter, a, sfreq, sfreq / 2.)
+    # check our short-filter warning:
+    with warnings.catch_warnings(record=True) as w:
+        # Warning for low attenuation
+        band_pass_filter(a, sfreq, 1, 8, filter_length=1024)
+        # Warning for too short a filter
+        band_pass_filter(a, sfreq, 1, 8, filter_length='0.5s')
+    assert_true(len(w) >= 2)
+
+    # try new default and old default
+    for fl in ['10s', '5000ms', None]:
+        bp = band_pass_filter(a, sfreq, 4, 8, filter_length=fl)
+        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, filter_length=fl)
+        lp = low_pass_filter(a, sfreq, 8, filter_length=fl, n_jobs=2)
+        hp = high_pass_filter(lp, sfreq, 4, filter_length=fl)
+        assert_array_almost_equal(hp, bp, 2)
+        assert_array_almost_equal(bp + bs, a, 1)
+
+    # Overlap-add filtering with a fixed filter length
+    filter_length = 8192
+    bp_oa = band_pass_filter(a, sfreq, 4, 8, filter_length)
+    bs_oa = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, filter_length)
+    lp_oa = low_pass_filter(a, sfreq, 8, filter_length)
+    hp_oa = high_pass_filter(lp_oa, sfreq, 4, filter_length)
+    assert_array_almost_equal(hp_oa, bp_oa, 2)
+    # Our filters are no longer quite complementary with linear rolloffs :(
+    # this is the tradeoff for stability of the filtering
+    # obtained by directly using the result of firwin2 instead of
+    # modifying it...
+    assert_array_almost_equal(bp_oa + bs_oa, a, 1)
+
+    # The two methods should give the same result
+    # As filtering for short signals uses a circular convolution (FFT) and
+    # the overlap-add filter implements a linear convolution, the signal
+    # boundary will be slightly different and we ignore it
+    n_edge_ignore = 0
+    assert_array_almost_equal(hp[n_edge_ignore:-n_edge_ignore],
+                              hp_oa[n_edge_ignore:-n_edge_ignore], 2)
+
+    # and since these are low-passed, downsampling/upsampling should be close
+    n_resamp_ignore = 10
+    bp_up_dn = resample(resample(bp_oa, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+    # note that on systems without CUDA, this line serves as a test for a
+    # graceful fallback to n_jobs=1
+    bp_up_dn = resample(resample(bp_oa, 2, 1, n_jobs='cuda'), 1, 2,
+                        n_jobs='cuda')
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+    # test to make sure our resampling matches scipy's
+    bp_up_dn = sp_resample(sp_resample(bp_oa, 2 * bp_oa.shape[-1], axis=-1,
+                                       window='boxcar'),
+                           bp_oa.shape[-1], window='boxcar', axis=-1)
+    assert_array_almost_equal(bp_oa[n_resamp_ignore:-n_resamp_ignore],
+                              bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
+
+    # make sure we don't alias
+    t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
+    # make sinusoid close to the Nyquist frequency
+    sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
+    # signal should disappear with 2x downsampling
+    sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
+    assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
+
+    # let's construct some filters
+    iir_params = dict(ftype='cheby1', gpass=1, gstop=20)
+    iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
+    # this should be a third order filter
+    assert_true(iir_params['a'].size - 1 == 3)
+    assert_true(iir_params['b'].size - 1 == 3)
+    iir_params = dict(ftype='butter', order=4)
+    iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
+    assert_true(iir_params['a'].size - 1 == 4)
+    assert_true(iir_params['b'].size - 1 == 4)
+
+    # check that picks work for 3d array with one channel and picks=[0]
+    a = np.random.randn(5 * sfreq, 5 * sfreq)
+    b = a[:, None, :]
+
+    with warnings.catch_warnings(record=True) as w:
+        a_filt = band_pass_filter(a, sfreq, 4, 8)
+        b_filt = band_pass_filter(b, sfreq, 4, 8, picks=[0])
+
+    assert_array_equal(a_filt[:, None, :], b_filt)
+
+    # check for n-dimensional case
+    a = np.random.randn(2, 2, 2, 2)
+    assert_raises(ValueError, band_pass_filter, a, sfreq, Fp1=4, Fp2=8,
+                  picks=np.array([0, 1]))
+
+    # test that our overlap-add filtering doesn't introduce strange
+    # artifacts (from mne_analyze mailing list 2015/06/25)
+    N = 300
+    sfreq = 100.
+    lp = 10.
+    sine_freq = 1.
+    x = np.ones(N)
+    x += np.sin(2 * np.pi * sine_freq * np.arange(N) / sfreq)
+    with warnings.catch_warnings(record=True):  # filter attenuation
+        x_filt = low_pass_filter(x, sfreq, lp, '1s')
+    # the firwin2 function gets us this close
+    assert_allclose(x, x_filt, rtol=1e-3, atol=1e-3)
+
+
+def test_cuda():
+    """Test CUDA-based filtering
+    """
+    # NOTE: don't make test_cuda() the last test, or pycuda might spew
+    # some warnings about clean-up failing
+    # Also, using `n_jobs='cuda'` on a non-CUDA system should be fine,
+    # as it should fall back to using n_jobs=1.
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    sfreq = 500
+    sig_len_secs = 20
+    a = np.random.randn(sig_len_secs * sfreq)
+
+    set_log_file(log_file, overwrite=True)
+    for fl in ['10s', None, 2048]:
+        bp = band_pass_filter(a, sfreq, 4, 8, n_jobs=1, filter_length=fl)
+        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs=1,
+                              filter_length=fl)
+        lp = low_pass_filter(a, sfreq, 8, n_jobs=1, filter_length=fl)
+        hp = high_pass_filter(lp, sfreq, 4, n_jobs=1, filter_length=fl)
+
+        bp_c = band_pass_filter(a, sfreq, 4, 8, n_jobs='cuda',
+                                filter_length=fl, verbose='INFO')
+        bs_c = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
+                                filter_length=fl, verbose='INFO')
+        lp_c = low_pass_filter(a, sfreq, 8, n_jobs='cuda', filter_length=fl,
+                               verbose='INFO')
+        hp_c = high_pass_filter(lp, sfreq, 4, n_jobs='cuda', filter_length=fl,
+                                verbose='INFO')
+
+        assert_array_almost_equal(bp, bp_c, 12)
+        assert_array_almost_equal(bs, bs_c, 12)
+        assert_array_almost_equal(lp, lp_c, 12)
+        assert_array_almost_equal(hp, hp_c, 12)
+
+    # check to make sure we actually used CUDA
+    set_log_file()
+    with open(log_file) as fid:
+        out = fid.readlines()
+    # triage based on whether or not we actually expected to use CUDA
+    from mne.cuda import _cuda_capable  # allow above funs to set it
+    tot = 12 if _cuda_capable else 0
+    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
+                     for o in out]) == tot)
+
+    # check resampling
+    a = np.random.RandomState(0).randn(3, sig_len_secs * sfreq)
+    a1 = resample(a, 1, 2, n_jobs=2, npad=0)
+    a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
+    a3 = resample(a, 2, 1, n_jobs=2, npad=0)
+    a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
+    assert_array_almost_equal(a3, a4, 14)
+    assert_array_almost_equal(a1, a2, 14)
+    assert_array_equal(resample([0, 0], 2, 1, n_jobs='cuda'), [0., 0., 0., 0.])
+    assert_array_equal(resample(np.zeros(2, np.float32), 2, 1, n_jobs='cuda'),
+                       [0., 0., 0., 0.])
+
+
+def test_detrend():
+    """Test zeroth and first order detrending
+    """
+    x = np.arange(10)
+    assert_array_almost_equal(detrend(x, 1), np.zeros_like(x))
+    x = np.ones(10)
+    assert_array_almost_equal(detrend(x, 0), np.zeros_like(x))
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_fixes.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_fixes.py
new file mode 100644
index 0000000..eaa9fa3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_fixes.py
@@ -0,0 +1,194 @@
+# Authors: Emmanuelle Gouillart <emmanuelle.gouillart at normalesup.org>
+#          Gael Varoquaux <gael.varoquaux at normalesup.org>
+#          Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# License: BSD
+
+import numpy as np
+
+from nose.tools import assert_equal, assert_raises, assert_true
+from numpy.testing import assert_array_equal
+from distutils.version import LooseVersion
+from scipy import signal, sparse
+
+from mne.utils import run_tests_if_main
+from mne.fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
+                       _Counter, _unique, _bincount, _digitize,
+                       _sparse_block_diag, _matrix_rank, _meshgrid,
+                       _isclose)
+from mne.fixes import _firwin2 as mne_firwin2
+from mne.fixes import _filtfilt as mne_filtfilt
+
+
+def test_counter():
+    """Test Counter replacement"""
+    import collections
+    try:
+        Counter = collections.Counter
+    except Exception:
+        pass
+    else:
+        a = Counter([1, 2, 1, 3])
+        b = _Counter([1, 2, 1, 3])
+        c = _Counter()
+        c.update(b)
+        for key, count in zip([1, 2, 3], [2, 1, 1]):
+            assert_equal(a[key], count)
+            assert_equal(a[key], b[key])
+            assert_equal(a[key], c[key])
+
+
+def test_unique():
+    """Test unique() replacement
+    """
+    # skip test for np version < 1.5
+    if LooseVersion(np.__version__) < LooseVersion('1.5'):
+        return
+    for arr in [np.array([]), np.random.rand(10), np.ones(10)]:
+        # basic
+        assert_array_equal(np.unique(arr), _unique(arr))
+        # with return_index=True
+        x1, x2 = np.unique(arr, return_index=True, return_inverse=False)
+        y1, y2 = _unique(arr, return_index=True, return_inverse=False)
+        assert_array_equal(x1, y1)
+        assert_array_equal(x2, y2)
+        # with return_inverse=True
+        x1, x2 = np.unique(arr, return_index=False, return_inverse=True)
+        y1, y2 = _unique(arr, return_index=False, return_inverse=True)
+        assert_array_equal(x1, y1)
+        assert_array_equal(x2, y2)
+        # with both:
+        x1, x2, x3 = np.unique(arr, return_index=True, return_inverse=True)
+        y1, y2, y3 = _unique(arr, return_index=True, return_inverse=True)
+        assert_array_equal(x1, y1)
+        assert_array_equal(x2, y2)
+        assert_array_equal(x3, y3)
+
+
+def test_bincount():
+    """Test bincount() replacement
+    """
+    # skip test for np version < 1.6
+    if LooseVersion(np.__version__) < LooseVersion('1.6'):
+        return
+    for minlength in [None, 100]:
+        x = _bincount(np.ones(10, int), None, minlength)
+        y = np.bincount(np.ones(10, int), None, minlength)
+        assert_array_equal(x, y)
+
+
+def test_in1d():
+    """Test numpy.in1d() replacement"""
+    a = np.arange(10)
+    b = a[a % 2 == 0]
+    assert_equal(_in1d(a, b).sum(), 5)
+
+
+def test_digitize():
+    """Test numpy.digitize() replacement"""
+    data = np.arange(9)
+    bins = [0, 5, 10]
+    left = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
+    right = np.array([0, 1, 1, 1, 1, 1, 2, 2, 2])
+
+    assert_array_equal(_digitize(data, bins), left)
+    assert_array_equal(_digitize(data, bins, True), right)
+    assert_raises(NotImplementedError, _digitize, data + 0.1, bins, True)
+    assert_raises(NotImplementedError, _digitize, data, [0., 5, 10], True)
+
+
+def test_tril_indices():
+    """Test numpy.tril_indices() replacement"""
+    il1 = _tril_indices(4)
+    il2 = _tril_indices(4, -1)
+
+    a = np.array([[1, 2, 3, 4],
+                  [5, 6, 7, 8],
+                  [9, 10, 11, 12],
+                  [13, 14, 15, 16]])
+
+    assert_array_equal(a[il1],
+                       np.array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+
+    assert_array_equal(a[il2], np.array([5, 9, 10, 13, 14, 15]))
+
+
+def test_unravel_index():
+    """Test numpy.unravel_index() replacement"""
+    assert_equal(_unravel_index(2, (2, 3)), (0, 2))
+    assert_equal(_unravel_index(2, (2, 2)), (1, 0))
+    assert_equal(_unravel_index(254, (17, 94)), (2, 66))
+    assert_equal(_unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), (2, 1, 4))
+    assert_array_equal(_unravel_index(np.array([22, 41, 37]), (7, 6)),
+                       [[3, 6, 6], [4, 5, 1]])
+    assert_array_equal(_unravel_index(1621, (6, 7, 8, 9)), (3, 1, 4, 1))
+
+
+def test_copysign():
+    """Test numpy.copysign() replacement"""
+    a = np.array([-1, 1, -1])
+    b = np.array([1, -1, 1])
+
+    assert_array_equal(_copysign(a, b), b)
+    assert_array_equal(_copysign(b, a), a)
+
+
+def test_firwin2():
+    """Test firwin2 backport
+    """
+    taps1 = mne_firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    taps2 = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    assert_array_equal(taps1, taps2)
+
+
+def test_filtfilt():
+    """Test IIR filtfilt replacement
+    """
+    x = np.r_[1, np.zeros(100)]
+    # Filter with an impulse
+    y = mne_filtfilt([1, 0], [1, 0], x, padlen=0)
+    assert_array_equal(x, y)
+
+
+def test_sparse_block_diag():
+    """Test sparse block diag replacement"""
+    x = _sparse_block_diag([sparse.eye(2, 2), sparse.eye(2, 2)])
+    x = x - sparse.eye(4, 4)
+    x.eliminate_zeros()
+    assert_equal(len(x.data), 0)
+
+
+def test_rank():
+    """Test rank replacement"""
+    assert_equal(_matrix_rank(np.ones(10)), 1)
+    assert_equal(_matrix_rank(np.eye(10)), 10)
+    assert_equal(_matrix_rank(np.ones((10, 10))), 1)
+    assert_raises(TypeError, _matrix_rank, np.ones((10, 10, 10)))
+
+
+def test_meshgrid():
+    """Test meshgrid replacement
+    """
+    a = np.arange(10)
+    b = np.linspace(0, 1, 5)
+    a_grid, b_grid = _meshgrid(a, b, indexing='ij')
+    for grid in (a_grid, b_grid):
+        assert_equal(grid.shape, (a.size, b.size))
+    a_grid, b_grid = _meshgrid(a, b, indexing='xy', copy=True)
+    for grid in (a_grid, b_grid):
+        assert_equal(grid.shape, (b.size, a.size))
+    assert_raises(TypeError, _meshgrid, a, b, foo='a')
+    assert_raises(ValueError, _meshgrid, a, b, indexing='foo')
+
+
+def test_isclose():
+    """Test isclose replacement
+    """
+    a = np.random.RandomState(0).randn(10)
+    b = a.copy()
+    assert_true(_isclose(a, b).all())
+    a[0] = np.inf
+    b[0] = np.inf
+    a[-1] = np.nan
+    b[-1] = np.nan
+    assert_true(_isclose(a, b, equal_nan=True).all())
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_import_nesting.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_import_nesting.py
new file mode 100644
index 0000000..36d0a20
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_import_nesting.py
@@ -0,0 +1,53 @@
+import sys
+from subprocess import Popen, PIPE
+
+from mne.utils import run_tests_if_main, requires_version
+
+
+run_script = """
+from __future__ import print_function
+
+import sys
+import mne
+
+out = []
+
+# check scipy
+ok_scipy_submodules = set(['scipy', 'numpy',  # these appear in old scipy
+                           'fftpack', 'lib', 'linalg',
+                           'misc', 'sparse', 'version'])
+scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys()
+                       if x.startswith('scipy.') and '__' not in x and
+                       not x.split('.')[1].startswith('_'))
+bad = scipy_submodules - ok_scipy_submodules
+if len(bad) > 0:
+    out.append('Found un-nested scipy submodules: %s' % list(bad))
+
+# check sklearn and others
+_sklearn = _pandas = _nose = False
+for x in sys.modules.keys():
+    if x.startswith('sklearn') and not _sklearn:
+        out.append('Found un-nested sklearn import')
+        _sklearn = True
+    if x.startswith('pandas') and not _pandas:
+        out.append('Found un-nested pandas import')
+        _pandas = True
+    if x.startswith('nose') and not _nose:
+        out.append('Found un-nested nose import')
+        _nose = True
+if len(out) > 0:
+    print('\\n' + '\\n'.join(out), end='')
+    exit(1)
+"""
+
+
+@requires_version('scipy', '0.11')  # old ones not organized properly
+def test_module_nesting():
+    """Test that module imports are necessary
+    """
+    proc = Popen([sys.executable, '-c', run_script], stdout=PIPE, stderr=PIPE)
+    stdout, stderr = proc.communicate()
+    if proc.returncode:
+        raise AssertionError(stdout)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_label.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_label.py
new file mode 100644
index 0000000..99a5c74
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_label.py
@@ -0,0 +1,749 @@
+import os
+import os.path as op
+import shutil
+import glob
+import warnings
+import sys
+
+import numpy as np
+from scipy import sparse
+
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+from nose.tools import assert_equal, assert_true, assert_false, assert_raises
+
+from mne.datasets import testing
+from mne import (read_label, stc_to_label, read_source_estimate,
+                 read_source_spaces, grow_labels, read_labels_from_annot,
+                 write_labels_to_annot, split_label, spatial_tris_connectivity,
+                 read_surface)
+from mne.label import Label, _blend_colors
+from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
+                       run_tests_if_main, slow_test)
+from mne.fixes import digitize, in1d, assert_is, assert_is_not
+from mne.label import _n_colors
+from mne.source_space import SourceSpaces
+from mne.source_estimate import mesh_edges
+from mne.externals.six import string_types
+from mne.externals.six.moves import cPickle as pickle
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
+stc_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-lh.stc')
+real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
+                           'Aud-lh.label')
+real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
+                              'Aud-rh.label')
+v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
+
+fwd_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
+                        'fsaverage-ico-5-src.fif')
+label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
+
+test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
+label_fname = op.join(test_path, 'test-lh.label')
+label_rh_fname = op.join(test_path, 'test-rh.label')
+
+# This code was used to generate the "fake" test labels:
+# for hemi in ['lh', 'rh']:
+#    label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
+#                  hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
+#    label.save(op.join(test_path, 'test-%s.label' % hemi))
+
+
+# XXX : this was added for backward compat and to keep the old
+# test_label_in_src behavior
+def _stc_to_label(stc, src, smooth, subjects_dir=None):
+    """Compute a label from the non-zero sources in an stc object.
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The source estimates.
+    src : SourceSpaces | str | None
+        The source space over which the source estimates are defined.
+        If it's a string, it should be the subject name (e.g., fsaverage).
+        Can be None if stc.subject is not None.
+    smooth : int
+        Number of smoothing iterations.
+    subjects_dir : str | None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    labels : list of Labels | list of list of Labels
+        The generated labels. If connected is False, it returns
+        a list of Labels (one per hemisphere). If no Label is available
+        in a hemisphere, None is returned. If connected is True,
+        it returns for each hemisphere a list of connected labels,
+        ordered in decreasing order of the maximum value in the stc.
+        If no Label is available in a hemisphere, an empty list is returned.
+    """
+    src = stc.subject if src is None else src
+
+    if isinstance(src, string_types):
+        subject = src
+    else:
+        subject = stc.subject
+
+    if isinstance(src, string_types):
+        subjects_dir = get_subjects_dir(subjects_dir)
+        surf_path_from = op.join(subjects_dir, src, 'surf')
+        rr_lh, tris_lh = read_surface(op.join(surf_path_from,
+                                      'lh.white'))
+        rr_rh, tris_rh = read_surface(op.join(surf_path_from,
+                                      'rh.white'))
+        rr = [rr_lh, rr_rh]
+        tris = [tris_lh, tris_rh]
+    else:
+        if not isinstance(src, SourceSpaces):
+            raise TypeError('src must be a string or a set of source spaces')
+        if len(src) != 2:
+            raise ValueError('source space should contain the 2 hemispheres')
+        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
+        tris = [src[0]['tris'], src[1]['tris']]
+
+    labels = []
+    cnt = 0
+    for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
+            zip(['lh', 'rh'], stc.vertices, tris, rr)):
+        this_data = stc.data[cnt:cnt + len(this_vertno)]
+        e = mesh_edges(this_tris)
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+
+        clusters = [this_vertno[np.any(this_data, axis=1)]]
+
+        cnt += len(this_vertno)
+
+        clusters = [c for c in clusters if len(c) > 0]
+
+        if len(clusters) == 0:
+            this_labels = None
+        else:
+            this_labels = []
+            colors = _n_colors(len(clusters))
+            for c, color in zip(clusters, colors):
+                idx_use = c
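+                # each smoothing iteration dilates the cluster by one ring
+                # of neighbors via the sparse mesh adjacency matrix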
+                for k in range(smooth):
+                    e_use = e[:, idx_use]
+                    data1 = e_use * np.ones(len(idx_use))
+                    idx_use = np.where(data1)[0]
+
+                label = Label(idx_use, this_rr[idx_use], None, hemi,
+                              'Label from stc', subject=subject,
+                              color=color)
+
+                this_labels.append(label)
+
+            this_labels = this_labels[0]
+
+        labels.append(this_labels)
+
+    return labels
+
+
+def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
+    if comment:
+        assert_equal(l0.comment, l1.comment)
+    if color:
+        assert_equal(l0.color, l1.color)
+
+    for attr in ['hemi', 'subject']:
+        attr0 = getattr(l0, attr)
+        attr1 = getattr(l1, attr)
+        msg = "label.%s: %r != %r" % (attr, attr0, attr1)
+        assert_equal(attr0, attr1, msg)
+    for attr in ['vertices', 'pos', 'values']:
+        a0 = getattr(l0, attr)
+        a1 = getattr(l1, attr)
+        assert_array_almost_equal(a0, a1, decimal)
+
+
+def test_label_subject():
+    """Test label subject name extraction
+    """
+    label = read_label(label_fname)
+    assert_is(label.subject, None)
+    assert_true('unknown' in repr(label))
+    label = read_label(label_fname, subject='fsaverage')
+    assert_true(label.subject == 'fsaverage')
+    assert_true('fsaverage' in repr(label))
+
+
+def test_label_addition():
+    """Test label addition
+    """
+    pos = np.random.rand(10, 3)
+    values = np.arange(10.) / 10
+    idx0 = list(range(7))
+    idx1 = list(range(7, 10))  # non-overlapping
+    idx2 = list(range(5, 10))  # overlapping
+    l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
+    l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
+    l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
+
+    assert_equal(len(l0), len(idx0))
+
+    l_good = l0.copy()
+    l_good.subject = 'sample'
+    l_bad = l1.copy()
+    l_bad.subject = 'foo'
+    assert_raises(ValueError, l_good.__add__, l_bad)
+    assert_raises(TypeError, l_good.__add__, 'foo')
+    assert_raises(ValueError, l_good.__sub__, l_bad)
+    assert_raises(TypeError, l_good.__sub__, 'foo')
+
+    # adding non-overlapping labels
+    l01 = l0 + l1
+    assert_equal(len(l01), len(l0) + len(l1))
+    assert_array_equal(l01.values[:len(l0)], l0.values)
+    assert_equal(l01.color, l0.color)
+    # subtraction
+    assert_labels_equal(l01 - l0, l1, comment=False, color=False)
+    assert_labels_equal(l01 - l1, l0, comment=False, color=False)
+
+    # adding overlapping labels
+    l = l0 + l2
+    i0 = np.where(l0.vertices == 6)[0][0]
+    i2 = np.where(l2.vertices == 6)[0][0]
+    i = np.where(l.vertices == 6)[0][0]
+    assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
+    assert_equal(l.values[0], l0.values[0])
+    assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
+    assert_equal(l.color, _blend_colors(l0.color, l2.color))
+
+    # adding lh and rh
+    l2.hemi = 'rh'
+    # this now has deprecated behavior
+    bhl = l0 + l2
+    assert_equal(bhl.hemi, 'both')
+    assert_equal(len(bhl), len(l0) + len(l2))
+    assert_equal(bhl.color, l.color)
+    assert_true('BiHemiLabel' in repr(bhl))
+    # subtraction
+    assert_labels_equal(bhl - l0, l2)
+    assert_labels_equal(bhl - l2, l0)
+
+    bhl2 = l1 + bhl
+    assert_labels_equal(bhl2.lh, l01)
+    assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
+    assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices)  # rh label
+    assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
+    assert_raises(TypeError, bhl.__add__, 5)
+
+    # subtraction
+    bhl_ = bhl2 - l1
+    assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
+    assert_labels_equal(bhl_.rh, bhl.rh)
+    assert_labels_equal(bhl2 - l2, l0 + l1)
+    assert_labels_equal(bhl2 - l1 - l0, l2)
+    bhl_ = bhl2 - bhl2
+    assert_array_equal(bhl_.vertices, [])
+
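+# Condensed view of the overlap semantics asserted above (illustrative only,
+# reusing l0 and l2 from test_label_addition):
+#
+#     l = l0 + l2   # shared vertices appear once, their values are summed
+#     l.color       # == _blend_colors(l0.color, l2.color)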
+
+@testing.requires_testing_data
+def test_label_in_src():
+    """Test label in src"""
+    src = read_source_spaces(src_fname)
+    label = read_label(v1_label_fname)
+
+    # construct label from source space vertices
+    vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
+    where = in1d(label.vertices, vert_in_src)
+    pos_in_src = label.pos[where]
+    values_in_src = label.values[where]
+    label_src = Label(vert_in_src, pos_in_src, values_in_src,
+                      hemi='lh').fill(src)
+
+    # check label vertices
+    vertices_status = in1d(src[0]['nearest'], label.vertices)
+    vertices_in = np.nonzero(vertices_status)[0]
+    vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
+    assert_array_equal(label_src.vertices, vertices_in)
+    assert_array_equal(in1d(vertices_out, label_src.vertices), False)
+
+    # check values
+    value_idx = digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
+    assert_array_equal(label_src.values, values_in_src[value_idx])
+
+    # test exception
+    vertices = np.append([-1], vert_in_src)
+    assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
+
+
+@testing.requires_testing_data
+def test_label_io_and_time_course_estimates():
+    """Test IO for label + stc files
+    """
+    stc = read_source_estimate(stc_fname)
+    label = read_label(real_label_fname)
+    stc_label = stc.in_label(label)
+
+    assert_true(len(stc_label.times) == stc_label.data.shape[1])
+    assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
+
+
+@testing.requires_testing_data
+def test_label_io():
+    """Test IO of label files
+    """
+    tempdir = _TempDir()
+    label = read_label(label_fname)
+
+    # label attributes
+    assert_equal(label.name, 'test-lh')
+    assert_is(label.subject, None)
+    assert_is(label.color, None)
+
+    # save and reload
+    label.save(op.join(tempdir, 'foo'))
+    label2 = read_label(op.join(tempdir, 'foo-lh.label'))
+    assert_labels_equal(label, label2)
+
+    # pickling
+    dest = op.join(tempdir, 'foo.pickled')
+    with open(dest, 'wb') as fid:
+        pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
+    with open(dest, 'rb') as fid:
+        label2 = pickle.load(fid)
+    assert_labels_equal(label, label2)
+
+
+def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
+    """Make sure two sets of labels are equal"""
+    for label_a, label_b in zip(labels_a, labels_b):
+        assert_array_equal(label_a.vertices, label_b.vertices)
+        assert_true(label_a.name == label_b.name)
+        assert_true(label_a.hemi == label_b.hemi)
+        if not ignore_pos:
+            assert_array_equal(label_a.pos, label_b.pos)
+
+
+@testing.requires_testing_data
+def test_annot_io():
+    """Test I/O from and to *.annot files"""
+    # copy necessary files from fsaverage to tempdir
+    tempdir = _TempDir()
+    subject = 'fsaverage'
+    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
+    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
+    label_dir = os.path.join(tempdir, subject, 'label')
+    surf_dir = os.path.join(tempdir, subject, 'surf')
+    os.makedirs(label_dir)
+    os.mkdir(surf_dir)
+    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
+    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
+    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
+    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
+
+    # read original labels
+    assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
+                  subjects_dir=tempdir)
+    labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
+                                    subjects_dir=tempdir)
+
+    # test saving parcellation only covering one hemisphere
+    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
+    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
+    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
+    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
+    assert_equal(len(parc1), len(parc))
+    for l1, l in zip(parc1, parc):
+        assert_labels_equal(l1, l)
+
+    # test saving only one hemisphere
+    parc = [l for l in labels if l.name.startswith('LOBE')]
+    write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
+                          subjects_dir=tempdir)
+    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
+    assert_true(os.path.isfile(annot_fname % 'l'))
+    assert_false(os.path.isfile(annot_fname % 'r'))
+    parc1 = read_labels_from_annot(subject, 'myparc2',
+                                   annot_fname=annot_fname % 'l',
+                                   subjects_dir=tempdir)
+    parc_lh = [l for l in parc if l.name.endswith('lh')]
+    for l1, l in zip(parc1, parc_lh):
+        assert_labels_equal(l1, l)
+
+
+@testing.requires_testing_data
+def test_read_labels_from_annot():
+    """Test reading labels from FreeSurfer parcellation
+    """
+    # test some invalid inputs
+    assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, read_labels_from_annot, 'sample',
+                  annot_fname='bla.annot', subjects_dir=subjects_dir)
+
+    # read labels using hemi specification
+    labels_lh = read_labels_from_annot('sample', hemi='lh',
+                                       subjects_dir=subjects_dir)
+    for label in labels_lh:
+        assert_true(label.name.endswith('-lh'))
+        assert_true(label.hemi == 'lh')
+        # XXX fails on 2.6 for some reason...
+        if sys.version_info[:2] > (2, 6):
+            assert_is_not(label.color, None)
+
+    # read labels using annot_fname
+    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
+    labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
+                                       subjects_dir=subjects_dir)
+    for label in labels_rh:
+        assert_true(label.name.endswith('-rh'))
+        assert_true(label.hemi == 'rh')
+        assert_is_not(label.color, None)
+
+    # combine the lh, rh, labels and sort them
+    labels_lhrh = list()
+    labels_lhrh.extend(labels_lh)
+    labels_lhrh.extend(labels_rh)
+
+    names = [label.name for label in labels_lhrh]
+    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
+
+    # read all labels at once
+    labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
+
+    # we have the same result
+    _assert_labels_equal(labels_lhrh, labels_both)
+
+    # aparc has 68 cortical labels
+    assert_true(len(labels_both) == 68)
+
+    # test regexp
+    label = read_labels_from_annot('sample', parc='aparc.a2009s',
+                                   regexp='Angu', subjects_dir=subjects_dir)[0]
+    assert_true(label.name == 'G_pariet_inf-Angular-lh')
+    # silly, but real regexp:
+    label = read_labels_from_annot('sample', 'aparc.a2009s',
+                                   regexp='.*-.{4,}_.{3,3}-L',
+                                   subjects_dir=subjects_dir)[0]
+    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
+    assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
+                  annot_fname=annot_fname, regexp='JackTheRipper',
+                  subjects_dir=subjects_dir)
+
+
+@testing.requires_testing_data
+def test_read_labels_from_annot_annot2labels():
+    """Test reading labels from parc. by comparing with mne_annot2labels
+    """
+    label_fnames = glob.glob(label_dir + '/*.label')
+    label_fnames.sort()
+    labels_mne = [read_label(fname) for fname in label_fnames]
+    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
+
+    # we have the same result, mne does not fill pos, so ignore it
+    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
+
+
+@testing.requires_testing_data
+def test_write_labels_to_annot():
+    """Test writing FreeSurfer parcellation from labels"""
+    tempdir = _TempDir()
+
+    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
+
+    # create temporary subjects-dir skeleton
+    surf_dir = op.join(subjects_dir, 'sample', 'surf')
+    temp_surf_dir = op.join(tempdir, 'sample', 'surf')
+    os.makedirs(temp_surf_dir)
+    shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
+    shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
+    os.makedirs(op.join(tempdir, 'sample', 'label'))
+
+    # test automatic filenames
+    dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
+    write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test1')))
+    assert_true(op.exists(dst % ('rh', 'test1')))
+    # lh only
+    for label in labels:
+        if label.hemi == 'lh':
+            break
+    write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test2')))
+    assert_true(op.exists(dst % ('rh', 'test2')))
+    # rh only
+    for label in labels:
+        if label.hemi == 'rh':
+            break
+    write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test3')))
+    assert_true(op.exists(dst % ('rh', 'test3')))
+    # label alone
+    assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
+                  'test4', subjects_dir=tempdir)
+
+    # write left and right hemi labels with filenames:
+    fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]
+    for fname in fnames:
+        write_labels_to_annot(labels, annot_fname=fname)
+
+    # read it back
+    labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                     annot_fname=fnames[0])
+    labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                      annot_fname=fnames[1])
+    labels2.extend(labels22)
+
+    names = [label.name for label in labels2]
+
+    for label in labels:
+        idx = names.index(label.name)
+        assert_labels_equal(label, labels2[idx])
+
+    # same with label-internal colors
+    for fname in fnames:
+        write_labels_to_annot(labels, annot_fname=fname, overwrite=True)
+    labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                     annot_fname=fnames[0])
+    labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
+                                      annot_fname=fnames[1])
+    labels3.extend(labels33)
+    names3 = [label.name for label in labels3]
+    for label in labels:
+        idx = names3.index(label.name)
+        assert_labels_equal(label, labels3[idx])
+
+    # make sure we can't overwrite things
+    assert_raises(ValueError, write_labels_to_annot, labels,
+                  annot_fname=fnames[0])
+
+    # however, this works
+    write_labels_to_annot(labels, annot_fname=fnames[0], overwrite=True)
+
+    # label without color
+    labels_ = labels[:]
+    labels_[0] = labels_[0].copy()
+    labels_[0].color = None
+    write_labels_to_annot(labels_, annot_fname=fnames[0], overwrite=True)
+
+    # duplicate color
+    labels_[0].color = labels_[2].color
+    assert_raises(ValueError, write_labels_to_annot, labels_,
+                  annot_fname=fnames[0], overwrite=True)
+
+    # invalid color inputs
+    labels_[0].color = (1.1, 1., 1., 1.)
+    assert_raises(ValueError, write_labels_to_annot, labels_,
+                  annot_fname=fnames[0], overwrite=True)
+
+    # overlapping labels
+    labels_ = labels[:]
+    cuneus_lh = labels[6]
+    precuneus_lh = labels[50]
+    labels_.append(precuneus_lh + cuneus_lh)
+    assert_raises(ValueError, write_labels_to_annot, labels_,
+                  annot_fname=fnames[0], overwrite=True)
+
+    # unlabeled vertices
+    labels_lh = [label for label in labels if label.name.endswith('lh')]
+    write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
+                          overwrite=True, subjects_dir=subjects_dir)
+    labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
+                                             subjects_dir=subjects_dir)
+    assert_equal(len(labels_lh), len(labels_reloaded))
+    label0 = labels_lh[0]
+    label1 = labels_reloaded[-1]
+    assert_equal(label1.name, "unknown-lh")
+    assert_true(np.all(in1d(label0.vertices, label1.vertices)))
+
+    # unnamed labels
+    labels4 = labels[:]
+    labels4[0].name = None
+    assert_raises(ValueError, write_labels_to_annot, labels4,
+                  annot_fname=fnames[0])
+
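+# Recap of the failure modes exercised above: write_labels_to_annot raises
+# ValueError for an existing annot file without overwrite=True, for duplicate
+# or out-of-range label colors, for overlapping labels, and for labels whose
+# name is None.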
+
+@testing.requires_testing_data
+def test_split_label():
+    """Test splitting labels"""
+    aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
+                                   regexp='lingual', subjects_dir=subjects_dir)
+    lingual = aparc[0]
+
+    # split with names
+    parts = ('lingual_post', 'lingual_ant')
+    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
+
+    # check output names
+    assert_equal(post.name, parts[0])
+    assert_equal(ant.name, parts[1])
+
+    # check vertices add up
+    lingual_reconst = post + ant
+    lingual_reconst.name = lingual.name
+    lingual_reconst.comment = lingual.comment
+    lingual_reconst.color = lingual.color
+    assert_labels_equal(lingual_reconst, lingual)
+
+    # compare output of Label.split() method
+    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
+    assert_labels_equal(post1, post)
+    assert_labels_equal(ant1, ant)
+
+    # compare fs_like split with freesurfer split
+    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
+    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
+               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
+               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
+               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
+               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
+    assert_array_equal(antmost.vertices, fs_vert)
+
+    # check default label name
+    assert_equal(antmost.name, "lingual_div40-lh")
+
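+# The invariant behind the reconstruction check above, as a sketch (names
+# reuse the lingual label from test_split_label):
+#
+#     post, ant = split_label(lingual, ('post', 'ant'),
+#                             subjects_dir=subjects_dir)
+#     merged = post + ant   # vertices partition the original label, so
+#                           # merged matches it up to name/comment/color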
+
+@slow_test
+@testing.requires_testing_data
+@requires_sklearn
+def test_stc_to_label():
+    """Test stc_to_label
+    """
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        src = read_source_spaces(fwd_fname)
+    src_bad = read_source_spaces(src_bad_fname)
+    stc = read_source_estimate(stc_fname, 'sample')
+    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
+    labels1 = _stc_to_label(stc, src='sample', smooth=3)
+    labels2 = _stc_to_label(stc, src=src, smooth=3)
+    assert_equal(len(labels1), len(labels2))
+    for l1, l2 in zip(labels1, labels2):
+        assert_labels_equal(l1, l2, decimal=4)
+
+    with warnings.catch_warnings(record=True) as w:  # connectedness warning
+        warnings.simplefilter('always')
+        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
+                                            connected=True)
+
+    assert_true(len(w) > 0)
+    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
+                  connected=True)
+    assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
+                  connected=True)
+    assert_equal(len(labels_lh), 1)
+    assert_equal(len(labels_rh), 1)
+
+    # test getting tris
+    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
+    assert_raises(ValueError, spatial_tris_connectivity, tris,
+                  remap_vertices=False)
+    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
+    assert_true(connectivity.shape[0] == len(stc.vertices[0]))
+
+    # "src" as a subject name
+    assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
+                  connected=False, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
+                  smooth=False, connected=False, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
+                  connected=True, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
+                  connected=False, subjects_dir=subjects_dir)
+    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
+                                        connected=False,
+                                        subjects_dir=subjects_dir)
+    assert_true(len(labels_lh) > 1)
+    assert_true(len(labels_rh) > 1)
+
+    # with smooth='patch'
+    with warnings.catch_warnings(record=True) as w:  # connectedness warning
+        warnings.simplefilter('always')
+        labels_patch = stc_to_label(stc, src=src, smooth=True)
+    assert_equal(len(w), 1)
+    assert_equal(len(labels_patch), len(labels1))
+    for l1, l2 in zip(labels_patch, labels1):
+        assert_labels_equal(l1, l2, decimal=4)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_morph():
+    """Test inter-subject label morphing
+    """
+    label_orig = read_label(real_label_fname)
+    label_orig.subject = 'sample'
+    # should work for specifying vertices for both hemis, or just the
+    # hemi of the given label
+    vals = list()
+    for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
+        label = label_orig.copy()
+        # this should throw an error because the label has all zero values
+        assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
+        label.values.fill(1)
+        label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1,
+                    copy=False)
+        label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2,
+                    copy=False)
+        assert_true(np.mean(in1d(label_orig.vertices, label.vertices)) == 1.0)
+        assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
+        vals.append(label.vertices)
+    assert_array_equal(vals[0], vals[1])
+    # make sure label smoothing can run
+    assert_equal(label.subject, 'sample')
+    verts = [np.arange(10242), np.arange(10242)]
+    for hemi in ['lh', 'rh']:
+        label.hemi = hemi
+        label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
+    assert_raises(TypeError, label.morph, None, 1, 5, verts,
+                  subjects_dir, 2)
+    assert_raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
+                  subjects_dir, 2)
+    label.smooth(subjects_dir=subjects_dir)  # make sure this runs
+
+
+@testing.requires_testing_data
+def test_grow_labels():
+    """Test generation of circular source labels"""
+    seeds = [0, 50000]
+    # these were chosen manually in mne_analyze
+    should_be_in = [[49, 227], [51207, 48794]]
+    hemis = [0, 1]
+    names = ['aneurism', 'tumor']
+    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
+
+    tgt_names = ['aneurism-lh', 'tumor-rh']
+    tgt_hemis = ['lh', 'rh']
+    for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
+                                           should_be_in, tgt_names):
+        assert_true(np.any(label.vertices == seed))
+        assert_true(np.all(in1d(sh, label.vertices)))
+        assert_equal(label.hemi, hemi)
+        assert_equal(label.name, name)
+
+    # grow labels with and without overlap
+    seeds = [57532, [58887, 6304]]
+    l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
+    seeds = [57532, [58887, 6304]]
+    l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
+                           overlap=False)
+
+    # test label naming
+    assert_equal(l01.name, 'Label_0-lh')
+    assert_equal(l02.name, 'Label_1-lh')
+    assert_equal(l11.name, 'Label_0-lh')
+    assert_equal(l12.name, 'Label_1-lh')
+
+    # make sure set 1 does not overlap
+    overlap = np.intersect1d(l11.vertices, l12.vertices, True)
+    assert_array_equal(overlap, [])
+
+    # make sure both sets cover the same vertices
+    l0 = l01 + l02
+    l1 = l11 + l12
+    assert_array_equal(l1.vertices, l0.vertices)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_misc.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_misc.py
new file mode 100644
index 0000000..edf8589
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_misc.py
@@ -0,0 +1,14 @@
+import os.path as op
+from nose.tools import assert_true
+
+from mne.misc import parse_config
+
+ave_fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                    'test.ave')
+
+
+def test_parse_ave():
+    """Test parsing of .ave file
+    """
+    conditions = parse_config(ave_fname)
+    assert_true(len(conditions) == 4)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_proj.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_proj.py
new file mode 100644
index 0000000..e9af0ed
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_proj.py
@@ -0,0 +1,278 @@
+import os.path as op
+from nose.tools import assert_true, assert_raises
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_allclose,
+                           assert_equal)
+
+import copy as cp
+
+import mne
+from mne.datasets import testing
+from mne import pick_types
+from mne.io import Raw
+from mne import compute_proj_epochs, compute_proj_evoked, compute_proj_raw
+from mne.io.proj import (make_projector, activate_proj,
+                         _needs_eeg_average_ref_proj)
+from mne.proj import (read_proj, write_proj, make_eeg_average_ref_proj,
+                      _has_eeg_average_ref_proj)
+from mne import read_events, Epochs, sensitivity_map, read_source_estimate
+from mne.utils import (_TempDir, run_tests_if_main, clean_warning_registry,
+                       slow_test)
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
+proj_fname = op.join(base_dir, 'test-proj.fif')
+proj_gz_fname = op.join(base_dir, 'test-proj.fif.gz')
+bads_fname = op.join(base_dir, 'test_bads.txt')
+
+sample_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fwd_fname = op.join(sample_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+sensmap_fname = op.join(sample_path,
+                        'sample_audvis_trunc-%s-oct-4-fwd-sensmap-%s.w')
+
+# sample dataset should be updated to reflect mne conventions
+eog_fname = op.join(sample_path, 'sample_audvis_eog_proj.fif')
+
+
+@testing.requires_testing_data
+def test_sensitivity_maps():
+    """Test sensitivity map computation"""
+    fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        proj_eog = read_proj(eog_fname)
+    decim = 6
+    for ch_type in ['eeg', 'grad', 'mag']:
+        w = read_source_estimate(sensmap_fname % (ch_type, 'lh')).data
+        stc = sensitivity_map(fwd, projs=None, ch_type=ch_type,
+                              mode='free', exclude='bads')
+        assert_array_almost_equal(stc.data, w, decim)
+        assert_true(stc.subject == 'sample')
+        # let's just make sure the others run
+        if ch_type == 'grad':
+            # fixed (2)
+            w = read_source_estimate(sensmap_fname % (ch_type, '2-lh')).data
+            stc = sensitivity_map(fwd, projs=None, mode='fixed',
+                                  ch_type=ch_type, exclude='bads')
+            assert_array_almost_equal(stc.data, w, decim)
+        if ch_type == 'mag':
+            # ratio (3)
+            w = read_source_estimate(sensmap_fname % (ch_type, '3-lh')).data
+            stc = sensitivity_map(fwd, projs=None, mode='ratio',
+                                  ch_type=ch_type, exclude='bads')
+            assert_array_almost_equal(stc.data, w, decim)
+        if ch_type == 'eeg':
+            # radiality (4), angle (5), remaining (6), and dampening (7)
+            modes = ['radiality', 'angle', 'remaining', 'dampening']
+            ends = ['4-lh', '5-lh', '6-lh', '7-lh']
+            for mode, end in zip(modes, ends):
+                w = read_source_estimate(sensmap_fname % (ch_type, end)).data
+                stc = sensitivity_map(fwd, projs=proj_eog, mode=mode,
+                                      ch_type=ch_type, exclude='bads')
+                assert_array_almost_equal(stc.data, w, decim)
+
+    # test corner case for EEG
+    stc = sensitivity_map(fwd, projs=[make_eeg_average_ref_proj(fwd['info'])],
+                          ch_type='eeg', exclude='bads')
+    # test volume source space
+    fname = op.join(sample_path, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
+    fwd = mne.read_forward_solution(fname)
+    sensitivity_map(fwd)
+
+
+def test_compute_proj_epochs():
+    """Test SSP computation on epochs"""
+    tempdir = _TempDir()
+    event_id, tmin, tmax = 1, -0.2, 0.3
+
+    raw = Raw(raw_fname, preload=True)
+    events = read_events(event_fname)
+    bad_ch = 'MEG 2443'
+    picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
+                       exclude=[])
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=None, proj=False)
+
+    evoked = epochs.average()
+    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=1)
+    write_proj(op.join(tempdir, 'test-proj.fif.gz'), projs)
+    for p_fname in [proj_fname, proj_gz_fname,
+                    op.join(tempdir, 'test-proj.fif.gz')]:
+        projs2 = read_proj(p_fname)
+
+        assert_true(len(projs) == len(projs2))
+
+        for p1, p2 in zip(projs, projs2):
+            assert_true(p1['desc'] == p2['desc'])
+            assert_true(p1['data']['col_names'] == p2['data']['col_names'])
+            assert_true(p1['active'] == p2['active'])
+            # compare with sign invariance
+            p1_data = p1['data']['data'] * np.sign(p1['data']['data'][0, 0])
+            p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0])
+            if bad_ch in p1['data']['col_names']:
+                bad = p1['data']['col_names'].index('MEG 2443')
+                mask = np.ones(p1_data.size, dtype=np.bool)
+                mask[bad] = False
+                p1_data = p1_data[:, mask]
+                p2_data = p2_data[:, mask]
+            corr = np.corrcoef(p1_data, p2_data)[0, 1]
+            assert_array_almost_equal(corr, 1.0, 5)
+
+    # test that you can compute the projection matrix
+    projs = activate_proj(projs)
+    proj, nproj, U = make_projector(projs, epochs.ch_names, bads=[])
+
+    assert_true(nproj == 2)
+    assert_true(U.shape[1] == 2)
+
+    # test that you can save them
+    epochs.info['projs'] += projs
+    evoked = epochs.average()
+    evoked.save(op.join(tempdir, 'foo-ave.fif'))
+
+    projs = read_proj(proj_fname)
+
+    projs_evoked = compute_proj_evoked(evoked, n_grad=1, n_mag=1, n_eeg=0)
+    assert_true(len(projs_evoked) == 2)
+    # XXX : test something
+
+    # test parallelization
+    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2,
+                                desc_prefix='foobar')
+    assert_true(all('foobar' in x['desc'] for x in projs))
+    projs = activate_proj(projs)
+    proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
+    assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
+
+    # test warnings on bad filenames
+    clean_warning_registry()
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_proj(proj_badname, projs)
+        read_proj(proj_badname)
+        print([ww.message for ww in w])
+    assert_equal(len(w), 2)
+
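+# Minimal end-to-end SSP sketch using only calls exercised above (fixture
+# paths from this module; parameter values are illustrative):
+#
+#     raw = Raw(raw_fname, preload=True)
+#     epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.3,
+#                     baseline=None, proj=False)
+#     projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0)
+#     proj, nproj, U = make_projector(activate_proj(projs),
+#                                     epochs.ch_names, bads=[])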
+
+@slow_test
+def test_compute_proj_raw():
+    """Test SSP computation on raw"""
+    tempdir = _TempDir()
+    # Test that the raw projectors work
+    raw_time = 2.5  # use a shorter duration for speed
+    raw = Raw(raw_fname).crop(0, raw_time, False)
+    raw.load_data()
+    for ii in (0.25, 0.5, 1, 2):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            projs = compute_proj_raw(raw, duration=ii - 0.1, stop=raw_time,
+                                     n_grad=1, n_mag=1, n_eeg=0)
+            assert_true(len(w) == 1)
+
+        # test that you can compute the projection matrix
+        projs = activate_proj(projs)
+        proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])
+
+        assert_true(nproj == 2)
+        assert_true(U.shape[1] == 2)
+
+        # test that you can save them
+        raw.info['projs'] += projs
+        raw.save(op.join(tempdir, 'foo_%d_raw.fif' % ii), overwrite=True)
+
+    # Test that purely continuous (no duration) raw projection works
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        projs = compute_proj_raw(raw, duration=None, stop=raw_time,
+                                 n_grad=1, n_mag=1, n_eeg=0)
+        assert_equal(len(w), 1)
+
+    # test that you can compute the projection matrix
+    projs = activate_proj(projs)
+    proj, nproj, U = make_projector(projs, raw.ch_names, bads=[])
+
+    assert_true(nproj == 2)
+    assert_true(U.shape[1] == 2)
+
+    # test that you can save them
+    raw.info['projs'] += projs
+    raw.save(op.join(tempdir, 'foo_rawproj_continuous_raw.fif'))
+
+    # test resampled-data projector, upsampling instead of downsampling
+    # here to save an extra filtering (raw would have to be LP'ed to be equiv)
+    raw_resamp = cp.deepcopy(raw)
+    raw_resamp.resample(raw.info['sfreq'] * 2, n_jobs=2)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        projs = compute_proj_raw(raw_resamp, duration=None, stop=raw_time,
+                                 n_grad=1, n_mag=1, n_eeg=0)
+    projs = activate_proj(projs)
+    proj_new, _, _ = make_projector(projs, raw.ch_names, bads=[])
+    assert_array_almost_equal(proj_new, proj, 4)
+
+    # test with bads
+    raw.load_bad_channels(bads_fname)  # adds 2 bad mag channels
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        projs = compute_proj_raw(raw, n_grad=0, n_mag=0, n_eeg=1)
+
+    # test that bad channels can be excluded
+    proj, nproj, U = make_projector(projs, raw.ch_names,
+                                    bads=raw.ch_names)
+    assert_array_almost_equal(proj, np.eye(len(raw.ch_names)))
+
+
+def test_make_eeg_average_ref_proj():
+    """Test EEG average reference projection"""
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=True)
+    eeg = mne.pick_types(raw.info, meg=False, eeg=True)
+
+    # No average EEG reference
+    assert_true(not np.all(raw._data[eeg].mean(axis=0) < 1e-19))
+
+    # Apply average EEG reference
+    car = make_eeg_average_ref_proj(raw.info)
+    reref = raw.copy()
+    reref.add_proj(car)
+    reref.apply_proj()
+    assert_array_almost_equal(reref._data[eeg].mean(axis=0), 0, decimal=19)
+
+    # Error when custom reference has already been applied
+    raw.info['custom_ref_applied'] = True
+    assert_raises(RuntimeError, make_eeg_average_ref_proj, raw.info)
+
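+# What the decimal=19 check above encodes: applying the average-reference
+# projector amounts to subtracting the per-sample mean over EEG channels
+# (plain NumPy, for intuition only):
+#
+#     reref_data = data - data.mean(axis=0, keepdims=True)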
+
+def test_has_eeg_average_ref_proj():
+    """Test checking whether an EEG average reference exists"""
+    assert_true(not _has_eeg_average_ref_proj([]))
+
+    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
+    assert_true(_has_eeg_average_ref_proj(raw.info['projs']))
+
+
+def test_needs_eeg_average_ref_proj():
+    """Test checking whether a recording needs an EEG average reference"""
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
+    assert_true(_needs_eeg_average_ref_proj(raw.info))
+
+    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+    # No EEG channels
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=True)
+    eeg = [raw.ch_names[c] for c in pick_types(raw.info, meg=False, eeg=True)]
+    raw.drop_channels(eeg)
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+    # Custom ref flag set
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
+    raw.info['custom_ref_applied'] = True
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_report.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_report.py
new file mode 100644
index 0000000..e708d82
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_report.py
@@ -0,0 +1,266 @@
+# Authors: Mainak Jas <mainak at neuro.hut.fi>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+import os
+import os.path as op
+import glob
+import warnings
+import shutil
+
+from nose.tools import assert_true, assert_equal, assert_raises
+
+from mne import Epochs, read_events, pick_types, read_evokeds
+from mne.io import Raw
+from mne.datasets import testing
+from mne.report import Report
+from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
+                       requires_PIL, run_tests_if_main, slow_test)
+from mne.viz import plot_trans
+
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+data_dir = testing.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+report_dir = op.join(data_dir, 'MEG', 'sample')
+raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
+event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
+cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
+fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
+inv_fname = op.join(report_dir,
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
+
+base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
+                               'data'))
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+
+# Set our plotters to test mode
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_PIL
+def test_render_report():
+    """Test rendering -*.fif files for mne report.
+    """
+    tempdir = _TempDir()
+    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
+    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
+    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
+    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
+    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
+    for a, b in [[raw_fname, raw_fname_new],
+                 [event_fname, event_fname_new],
+                 [cov_fname, cov_fname_new],
+                 [fwd_fname, fwd_fname_new],
+                 [inv_fname, inv_fname_new]]:
+        shutil.copyfile(a, b)
+
+    # create and add -epo.fif and -ave.fif files
+    epochs_fname = op.join(tempdir, 'temp-epo.fif')
+    evoked_fname = op.join(tempdir, 'temp-ave.fif')
+    raw = Raw(raw_fname_new)
+    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
+    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
+    epochs.save(epochs_fname)
+    epochs.average().save(evoked_fname)
+
+    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=tempdir, on_error='raise')
+    assert_true(len(w) >= 1)
+
+    # Check correct paths and filenames
+    fnames = glob.glob(op.join(tempdir, '*.fif'))
+    for fname in fnames:
+        assert_true(op.basename(fname) in
+                    [op.basename(x) for x in report.fnames])
+        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
+
+    assert_equal(len(report.fnames), len(fnames))
+    assert_equal(len(report.html), len(report.fnames))
+
+    # Check saving functionality
+    report.data_path = tempdir
+    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
+    assert_true(op.isfile(op.join(tempdir, 'report.html')))
+
+    assert_equal(len(report.html), len(fnames))
+    assert_equal(len(report.html), len(report.fnames))
+
+    # Check saving same report to new filename
+    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
+    assert_true(op.isfile(op.join(tempdir, 'report2.html')))
+
+    # Check overwriting file
+    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
+                overwrite=True)
+    assert_true(op.isfile(op.join(tempdir, 'report.html')))
+
+    # Check pattern matching with multiple patterns
+    pattern = ['*raw.fif', '*eve.fif']
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=tempdir, pattern=pattern)
+    assert_true(len(w) >= 1)
+
+    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
+        glob.glob(op.join(tempdir, '*eve.fif'))
+    for fname in fnames:
+        assert_true(op.basename(fname) in
+                    [op.basename(x) for x in report.fnames])
+        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
+
+
+@testing.requires_testing_data
+@requires_mayavi
+@requires_PIL
+def test_render_add_sections():
+    """Test adding figures/images to section.
+    """
+    from PIL import Image
+    tempdir = _TempDir()
+    import matplotlib.pyplot as plt
+    report = Report(subjects_dir=subjects_dir)
+    # Check add_figs_to_section functionality
+    fig = plt.plot([1, 2], [1, 2])[0].figure
+    report.add_figs_to_section(figs=fig,  # test non-list input
+                               captions=['evoked response'], scale=1.2,
+                               image_format='svg')
+    assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
+                  captions='H')
+    assert_raises(ValueError, report.add_figs_to_section, figs=fig,
+                  captions=['foo'], scale=0, image_format='svg')
+    assert_raises(ValueError, report.add_figs_to_section, figs=fig,
+                  captions=['foo'], scale=1e-10, image_format='svg')
+    # need to recreate because calls above change size
+    fig = plt.plot([1, 2], [1, 2])[0].figure
+
+    # Check add_images_to_section with png and then gif
+    img_fname = op.join(tempdir, 'testimage.png')
+    fig.savefig(img_fname)
+    report.add_images_to_section(fnames=[img_fname],
+                                 captions=['evoked response'])
+
+    im = Image.open(img_fname)
+    op.join(tempdir, 'testimage.gif')
+    im.save(img_fname)  # matplotlib does not support gif
+    report.add_images_to_section(fnames=[img_fname],
+                                 captions=['evoked response'])
+
+    assert_raises(ValueError, report.add_images_to_section,
+                  fnames=[img_fname, img_fname], captions='H')
+
+    assert_raises(ValueError, report.add_images_to_section,
+                  fnames=['foobar.xxx'], captions='H')
+
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    fig = plot_trans(evoked.info, trans_fname, subject='sample',
+                     subjects_dir=subjects_dir)
+
+    report.add_figs_to_section(figs=fig,  # test non-list input
+                               captions='random image', scale=1.2)
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_mayavi
+@requires_nibabel()
+def test_render_mri():
+    """Test rendering MRI for mne report.
+    """
+    tempdir = _TempDir()
+    trans_fname_new = op.join(tempdir, 'temp-trans.fif')
+    for a, b in [[trans_fname, trans_fname_new]]:
+        shutil.copyfile(a, b)
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
+                            n_jobs=2)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+
+@testing.requires_testing_data
+@requires_nibabel()
+def test_render_mri_without_bem():
+    """Test rendering MRI without BEM for mne report.
+    """
+    tempdir = _TempDir()
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(op.join(tempdir, 'sample', 'mri'))
+    shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=tempdir)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(tempdir)
+    assert_true(len(w) >= 1)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+
+@testing.requires_testing_data
+@requires_nibabel()
+def test_add_htmls_to_section():
+    """Test adding html str to mne report.
+    """
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    html = '<b>MNE-Python is AWESOME</b>'
+    caption, section = 'html', 'html_section'
+    report.add_htmls_to_section(html, caption, section)
+    idx = report._sectionlabels.index('report_' + section)
+    html_compare = report.html[idx]
+    assert_true(html in html_compare)
+
+
+def test_add_slider_to_section():
+    """Test adding a slider with a series of images to mne report.
+    """
+    tempdir = _TempDir()
+    from matplotlib import pyplot as plt
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    section = 'slider_section'
+    figs = list()
+    figs.append(plt.figure())
+    plt.plot([1, 2, 3])
+    plt.close('all')
+    figs.append(plt.figure())
+    plt.plot([3, 2, 1])
+    plt.close('all')
+    report.add_slider_to_section(figs, section=section)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+    assert_raises(NotImplementedError, report.add_slider_to_section,
+                  [figs, figs])
+    assert_raises(ValueError, report.add_slider_to_section, figs, ['wug'])
+    assert_raises(TypeError, report.add_slider_to_section, figs, 'wug')
+
+
+def test_validate_input():
+    report = Report()
+    items = ['a', 'b', 'c']
+    captions = ['Letter A', 'Letter B', 'Letter C']
+    section = 'ABCs'
+    comments = ['First letter of the alphabet.',
+                'Second letter of the alphabet',
+                'Third letter of the alphabet']
+    assert_raises(ValueError, report._validate_input, items, captions[:-1],
+                  section, comments=None)
+    assert_raises(ValueError, report._validate_input, items, captions, section,
+                  comments=comments[:-1])
+    values = report._validate_input(items, captions, section, comments=None)
+    items_new, captions_new, comments_new = values
+    assert_equal(len(comments_new), len(items))
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_selection.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_selection.py
new file mode 100644
index 0000000..4272ed0
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_selection.py
@@ -0,0 +1,27 @@
+from mne import read_selection
+
+
+def test_read_selection():
+    """Test reading of selections"""
+    # test one channel for each selection
+    ch_names = ['MEG 2211', 'MEG 0223', 'MEG 1312', 'MEG 0412', 'MEG 1043',
+                'MEG 2042', 'MEG 2032', 'MEG 0522', 'MEG 1031']
+    sel_names = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal',
+                 'Right-parietal', 'Left-occipital', 'Right-occipital',
+                 'Left-frontal', 'Right-frontal']
+
+    for i, name in enumerate(sel_names):
+        sel = read_selection(name)
+        assert(ch_names[i] in sel)
+
+    # test some combinations
+    all_ch = read_selection(['L', 'R'])
+    left = read_selection('L')
+    right = read_selection('R')
+
+    assert(len(all_ch) == len(left) + len(right))
+    assert(len(set(left).intersection(set(right))) == 0)
+
+    frontal = read_selection('frontal')
+    occipital = read_selection('Right-occipital')
+    assert(len(set(frontal).intersection(set(occipital))) == 0)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_estimate.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_estimate.py
new file mode 100644
index 0000000..6fa9fdd
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_estimate.py
@@ -0,0 +1,700 @@
+from __future__ import print_function
+import os.path as op
+from nose.tools import assert_true, assert_raises
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose, assert_equal)
+
+from scipy.fftpack import fft
+
+from mne.datasets import testing
+from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
+                 read_source_spaces, MixedSourceEstimate)
+from mne import read_source_estimate, morph_data, extract_label_time_course
+from mne.source_estimate import (spatio_temporal_tris_connectivity,
+                                 spatio_temporal_src_connectivity,
+                                 compute_morph_matrix, grade_to_vertices,
+                                 grade_to_tris)
+
+from mne.minimum_norm import read_inverse_operator
+from mne.label import read_labels_from_annot, label_sign_flip
+from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
+                       requires_h5py, run_tests_if_main, slow_test)
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+fname_inv = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
+fname_src = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
+fname_smorph = op.join(data_path, 'MEG', 'sample',
+                       'sample_audvis_trunc-meg')
+fname_fmorph = op.join(data_path, 'MEG', 'sample',
+                       'fsaverage_audvis_trunc-meg')
+fname_vol = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
+fname_vsrc = op.join(data_path, 'MEG', 'sample',
+                     'sample_audvis_trunc-meg-vol-7-fwd.fif')
+
+
+@slow_test
+@testing.requires_testing_data
+def test_volume_stc():
+    """Test volume STCs
+    """
+    tempdir = _TempDir()
+    N = 100
+    data = np.arange(N)[:, np.newaxis]
+    datas = [data, data, np.arange(2)[:, np.newaxis]]
+    vertno = np.arange(N)
+    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
+    vertno_reads = [vertno, vertno, np.arange(2)]
+    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
+        stc = VolSourceEstimate(data, vertno, 0, 1)
+        fname_temp = op.join(tempdir, 'temp-vl.stc')
+        stc_new = stc
+        for _ in range(2):
+            stc_new.save(fname_temp)
+            stc_new = read_source_estimate(fname_temp)
+            assert_true(isinstance(stc_new, VolSourceEstimate))
+            assert_array_equal(vertno_read, stc_new.vertices)
+            assert_array_almost_equal(stc.data, stc_new.data)
+
+    # now let's actually read an MNE-C processed file
+    stc = read_source_estimate(fname_vol, 'sample')
+    assert_true(isinstance(stc, VolSourceEstimate))
+
+    assert_true('sample' in repr(stc))
+    stc_new = stc
+    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
+    for _ in range(2):
+        fname_temp = op.join(tempdir, 'temp-vol.w')
+        stc_new.save(fname_temp, ftype='w')
+        stc_new = read_source_estimate(fname_temp)
+        assert_true(isinstance(stc_new, VolSourceEstimate))
+        assert_array_equal(stc.vertices, stc_new.vertices)
+        assert_array_almost_equal(stc.data, stc_new.data)
+
+    # save the stc as a nifti file and export
+    try:
+        import nibabel as nib
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            src = read_source_spaces(fname_vsrc)
+        vol_fname = op.join(tempdir, 'stc.nii.gz')
+        stc.save_as_volume(vol_fname, src,
+                           dest='surf', mri_resolution=False)
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            img = nib.load(vol_fname)
+        assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
+
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            t1_img = nib.load(fname_t1)
+        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
+                           dest='mri', mri_resolution=True)
+        with warnings.catch_warnings(record=True):  # nib<->numpy
+            img = nib.load(vol_fname)
+        assert_true(img.shape == t1_img.shape + (len(stc.times),))
+        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
+                                  decimal=5)
+
+        # export without saving
+        img = stc.as_volume(src, dest='mri', mri_resolution=True)
+        assert_true(img.shape == t1_img.shape + (len(stc.times),))
+        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
+                                  decimal=5)
+
+    except ImportError:
+        print('Save as nifti test skipped, needs NiBabel')
+
+
+@testing.requires_testing_data
+def test_expand():
+    """Test stc expansion
+    """
+    stc = read_source_estimate(fname_stc, 'sample')
+    assert_true('sample' in repr(stc))
+    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
+                                       subjects_dir=subjects_dir)
+    new_label = labels_lh[0] + labels_lh[1]
+    stc_limited = stc.in_label(new_label)
+    stc_new = stc_limited.copy()
+    stc_new.data.fill(0)
+    for label in labels_lh[:2]:
+        stc_new += stc.in_label(label).expand(stc_limited.vertices)
+    assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
+    assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
+    # make sure we can't add unless vertno agree
+    assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
+
+
+def _fake_stc(n_time=10):
+    verts = [np.arange(10), np.arange(90)]
+    return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
+
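+# Positional arguments as used in _fake_stc: data of shape
+# (n_vertices, n_times), vertices as [lh_vertno, rh_vertno], tmin, tstep,
+# and the subject name.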
+
+def test_io_stc():
+    """Test IO for STC files
+    """
+    tempdir = _TempDir()
+    stc = _fake_stc()
+    stc.save(op.join(tempdir, "tmp.stc"))
+    stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
+
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.tmin, stc2.tmin)
+    assert_equal(len(stc.vertices), len(stc2.vertices))
+    for v1, v2 in zip(stc.vertices, stc2.vertices):
+        assert_array_almost_equal(v1, v2)
+    assert_array_almost_equal(stc.tstep, stc2.tstep)
+
+
+@requires_h5py
+def test_io_stc_h5():
+    """Test IO for STC files using HDF5
+    """
+    tempdir = _TempDir()
+    stc = _fake_stc()
+    assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
+    out_name = op.join(tempdir, 'tmp')
+    stc.save(out_name, ftype='h5')
+    stc3 = read_source_estimate(out_name)
+    stc4 = read_source_estimate(out_name + '-stc.h5')
+    assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
+    for stc_new in stc3, stc4:
+        assert_equal(stc_new.subject, stc.subject)
+        assert_array_equal(stc_new.data, stc.data)
+        assert_array_equal(stc_new.tmin, stc.tmin)
+        assert_array_equal(stc_new.tstep, stc.tstep)
+        assert_equal(len(stc_new.vertices), len(stc.vertices))
+        for v1, v2 in zip(stc_new.vertices, stc.vertices):
+            assert_array_equal(v1, v2)
+
+
+def test_io_w():
+    """Test IO for w files
+    """
+    tempdir = _TempDir()
+    stc = _fake_stc(n_time=1)
+    w_fname = op.join(tempdir, 'fake')
+    stc.save(w_fname, ftype='w')
+    src = read_source_estimate(w_fname)
+    src.save(op.join(tempdir, 'tmp'), ftype='w')
+    src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
+    assert_array_almost_equal(src.data, src2.data)
+    assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
+    assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
+
+
+def test_stc_arithmetic():
+    """Test arithmetic for STC files
+    """
+    stc = _fake_stc()
+    data = stc.data.copy()
+
+    out = list()
+    for a in [data, stc]:
+        a = a + a * 3 + 3 * a - a ** 2 / 2
+
+        a += a
+        a -= a
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            a /= 2 * a
+        a *= -a
+
+        a += 2
+        a -= 1
+        a *= -1
+        a /= 2
+        b = 2 + a
+        b = 2 - a
+        b = +a
+        assert_array_equal(b.data, a.data)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            a **= 3
+        out.append(a)
+
+    assert_array_equal(out[0], out[1].data)
+    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
+
+    stc_mean = stc.mean()
+    assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
+
+
+@slow_test
+@testing.requires_testing_data
+def test_stc_methods():
+    """Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
+    """
+    stc = read_source_estimate(fname_stc)
+
+    # lh_data / rh_data
+    assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
+    assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
+
+    # bin
+    bin = stc.bin(.12)
+    a = np.array((1,), dtype=stc.data.dtype)
+    a[0] = np.mean(stc.data[0, stc.times < .12])
+    assert a[0] == bin.data[0, 0]
+
+    assert_raises(ValueError, stc.center_of_mass, 'sample')
+    stc.lh_data[:] = 0
+    vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
+    assert_true(hemi == 1)
+    # XXX Should design a fool-proof test case, but here were the results:
+    assert_equal(vertex, 124791)
+    assert_equal(np.round(t, 2), 0.12)
+
+    stc = read_source_estimate(fname_stc)
+    stc.subject = 'sample'
+    label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
+                                      subjects_dir=subjects_dir)[0]
+    label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
+                                      subjects_dir=subjects_dir)[0]
+    label_both = label_lh + label_rh
+    for label in (label_lh, label_rh, label_both):
+        assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
+        stc_label = stc.in_label(label)
+        if label.hemi != 'both':
+            if label.hemi == 'lh':
+                verts = stc_label.vertices[0]
+            else:  # label.hemi == 'rh':
+                verts = stc_label.vertices[1]
+            n_vertices_used = len(label.get_vertices_used(verts))
+            assert_equal(len(stc_label.data), n_vertices_used)
+    stc_lh = stc.in_label(label_lh)
+    assert_raises(ValueError, stc_lh.in_label, label_rh)
+    label_lh.subject = 'foo'
+    assert_raises(RuntimeError, stc.in_label, label_lh)
+
+    stc_new = deepcopy(stc)
+    o_sfreq = 1.0 / stc.tstep
+    # note that using no padding for this STC reduces edge ringing...
+    stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
+    assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
+    assert_true(stc_new.tstep == stc.tstep / 2)
+    stc_new.resample(o_sfreq, npad=0)
+    assert_true(stc_new.data.shape[1] == stc.data.shape[1])
+    assert_true(stc_new.tstep == stc.tstep)
+    assert_array_almost_equal(stc_new.data, stc.data, 5)
+
+
+@testing.requires_testing_data
+def test_extract_label_time_course():
+    """Test extraction of label time courses from stc
+    """
+    n_stcs = 3
+    n_times = 50
+
+    src = read_inverse_operator(fname_inv)['src']
+    vertices = [src[0]['vertno'], src[1]['vertno']]
+    n_verts = len(vertices[0]) + len(vertices[1])
+
+    # get some labels
+    labels_lh = read_labels_from_annot('sample', hemi='lh',
+                                       subjects_dir=subjects_dir)
+    labels_rh = read_labels_from_annot('sample', hemi='rh',
+                                       subjects_dir=subjects_dir)
+    labels = list()
+    labels.extend(labels_lh[:5])
+    labels.extend(labels_rh[:4])
+
+    n_labels = len(labels)
+
+    label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
+    label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
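+    # data are constant within each label, so per-label mean and max
+    # both equal the label index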
+
+    # compute the mean with sign flip
+    label_means_flipped = np.zeros_like(label_means)
+    for i, label in enumerate(labels):
+        label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
+
+    # generate some stc's with known data
+    stcs = list()
+    for i in range(n_stcs):
+        data = np.zeros((n_verts, n_times))
+        # set the value of the stc within each label
+        for j, label in enumerate(labels):
+            if label.hemi == 'lh':
+                idx = np.intersect1d(vertices[0], label.vertices)
+                idx = np.searchsorted(vertices[0], idx)
+            elif label.hemi == 'rh':
+                idx = np.intersect1d(vertices[1], label.vertices)
+                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
+            data[idx] = label_means[j]
+
+        this_stc = SourceEstimate(data, vertices, 0, 1)
+        stcs.append(this_stc)
+
+    # test some invalid inputs
+    assert_raises(ValueError, extract_label_time_course, stcs, labels,
+                  src, mode='notamode')
+
+    # have an empty label
+    empty_label = labels[0].copy()
+    empty_label.vertices += 1000000
+    assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
+                  src, mode='mean')
+
+    # but this works:
+    tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
+                                   allow_empty=True)
+    for arr in tc:
+        assert_true(arr.shape == (1, n_times))
+        assert_array_equal(arr, np.zeros((1, n_times)))
+
+    # test the different modes
+    modes = ['mean', 'mean_flip', 'pca_flip', 'max']
+
+    for mode in modes:
+        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
+        label_tc_method = [stc.extract_label_time_course(labels, src,
+                                                         mode=mode)
+                           for stc in stcs]
+        assert_true(len(label_tc) == n_stcs)
+        assert_true(len(label_tc_method) == n_stcs)
+        for tc1, tc2 in zip(label_tc, label_tc_method):
+            assert_true(tc1.shape == (n_labels, n_times))
+            assert_true(tc2.shape == (n_labels, n_times))
+            assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
+            if mode == 'mean':
+                assert_array_almost_equal(tc1, label_means)
+            if mode == 'mean_flip':
+                assert_array_almost_equal(tc1, label_means_flipped)
+            if mode == 'max':
+                assert_array_almost_equal(tc1, label_maxs)
+
+    # test label with very few vertices (check SVD conditionals)
+    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
+    x = label_sign_flip(label, src)
+    assert_true(len(x) == 2)
+    label = Label(vertices=[], hemi='lh')
+    x = label_sign_flip(label, src)
+    assert_true(x.size == 0)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_morph_data():
+    """Test morphing of data
+    """
+    tempdir = _TempDir()
+    subject_from = 'sample'
+    subject_to = 'fsaverage'
+    stc_from = read_source_estimate(fname_smorph, subject='sample')
+    stc_to = read_source_estimate(fname_fmorph)
+    # make sure we can specify grade
+    stc_from.crop(0.09, 0.1)  # for faster computation
+    stc_to.crop(0.09, 0.1)  # for faster computation
+    assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
+                  subjects_dir=subjects_dir)
+    stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
+                             subjects_dir=subjects_dir)
+    stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
+    # make sure we can specify vertices
+    vertices_to = grade_to_vertices(subject_to, grade=3,
+                                    subjects_dir=subjects_dir)
+    stc_to2 = morph_data(subject_from, subject_to, stc_from,
+                         grade=vertices_to, smooth=12, buffer_size=1000,
+                         subjects_dir=subjects_dir)
+    # make sure we can use different buffer_size
+    stc_to3 = morph_data(subject_from, subject_to, stc_from,
+                         grade=vertices_to, smooth=12, buffer_size=3,
+                         subjects_dir=subjects_dir)
+    # make sure we get a warning about # of steps
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        morph_data(subject_from, subject_to, stc_from,
+                   grade=vertices_to, smooth=1, buffer_size=3,
+                   subjects_dir=subjects_dir)
+    assert_equal(len(w), 2)
+
+    assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
+    assert_array_almost_equal(stc_to1.data, stc_to2.data)
+    assert_array_almost_equal(stc_to1.data, stc_to3.data)
+    # make sure precomputed morph matrices work
+    morph_mat = compute_morph_matrix(subject_from, subject_to,
+                                     stc_from.vertices, vertices_to,
+                                     smooth=12, subjects_dir=subjects_dir)
+    stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
+    assert_array_almost_equal(stc_to1.data, stc_to3.data)
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, vertices_to, 'foo')
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, [vertices_to[0]], morph_mat)
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
+    assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
+                  vertices_to, morph_mat, subject_from='foo')
+
+    # steps warning
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        compute_morph_matrix(subject_from, subject_to,
+                             stc_from.vertices, vertices_to,
+                             smooth=1, subjects_dir=subjects_dir)
+    assert_equal(len(w), 2)
+
+    mean_from = stc_from.data.mean(axis=0)
+    mean_to = stc_to1.data.mean(axis=0)
+    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
+
+    # make sure we can fill by morphing
+    stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
+                         smooth=12, buffer_size=3, subjects_dir=subjects_dir)
+    assert_true(stc_to5.data.shape[0] == 163842 + 163842)
+
+    # Morph sparse data
+    # Make a sparse stc
+    stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
+    stc_from.vertices[1] = stc_from.vertices[1][[200]]
+    stc_from._data = stc_from._data[:3]
+
+    assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
+                  grade=5, subjects_dir=subjects_dir)
+
+    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
+                                   subjects_dir=subjects_dir)
+    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
+                              np.sort(stc_to_sparse.data.sum(axis=1)))
+    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
+    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
+    assert_equal(stc_to_sparse.subject, subject_to)
+    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
+    assert_equal(stc_to_sparse.tstep, stc_from.tstep)
+
+    stc_from.vertices[0] = np.array([], dtype=np.int64)
+    stc_from._data = stc_from._data[:1]
+
+    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
+                                   subjects_dir=subjects_dir)
+    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
+                              np.sort(stc_to_sparse.data.sum(axis=1)))
+    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
+    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
+    assert_equal(stc_to_sparse.subject, subject_to)
+    assert_equal(stc_to_sparse.tmin, stc_from.tmin)
+    assert_equal(stc_to_sparse.tstep, stc_from.tstep)
+
+
+def _my_trans(data):
+    """FFT that adds an additional dimension by repeating result"""
+    data_t = fft(data)
+    data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
+    return data_t, None
+
+
+def test_transform_data():
+    """Test applying linear (time) transform to data"""
+    # make up some data
+    n_sensors, n_vertices, n_times = 10, 20, 4
+    kernel = np.random.randn(n_vertices, n_sensors)
+    sens_data = np.random.randn(n_sensors, n_times)
+
+    vertices = np.arange(n_vertices)
+    data = np.dot(kernel, sens_data)
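+    # (kernel, sens_data) is an alternative input form; the full data
+    # matrix equals np.dot(kernel, sens_data) as computed above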
+
+    for idx, tmin_idx, tmax_idx in \
+            zip([None, np.arange(n_vertices // 2, n_vertices)],
+                [None, 1], [None, 3]):
+
+        if idx is None:
+            idx_use = slice(None, None)
+        else:
+            idx_use = idx
+
+        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
+
+        for stc_data in (data, (kernel, sens_data)):
+            stc = VolSourceEstimate(stc_data, vertices=vertices,
+                                    tmin=0., tstep=1.)
+            stc_data_t = stc.transform_data(_my_trans, idx=idx,
+                                            tmin_idx=tmin_idx,
+                                            tmax_idx=tmax_idx)
+            assert_allclose(data_f, stc_data_t)
+
+
+def test_transform():
+    """Test applying linear (time) transform to data"""
+    # make up some data
+    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
+    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
+    data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
+    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
+
+    # data_t.ndim > 2 & copy is True
+    stcs_t = stc.transform(_my_trans, copy=True)
+    assert_true(isinstance(stcs_t, list))
+    assert_array_equal(stc.times, stcs_t[0].times)
+    assert_equal(stc.vertices, stcs_t[0].vertices)
+
+    data = np.concatenate((stcs_t[0].data[:, :, None],
+                           stcs_t[1].data[:, :, None]), axis=2)
+    data_t = stc.transform_data(_my_trans)
+    assert_array_equal(data, data_t)  # check against stc.transform_data()
+
+    # data_t.ndim > 2 & copy is False
+    assert_raises(ValueError, stc.transform, _my_trans, copy=False)
+
+    # data_t.ndim = 2 & copy is True
+    tmp = deepcopy(stc)
+    stc_t = stc.transform(np.abs, copy=True)
+    assert_true(isinstance(stc_t, SourceEstimate))
+    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?
+
+    # data_t.ndim = 2 & copy is False
+    times = np.round(1000 * stc.times)
+    verts = np.arange(len(stc.lh_vertno),
+                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
+    verts_rh = stc.rh_vertno
+    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
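+    # restrict to the right hemisphere (idx=verts) and [-50, 500] ms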
+    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
+                                tmax_idx=t_idx[-1])
+    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
+    assert_true(isinstance(stc, SourceEstimate))
+    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
+    assert_true(len(stc.vertices[0]) == 0)
+    assert_equal(stc.vertices[1], verts_rh)
+    assert_array_equal(stc.data, data_t)
+
+    times = np.round(1000 * stc.times)
+    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
+    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
+    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
+    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
+    assert_array_equal(stc.data, data_t)
+
+
+@requires_sklearn
+def test_spatio_temporal_tris_connectivity():
+    """Test spatio-temporal connectivity from triangles"""
+    tris = np.array([[0, 1, 2], [3, 4, 5]])
+    connectivity = spatio_temporal_tris_connectivity(tris, 2)
+    x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
+    components = stats.cluster_level._get_components(np.array(x), connectivity)
+    # _get_components works differently now...
+    old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
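+    # non-negative entries are component labels; negative entries mark
+    # vertices that belong to no component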
+    new_fmt = np.array(old_fmt)
+    new_fmt = [np.nonzero(new_fmt == v)[0]
+               for v in np.unique(new_fmt[new_fmt >= 0])]
+    assert_equal(len(new_fmt), len(components))
+    for c, n in zip(components, new_fmt):
+        assert_array_equal(c, n)
+
+
+@testing.requires_testing_data
+def test_spatio_temporal_src_connectivity():
+    """Test spatio-temporal connectivity from source spaces"""
+    tris = np.array([[0, 1, 2], [3, 4, 5]])
+    src = [dict(), dict()]
+    connectivity = spatio_temporal_tris_connectivity(tris, 2)
+    src[0]['use_tris'] = np.array([[0, 1, 2]])
+    src[1]['use_tris'] = np.array([[0, 1, 2]])
+    src[0]['vertno'] = np.array([0, 1, 2])
+    src[1]['vertno'] = np.array([0, 1, 2])
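+    # with identical tris and vertno in both hemispheres the src-based
+    # connectivity should match the purely tris-based one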
+    connectivity2 = spatio_temporal_src_connectivity(src, 2)
+    assert_array_equal(connectivity.todense(), connectivity2.todense())
+    # add test for dist connectivity
+    src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
+    src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
+    src[0]['vertno'] = [0, 1, 2]
+    src[1]['vertno'] = [0, 1, 2]
+    connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
+    assert_array_equal(connectivity.todense(), connectivity3.todense())
+    # add test for source space connectivity with omitted vertices
+    inverse_operator = read_inverse_operator(fname_inv)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        src_ = inverse_operator['src']
+        connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
+        assert len(w) == 1
+    a = connectivity.shape[0] / 2
+    b = sum([s['nuse'] for s in inverse_operator['src']])
+    assert_true(a == b)
+
+    assert_equal(grade_to_tris(5).shape, [40960, 3])
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test stc Pandas exporter"""
+    n_vert, n_times = 10, 5
+    vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
+    data = np.random.randn(n_vert, n_times)
+    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
+                              subject='sample')
+    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
+                                subject='sample')
+    for stc in [stc_surf, stc_vol]:
+        assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
+        for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
+            df = stc.to_data_frame(index=ind)
+            expected = ind if isinstance(ind, list) else [ind]
+            assert_true(df.index.names == expected)
+            assert_array_equal(df.values.T[ncat:], stc.data)
+            # test that non-indexed data are present as categorical variables
+            assert_true(all([c in ['time', 'subject'] for c in
+                             df.reset_index().columns][:2]))
+
+
+def test_get_peak():
+    """Test peak getter
+    """
+    n_vert, n_times = 10, 5
+    vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
+    data = np.random.randn(n_vert, n_times)
+    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
+                              subject='sample')
+
+    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
+                                subject='sample')
+
+    for ii, stc in enumerate([stc_surf, stc_vol]):
+        assert_raises(ValueError, stc.get_peak, tmin=-100)
+        assert_raises(ValueError, stc.get_peak, tmax=90)
+        assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
+
+        vert_idx, time_idx = stc.get_peak()
+        vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
+        assert_true(vert_idx in vertno)
+        assert_true(time_idx in stc.times)
+
+        ch_idx, time_idx = stc.get_peak(vert_as_index=True,
+                                        time_as_index=True)
+        assert_true(ch_idx < stc.data.shape[0])
+        assert_true(time_idx < len(stc.times))
+
+
+@testing.requires_testing_data
+def test_mixed_stc():
+    """Test source estimate from mixed source space
+    """
+    N = 90  # number of sources
+    T = 2  # number of time points
+    S = 3  # number of source spaces
+
+    data = np.random.randn(N, T)
+    vertno = S * [np.arange(N // S)]
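+    # S source spaces of N // S = 30 vertices each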
+
+    # make sure error is raised if vertices are not a list of length >= 2
+    assert_raises(ValueError, MixedSourceEstimate, data=data,
+                  vertices=[np.arange(N)])
+
+    stc = MixedSourceEstimate(data, vertno, 0, 1)
+
+    vol = read_source_spaces(fname_vsrc)
+
+    # make sure error is raised for plotting surface with volume source
+    assert_raises(ValueError, stc.plot_surface, src=vol)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_space.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_space.py
new file mode 100644
index 0000000..8fefdf2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_source_space.py
@@ -0,0 +1,687 @@
+from __future__ import print_function
+
+import os
+import os.path as op
+from nose.tools import assert_true, assert_raises
+from nose.plugins.skip import SkipTest
+import numpy as np
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
+import warnings
+
+from mne.datasets import testing
+from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
+                 setup_source_space, setup_volume_source_space,
+                 add_source_space_distances, read_bem_surfaces,
+                 morph_source_spaces, SourceEstimate)
+from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel,
+                       requires_freesurfer, run_subprocess, slow_test,
+                       requires_mne, requires_version, run_tests_if_main)
+from mne.surface import _accumulate_normals, _triangle_neighbors
+from mne.source_space import _get_mgz_header
+from mne.externals.six.moves import zip
+from mne.source_space import (get_volume_labels_from_aseg, SourceSpaces,
+                              _compare_source_spaces)
+from mne.io.constants import FIFF
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
+fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
+fname_vol = op.join(subjects_dir, 'sample', 'bem',
+                    'sample-volume-7mm-src.fif')
+fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-1280-bem.fif')
+fname_fs = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')
+fname_morph = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-fsaverage-ico-5-src.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+fname_small = op.join(base_dir, 'small-src.fif.gz')
+
+
+@testing.requires_testing_data
+@requires_nibabel(vox2ras_tkr=True)
+def test_mgz_header():
+    """Test MGZ header reading"""
+    import nibabel as nib
+    header = _get_mgz_header(fname_mri)
+    mri_hdr = nib.load(fname_mri).get_header()
+    assert_allclose(mri_hdr.get_data_shape(), header['dims'])
+    assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
+    assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox'])
+
+
+@requires_version('scipy', '0.11')
+def test_add_patch_info():
+    """Test adding patch info to source space"""
+    # let's setup a small source space
+    src = read_source_spaces(fname_small)
+    src_new = read_source_spaces(fname_small)
+    for s in src_new:
+        s['nearest'] = None
+        s['nearest_dist'] = None
+        s['pinfo'] = None
+
+    # test that no patch info is added for small dist_limit
+    try:
+        add_source_space_distances(src_new, dist_limit=0.00001)
+    except RuntimeError:  # what we throw when scipy version is wrong
+        pass
+    else:
+        assert_true(all(s['nearest'] is None for s in src_new))
+        assert_true(all(s['nearest_dist'] is None for s in src_new))
+        assert_true(all(s['pinfo'] is None for s in src_new))
+
+    # now let's use one that works
+    add_source_space_distances(src_new)
+
+    for s1, s2 in zip(src, src_new):
+        assert_array_equal(s1['nearest'], s2['nearest'])
+        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
+        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
+        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
+            assert_array_equal(p1, p2)
+
+
+@testing.requires_testing_data
+@requires_version('scipy', '0.11')
+def test_add_source_space_distances_limited():
+    """Test adding distances to source space with a dist_limit"""
+    tempdir = _TempDir()
+    src = read_source_spaces(fname)
+    src_new = read_source_spaces(fname)
+    del src_new[0]['dist']
+    del src_new[1]['dist']
+    n_do = 200  # limit this for speed
+    src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
+    src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
+    out_name = op.join(tempdir, 'temp-src.fif')
+    try:
+        add_source_space_distances(src_new, dist_limit=0.007)
+    except RuntimeError:  # what we throw when scipy version is wrong
+        raise SkipTest('dist_limit requires scipy > 0.13')
+    write_source_spaces(out_name, src_new)
+    src_new = read_source_spaces(out_name)
+
+    for so, sn in zip(src, src_new):
+        assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
+        assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
+        do = so['dist']
+        dn = sn['dist']
+
+        # clean out distances > 0.007 in C code
+        do.data[do.data > 0.007] = 0
+        do.eliminate_zeros()
+
+        # make sure we have some comparable distances
+        assert_true(np.sum(do.data < 0.007) > 400)
+
+        # do comparison over the region computed
+        d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
+        assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_version('scipy', '0.11')
+def test_add_source_space_distances():
+    """Test adding distances to source space"""
+    tempdir = _TempDir()
+    src = read_source_spaces(fname)
+    src_new = read_source_spaces(fname)
+    del src_new[0]['dist']
+    del src_new[1]['dist']
+    n_do = 19  # limit this for speed
+    src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
+    src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
+    out_name = op.join(tempdir, 'temp-src.fif')
+    n_jobs = 2
+    assert_true(n_do % n_jobs != 0)
+    add_source_space_distances(src_new, n_jobs=n_jobs)
+    write_source_spaces(out_name, src_new)
+    src_new = read_source_spaces(out_name)
+
+    # iterate over both hemispheres
+    for so, sn in zip(src, src_new):
+        v = so['vertno'][:n_do]
+        assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
+        assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
+        do = so['dist']
+        dn = sn['dist']
+
+        # clean out distances > 0.007 in C code (some residual), and Python
+        ds = list()
+        for d in [do, dn]:
+            d.data[d.data > 0.007] = 0
+            d = d[v][:, v]
+            d.eliminate_zeros()
+            ds.append(d)
+
+        # make sure we actually calculated some comparable distances
+        assert_true(np.sum(ds[0].data < 0.007) > 10)
+
+        # do comparison
+        d = ds[0] - ds[1]
+        assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
+
+
+@testing.requires_testing_data
+@requires_mne
+def test_discrete_source_space():
+    """Test setting up (and reading/writing) discrete source spaces
+    """
+    tempdir = _TempDir()
+    src = read_source_spaces(fname)
+    v = src[0]['vertno']
+
+    # let's make a discrete version with the C code, and with ours
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    try:
+        # save
+        temp_pos = op.join(tempdir, 'temp-pos.txt')
+        np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
+        # let's try the spherical one (no bem or surf supplied)
+        run_subprocess(['mne_volume_source_space', '--meters',
+                        '--pos', temp_pos, '--src', temp_name])
+        src_c = read_source_spaces(temp_name)
+        pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
+        src_new = setup_volume_source_space('sample', None,
+                                            pos=pos_dict,
+                                            subjects_dir=subjects_dir)
+        _compare_source_spaces(src_c, src_new, mode='approx')
+        assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
+                        rtol=1e-3, atol=1e-6)
+        assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
+                        rtol=1e-3, atol=1e-6)
+
+        # now do writing
+        write_source_spaces(temp_name, src_c)
+        src_c2 = read_source_spaces(temp_name)
+        _compare_source_spaces(src_c, src_c2)
+
+        # now do MRI
+        assert_raises(ValueError, setup_volume_source_space, 'sample',
+                      pos=pos_dict, mri=fname_mri)
+    finally:
+        if op.isfile(temp_name):
+            os.remove(temp_name)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_volume_source_space():
+    """Test setting up volume source spaces
+    """
+    tempdir = _TempDir()
+    src = read_source_spaces(fname_vol)
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
+    surf['rr'] *= 1e3  # convert to mm
+    # The one in the testing dataset (uses bem as bounds)
+    for bem, surf in zip((fname_bem, None), (None, surf)):
+        src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
+                                            bem=bem, surface=surf,
+                                            mri=fname_mri,
+                                            subjects_dir=subjects_dir)
+        _compare_source_spaces(src, src_new, mode='approx')
+        del src_new
+        src_new = read_source_spaces(temp_name)
+        _compare_source_spaces(src, src_new, mode='approx')
+    assert_raises(IOError, setup_volume_source_space, 'sample', temp_name,
+                  pos=7.0, bem=None, surface='foo',  # bad surf
+                  mri=fname_mri, subjects_dir=subjects_dir)
+
+
+@testing.requires_testing_data
+@requires_mne
+def test_other_volume_source_spaces():
+    """Test setting up other volume source spaces"""
+    # these are split off because they require the MNE tools, and
+    # Travis doesn't seem to like them
+
+    # let's try the spherical one (no bem or surf supplied)
+    tempdir = _TempDir()
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    run_subprocess(['mne_volume_source_space',
+                    '--grid', '7.0',
+                    '--src', temp_name,
+                    '--mri', fname_mri])
+    src = read_source_spaces(temp_name)
+    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
+                                        mri=fname_mri,
+                                        subjects_dir=subjects_dir)
+    _compare_source_spaces(src, src_new, mode='approx')
+    del src
+    del src_new
+    assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
+                  pos=7.0, sphere=[1., 1.], mri=fname_mri,  # bad sphere
+                  subjects_dir=subjects_dir)
+
+    # now without MRI argument, it should give an error when we try
+    # to read it
+    run_subprocess(['mne_volume_source_space',
+                    '--grid', '7.0',
+                    '--src', temp_name])
+    assert_raises(ValueError, read_source_spaces, temp_name)
+
+
+@testing.requires_testing_data
+def test_triangle_neighbors():
+    """Test efficient vertex neighboring triangles for surfaces"""
+    this = read_source_spaces(fname)[0]
+    this['neighbor_tri'] = [list() for _ in range(this['np'])]
+    for p in range(this['ntri']):
+        verts = this['tris'][p]
+        this['neighbor_tri'][verts[0]].append(p)
+        this['neighbor_tri'][verts[1]].append(p)
+        this['neighbor_tri'][verts[2]].append(p)
+    this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
+
+    neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
+    assert_true(all(np.array_equal(nt1, nt2)
+                    for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri'])))
+
+
+def test_accumulate_normals():
+    """Test efficient normal accumulation for surfaces"""
+    # set up comparison
+    rng = np.random.RandomState(0)
+    n_pts = int(1.6e5)  # approx number in sample source space
+    n_tris = int(3.2e5)
+    # use all positive to make a worst-case for cumulative summation
+    # (real "nn" vectors will have both positive and negative values)
+    tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
+    tris = np.c_[tris, tris + 1, tris + 2]
+    tri_nn = rng.rand(n_tris, 3)
+    this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
+
+    # cut-and-paste from original code in surface.py:
+    #    Find neighboring triangles and accumulate vertex normals
+    this['nn'] = np.zeros((this['np'], 3))
+    for p in range(this['ntri']):
+        # vertex normals
+        verts = this['tris'][p]
+        this['nn'][verts, :] += this['tri_nn'][p, :]
+    nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
+
+    # the moment of truth (or reckoning)
+    assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_setup_source_space():
+    """Test setting up ico, oct, and all source spaces
+    """
+    tempdir = _TempDir()
+    fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
+                        'fsaverage-ico-5-src.fif')
+    # first let's test some input params
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='octo',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm',
+                  add_dist=False)
+    assert_raises(ValueError, setup_source_space, 'sample', spacing='alls',
+                  add_dist=False)
+    assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
+                  subjects_dir=subjects_dir, add_dist=False)
+
+    # ico 5 (fsaverage) - write to temp file
+    src = read_source_spaces(fname_ico)
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
+        warnings.simplefilter('always')
+        src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
+                                     subjects_dir=subjects_dir, add_dist=False,
+                                     overwrite=True)
+    _compare_source_spaces(src, src_new, mode='approx')
+    assert_array_equal(src[0]['vertno'], np.arange(10242))
+    assert_array_equal(src[1]['vertno'], np.arange(10242))
+
+    # oct-6 (sample) - auto filename + IO
+    src = read_source_spaces(fname)
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    with warnings.catch_warnings(record=True):  # sklearn equiv neighbors
+        warnings.simplefilter('always')
+        src_new = setup_source_space('sample', temp_name, spacing='oct6',
+                                     subjects_dir=subjects_dir,
+                                     overwrite=True, add_dist=False)
+    _compare_source_spaces(src, src_new, mode='approx')
+    src_new = read_source_spaces(temp_name)
+    _compare_source_spaces(src, src_new, mode='approx')
+
+    # all source points - no file writing
+    src_new = setup_source_space('sample', None, spacing='all',
+                                 subjects_dir=subjects_dir, add_dist=False)
+    assert_true(src_new[0]['nuse'] == len(src_new[0]['rr']))
+    assert_true(src_new[1]['nuse'] == len(src_new[1]['rr']))
+
+    # dense source space to hit surf['inuse'] lines of _create_surf_spacing
+    assert_raises(RuntimeError, setup_source_space, 'sample', None,
+                  spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
+
+
+@testing.requires_testing_data
+def test_read_source_spaces():
+    """Test reading of source space meshes
+    """
+    src = read_source_spaces(fname, patch_stats=True)
+
+    # 3D source space
+    lh_points = src[0]['rr']
+    lh_faces = src[0]['tris']
+    lh_use_faces = src[0]['use_tris']
+    rh_points = src[1]['rr']
+    rh_faces = src[1]['tris']
+    rh_use_faces = src[1]['use_tris']
+    assert_true(lh_faces.min() == 0)
+    assert_true(lh_faces.max() == lh_points.shape[0] - 1)
+    assert_true(lh_use_faces.min() >= 0)
+    assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1)
+    assert_true(rh_faces.min() == 0)
+    assert_true(rh_faces.max() == rh_points.shape[0] - 1)
+    assert_true(rh_use_faces.min() >= 0)
+    assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_write_source_space():
+    """Test reading and writing of source spaces
+    """
+    tempdir = _TempDir()
+    src0 = read_source_spaces(fname, patch_stats=False)
+    write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0)
+    src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'),
+                              patch_stats=False)
+    _compare_source_spaces(src0, src1)
+
+    # test warnings on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+        write_source_spaces(src_badname, src0)
+        read_source_spaces(src_badname)
+    assert_equal(len(w), 2)
+
+
+@testing.requires_testing_data
+@requires_fs_or_nibabel
+def test_vertex_to_mni():
+    """Test conversion of vertices to MNI coordinates
+    """
+    # obtained using "tksurfer (sample) (l/r)h white"
+    vertices = [100960, 7620, 150549, 96761]
+    coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
+                       [-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
+    hemis = [0, 0, 0, 1]
+    coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
+    # less than 1mm error
+    assert_allclose(coords, coords_2, atol=1.0)
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_vertex_to_mni_fs_nibabel():
+    """Test equivalence of vert_to_mni for nibabel and freesurfer
+    """
+    n_check = 1000
+    subject = 'sample'
+    vertices = np.random.randint(0, 100000, n_check)
+    hemis = np.random.randint(0, 2, n_check)  # randomly mix lh and rh
+    coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                           'nibabel')
+    coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                             'freesurfer')
+    # less than 0.1 mm error
+    assert_allclose(coords, coords_2, atol=0.1)
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_get_volume_label_names():
+    """Test reading volume label names
+    """
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
+    assert_equal(label_names.count('Brain-Stem'), 1)
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_source_space_from_label():
+    """Test generating a source space from volume label
+    """
+    tempdir = _TempDir()
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
+    volume_label = label_names[int(np.random.rand() * len(label_names))]
+
+    # Test pos as dict
+    pos = dict()
+    assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
+                  volume_label=volume_label, mri=aseg_fname)
+
+    # Test no mri provided
+    assert_raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
+                  volume_label=volume_label)
+
+    # Test invalid volume label
+    assert_raises(ValueError, setup_volume_source_space, 'sample',
+                  volume_label='Hello World!', mri=aseg_fname)
+
+    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                    volume_label=volume_label, mri=aseg_fname,
+                                    add_interpolator=False)
+    assert_equal(volume_label, src[0]['seg_name'])
+
+    # test reading and writing
+    out_name = op.join(tempdir, 'temp-src.fif')
+    write_source_spaces(out_name, src)
+    src_from_file = read_source_spaces(out_name)
+    _compare_source_spaces(src, src_from_file, mode='approx')
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_combine_source_spaces():
+    """Test combining source spaces
+    """
+    tempdir = _TempDir()
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
+    volume_labels = [label_names[int(np.random.rand() * len(label_names))]
+                     for ii in range(2)]
+
+    # get a surface source space (no need to test creation here)
+    srf = read_source_spaces(fname, patch_stats=False)
+
+    # setup 2 volume source spaces
+    vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                    volume_label=volume_labels[0],
+                                    mri=aseg_fname, add_interpolator=False)
+
+    # setup a discrete source space
+    rr = np.random.randint(0, 20, (100, 3)) * 1e-3
+    nn = np.zeros(rr.shape)
+    nn[:, -1] = 1
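+    # random positions with all normals pointing along +z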
+    pos = {'rr': rr, 'nn': nn}
+    disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                     pos=pos, verbose='error')
+
+    # combine source spaces
+    src = srf + vol + disc
+
+    # test addition of source spaces
+    assert_equal(type(src), SourceSpaces)
+    assert_equal(len(src), 4)
+
+    # test reading and writing
+    src_out_name = op.join(tempdir, 'temp-src.fif')
+    src.save(src_out_name)
+    src_from_file = read_source_spaces(src_out_name)
+    _compare_source_spaces(src, src_from_file, mode='approx')
+
+    # test that all source spaces are in MRI coordinates
+    coord_frames = np.array([s['coord_frame'] for s in src])
+    assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all())
+
+    # test errors for export_volume
+    image_fname = op.join(tempdir, 'temp-image.mgz')
+
+    # source spaces with no volume
+    assert_raises(ValueError, srf.export_volume, image_fname, verbose='error')
+
+    # unrecognized source type
+    disc2 = disc.copy()
+    disc2[0]['type'] = 'kitty'
+    src_unrecognized = src + disc2
+    assert_raises(ValueError, src_unrecognized.export_volume, image_fname,
+                  verbose='error')
+
+    # unrecognized file type
+    bad_image_fname = op.join(tempdir, 'temp-image.png')
+    assert_raises(ValueError, src.export_volume, bad_image_fname,
+                  verbose='error')
+
+    # mixed coordinate frames
+    disc3 = disc.copy()
+    disc3[0]['coord_frame'] = 10
+    src_mixed_coord = src + disc3
+    assert_raises(ValueError, src_mixed_coord.export_volume, image_fname,
+                  verbose='error')
+
+
+@testing.requires_testing_data
+def test_morph_source_spaces():
+    """Test morphing of source spaces
+    """
+    src = read_source_spaces(fname_fs)
+    src_morph = read_source_spaces(fname_morph)
+    src_morph_py = morph_source_spaces(src, 'sample',
+                                       subjects_dir=subjects_dir)
+    _compare_source_spaces(src_morph, src_morph_py, mode='approx')
+
+
+@slow_test
+@testing.requires_testing_data
+def test_morphed_source_space_return():
+    """Test returning a morphed source space to the original subject"""
+    # let's create some random data on fsaverage
+    rng = np.random.RandomState(0)
+    data = rng.randn(20484, 1)
+    tmin, tstep = 0, 1.
+    src_fs = read_source_spaces(fname_fs)
+    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
+                            tmin, tstep, 'fsaverage')
+
+    # Create our morph source space
+    src_morph = morph_source_spaces(src_fs, 'sample',
+                                    subjects_dir=subjects_dir)
+
+    # Morph the data over using standard methods
+    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
+                             smooth=1, subjects_dir=subjects_dir)
+
+    # We can now pretend like this was real data we got e.g. from an inverse.
+    # To be complete, let's remove some vertices
+    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
+             for v in stc_morph.vertices]
+    stc_morph = SourceEstimate(
+        np.concatenate([stc_morph.lh_data[keeps[0]],
+                        stc_morph.rh_data[keeps[1]]]),
+        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
+        'sample')
+
+    # Return it to the original subject
+    stc_morph_return = stc_morph.to_original_src(
+        src_fs, subjects_dir=subjects_dir)
+
+    # Compare to the original data
+    stc_morph_morph = stc_morph.morph('fsaverage', stc_morph_return.vertices,
+                                      smooth=1,
+                                      subjects_dir=subjects_dir)
+    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
+    for ii in range(2):
+        assert_array_equal(stc_morph_return.vertices[ii],
+                           stc_morph_morph.vertices[ii])
+    # These will not match perfectly because morphing pushes data around
+    corr = np.corrcoef(stc_morph_return.data[:, 0],
+                       stc_morph_morph.data[:, 0])[0, 1]
+    assert_true(corr > 0.99, corr)
+
+    # Degenerate cases
+    stc_morph.subject = None  # no .subject provided
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
+    stc_morph.subject = 'sample'
+    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subjects_dir=subjects_dir)
+    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
+    src_fs[0]['subject_his_id'] = 'sample'
+    src = read_source_spaces(fname)  # wrong source space
+    assert_raises(RuntimeError, stc_morph.to_original_src,
+                  src, subjects_dir=subjects_dir)
+
+run_tests_if_main()
+
+# The following code was used to generate small-src.fif.gz.
+# Unfortunately the C code bombs when trying to add source space distances,
+# possibly due to incomplete "faking" of a smaller surface on our part here.
+"""
+# -*- coding: utf-8 -*-
+
+import os
+import numpy as np
+import mne
+
+data_path = mne.datasets.sample.data_path()
+src = mne.setup_source_space('sample', fname=None, spacing='oct5')
+hemis = ['lh', 'rh']
+fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
+
+vs = list()
+for s, fname in zip(src, fnames):
+    coords = s['rr'][s['vertno']]
+    vs.append(s['vertno'])
+    idx = -1 * np.ones(len(s['rr']))
+    idx[s['vertno']] = np.arange(s['nuse'])
+    faces = s['use_tris']
+    faces = idx[faces]
+    mne.write_surface(fname, coords, faces)
+
+# we need to move sphere surfaces
+spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
+for s in spheres:
+    os.rename(s, s + '.bak')
+try:
+    for s, v in zip(spheres, vs):
+        coords, faces = mne.read_surface(s + '.bak')
+        coords = coords[v]
+        mne.write_surface(s, coords, faces)
+    src = mne.setup_source_space('sample', fname=None, spacing='oct4',
+                                 surface='decimated')
+finally:
+    for s in spheres:
+        os.rename(s + '.bak', s)
+
+fname = 'small-src.fif'
+fname_gz = fname + '.gz'
+mne.write_source_spaces(fname, src)
+mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
+                          '--srcp', fname])
+mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
+"""
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_surface.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_surface.py
new file mode 100644
index 0000000..a7e0c1d
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_surface.py
@@ -0,0 +1,165 @@
+from __future__ import print_function
+import os
+import os.path as op
+import numpy as np
+import warnings
+from shutil import copyfile
+from scipy import sparse
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
+
+from mne.datasets import testing
+from mne import read_surface, write_surface, decimate_surface
+from mne.surface import (read_morph_map, _compute_nearest,
+                         fast_cross_3d, get_head_surf, read_curvature,
+                         get_meg_helmet_surf)
+from mne.utils import _TempDir, requires_tvtk, run_tests_if_main, slow_test
+from mne.io import read_info
+from mne.transforms import _get_mri_head_t
+
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+fname = op.join(subjects_dir, 'sample', 'bem',
+                'sample-1280-1280-1280-bem-sol.fif')
+
+warnings.simplefilter('always')
+
+
+def test_helmet():
+    """Test loading helmet surfaces
+    """
+    base_dir = op.join(op.dirname(__file__), '..', 'io')
+    fname_raw = op.join(base_dir, 'tests', 'data', 'test_raw.fif')
+    fname_kit_raw = op.join(base_dir, 'kit', 'tests', 'data',
+                            'test_bin_raw.fif')
+    fname_bti_raw = op.join(base_dir, 'bti', 'tests', 'data',
+                            'exported4D_linux_raw.fif')
+    fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
+    fname_trans = op.join(base_dir, 'tests', 'data',
+                          'sample-audvis-raw-trans.txt')
+    trans = _get_mri_head_t(fname_trans)[0]
+    for fname in [fname_raw, fname_kit_raw, fname_bti_raw, fname_ctf_raw]:
+        helmet = get_meg_helmet_surf(read_info(fname), trans)
+        assert_equal(len(helmet['rr']), 304)  # they all have 304 verts
+        assert_equal(len(helmet['rr']), len(helmet['nn']))
+
+
+@testing.requires_testing_data
+def test_head():
+    """Test loading the head surface
+    """
+    surf_1 = get_head_surf('sample', subjects_dir=subjects_dir)
+    surf_2 = get_head_surf('sample', 'head', subjects_dir=subjects_dir)
+    assert_true(len(surf_1['rr']) < len(surf_2['rr']))  # BEM vs dense head
+
+
+def test_huge_cross():
+    """Test cross product with lots of elements
+    """
+    x = np.random.rand(100000, 3)
+    y = np.random.rand(1, 3)
+    z = np.cross(x, y)
+    zz = fast_cross_3d(x, y)
+    assert_array_equal(z, zz)
+
+
+def test_compute_nearest():
+    """Test nearest neighbor searches"""
+    x = np.random.randn(500, 3)
+    x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
+    nn_true = np.random.permutation(np.arange(500, dtype=np.int))[:20]
+    y = x[nn_true]
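+    # every row of y is drawn from x, so its nearest neighbor is itself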
+
+    nn1 = _compute_nearest(x, y, use_balltree=False)
+    nn2 = _compute_nearest(x, y, use_balltree=True)
+    assert_array_equal(nn_true, nn1)
+    assert_array_equal(nn_true, nn2)
+
+    # test distance support
+    nnn1 = _compute_nearest(x, y, use_balltree=False, return_dists=True)
+    nnn2 = _compute_nearest(x, y, use_balltree=True, return_dists=True)
+    assert_array_equal(nnn1[0], nn_true)
+    assert_array_equal(nnn1[1], np.zeros_like(nn1))  # all dists should be 0
+    assert_equal(len(nnn1), len(nnn2))
+    for nn1, nn2 in zip(nnn1, nnn2):
+        assert_array_equal(nn1, nn2)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_make_morph_maps():
+    """Test reading and creating morph maps
+    """
+    # make a new fake subjects_dir
+    tempdir = _TempDir()
+    for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
+        os.mkdir(op.join(tempdir, subject))
+        os.mkdir(op.join(tempdir, subject, 'surf'))
+        for hemi in ['lh', 'rh']:
+            args = [subject, 'surf', hemi + '.sphere.reg']
+            copyfile(op.join(subjects_dir, *args),
+                     op.join(tempdir, *args))
+
+    # this should trigger the creation of morph-maps dir and create the map
+    mmap = read_morph_map('fsaverage_ds', 'sample_ds', tempdir)
+    mmap2 = read_morph_map('fsaverage_ds', 'sample_ds', subjects_dir)
+    assert_equal(len(mmap), len(mmap2))
+    for m1, m2 in zip(mmap, mmap2):
+        # deal with sparse matrix stuff
+        diff = (m1 - m2).data
+        assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
+
+    # This will also trigger creation, but it's trivial
+    mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
+    for mm in mmap:
+        assert_true((mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0)
+
+
+@testing.requires_testing_data
+def test_io_surface():
+    """Test reading and writing of Freesurfer surface mesh files
+    """
+    tempdir = _TempDir()
+    fname_quad = op.join(data_path, 'subjects', 'bert', 'surf',
+                         'lh.inflated.nofix')
+    fname_tri = op.join(data_path, 'subjects', 'fsaverage', 'surf',
+                        'lh.inflated')
+    for fname in (fname_quad, fname_tri):
+        pts, tri = read_surface(fname)
+        write_surface(op.join(tempdir, 'tmp'), pts, tri)
+        c_pts, c_tri = read_surface(op.join(tempdir, 'tmp'))
+        assert_array_equal(pts, c_pts)
+        assert_array_equal(tri, c_tri)
+
+
+@testing.requires_testing_data
+def test_read_curv():
+    """Test reading curvature data
+    """
+    fname_curv = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.curv')
+    fname_surf = op.join(data_path, 'subjects', 'fsaverage', 'surf',
+                         'lh.inflated')
+    bin_curv = read_curvature(fname_curv)
+    rr = read_surface(fname_surf)[0]
+    assert_true(len(bin_curv) == len(rr))
+    assert_true(np.logical_or(bin_curv == 0, bin_curv == 1).all())
+
+
+@requires_tvtk
+def test_decimate_surface():
+    """Test triangular surface decimation
+    """
+    points = np.array([[-0.00686118, -0.10369860, 0.02615170],
+                       [-0.00713948, -0.10370162, 0.02614874],
+                       [-0.00686208, -0.10368247, 0.02588313],
+                       [-0.00713987, -0.10368724, 0.02587745]])
+    tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]])
+    for n_tri in [4, 3, 2]:  # quadric decimation creates even numbered output.
+        _, this_tris = decimate_surface(points, tris, n_tri)
+        assert_true(len(this_tris) == (n_tri if not n_tri % 2 else 2))
+    nirvana = 5
+    tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])
+    assert_raises(ValueError, decimate_surface, points, tris, n_tri)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_transforms.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_transforms.py
new file mode 100644
index 0000000..605f589
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_transforms.py
@@ -0,0 +1,198 @@
+import os
+import os.path as op
+import numpy as np
+
+from nose.tools import assert_true, assert_raises
+from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
+                           assert_almost_equal, assert_array_almost_equal)
+import warnings
+
+from mne.io.constants import FIFF
+from mne.datasets import testing
+from mne import read_trans, write_trans
+from mne.utils import _TempDir, run_tests_if_main
+from mne.transforms import (invert_transform, _get_mri_head_t,
+                            rotation, rotation3d, rotation_angles, _find_trans,
+                            combine_transforms, transform_coordinates,
+                            collect_transforms, apply_trans, translation,
+                            get_ras_to_neuromag_trans, _sphere_to_cartesian,
+                            _polar_to_cartesian, _cartesian_to_sphere)
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_path = testing.data_path(download=False)
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
+fname_eve = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc_raw-eve.fif')
+fname_trans = op.join(op.split(__file__)[0], '..', 'io', 'tests',
+                      'data', 'sample-audvis-raw-trans.txt')
+
+
+@testing.requires_testing_data
+def test_get_mri_head_t():
+    """Test converting '-trans.txt' to '-trans.fif'"""
+    trans = read_trans(fname)
+    trans = invert_transform(trans)  # starts out as head->MRI, so invert
+    trans_2 = _get_mri_head_t(fname_trans)[0]
+    assert_equal(trans['from'], trans_2['from'])
+    assert_equal(trans['to'], trans_2['to'])
+    assert_allclose(trans['trans'], trans_2['trans'], rtol=1e-5, atol=1e-5)
+
+
+ at testing.requires_testing_data
+def test_io_trans():
+    """Test reading and writing of trans files
+    """
+    tempdir = _TempDir()
+    os.mkdir(op.join(tempdir, 'sample'))
+    assert_raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
+    trans0 = read_trans(fname)
+    fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
+    write_trans(fname1, trans0)
+    assert_true(fname1 == _find_trans('sample', subjects_dir=tempdir))
+    trans1 = read_trans(fname1)
+
+    # check all properties
+    assert_true(trans0['from'] == trans1['from'])
+    assert_true(trans0['to'] == trans1['to'])
+    assert_array_equal(trans0['trans'], trans1['trans'])
+
+    # check reading non -trans.fif files
+    assert_raises(IOError, read_trans, fname_eve)
+
+    # check warning on bad filenames
+    with warnings.catch_warnings(record=True) as w:
+        fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
+        write_trans(fname2, trans0)
+    assert_true(len(w) >= 1)
+
+
+def test_get_ras_to_neuromag_trans():
+    """Test the coordinate transformation from ras to neuromag"""
+    # create model points in neuromag-like space
+    anterior = [0, 1, 0]
+    left = [-1, 0, 0]
+    right = [.8, 0, 0]
+    up = [0, 0, 1]
+    rand_pts = np.random.uniform(-1, 1, (3, 3))
+    pts = np.vstack((anterior, left, right, up, rand_pts))
+
+    # change coord system
+    rx, ry, rz, tx, ty, tz = np.random.uniform(-2 * np.pi, 2 * np.pi, 6)
+    trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+    pts_changed = apply_trans(trans, pts)
+
+    # transform back into original space
+    nas, lpa, rpa = pts_changed[:3]
+    hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
+    pts_restored = apply_trans(hsp_trans, pts_changed)
+
+    err = "Neuromag transformation failed"
+    assert_array_almost_equal(pts_restored, pts, 6, err)
+
+
+def test_sphere_to_cartesian():
+    """Test helper transform function from sphere to cartesian"""
+    phi, theta, r = (np.pi, np.pi, 1)
+    # expected value is (1, 0, 0)
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    coord = _sphere_to_cartesian(phi, theta, r)
+    # np.pi is an approx since pi is irrational
+    assert_almost_equal(coord, (x, y, z), 10)
+    assert_almost_equal(coord, (1, 0, 0), 10)
+
+
+def test_polar_to_cartesian():
+    """Test helper transform function from polar to cartesian"""
+    r = 1
+    theta = np.pi
+    # expected values are (-1, 0)
+    x = r * np.cos(theta)
+    y = r * np.sin(theta)
+    coord = _polar_to_cartesian(theta, r)
+    # np.pi is an approx since pi is irrational
+    assert_almost_equal(coord, (x, y), 10)
+    assert_almost_equal(coord, (-1, 0), 10)
+
+
+def test_cartesian_to_sphere():
+    """Test helper transform function from cartesian to sphere"""
+    x, y, z = (1, 0, 0)
+    # expected values are (0, 0, 1)
+    hypotxy = np.hypot(x, y)
+    r = np.hypot(hypotxy, z)
+    elev = np.arctan2(z, hypotxy)
+    az = np.arctan2(y, x)
+    coord = _cartesian_to_sphere(x, y, z)
+    assert_equal(coord, (az, elev, r))
+    assert_equal(coord, (0, 0, 1))
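+
+# Note on conventions: _sphere_to_cartesian(phi, theta, r) takes phi as the
+# elevation (angle from the xy-plane) and theta as the azimuth, while
+# _cartesian_to_sphere(x, y, z) returns (azimuth, elevation, r). For
+# elevations in (-pi/2, pi/2) the two are inverses up to argument order,
+# e.g. _cartesian_to_sphere(*_sphere_to_cartesian(0.3, 1.2, 1.0)) gives
+# (1.2, 0.3, 1.0) up to floating point error.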
+
+
+def test_rotation():
+    """Test conversion between rotation angles and transformation matrix
+    """
+    tests = [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]
+    for rot in tests:
+        x, y, z = rot
+        m = rotation3d(x, y, z)
+        m4 = rotation(x, y, z)
+        assert_array_equal(m, m4[:3, :3])
+        back = rotation_angles(m)
+        assert_equal(back, rot)
+        back4 = rotation_angles(m4)
+        assert_equal(back4, rot)
+
+
+ at testing.requires_testing_data
+def test_combine():
+    """Test combining transforms
+    """
+    trans = read_trans(fname)
+    inv = invert_transform(trans)
+    combine_transforms(trans, inv, trans['from'], trans['from'])
+    assert_raises(RuntimeError, combine_transforms, trans, inv,
+                  trans['to'], trans['from'])
+    assert_raises(RuntimeError, combine_transforms, trans, inv,
+                  trans['from'], trans['to'])
+    assert_raises(RuntimeError, combine_transforms, trans, trans,
+                  trans['from'], trans['to'])
+
+
+ at testing.requires_testing_data
+def test_transform_coords():
+    """Test transforming coordinates
+    """
+    # normal trans won't work
+    with warnings.catch_warnings(record=True):  # dep
+        assert_raises(ValueError, transform_coordinates,
+                      fname, np.eye(3), 'meg', 'fs_tal')
+    # needs to have all entries
+    pairs = [[FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD],
+             [FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_MNE_COORD_RAS],
+             [FIFF.FIFFV_MNE_COORD_RAS, FIFF.FIFFV_MNE_COORD_MNI_TAL],
+             [FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ],
+             [FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ],
+             ]
+    xforms = []
+    for fro, to in pairs:
+        xforms.append({'to': to, 'from': fro, 'trans': np.eye(4)})
+    tempdir = _TempDir()
+    all_fname = op.join(tempdir, 'all-trans.fif')
+    with warnings.catch_warnings(record=True):  # dep
+        collect_transforms(all_fname, xforms)
+    for fro in ['meg', 'mri']:
+        for to in ['meg', 'mri', 'fs_tal', 'mni_tal']:
+            with warnings.catch_warnings(record=True):  # dep
+                out = transform_coordinates(all_fname, np.eye(3), fro, to)
+                assert_allclose(out, np.eye(3))
+    with warnings.catch_warnings(record=True):  # dep
+        assert_raises(ValueError, transform_coordinates, all_fname, np.eye(4),
+                      'meg', 'meg')
+        assert_raises(ValueError, transform_coordinates, all_fname, np.eye(3),
+                      'fs_tal', 'meg')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_utils.py
new file mode 100644
index 0000000..5dcb7e4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/tests/test_utils.py
@@ -0,0 +1,516 @@
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
+from nose.tools import assert_true, assert_raises, assert_not_equal
+from copy import deepcopy
+import os.path as op
+import numpy as np
+from scipy import sparse
+import os
+import warnings
+
+from mne.utils import (set_log_level, set_log_file, _TempDir,
+                       get_config, set_config, deprecated, _fetch_file,
+                       sum_squared, estimate_rank,
+                       _url_to_local_path, sizeof_fmt, _check_subject,
+                       _check_type_picks, object_hash, object_diff,
+                       requires_good_network, run_tests_if_main, md5sum,
+                       ArgvSetter, _memory_usage, check_random_state,
+                       _check_mayavi_version, requires_mayavi,
+                       set_memmap_min_size, _get_stim_channel, _check_fname,
+                       create_slices, _time_mask, random_permutation,
+                       _get_call_line, compute_corr, verbose)
+from mne.io import show_fiff
+from mne import Evoked
+from mne.externals.six.moves import StringIO
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+fname_evoked = op.join(base_dir, 'test-ave.fif')
+fname_raw = op.join(base_dir, 'test_raw.fif')
+fname_log = op.join(base_dir, 'test-ave.log')
+fname_log_2 = op.join(base_dir, 'test-ave-2.log')
+
+
+def clean_lines(lines=()):
+    # Function to scrub filenames for checking logging output (in test_logging)
+    return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
+
+
+def test_get_call_line():
+    """Test getting a call line
+    """
+    @verbose
+    def foo(verbose=None):
+        return _get_call_line(in_verbose=True)
+
+    for v in (None, True):
+        my_line = foo(verbose=v)  # testing
+        assert_equal(my_line, 'my_line = foo(verbose=v)  # testing')
+
+    def bar():
+        return _get_call_line(in_verbose=False)
+
+    my_line = bar()  # testing more
+    assert_equal(my_line, 'my_line = bar()  # testing more')
+
+
+def test_misc():
+    """Test misc utilities"""
+    assert_equal(_memory_usage(-1)[0], -1)
+    assert_equal(_memory_usage((clean_lines, [], {}))[0], -1)
+    assert_equal(_memory_usage(clean_lines)[0], -1)
+    assert_raises(ValueError, check_random_state, 'foo')
+    assert_raises(ValueError, set_memmap_min_size, 1)
+    assert_raises(ValueError, set_memmap_min_size, 'foo')
+    assert_raises(TypeError, get_config, 1)
+    assert_raises(TypeError, set_config, 1)
+    assert_raises(TypeError, set_config, 'foo', 1)
+    assert_raises(TypeError, _get_stim_channel, 1, None)
+    assert_raises(TypeError, _get_stim_channel, [1], None)
+    assert_raises(TypeError, _check_fname, 1)
+    assert_raises(ValueError, _check_subject, None, None)
+    assert_raises(ValueError, _check_subject, None, 1)
+    assert_raises(ValueError, _check_subject, 1, None)
+
+
+ at requires_mayavi
+def test_check_mayavi():
+    """Test mayavi version check"""
+    assert_raises(RuntimeError, _check_mayavi_version, '100.0.0')
+
+
+def test_run_tests_if_main():
+    """Test run_tests_if_main functionality"""
+    x = []
+
+    def test_a():
+        x.append(True)
+
+    @np.testing.dec.skipif(True)
+    def test_b():
+        return
+
+    try:
+        __name__ = '__main__'
+        run_tests_if_main(measure_mem=False)  # dual meas causes problems
+
+        def test_c():
+            raise RuntimeError
+
+        try:
+            __name__ = '__main__'
+            run_tests_if_main(measure_mem=False)  # dual meas causes problems
+        except RuntimeError:
+            pass
+        else:
+            raise RuntimeError('Error not raised')
+    finally:
+        del __name__
+    assert_true(len(x) == 2)
+    assert_true(x[0] and x[1])
+
+
+def test_hash():
+    """Test dictionary hashing and comparison functions"""
+    # does hashing all of these types work:
+    # {dict, list, tuple, ndarray, str, float, int, None}
+    d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3),
+              e=None)
+    d0[1] = None
+    d0[2.] = b'123'
+
+    d1 = deepcopy(d0)
+    assert_true(len(object_diff(d0, d1)) == 0)
+    assert_true(len(object_diff(d1, d0)) == 0)
+    assert_equal(object_hash(d0), object_hash(d1))
+
+    # change values slightly
+    d1['data'] = np.ones(3, int)
+    d1['d'][0] = 0
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['a']['a'] = 0.11
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['a']['d'] = 0  # non-existent key
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['b'].append(0)  # different-length lists
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['e'] = 'foo'  # non-None
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    d2 = deepcopy(d0)
+    d1['e'] = StringIO()
+    d2['e'] = StringIO()
+    d2['e'].write('foo')
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+
+    d1 = deepcopy(d0)
+    d1[1] = 2
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    # generators (and other types) not supported
+    d1 = deepcopy(d0)
+    d2 = deepcopy(d0)
+    d1[1] = (x for x in d0)
+    d2[1] = (x for x in d0)
+    assert_raises(RuntimeError, object_diff, d1, d2)
+    assert_raises(RuntimeError, object_hash, d1)
+
+    x = sparse.eye(2, 2, format='csc')
+    y = sparse.eye(2, 2, format='csr')
+    assert_true('type mismatch' in object_diff(x, y))
+    y = sparse.eye(2, 2, format='csc')
+    assert_equal(len(object_diff(x, y)), 0)
+    y[1, 1] = 2
+    assert_true('elements' in object_diff(x, y))
+    y = sparse.eye(3, 3, format='csc')
+    assert_true('shape' in object_diff(x, y))
+    y = 0
+    assert_true('type mismatch' in object_diff(x, y))
+
+
+def test_md5sum():
+    """Test md5sum calculation
+    """
+    tempdir = _TempDir()
+    fname1 = op.join(tempdir, 'foo')
+    fname2 = op.join(tempdir, 'bar')
+    with open(fname1, 'wb') as fid:
+        fid.write(b'abcd')
+    with open(fname2, 'wb') as fid:
+        fid.write(b'efgh')
+    assert_equal(md5sum(fname1), md5sum(fname1, 1))
+    assert_equal(md5sum(fname2), md5sum(fname2, 1024))
+    assert_true(md5sum(fname1) != md5sum(fname2))
+
+
+def test_tempdir():
+    """Test TempDir
+    """
+    tempdir2 = _TempDir()
+    assert_true(op.isdir(tempdir2))
+    x = str(tempdir2)
+    del tempdir2
+    assert_true(not op.isdir(x))
+
+
+def test_estimate_rank():
+    """Test rank estimation
+    """
+    data = np.eye(10)
+    assert_array_equal(estimate_rank(data, return_singular=True)[1],
+                       np.ones(10))
+    data[0, 0] = 0
+    assert_equal(estimate_rank(data), 9)
+
+
+def test_logging():
+    """Test logging (to file)
+    """
+    assert_raises(ValueError, set_log_level, 'foo')
+    tempdir = _TempDir()
+    test_name = op.join(tempdir, 'test.log')
+    with open(fname_log, 'r') as old_log_file:
+        old_lines = clean_lines(old_log_file.readlines())
+    with open(fname_log_2, 'r') as old_log_file_2:
+        old_lines_2 = clean_lines(old_log_file_2.readlines())
+
+    if op.isfile(test_name):
+        os.remove(test_name)
+    # test it one way (printing default off)
+    set_log_file(test_name)
+    set_log_level('WARNING')
+    # should NOT print
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, condition=1, verbose=False)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
+    # SHOULD print
+    evoked = Evoked(fname_evoked, condition=1, verbose=True)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines)
+    set_log_file(None)  # Need to do this to close the old file
+    os.remove(test_name)
+
+    # now go the other way (printing default on)
+    set_log_file(test_name)
+    set_log_level('INFO')
+    # should NOT print
+    evoked = Evoked(fname_evoked, condition=1, verbose='WARNING')
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
+    # should NOT print
+    evoked = Evoked(fname_evoked, condition=1, verbose=False)
+    with open(test_name) as fid:
+        assert_true(fid.readlines() == [])
+    # SHOULD print
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
+    with open(fname_log, 'r') as old_log_file:
+        assert_equal(new_lines, old_lines)
+    # check to make sure appending works (and as default, raises a warning)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        set_log_file(test_name, overwrite=False)
+        assert len(w) == 0
+        set_log_file(test_name)
+        assert len(w) == 1
+    evoked = Evoked(fname_evoked, condition=1)
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines_2)
+
+    # make sure overwriting works
+    set_log_file(test_name, overwrite=True)
+    # this line needs to be called to actually do some logging
+    evoked = Evoked(fname_evoked, condition=1)
+    del evoked
+    with open(test_name, 'r') as new_log_file:
+        new_lines = clean_lines(new_log_file.readlines())
+    assert_equal(new_lines, old_lines)
+
+
+def test_config():
+    """Test mne-python config file support"""
+    tempdir = _TempDir()
+    key = '_MNE_PYTHON_CONFIG_TESTING'
+    value = '123456'
+    old_val = os.getenv(key, None)
+    os.environ[key] = value
+    assert_true(get_config(key) == value)
+    del os.environ[key]
+    # catch the warning about it being a non-standard config key
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        set_config(key, None, home_dir=tempdir)
+    assert_true(len(w) == 1)
+    assert_true(get_config(key, home_dir=tempdir) is None)
+    assert_raises(KeyError, get_config, key, raise_error=True)
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        set_config(key, value, home_dir=tempdir)
+        assert_true(get_config(key, home_dir=tempdir) == value)
+        set_config(key, None, home_dir=tempdir)
+    if old_val is not None:
+        os.environ[key] = old_val
+    # Check if get_config with no input returns all config
+    key = 'MNE_PYTHON_TESTING_KEY'
+    config = {key: value}
+    with warnings.catch_warnings(record=True):  # non-standard key
+        warnings.simplefilter('always')
+        set_config(key, value, home_dir=tempdir)
+    assert_equal(get_config(home_dir=tempdir), config)
+
+
+def test_show_fiff():
+    """Test show_fiff
+    """
+    # this is not exhaustive, but hopefully bugs will be found in use
+    info = show_fiff(fname_evoked)
+    keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
+            'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE',
+            'FIFF_EPOCH']
+    assert_true(all(key in info for key in keys))
+    info = show_fiff(fname_raw, read_limit=1024)
+
+
+ at deprecated('message')
+def deprecated_func():
+    pass
+
+
+ at deprecated('message')
+class deprecated_class(object):
+
+    def __init__(self):
+        pass
+
+
+def test_deprecated():
+    """Test deprecated function
+    """
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        deprecated_func()
+    assert_true(len(w) == 1)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        deprecated_class()
+    assert_true(len(w) == 1)
+
+
+ at requires_good_network
+def test_fetch_file():
+    """Test file downloading
+    """
+    tempdir = _TempDir()
+    urls = ['http://martinos.org/mne/',
+            'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt']
+    with ArgvSetter(disable_stderr=False):  # to capture stdout
+        for url in urls:
+            archive_name = op.join(tempdir, "download_test")
+            _fetch_file(url, archive_name, verbose=False)
+            assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
+                          op.join(tempdir, 'test'), verbose=False)
+            resume_name = op.join(tempdir, "download_resume")
+            # touch file
+            with open(resume_name + '.part', 'w'):
+                os.utime(resume_name + '.part', None)
+            _fetch_file(url, resume_name, resume=True, verbose=False)
+            assert_raises(ValueError, _fetch_file, url, archive_name,
+                          hash_='a', verbose=False)
+            assert_raises(RuntimeError, _fetch_file, url, archive_name,
+                          hash_='a' * 32, verbose=False)
+
+
+def test_sum_squared():
+    """Test optimized sum of squares
+    """
+    X = np.random.randint(0, 50, (3, 3))
+    assert_equal(np.sum(X ** 2), sum_squared(X))
+
+
+def test_sizeof_fmt():
+    """Test sizeof_fmt
+    """
+    assert_equal(sizeof_fmt(0), '0 bytes')
+    assert_equal(sizeof_fmt(1), '1 byte')
+    assert_equal(sizeof_fmt(1000), '1000 bytes')
+
+
+def test_url_to_local_path():
+    """Test URL to local path
+    """
+    assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'),
+                 op.join('.', 'home', 'why.html'))
+
+
+def test_check_type_picks():
+    """Test checking type integrity checks of picks
+    """
+    picks = np.arange(12)
+    assert_array_equal(picks, _check_type_picks(picks))
+    picks = list(range(12))
+    assert_array_equal(np.array(picks), _check_type_picks(picks))
+    picks = None
+    assert_array_equal(None, _check_type_picks(picks))
+    picks = ['a', 'b']
+    assert_raises(ValueError, _check_type_picks, picks)
+    picks = 'b'
+    assert_raises(ValueError, _check_type_picks, picks)
+
+
+def test_compute_corr():
+    """Test Anscombe's Quartett
+    """
+    x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
+    y = np.array([[8.04, 6.95, 7.58, 8.81, 8.33, 9.96,
+                   7.24, 4.26, 10.84, 4.82, 5.68],
+                  [9.14, 8.14, 8.74, 8.77, 9.26, 8.10,
+                   6.13, 3.10, 9.13, 7.26, 4.74],
+                  [7.46, 6.77, 12.74, 7.11, 7.81, 8.84,
+                   6.08, 5.39, 8.15, 6.42, 5.73],
+                  [8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8],
+                  [6.58, 5.76, 7.71, 8.84, 8.47, 7.04,
+                   5.25, 12.50, 5.56, 7.91, 6.89]])
+
+    r = compute_corr(x, y.T)
+    r2 = np.array([np.corrcoef(x, y[i])[0, 1]
+                   for i in range(len(y))])
+    assert_allclose(r, r2)
+    assert_raises(ValueError, compute_corr, [1, 2], [])
+
+
+def test_create_slices():
+    """Test checking the create of time create_slices
+    """
+    # Test that create_slices by default returns an empty list
+    assert_true(create_slices(0, 0) == [])
+    # Test that create_slices returns the correct number of slices
+    assert_true(len(create_slices(0, 100)) == 100)
+    # Test with non-zero start parameters
+    assert_true(len(create_slices(50, 100)) == 50)
+    # Test the number of slices with a slice length of 2
+    assert_true(len(create_slices(0, 100, length=2)) == 50)
+    # Test the number of slices with a manual step size
+    assert_true(len(create_slices(0, 100, step=10)) == 10)
+    # Test the number of overlapping slices when step < length
+    assert_true(len(create_slices(0, 500, length=50, step=10)) == 46)
+    # Test that slice elements have the correct start, stop and step
+    slices = create_slices(0, 10)
+    assert_true(slices[0].start == 0)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 1)
+    assert_true(slices[-1].stop == 10)
+    # Same with a larger slice length
+    slices = create_slices(0, 9, length=3)
+    assert_true(slices[0].start == 0)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 3)
+    assert_true(slices[-1].stop == 9)
+    # Same with a manual step size
+    slices = create_slices(0, 9, length=3, step=1)
+    assert_true(len(slices) == 7)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 3)
+    assert_true(slices[-1].start == 6)
+    assert_true(slices[-1].stop == 9)
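+
+# A concrete illustration of the semantics exercised above (values follow
+# from the asserts): with the default step equal to the slice length the
+# windows tile the range without overlap,
+#     create_slices(0, 9, length=3)
+#     == [slice(0, 3, 1), slice(3, 6, 1), slice(6, 9, 1)]
+# while an explicit smaller step yields overlapping windows:
+#     create_slices(0, 9, length=3, step=1)  # 7 slices: 0:3, 1:4, ..., 6:9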
+
+
+def test_time_mask():
+    """Test safe time masking
+    """
+    N = 10
+    x = np.arange(N).astype(float)
+    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
+    assert_equal(_time_mask(x - 1e-10, 0, N - 1).sum(), N)
+    assert_equal(_time_mask(x - 1e-10, 0, N - 1, strict=True).sum(), N - 1)
+
+
+def test_random_permutation():
+    """Test random permutation function
+    """
+    n_samples = 10
+    random_state = 42
+    python_randperm = random_permutation(n_samples, random_state)
+
+    # matlab output when we execute rng(42), randperm(10)
+    matlab_randperm = np.array([7, 6, 5, 1, 4, 9, 10, 3, 8, 2])
+
+    assert_array_equal(python_randperm, matlab_randperm - 1)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/__init__.py
new file mode 100644
index 0000000..14c92fb
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/__init__.py
@@ -0,0 +1,11 @@
+"""Time frequency analysis tools
+"""
+
+from .tfr import (single_trial_power, morlet, tfr_morlet, cwt_morlet,
+                  AverageTFR, tfr_multitaper, read_tfrs, write_tfrs)
+from .psd import compute_raw_psd, compute_epochs_psd
+from .csd import CrossSpectralDensity, compute_epochs_csd
+from .ar import fit_iir_model_raw
+from .multitaper import dpss_windows, multitaper_psd
+from .stft import stft, istft, stftfreq
+from ._stockwell import tfr_stockwell
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/_stockwell.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/_stockwell.py
new file mode 100644
index 0000000..a9f703e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/_stockwell.py
@@ -0,0 +1,255 @@
+# Authors : Denis A. Engemann <denis.engemann at gmail.com>
+#           Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License : BSD 3-clause
+
+from copy import deepcopy
+import math
+import numpy as np
+from scipy import fftpack
+# XXX explore CUDA optimization at some point.
+
+from ..io.pick import pick_types, pick_info
+from ..utils import logger, verbose
+from ..parallel import parallel_func, check_n_jobs
+from .tfr import AverageTFR, _get_data
+
+
+def _check_input_st(x_in, n_fft):
+    """Aux function"""
+    # flatten to 2 D and memorize original shape
+    n_times = x_in.shape[-1]
+
+    def _is_power_of_two(n):
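+        # n & (n - 1) clears the lowest set bit, so for n > 0 it is zero
+        # exactly when n is a power of two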
+        return not (n > 0 and ((n & (n - 1))))
+
+    if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
+        # Compute next power of 2
+        n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
+    elif n_fft < n_times:
+        raise ValueError("n_fft cannot be smaller than signal size. "
+                         "Got %s < %s." % (n_fft, n_times))
+    zero_pad = 0
+    if n_times < n_fft:
+        msg = ('The input signal is shorter ({0}) than "n_fft" ({1}). '
+               'Applying zero padding.').format(x_in.shape[-1], n_fft)
+        logger.warning(msg)
+        zero_pad = n_fft - n_times
+        pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
+        x_in = np.concatenate((x_in, pad_array), axis=-1)
+    return x_in, n_fft, zero_pad
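+
+# Worked example (illustrative): for a 100-sample signal and n_fft=None the
+# next power of two is 128, so
+#     x = np.random.randn(2, 100)
+#     x_out, n_fft, zero_pad = _check_input_st(x, None)
+# gives n_fft == 128, zero_pad == 28 and x_out.shape == (2, 128), with the
+# last 28 samples of each row set to zero.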
+
+
+def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
+    """Precompute stockwell gausian windows (in the freq domain)"""
+    tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
+    tw = np.r_[tw[:1], tw[1:][::-1]]
+
+    k = width  # 1 for the classical Stockwell transform
+    f_range = np.arange(start_f, stop_f, 1)
+    windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
+    for i_f, f in enumerate(f_range):
+        if f == 0.:
+            window = np.ones(len(tw))
+        else:
+            window = ((f / (np.sqrt(2. * np.pi) * k)) *
+                      np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
+        window /= window.sum()  # normalisation
+        windows[i_f] = fftpack.fft(window)
+    return windows
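+
+# Usage sketch (illustrative): start_f and stop_f index FFT bins rather than
+# frequencies in Hz; bin k corresponds to k * sfreq / n_samp Hz. With
+# n_samp=512 and sfreq=256. (0.5 Hz resolution),
+#     W = _precompute_st_windows(512, 10, 40, 256., 1.0)
+# precomputes 30 frequency-domain Gaussian windows covering 5-19.5 Hz, one
+# per row of the returned (30, 512) complex array.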
+
+
+def _st(x, start_f, windows):
+    """Implementation based on Ali Moukadem Matlab code (only used in tests)"""
+    n_samp = x.shape[-1]
+    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
+    # do the work
+    Fx = fftpack.fft(x)
+    XF = np.concatenate([Fx, Fx], axis=-1)
+    for i_f, window in enumerate(windows):
+        f = start_f + i_f
+        ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
+    return ST
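+
+# Usage sketch (illustrative) combining the two helpers above on a toy
+# signal; all values are arbitrary:
+#     sfreq, n = 128., 256
+#     t = np.arange(n) / sfreq
+#     x = np.sin(2 * np.pi * 16. * t)  # 16 Hz tone
+#     W = _precompute_st_windows(n, 20, 45, sfreq, 1.0)  # bins 20..44
+#     S = _st(x, 20, W)  # complex, shape (25, 256)
+#     power = np.abs(S) ** 2  # peaks in row 32 - 20 = 12, i.e. 16 Hz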
+
+
+def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
+    """Aux function"""
+    n_samp = x.shape[-1]
+    n_out = (n_samp - zero_pad)
+    n_out = n_out // decim + bool(n_out % decim)
+    psd = np.empty((len(W), n_out))
+    itc = np.empty_like(psd) if compute_itc else None
+    X = fftpack.fft(x)
+    XX = np.concatenate([X, X], axis=-1)
+    for i_f, window in enumerate(W):
+        f = start_f + i_f
+        ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
+        # drop the zero-padded tail (if any), then decimate in time
+        TFR = ST[:, :-zero_pad:decim] if zero_pad else ST[:, ::decim]
+        TFR_abs = np.abs(TFR)
+        if compute_itc:
+            TFR /= TFR_abs
+            itc[i_f] = np.abs(np.mean(TFR, axis=0))
+        TFR_abs *= TFR_abs
+        psd[i_f] = np.mean(TFR_abs, axis=0)
+    return psd, itc
+
+
+def _induced_power_stockwell(data, sfreq, fmin, fmax, n_fft=None, width=1.0,
+                             decim=1, return_itc=False, n_jobs=1):
+    """Computes power and intertrial coherence using Stockwell (S) transform
+
+    Parameters
+    ----------
+    data : ndarray
+        The signal to transform. Any dimensionality supported as long
+        as the last dimension is time.
+    sfreq : float
+        The sampling frequency.
+    fmin : None, float
+        The minimum frequency to include. If None defaults to the minimum fft
+        frequency greater than zero.
+    fmax : None, float
+        The maximum frequency to include. If None defaults to the maximum fft.
+    n_fft : int | None
+        The length of the windows used for FFT. If None, it defaults to the
+        next power of 2 larger than the signal length.
+    width : float
+        The width of the Gaussian window. If < 1, increased temporal
+        resolution, if > 1, increased frequency resolution. Defaults to 1.
+        (classical S-Transform).
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+    n_jobs : int
+        Number of parallel jobs to use.
+
+    Returns
+    -------
+    st_power : ndarray
+        The power of the Stockwell-transformed data.
+        The last two dimensions are frequency and time.
+    itc : ndarray
+        The intertrial coherence. Only returned if return_itc is True.
+    freqs : ndarray
+        The frequencies.
+
+    References
+    ----------
+    Stockwell, R. G. "Why use the S-transform." AMS Pseudo-differential
+        operators: Partial differential equations and time-frequency
+        analysis 52 (2007): 279-309.
+    Moukadem, A., Bouguila, Z., Abdeslam, D. O, and Dieterlen, A. Stockwell
+        transform optimization applied on the detection of split in heart
+        sounds (2014). Signal Processing Conference (EUSIPCO), 2014 Proceedings
+        of the 22nd European, pages 2015--2019.
+    Wheat, K., Cornelissen, P. L., Frost, S.J, and Peter C. Hansen (2010).
+        During Visual Word Recognition, Phonology Is Accessed
+        within 100 ms and May Be Mediated by a Speech Production
+        Code: Evidence from Magnetoencephalography. The Journal of
+        Neuroscience, 30 (15), 5229-5233.
+    K. A. Jones and B. Porjesz and D. Chorlian and M. Rangaswamy and C.
+        Kamarajan and A. Padmanabhapillai and A. Stimus and H. Begleiter
+        (2006). S-transform time-frequency analysis of P300 reveals deficits in
+        individuals diagnosed with alcoholism.
+        Clinical Neurophysiology 117 2128--2143
+    """
+    n_epochs, n_channels = data.shape[:2]
+    n_out = data.shape[2] // decim + bool(data.shape[2] % decim)
+    data, n_fft_, zero_pad = _check_input_st(data, n_fft)
+
+    freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
+    if fmin is None:
+        fmin = freqs[freqs > 0][0]
+    if fmax is None:
+        fmax = freqs.max()
+
+    start_f = np.abs(freqs - fmin).argmin()
+    stop_f = np.abs(freqs - fmax).argmin()
+    freqs = freqs[start_f:stop_f]
+
+    W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
+    n_freq = stop_f - start_f
+    psd = np.empty((n_channels, n_freq, n_out))
+    itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
+
+    parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
+    tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
+                          decim, W)
+                    for c in range(n_channels))
+    for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
+        psd[c] = this_psd
+        if this_itc is not None:
+            itc[c] = this_itc
+
+    return psd, itc, freqs
+
+
+ at verbose
+def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
+                  width=1.0, decim=1, return_itc=False, n_jobs=1,
+                  verbose=None):
+    """Time-Frequency Representation (TFR) using Stockwell Transform
+
+    Parameters
+    ----------
+    inst : Epochs | Evoked
+        The epochs or evoked object.
+    fmin : None, float
+        The minimum frequency to include. If None defaults to the minimum fft
+        frequency greater than zero.
+    fmax : None, float
+        The maximum frequency to include. If None defaults to the maximum fft.
+    n_fft : int | None
+        The length of the windows used for FFT. If None, it defaults to the
+        next power of 2 larger than the signal length.
+    width : float
+        The width of the Gaussian window. If < 1, increased temporal
+        resolution, if > 1, increased frequency resolution. Defaults to 1.
+        (classical S-Transform).
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+    n_jobs : int
+        The number of jobs to run in parallel (over channels).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : AverageTFR
+        The averaged power.
+    itc : AverageTFR
+        The intertrial coherence. Only returned if return_itc is True.
+
+    See Also
+    --------
+    cwt : Compute time-frequency decomposition with user-provided wavelets
+    cwt_morlet, multitaper_psd
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    # verbose dec is used b/c subfunctions are verbose
+    data = _get_data(inst, return_itc)
+    picks = pick_types(inst.info, meg=True, eeg=True)
+    info = pick_info(inst.info, picks)
+    data = data[:, picks, :]
+    n_jobs = check_n_jobs(n_jobs)
+    power, itc, freqs = _induced_power_stockwell(data,
+                                                 sfreq=info['sfreq'],
+                                                 fmin=fmin, fmax=fmax,
+                                                 n_fft=n_fft,
+                                                 width=width,
+                                                 decim=decim,
+                                                 return_itc=return_itc,
+                                                 n_jobs=n_jobs)
+    times = inst.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
+    if return_itc:
+        out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
+                               freqs.copy(), nave, method='stockwell-itc'))
+    return out
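+
+# Usage sketch (illustrative, assuming `epochs` is an existing mne.Epochs
+# instance):
+#     power, itc = tfr_stockwell(epochs, fmin=4., fmax=30., width=1.0,
+#                                decim=2, return_itc=True)
+#     power.plot([0])  # inspect the first channel's TFR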
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/ar.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/ar.py
new file mode 100644
index 0000000..8be7039
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/ar.py
@@ -0,0 +1,165 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          The statsmodels folks for AR yule_walker
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.linalg import toeplitz
+
+from ..io.pick import pick_types
+from ..utils import verbose
+
+
+# XXX : Back ported from statsmodels
+
+def yule_walker(X, order=1, method="unbiased", df=None, inv=False,
+                demean=True):
+    """
+    Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
+
+    Unbiased or maximum-likelihood estimator (mle)
+
+    See, for example:
+
+    http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
+
+    Parameters
+    ----------
+    X : array-like
+        1d array
+    order : integer, optional
+        The order of the autoregressive process.  Default is 1.
+    method : string, optional
+       Method can be "unbiased" or "mle" and this determines denominator in
+       estimate of autocorrelation function (ACF) at lag k. If "mle", the
+       denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
+       The default is unbiased.
+    df : integer, optional
+       Specifies the degrees of freedom. If `df` is supplied, then it is
+       assumed that X has `df` degrees of freedom rather than `n`.  Default is
+       None.
+    inv : bool
+        If inv is True the inverse of R is also returned.  Default is False.
+    demean : bool
+        True, the mean is subtracted from `X` before estimation.
+
+    Returns
+    -------
+    rho : ndarray
+        The autoregressive coefficients
+    sigma : float
+        The square root of the residual variance of the fit (the estimated
+        standard deviation of the innovation process)
+
+    """
+    # TODO: define R better, look back at notes and technical notes on YW.
+    # First link here is useful
+    # http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm  # noqa
+    method = str(method).lower()
+    if method not in ["unbiased", "mle"]:
+        raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
+    X = np.array(X, float)
+    if demean:
+        X -= X.mean()                  # automatically demeans X
+    n = df or X.shape[0]
+
+    if method == "unbiased":        # this is df_resid ie., n - p
+        def denom(k):
+            return n - k
+    else:
+        def denom(k):
+            return n
+    if X.ndim > 1 and X.shape[1] != 1:
+        raise ValueError("expecting a vector to estimate AR parameters")
+    r = np.zeros(order + 1, np.float64)
+    r[0] = (X ** 2).sum() / denom(0)
+    for k in range(1, order + 1):
+        r[k] = (X[0:-k] * X[k:]).sum() / denom(k)
+    R = toeplitz(r[:-1])
+
+    rho = np.linalg.solve(R, r[1:])
+    sigmasq = r[0] - (r[1:] * rho).sum()
+    if inv:
+        return rho, np.sqrt(sigmasq), np.linalg.inv(R)
+    else:
+        return rho, np.sqrt(sigmasq)
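+
+# Usage sketch (illustrative): recover the coefficients of a known AR(2)
+# process x[t] = 0.75 * x[t-1] - 0.25 * x[t-2] + e[t], simulated as an
+# all-pole filter applied to white noise:
+#     from scipy.signal import lfilter
+#     rng = np.random.RandomState(0)
+#     x = lfilter([1.], [1., -0.75, 0.25], rng.randn(10000))
+#     rho, sigma = yule_walker(x, order=2)
+#     # rho ~ [0.75, -0.25], sigma ~ 1.0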
+
+
+def ar_raw(raw, order, picks, tmin=None, tmax=None):
+    """Fit AR model on raw data
+
+    Fit AR models for each channels and returns the models
+    coefficients for each of them.
+
+    Parameters
+    ----------
+    raw : Raw instance
+        The raw data
+    order : int
+        The AR model order
+    picks : array-like of int
+        The channels indices to include
+    tmin : float
+        The beginning of time interval in seconds.
+    tmax : float
+        The end of time interval in seconds.
+
+    Returns
+    -------
+    coefs : array
+        Sets of coefficients for each channel
+    """
+    start, stop = None, None
+    if tmin is not None:
+        start = raw.time_as_index(tmin)[0]
+    if tmax is not None:
+        stop = raw.time_as_index(tmax)[0] + 1
+    data, times = raw[picks, start:stop]
+
+    coefs = np.empty((len(data), order))
+    for k, d in enumerate(data):
+        this_coefs, _ = yule_walker(d, order=order)
+        coefs[k, :] = this_coefs
+    return coefs
+
+
+ at verbose
+def fit_iir_model_raw(raw, order=2, picks=None, tmin=None, tmax=None,
+                      verbose=None):
+    """Fits an AR model to raw data and creates the corresponding IIR filter
+
+    The computed filter is the average filter for all the picked channels.
+    The frequency response is given by:
+
+    .. math::
+
+        H(e^{jw}) = \\frac{1}{a[0] + a[1]e^{-jw} + ...
+                                  + a[n]e^{-jnw}}
+
+    Parameters
+    ----------
+    raw : Raw object
+        an instance of Raw.
+    order : int
+        order of the AR model, i.e. the order of the denominator of the
+        resulting all-pole IIR filter.
+    picks : array-like of int | None
+        indices of selected channels. If None, MEG and EEG channels are used.
+    tmin : float
+        The beginning of time interval in seconds.
+    tmax : float
+        The end of time interval in seconds.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    b : ndarray
+        Numerator filter coefficients.
+    a : ndarray
+        Denominator filter coefficients
+    """
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True)
+    coefs = ar_raw(raw, order=order, picks=picks, tmin=tmin, tmax=tmax)
+    mean_coefs = np.mean(coefs, axis=0)  # mean model across channels
+    a = np.concatenate(([1.], -mean_coefs))  # filter coefficients
+    return np.array([1.]), a
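+
+# Usage sketch (illustrative, assuming `raw` is an existing Raw instance):
+# fit an AR(5) model on a data segment and inspect the frequency response of
+# the resulting all-pole IIR filter:
+#     from scipy.signal import freqz
+#     b, a = fit_iir_model_raw(raw, order=5, tmin=60., tmax=180.)
+#     w, h = freqz(b, a, worN=1024)
+#     freqs = w * raw.info['sfreq'] / (2 * np.pi)  # rad/sample -> Hz
+#     response_db = 20 * np.log10(np.abs(h))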
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/csd.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/csd.py
new file mode 100644
index 0000000..e147da2
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/csd.py
@@ -0,0 +1,258 @@
+# Author: Roman Goj <roman.goj at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+import copy as cp
+
+import numpy as np
+from scipy.fftpack import fftfreq
+
+from ..io.pick import pick_types
+from ..utils import logger, verbose
+from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
+                                         _csd_from_mt, _psd_from_mt_adaptive)
+
+
+class CrossSpectralDensity(object):
+    """Cross-spectral density
+
+    Parameters
+    ----------
+    data : array of shape (n_channels, n_channels)
+        The cross-spectral density matrix.
+    ch_names : list of str
+        List of channel names.
+    projs : list of Projection
+        List of projectors used in the CSD calculation.
+    bads : list of str
+        List of bad channels.
+    frequencies : float | list of float
+        Frequency or frequencies for which the CSD matrix was calculated. If a
+        list is passed, data is a sum across CSD matrices for all frequencies.
+    n_fft : int
+        Length of the FFT used when calculating the CSD matrix.
+    """
+    def __init__(self, data, ch_names, projs, bads, frequencies, n_fft):
+        self.data = data
+        self.dim = len(data)
+        self.ch_names = cp.deepcopy(ch_names)
+        self.projs = cp.deepcopy(projs)
+        self.bads = cp.deepcopy(bads)
+        self.frequencies = np.atleast_1d(np.copy(frequencies))
+        self.n_fft = n_fft
+
+    def __repr__(self):
+        s = 'frequencies : %s' % self.frequencies
+        s += ', size : %s x %s' % self.data.shape
+        s += ', data : %s' % self.data
+        return '<CrossSpectralDensity  |  %s>' % s
+
+
+ at verbose
+def compute_epochs_csd(epochs, mode='multitaper', fmin=0, fmax=np.inf,
+                       fsum=True, tmin=None, tmax=None, n_fft=None,
+                       mt_bandwidth=None, mt_adaptive=False, mt_low_bias=True,
+                       projs=None, verbose=None):
+    """Estimate cross-spectral density from epochs
+
+    Note: Baseline correction should be used when creating the Epochs.
+          Otherwise the computed cross-spectral density will be inaccurate.
+
+    Note: Results are scaled by sampling frequency for compatibility with
+          Matlab.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    mode : str
+        Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
+    fmin : float
+        Minimum frequency of interest.
+    fmax : float | np.inf
+        Maximum frequency of interest.
+    fsum : bool
+        Sum CSD values for the frequencies of interest. Summing is performed
+        instead of averaging so that accumulated power is comparable to power
+        in the time domain. If True, a single CSD matrix will be returned. If
+        False, the output will be a list of CSD matrices.
+    tmin : float | None
+        Minimum time instant to consider. If None start at first sample.
+    tmax : float | None
+        Maximum time instant to consider. If None end at last sample.
+    n_fft : int | None
+        Length of the FFT. If None the exact number of samples between tmin and
+        tmax will be used.
+    mt_bandwidth : float | None
+        The bandwidth of the multitaper windowing function in Hz.
+        Only used in 'multitaper' mode.
+    mt_adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD.
+        Only used in 'multitaper' mode.
+    mt_low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth. Only used in 'multitaper' mode.
+    projs : list of Projection | None
+        List of projectors to use in CSD calculation, or None to indicate that
+        the projectors from the epochs should be inherited.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    csd : instance of CrossSpectralDensity
+        The computed cross-spectral density.
+    """
+    # Portions of this code adapted from mne/connectivity/spectral.py
+
+    # Check correctness of input data and parameters
+    if fmax < fmin:
+        raise ValueError('fmax must be larger than fmin')
+    tstep = epochs.times[1] - epochs.times[0]
+    if tmin is not None and tmin < epochs.times[0] - tstep:
+        raise ValueError('tmin should be larger than the smallest data time '
+                         'point')
+    if tmax is not None and tmax > epochs.times[-1] + tstep:
+        raise ValueError('tmax should be smaller than the largest data time '
+                         'point')
+    if tmax is not None and tmin is not None:
+        if tmax < tmin:
+            raise ValueError('tmax must be larger than tmin')
+    if epochs.baseline is None:
+        warnings.warn('Epochs are not baseline corrected, cross-spectral '
+                      'density may be inaccurate')
+
+    if projs is None:
+        projs = cp.deepcopy(epochs.info['projs'])
+    else:
+        projs = cp.deepcopy(projs)
+
+    picks_meeg = pick_types(epochs[0].info, meg=True, eeg=True, eog=False,
+                            ref_meg=False, exclude='bads')
+    ch_names = [epochs.ch_names[k] for k in picks_meeg]
+
+    # Preparing time window slice
+    tstart, tend = None, None
+    if tmin is not None:
+        tstart = np.where(epochs.times >= tmin)[0][0]
+    if tmax is not None:
+        tend = np.where(epochs.times <= tmax)[0][-1] + 1
+    tslice = slice(tstart, tend, None)
+    n_times = len(epochs.times[tslice])
+    n_fft = n_times if n_fft is None else n_fft
+
+    # Preparing frequencies of interest
+    sfreq = epochs.info['sfreq']
+    orig_frequencies = fftfreq(n_fft, 1. / sfreq)
+    freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
+    frequencies = orig_frequencies[freq_mask]
+    n_freqs = len(frequencies)
+
+    if n_freqs == 0:
+        raise ValueError('No discrete fourier transform results within '
+                         'the given frequency window. Please widen either '
+                         'the frequency window or the time window')
+
+    # Preparing for computing CSD
+    logger.info('Computing cross-spectral density from epochs...')
+    if mode == 'multitaper':
+        # Compute standardized half-bandwidth
+        if mt_bandwidth is not None:
+            half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+        else:
+            half_nbw = 2
+
+        # Compute DPSS windows
+        n_tapers_max = int(2 * half_nbw)
+        window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                           low_bias=mt_low_bias)
+        n_tapers = len(eigvals)
+        logger.info('    using multitaper spectrum estimation with %d DPSS '
+                    'windows' % n_tapers)
+
+        if mt_adaptive and len(eigvals) < 3:
+            warnings.warn('Not adaptively combining the spectral estimators '
+                          'due to a low number of tapers.')
+            mt_adaptive = False
+    elif mode == 'fourier':
+        logger.info('    using FFT with a Hanning window to estimate spectra')
+        window_fun = np.hanning(n_times)
+        mt_adaptive = False
+        eigvals = 1.
+        n_tapers = None
+    else:
+        raise ValueError('Mode has an invalid value.')
+
+    csds_mean = np.zeros((len(ch_names), len(ch_names), n_freqs),
+                         dtype=complex)
+
+    # Picking frequencies of interest
+    freq_mask_mt = freq_mask[orig_frequencies >= 0]
+
+    # Compute CSD for each epoch
+    n_epochs = 0
+    for epoch in epochs:
+        epoch = epoch[picks_meeg][:, tslice]
+
+        # Calculating Fourier transform using multitaper module
+        x_mt, _ = _mt_spectra(epoch, window_fun, sfreq, n_fft)
+
+        if mt_adaptive:
+            # Compute adaptive weights
+            _, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
+                                               return_weights=True)
+            # Tiling weights so that we can easily use _csd_from_mt()
+            weights = weights[:, np.newaxis, :, :]
+            weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
+        else:
+            # Do not use adaptive weights
+            if mode == 'multitaper':
+                weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :,
+                                           np.newaxis]
+            else:
+                # Hack so we can sum over axis=-2
+                weights = np.array([1.])[:, None, None, None]
+
+        x_mt = x_mt[:, :, freq_mask_mt]
+
+        # Calculating CSD
+        # Tiling x_mt so that we can easily use _csd_from_mt()
+        x_mt = x_mt[:, np.newaxis, :, :]
+        x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
+        y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
+        weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
+        csds_epoch = _csd_from_mt(x_mt, y_mt, weights, weights_y)
+
+        # Scaling by number of samples and compensating for loss of power due
+        # to windowing (see section 11.5.2 in Bendat & Piersol).
+        if mode == 'fourier':
+            csds_epoch /= n_times
+            csds_epoch *= 8 / 3.
+
+        # Scaling by sampling frequency for compatibility with Matlab
+        csds_epoch /= sfreq
+
+        csds_mean += csds_epoch
+        n_epochs += 1
+
+    csds_mean /= n_epochs
+
+    logger.info('[done]')
+
+    # Summing over frequencies of interest or returning a list of separate CSD
+    # matrices for each frequency
+    if fsum is True:
+        csd_mean_fsum = np.sum(csds_mean, 2)
+        csd = CrossSpectralDensity(csd_mean_fsum, ch_names, projs,
+                                   epochs.info['bads'],
+                                   frequencies=frequencies, n_fft=n_fft)
+        return csd
+    else:
+        csds = []
+        for i in range(n_freqs):
+            csds.append(CrossSpectralDensity(csds_mean[:, :, i], ch_names,
+                                             projs, epochs.info['bads'],
+                                             frequencies=frequencies[i],
+                                             n_fft=n_fft))
+        return csds
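+
+# Usage sketch (illustrative, assuming `epochs` is an existing,
+# baseline-corrected mne.Epochs instance): estimate an alpha-band CSD over a
+# post-stimulus window, e.g. as input to a DICS beamformer:
+#     data_csd = compute_epochs_csd(epochs, mode='multitaper', fmin=8.,
+#                                   fmax=12., fsum=True, tmin=0.04,
+#                                   tmax=0.145)
+#     print(data_csd.data.shape)  # (n_channels, n_channels)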
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/multitaper.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/multitaper.py
new file mode 100644
index 0000000..37061a9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/multitaper.py
@@ -0,0 +1,554 @@
+# Author : Martin Luessi mluessi at nmr.mgh.harvard.edu (2012)
+# License : BSD 3-clause
+
+# Parts of this code were copied from NiTime http://nipy.sourceforge.net/nitime
+from warnings import warn
+
+import numpy as np
+from scipy import fftpack, linalg
+import warnings
+
+from ..parallel import parallel_func
+from ..utils import verbose, sum_squared
+
+
+def tridisolve(d, e, b, overwrite_b=True):
+    """
+    Symmetric tridiagonal system solver, from Golub and Van Loan pg 157
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+
+    d : ndarray
+      main diagonal stored in d[:]
+    e : ndarray
+      superdiagonal stored in e[:-1]
+    b : ndarray
+      RHS vector
+
+    Returns
+    -------
+
+    x : ndarray
+      Solution to Ax = b (if overwrite_b is False). Otherwise solution is
+      stored in previous RHS vector b
+
+    """
+    N = len(b)
+    # work vectors
+    dw = d.copy()
+    ew = e.copy()
+    if overwrite_b:
+        x = b
+    else:
+        x = b.copy()
+    for k in range(1, N):
+        # e^(k-1) = e(k-1) / d(k-1)
+        # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)
+        t = ew[k - 1]
+        ew[k - 1] = t / dw[k - 1]
+        dw[k] = dw[k] - t * ew[k - 1]
+    for k in range(1, N):
+        x[k] = x[k] - ew[k - 1] * x[k - 1]
+    x[N - 1] = x[N - 1] / dw[N - 1]
+    for k in range(N - 2, -1, -1):
+        x[k] = x[k] / dw[k] - ew[k] * x[k + 1]
+
+    if not overwrite_b:
+        return x
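+
+# Verification sketch (illustrative): solve a small symmetric tridiagonal
+# system and compare against a dense solve.
+#     d = np.array([4., 4., 4.])  # main diagonal
+#     e = np.array([1., 1., 0.])  # superdiagonal stored in e[:-1]
+#     b = np.array([1., 2., 3.])
+#     T = np.diag(d) + np.diag(e[:-1], 1) + np.diag(e[:-1], -1)
+#     x = tridisolve(d, e, b, overwrite_b=False)
+#     np.allclose(x, np.linalg.solve(T, b))  # True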
+
+
+def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8):
+    """Perform an inverse iteration to find the eigenvector corresponding
+    to the given eigenvalue in a symmetric tridiagonal system.
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+
+    d : ndarray
+      main diagonal of the tridiagonal system
+    e : ndarray
+      offdiagonal stored in e[:-1]
+    w : float
+      eigenvalue of the eigenvector
+    x0 : ndarray
+      initial point to start the iteration
+    rtol : float
+      tolerance for the norm of the difference of iterates
+
+    Returns
+    -------
+
+    x0 : ndarray
+      The converged eigenvector
+
+    """
+    eig_diag = d - w
+    if x0 is None:
+        x0 = np.random.randn(len(d))
+    x_prev = np.zeros_like(x0)
+    norm_x = np.linalg.norm(x0)
+    # the eigenvector is unique up to sign change, so iterate
+    # until || |x^(n)| - |x^(n-1)| ||^2 < rtol
+    x0 /= norm_x
+    while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:
+        x_prev = x0.copy()
+        tridisolve(eig_diag, e, x0)
+        norm_x = np.linalg.norm(x0)
+        x0 /= norm_x
+    return x0
+
+
+def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
+                 interp_kind='linear'):
+    """
+    Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
+    for a given frequency-spacing multiple NW and sequence length N.
+
+    Note: Copied from NiTime
+
+    Parameters
+    ----------
+    N : int
+        Sequence length
+    half_nbw : float, unitless
+        Standardized half bandwidth corresponding to 2 * half_bw = BW*f0
+        = BW*N/dt but with dt taken as 1
+    Kmax : int
+        Number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
+    low_bias : bool
+        Keep only tapers with eigenvalues > 0.9
+    interp_from : int (optional)
+        The dpss can be calculated using interpolation from a set of dpss
+        with the same NW and Kmax, but shorter N. This is the length of this
+        shorter set of dpss windows.
+    interp_kind : str (optional)
+        This input variable is passed to scipy.interpolate.interp1d and
+        specifies the kind of interpolation as a string ('linear', 'nearest',
+        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying
+        order of the spline interpolator to use.
+
+
+    Returns
+    -------
+    v, e : tuple,
+        v is an array of DPSS windows shaped (Kmax, N)
+        e are the eigenvalues
+
+    Notes
+    -----
+    Tridiagonal form of DPSS calculation from:
+
+    Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
+    uncertainty V: The discrete case. Bell System Technical Journal,
+    Volume 57 (1978), 1371-1430
+    """
+    from scipy.interpolate import interp1d
+    Kmax = int(Kmax)
+    W = float(half_nbw) / N
+    nidx = np.arange(N, dtype='d')
+
+    # In this case, we create the dpss windows of the smaller size
+    # (interp_from) and then interpolate to the larger size (N)
+    if interp_from is not None:
+        if interp_from > N:
+            e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
+            e_s += 'and N is: %s. ' % N
+            e_s += 'Please enter interp_from smaller than N.'
+            raise ValueError(e_s)
+        dpss = []
+        d, e = dpss_windows(interp_from, half_nbw, Kmax, low_bias=False)
+        for this_d in d:
+            x = np.arange(this_d.shape[-1])
+            I = interp1d(x, this_d, kind=interp_kind)
+            d_temp = I(np.arange(0, this_d.shape[-1] - 1,
+                                 float(this_d.shape[-1] - 1) / N))
+
+            # Rescale:
+            d_temp = d_temp / np.sqrt(sum_squared(d_temp))
+
+            dpss.append(d_temp)
+
+        dpss = np.array(dpss)
+
+    else:
+        # here we want to set up an optimization problem to find a sequence
+        # whose energy is maximally concentrated within band [-W,W].
+        # Thus, the measure lambda(T,W) is the ratio between the energy within
+        # that band, and the total energy. This leads to the eigen-system
+        # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+        # eigenvalue is the sequence with maximally concentrated energy. The
+        # collection of eigenvectors of this system are called Slepian
+        # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+        # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+        # concentration
+        # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+        # Here I set up an alternative symmetric tri-diagonal eigenvalue
+        # problem such that
+        # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+        # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
+        # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
+        # [see Percival and Walden, 1993]
+        diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
+        off_diag = np.zeros_like(nidx)
+        off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
+        # put the diagonals in LAPACK "packed" storage
+        ab = np.zeros((2, N), 'd')
+        ab[1] = diagonal
+        ab[0, 1:] = off_diag[:-1]
+        # only calculate the highest Kmax eigenvalues
+        w = linalg.eigvals_banded(ab, select='i',
+                                  select_range=(N - Kmax, N - 1))
+        w = w[::-1]
+
+        # find the corresponding eigenvectors via inverse iteration
+        t = np.linspace(0, np.pi, N)
+        dpss = np.zeros((Kmax, N), 'd')
+        for k in range(Kmax):
+            dpss[k] = tridi_inverse_iteration(diagonal, off_diag, w[k],
+                                              x0=np.sin((k + 1) * t))
+
+    # By convention (Percival and Walden, 1993 pg 379)
+    # * symmetric tapers (k=0,2,4,...) should have a positive average.
+    # * antisymmetric tapers should begin with a positive lobe
+    fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
+    for i, f in enumerate(fix_symmetric):
+        if f:
+            dpss[2 * i] *= -1
+    fix_skew = (dpss[1::2, 1] < 0)
+    for i, f in enumerate(fix_skew):
+        if f:
+            dpss[2 * i + 1] *= -1
+
+    # Now find the eigenvalues of the original spectral concentration problem
+    # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
+
+    # compute autocorr using FFT (same as nitime.utils.autocorr(dpss) * N)
+    rxx_size = 2 * N - 1
+    n_fft = 2 ** int(np.ceil(np.log2(rxx_size)))
+    dpss_fft = fftpack.fft(dpss, n_fft)
+    dpss_rxx = np.real(fftpack.ifft(dpss_fft * dpss_fft.conj()))
+    dpss_rxx = dpss_rxx[:, :N]
+
+    r = 4 * W * np.sinc(2 * W * nidx)
+    r[0] = 2 * W
+    eigvals = np.dot(dpss_rxx, r)
+
+    if low_bias:
+        idx = (eigvals > 0.9)
+        if not idx.any():
+            warnings.warn('Could not properly use low_bias, '
+                          'keeping lowest-bias taper')
+            idx = [np.argmax(eigvals)]
+        dpss, eigvals = dpss[idx], eigvals[idx]
+    assert len(dpss) > 0  # should never happen
+    return dpss, eigvals
+
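+
+# A quick usage sketch for dpss_windows (an illustrative helper, not part of
+# the upstream API): check the two properties documented above --
+# orthonormality of the tapers and the spectral concentration of the
+# eigenvalues.
+def _example_dpss_windows():
+    tapers, concentrations = dpss_windows(512, half_nbw=4., Kmax=8,
+                                          low_bias=False)
+    assert tapers.shape == (8, 512)
+    # DPSS tapers are orthonormal: V V^T ~ I
+    assert np.allclose(np.dot(tapers, tapers.T), np.eye(8), atol=1e-6)
+    # order 0 has the best concentration, close to 1
+    assert concentrations[0] > 0.99
+    assert concentrations[0] >= concentrations[-1]
+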
+
+def _psd_from_mt_adaptive(x_mt, eigvals, freq_mask, max_iter=150,
+                          return_weights=False):
+    """
+    Perform an iterative procedure to compute the PSD from tapered spectra
+    using the optimal weights.
+
+    Note: Modified from NiTime
+
+    Parameters
+    ----------
+    x_mt : array, shape=(n_signals, n_tapers, n_freqs)
+       The DFTs of the tapered sequences (only positive frequencies)
+    eigvals : array, length n_tapers
+       The eigenvalues of the DPSS tapers
+    freq_mask : array
+        Frequency indices to keep
+    max_iter : int
+       Maximum number of iterations for weight computation
+    return_weights : bool
+       Also return the weights
+
+    Returns
+    -------
+    psd : array, shape=(n_signals, np.sum(freq_mask))
+        The computed PSDs
+    weights : array, shape=(n_signals, n_tapers, np.sum(freq_mask))
+        The weights used to combine the tapered spectra
+
+    Notes
+    -----
+    The weights are used to combine the tapered spectra into the multitaper
+    estimate, such that
+    :math:`S_{mt} = \sum_{k} |w_k|^2 S_k^{mt} / \sum_{k} |w_k|^2`.
+    """
+    n_signals, n_tapers, n_freqs = x_mt.shape
+
+    if len(eigvals) != n_tapers:
+        raise ValueError('Need one eigenvalue for each taper')
+
+    if n_tapers < 3:
+        raise ValueError('Not enough tapers to compute adaptive weights.')
+
+    rt_eig = np.sqrt(eigvals)
+
+    # estimate the variance from an estimate with fixed weights
+    psd_est = _psd_from_mt(x_mt, rt_eig[np.newaxis, :, np.newaxis])
+    x_var = np.trapz(psd_est, dx=np.pi / n_freqs) / (2 * np.pi)
+    del psd_est
+
+    # allocate space for output
+    psd = np.empty((n_signals, np.sum(freq_mask)))
+
+    # only keep the frequencies of interest
+    x_mt = x_mt[:, :, freq_mask]
+
+    if return_weights:
+        weights = np.empty((n_signals, n_tapers, psd.shape[1]))
+
+    for i, (xk, var) in enumerate(zip(x_mt, x_var)):
+        # combine the SDFs in the traditional way in order to estimate
+        # the variance of the timeseries
+
+        # The process is to iteratively switch solving for the following
+        # two expressions:
+        # (1) Adaptive Multitaper SDF:
+        # S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2
+        #
+        # (2) Weights
+        # d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}]
+        #
+        # Where lam_k are the eigenvalues corresponding to the DPSS tapers,
+        # and the expected value of the broadband bias function
+        # E{B_k(f)} is replaced by its full-band integration
+        # (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k)
+
+        # start with an estimate from incomplete data--the first 2 tapers
+        psd_iter = _psd_from_mt(xk[:2, :], rt_eig[:2, np.newaxis])
+
+        err = np.zeros_like(xk)
+        for n in range(max_iter):
+            d_k = (psd_iter / (eigvals[:, np.newaxis] * psd_iter +
+                   (1 - eigvals[:, np.newaxis]) * var))
+            d_k *= rt_eig[:, np.newaxis]
+            # Test for convergence -- this is overly conservative, since
+            # iteration only stops when all frequencies have converged.
+            # A better approach is to iterate separately for each freq, but
+            # that is a nonvectorized algorithm.
+            # Take the RMS difference in weights from the previous iterate
+            # across frequencies. If the maximum RMS error across freqs is
+            # less than 1e-10, then we're converged
+            err -= d_k
+            if np.max(np.mean(err ** 2, axis=0)) < 1e-10:
+                break
+
+            # update the iterative estimate with this d_k
+            psd_iter = _psd_from_mt(xk, d_k)
+            err = d_k
+
+        if n == max_iter - 1:
+            warnings.warn('Iterative multi-taper PSD computation did not '
+                          'converge.', RuntimeWarning)
+
+        psd[i, :] = psd_iter
+
+        if return_weights:
+            weights[i, :, :] = d_k
+
+    if return_weights:
+        return psd, weights
+    else:
+        return psd
+
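+
+# An illustrative comparison of the two ways of combining tapered spectra
+# (a hypothetical helper using only this module; _mt_spectra and
+# _psd_from_mt are defined further below, which is fine at call time): on
+# white noise the adaptive and fixed-weight estimates have the same shape.
+def _example_adaptive_vs_fixed():
+    rng = np.random.RandomState(0)
+    x = rng.randn(2, 512)
+    dpss, eigvals = dpss_windows(512, 4., 7, low_bias=False)
+    x_mt, freqs = _mt_spectra(x, dpss, sfreq=1.)
+    freq_mask = np.ones(len(freqs), dtype=bool)
+    psd_adaptive = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask)
+    weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+    psd_fixed = _psd_from_mt(x_mt, weights)
+    assert psd_adaptive.shape == psd_fixed.shape == (2, len(freqs))
+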
+
+def _psd_from_mt(x_mt, weights):
+    """ compute PSD from tapered spectra
+
+    Parameters
+    ----------
+    x_mt : array
+        Tapered spectra
+    weights : array
+        Weights used to combine the tapered spectra
+
+    Returns
+    -------
+    psd : array
+        The computed PSD
+    """
+    psd = weights * x_mt
+    psd = (psd * psd.conj()).real.sum(axis=-2)
+    psd *= 2 / (weights * weights.conj()).real.sum(axis=-2)
+    return psd
+
+
+def _csd_from_mt(x_mt, y_mt, weights_x, weights_y):
+    """ Compute CSD from tapered spectra
+
+    Parameters
+    ----------
+    x_mt : array
+        Tapered spectra for x
+    y_mt : array
+        Tapered spectra for y
+    weights_x : array
+        Weights used to combine the tapered spectra of x_mt
+    weights_y : array
+        Weights used to combine the tapered spectra of y_mt
+
+    Returns
+    -------
+    csd : array
+        The computed CSD
+    """
+    csd = np.sum(weights_x * x_mt * (weights_y * y_mt).conj(), axis=-2)
+    denom = (np.sqrt((weights_x * weights_x.conj()).real.sum(axis=-2)) *
+             np.sqrt((weights_y * weights_y.conj()).real.sum(axis=-2)))
+    csd *= 2 / denom
+    return csd
+
+
+def _mt_spectra(x, dpss, sfreq, n_fft=None):
+    """ Compute tapered spectra
+
+    Parameters
+    ----------
+    x : array, shape=(n_signals, n_times)
+        Input signal
+    dpss : array, shape=(n_tapers, n_times)
+        The tapers
+    sfreq : float
+        The sampling frequency
+    n_fft : int | None
+        Length of the FFT. If None, the number of samples in the input signal
+        will be used.
+
+    Returns
+    -------
+    x_mt : array, shape=(n_signals, n_tapers, n_freqs)
+        The tapered spectra (only non-negative frequencies are returned)
+    freqs : array
+        The frequency points in Hz of the spectra
+    """
+
+    if n_fft is None:
+        n_fft = x.shape[1]
+
+    # remove mean (do not use in-place subtraction as it may modify input x)
+    x = x - np.mean(x, axis=-1)[:, np.newaxis]
+    x_mt = fftpack.fft(x[:, np.newaxis, :] * dpss, n=n_fft)
+
+    # only keep positive frequencies
+    freqs = fftpack.fftfreq(n_fft, 1. / sfreq)
+    freq_mask = (freqs >= 0)
+
+    x_mt = x_mt[:, :, freq_mask]
+    freqs = freqs[freq_mask]
+
+    return x_mt, freqs
+
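+
+# Shape sketch for _mt_spectra (an illustrative helper): for an even n_fft
+# only the n_fft // 2 non-negative fftfreq bins survive the mask above,
+# because fftpack.fftfreq reports the Nyquist bin as a negative frequency.
+def _example_mt_spectra_shapes():
+    x = np.random.randn(3, 256)
+    tapers, _ = dpss_windows(256, 4., 7, low_bias=False)
+    x_mt, freqs = _mt_spectra(x, tapers, sfreq=256.)
+    assert x_mt.shape == (3, 7, 128)
+    assert np.array_equal(freqs, np.arange(128.))  # 0, 1, ..., 127 Hz
+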
+
+@verbose
+def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
+                   adaptive=False, low_bias=True, n_jobs=1,
+                   normalization='length', verbose=None):
+    """Compute power spectrum density (PSD) using a multi-taper method
+
+    Parameters
+    ----------
+    x : array, shape=(n_signals, n_times) or (n_times,)
+        The data to compute PSD from.
+    sfreq : float
+        The sampling frequency.
+    fmin : float
+        The lower frequency of interest.
+    fmax : float
+        The upper frequency of interest.
+    bandwidth : float
+        The bandwidth of the multi taper windowing function in Hz.
+    adaptive : bool
+        Use adaptive weights to combine the tapered spectra into PSD
+        (slow, use n_jobs >> 1 to speed up computation).
+    low_bias : bool
+        Only use tapers with more than 90% spectral concentration within
+        bandwidth.
+    n_jobs : int
+        Number of parallel jobs to use (only used if adaptive=True).
+    normalization : str
+        Either "full" or "length" (default). If "full", the PSD will
+        be normalized by the sampling rate as well as the length of
+        the signal (as in nitime).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psd : array, shape=(n_signals, len(freqs)) or (len(freqs),)
+        The computed PSD.
+    freqs : array
+        The frequency points in Hz of the PSD.
+
+    See Also
+    --------
+    mne.io.Raw.plot_psd, mne.Epochs.plot_psd
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    if normalization not in ('length', 'full'):
+        raise ValueError('Normalization must be "length" or "full", not %s'
+                         % normalization)
+    if x.ndim > 2:
+        raise ValueError('x can only be 1d or 2d')
+
+    x_in = np.atleast_2d(x)
+
+    n_times = x_in.shape[1]
+
+    # compute standardized half-bandwidth
+    if bandwidth is not None:
+        half_nbw = float(bandwidth) * n_times / (2 * sfreq)
+    else:
+        half_nbw = 4
+
+    n_tapers_max = int(2 * half_nbw)
+
+    dpss, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                 low_bias=low_bias)
+
+    # compute the tapered spectra
+    x_mt, freqs = _mt_spectra(x_in, dpss, sfreq)
+
+    # decide which frequencies to keep
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+
+    # combine the tapered spectra
+    if adaptive and len(eigvals) < 3:
+        warnings.warn('Not adaptively combining the spectral estimators '
+                      'due to a low number of tapers.')
+        adaptive = False
+
+    if not adaptive:
+        x_mt = x_mt[:, :, freq_mask]
+        weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
+        psd = _psd_from_mt(x_mt, weights)
+    else:
+        parallel, my_psd_from_mt_adaptive, n_jobs = \
+            parallel_func(_psd_from_mt_adaptive, n_jobs)
+        out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
+                       for x in np.array_split(x_mt, n_jobs))
+        psd = np.concatenate(out)
+
+    if x.ndim == 1:
+        # return a 1d array if input was 1d
+        psd = psd[0, :]
+
+    freqs = freqs[freq_mask]
+    if normalization == 'full':
+        psd /= sfreq
+
+    return psd, freqs
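+
+
+# A usage sketch for the public entry point (a hypothetical helper on purely
+# synthetic data): the spectral peak of a noisy 40 Hz sinusoid should land
+# within one taper half-bandwidth of 40 Hz.
+def _example_multitaper_psd():
+    sfreq = 256.
+    t = np.arange(512) / sfreq
+    rng = np.random.RandomState(42)
+    x = np.sin(2 * np.pi * 40. * t) + 0.1 * rng.randn(t.size)
+    psd, freqs = multitaper_psd(x, sfreq=sfreq, fmin=1., fmax=100.)
+    # default bandwidth is 2 * half_nbw * sfreq / n_times = 4 Hz here
+    assert abs(freqs[np.argmax(psd)] - 40.) <= 2.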
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/psd.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/psd.py
new file mode 100644
index 0000000..c728163
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/psd.py
@@ -0,0 +1,199 @@
+# Authors : Alexandre Gramfort, alexandre.gramfort at telecom-paristech.fr (2011)
+#           Denis A. Engemann <denis.engemann at gmail.com>
+# License : BSD 3-clause
+
+import numpy as np
+
+from ..parallel import parallel_func
+from ..io.proj import make_projector_info
+from ..io.pick import pick_types
+from ..utils import logger, verbose, _time_mask
+
+
+@verbose
+def compute_raw_psd(raw, tmin=0., tmax=None, picks=None, fmin=0,
+                    fmax=np.inf, n_fft=2048, n_overlap=0,
+                    proj=False, n_jobs=1, verbose=None):
+    """Compute power spectral density with average periodograms.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data.
+    tmin : float
+        Minimum time instant to consider (in seconds).
+    tmax : float | None
+        Maximum time instant to consider (in seconds). None will use the
+        end of the file.
+    picks : array-like of int | None
+        The selection of channels to include in the computation.
+        If None, take all channels.
+    fmin : float
+        Min frequency of interest
+    fmax : float
+        Max frequency of interest
+    n_fft : int
+        The length of the tapers, i.e. the windows. The smaller
+        it is, the smoother the PSDs are.
+    n_overlap : int
+        The number of points of overlap between blocks. The default value
+        is 0 (no overlap).
+    proj : bool
+        Apply SSP projection vectors.
+    n_jobs : int
+        Number of CPUs to use in the computation.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psds : array of float
+        The PSD for all channels
+    freqs : array of float
+        The frequencies
+    """
+    from scipy.signal import welch
+    tmax = raw.times[-1] if tmax is None else tmax
+    start, stop = raw.time_as_index([tmin, tmax])
+    if picks is not None:
+        data, times = raw[picks, start:(stop + 1)]
+    else:
+        data, times = raw[:, start:(stop + 1)]
+    n_fft, n_overlap = _check_nfft(len(times), n_fft, n_overlap)
+
+    if proj:
+        proj, _ = make_projector_info(raw.info)
+        if picks is not None:
+            data = np.dot(proj[picks][:, picks], data)
+        else:
+            data = np.dot(proj, data)
+
+    n_fft = int(n_fft)
+    Fs = raw.info['sfreq']
+
+    logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
+
+    parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
+                                                verbose=verbose)
+
+    freqs = np.arange(n_fft // 2 + 1) * (Fs / n_fft)
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freq_mask]
+
+    psds = np.array(parallel(my_pwelch([channel],
+                                       noverlap=n_overlap, nfft=n_fft, fs=Fs,
+                                       freq_mask=freq_mask, welch_fun=welch)
+                             for channel in data))[:, 0, :]
+
+    return psds, freqs
+
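+
+# A runnable sketch on synthetic data (a hypothetical helper; assumes
+# mne.create_info and mne.io.RawArray from elsewhere in this package):
+def _example_compute_raw_psd():
+    import mne
+    info = mne.create_info(['SIM0001', 'SIM0002'], sfreq=256.,
+                           ch_types='eeg')
+    raw = mne.io.RawArray(np.random.randn(2, 2560), info)  # 10 s of noise
+    psds, freqs = compute_raw_psd(raw, tmin=0., tmax=5., fmin=2., fmax=70.,
+                                  n_fft=512)
+    assert psds.shape == (2, len(freqs))
+    assert freqs.min() >= 2. and freqs.max() <= 70.
+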
+
+def _pwelch(epoch, noverlap, nfft, fs, freq_mask, welch_fun):
+    """Aux function"""
+    return welch_fun(epoch, nperseg=nfft, noverlap=noverlap,
+                     nfft=nfft, fs=fs)[1][..., freq_mask]
+
+
+def _compute_psd(data, fmin, fmax, Fs, n_fft, psd, n_overlap, pad_to):
+    """Compute the PSD"""
+    out = [psd(d, Fs=Fs, NFFT=n_fft, noverlap=n_overlap, pad_to=pad_to)
+           for d in data]
+    psd = np.array([o[0] for o in out])
+    freqs = out[0][1]
+    mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[mask]
+    return psd[:, mask], freqs
+
+
+def _check_nfft(n, n_fft, n_overlap):
+    """Helper to make sure n_fft and n_overlap make sense"""
+    n_fft = n if n_fft > n else n_fft
+    n_overlap = n_fft - 1 if n_overlap >= n_fft else n_overlap
+    return n_fft, n_overlap
+
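+
+# Worked examples of the clamping above (illustrative): n_fft can never
+# exceed the number of samples, and the overlap is forced strictly below
+# n_fft.
+def _example_check_nfft():
+    assert _check_nfft(100, 2048, 0) == (100, 0)  # n_fft clamped to n
+    assert _check_nfft(100, 64, 64) == (64, 63)   # overlap forced < n_fft
+    assert _check_nfft(100, 64, 32) == (64, 32)   # already consistent
+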
+
+@verbose
+def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, tmin=None,
+                       tmax=None, n_fft=256, n_overlap=0, proj=False,
+                       n_jobs=1, verbose=None):
+    """Compute power spectral density with average periodograms.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    picks : array-like of int | None
+        The selection of channels to include in the computation.
+        If None, take all channels.
+    fmin : float
+        Min frequency of interest
+    fmax : float
+        Max frequency of interest
+    tmin : float | None
+        Min time of interest
+    tmax : float | None
+        Max time of interest
+    n_fft : int
+        The length of the tapers, i.e. the windows. The smaller
+        it is, the smoother the PSDs are. The default value is 256.
+        If ``n_fft > len(epochs.times)``, it will be adjusted down to
+        ``len(epochs.times)``.
+    n_overlap : int
+        The number of points of overlap between blocks. Will be adjusted
+        to be <= n_fft.
+    proj : bool
+        Apply SSP projection vectors.
+    n_jobs : int
+        Number of CPUs to use in the computation.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    psds : ndarray (n_epochs, n_channels, n_freqs)
+        The power spectral densities.
+    freqs : ndarray (n_freqs)
+        The frequencies.
+    """
+    from scipy.signal import welch
+    n_fft = int(n_fft)
+    Fs = epochs.info['sfreq']
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+    n_fft, n_overlap = _check_nfft(len(epochs.times), n_fft, n_overlap)
+
+    if tmin is not None or tmax is not None:
+        time_mask = _time_mask(epochs.times, tmin, tmax)
+    else:
+        time_mask = Ellipsis
+
+    data = epochs.get_data()[:, picks][..., time_mask]
+    if proj:
+        proj, _ = make_projector_info(epochs.info)
+        if picks is not None:
+            data = np.dot(proj[picks][:, picks], data)
+        else:
+            data = np.dot(proj, data)
+
+    logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
+
+    freqs = np.arange(n_fft // 2 + 1, dtype=float) * (Fs / n_fft)
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freq_mask]
+    psds = np.empty(data.shape[:-1] + (freqs.size,))
+
+    parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
+                                                verbose=verbose)
+
+    for idx, fepochs in zip(np.array_split(np.arange(len(data)), n_jobs),
+                            parallel(my_pwelch(epoch, noverlap=n_overlap,
+                                               nfft=n_fft, fs=Fs,
+                                               freq_mask=freq_mask,
+                                               welch_fun=welch)
+                                     for epoch in np.array_split(data,
+                                                                 n_jobs))):
+        for i_epoch, f_epoch in zip(idx, fepochs):
+            psds[i_epoch, :, :] = f_epoch
+
+    return psds, freqs
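+
+
+# A runnable sketch on synthetic epochs (a hypothetical helper; assumes
+# mne.create_info and mne.EpochsArray, as used in the test suite):
+def _example_compute_epochs_psd():
+    import mne
+    info = mne.create_info(['SIM0001', 'SIM0002'], sfreq=200.,
+                           ch_types='eeg')
+    data = np.random.randn(5, 2, 200)  # 5 epochs, 2 channels, 1 s each
+    events = np.c_[np.arange(5) * 200, np.zeros(5, int), np.ones(5, int)]
+    epochs = mne.EpochsArray(data, info, events)
+    psds, freqs = compute_epochs_psd(epochs, fmin=2., fmax=40., n_fft=128)
+    assert psds.shape == (5, 2, len(freqs))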
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/stft.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/stft.py
new file mode 100644
index 0000000..83e2733
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/stft.py
@@ -0,0 +1,237 @@
+from math import ceil
+import numpy as np
+from scipy.fftpack import fft, ifft, fftfreq
+
+from ..utils import logger, verbose
+
+
+@verbose
+def stft(x, wsize, tstep=None, verbose=None):
+    """STFT Short-Term Fourier Transform using a sine window.
+
+    The transformation is designed to be a tight frame that can be
+    perfectly inverted. It only returns the positive frequencies.
+
+    Parameters
+    ----------
+    x : 2d array of size n_signals x T
+        containing the multi-channel signal
+    wsize : int
+        length of the STFT window in samples (must be a multiple of 4)
+    tstep : int
+        step between successive windows in samples (must be a multiple of 2,
+        a divisor of wsize and no larger than wsize/2) (default: wsize/2)
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : 3d array of shape [n_signals, wsize / 2 + 1, n_step]
+        STFT coefficients for positive frequencies with
+        n_step = ceil(T / tstep)
+
+    Examples
+    --------
+    X = stft(x, wsize)
+    X = stft(x, wsize, tstep)
+
+    See Also
+    --------
+    istft
+    stftfreq
+    """
+    if not np.isrealobj(x):
+        raise ValueError("x is not a real valued array")
+
+    if x.ndim == 1:
+        x = x[None, :]
+
+    n_signals, T = x.shape
+    wsize = int(wsize)
+
+    # Errors and warnings
+    if wsize % 4:
+        raise ValueError('The window length must be a multiple of 4.')
+
+    if tstep is None:
+        tstep = wsize / 2
+
+    tstep = int(tstep)
+
+    if (wsize % tstep) or (tstep % 2):
+        raise ValueError('The step size must be a multiple of 2 and a '
+                         'divider of the window length.')
+
+    if tstep > wsize / 2:
+        raise ValueError('The step size must be smaller than half the '
+                         'window length.')
+
+    n_step = int(ceil(T / float(tstep)))
+    n_freq = wsize // 2 + 1
+    logger.info("Number of frequencies: %d" % n_freq)
+    logger.info("Number of time steps: %d" % n_step)
+
+    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex)
+
+    if n_signals == 0:
+        return X
+
+    # Defining sine window
+    win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
+    win2 = win ** 2
+
+    swin = np.zeros((n_step - 1) * tstep + wsize)
+    for t in range(n_step):
+        swin[t * tstep:t * tstep + wsize] += win2
+    swin = np.sqrt(wsize * swin)
+
+    # Zero-padding and Pre-processing for edges
+    xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),
+                  dtype=x.dtype)
+    xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x
+    x = xp
+
+    for t in range(n_step):
+        # Framing
+        wwin = win / swin[t * tstep: t * tstep + wsize]
+        frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]
+        # FFT
+        fframe = fft(frame)
+        X[:, :, t] = fframe[:, :n_freq]
+
+    return X
+
+
+def istft(X, tstep=None, Tx=None):
+    """ISTFT Inverse Short-Term Fourier Transform using a sine window
+
+    Parameters
+    ----------
+    X : 3d array of shape [n_signals, wsize / 2 + 1,  n_step]
+        The STFT coefficients for positive frequencies
+    tstep : int
+        step between successive windows in samples (must be a multiple of 2,
+        a divisor of wsize and no larger than wsize/2) (default: wsize/2)
+    Tx : int
+        Length of the returned signal. If None, Tx = n_step * tstep.
+
+    Returns
+    -------
+    x : 2d array of shape [n_signals, Tx]
+        Array containing the inverse STFT signals
+
+    Examples
+    --------
+    x = istft(X)
+    x = istft(X, tstep)
+
+    See Also
+    --------
+    stft
+    """
+    # Errors and warnings
+    n_signals, n_win, n_step = X.shape
+    if n_win % 2 == 0:
+        raise ValueError('The number of rows of the STFT matrix must be odd.')
+
+    wsize = 2 * (n_win - 1)
+    if tstep is None:
+        tstep = wsize / 2
+
+    if wsize % tstep:
+        raise ValueError('The step size must be a divider of two times the '
+                         'number of rows of the STFT matrix minus two.')
+
+    if tstep % 2:
+        raise ValueError('The step size must be a multiple of 2.')
+
+    if tstep > wsize / 2:
+        raise ValueError('The step size must be smaller than the number of '
+                         'rows of the STFT matrix minus one.')
+
+    if Tx is None:
+        Tx = n_step * tstep
+
+    T = n_step * tstep
+
+    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float)
+
+    if n_signals == 0:
+        return x[:, :Tx]
+
+    # Defining sine window
+    win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
+    # win = win / norm(win);
+
+    # Pre-processing for edges
+    swin = np.zeros(T + wsize - tstep, dtype=np.float)
+    for t in range(n_step):
+        swin[t * tstep:t * tstep + wsize] += win ** 2
+    swin = np.sqrt(swin / wsize)
+
+    fframe = np.empty((n_signals, n_win + wsize // 2 - 1), dtype=X.dtype)
+    for t in range(n_step):
+        # IFFT
+        fframe[:, :n_win] = X[:, :, t]
+        fframe[:, n_win:] = np.conj(X[:, wsize // 2 - 1: 0: -1, t])
+        frame = ifft(fframe)
+        wwin = win / swin[t * tstep:t * tstep + wsize]
+        # Overlap-add
+        x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin)
+
+    # Truncation
+    x = x[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1][:, :Tx].copy()
+    return x
+
+
+def stftfreq(wsize, sfreq=None):
+    """Frequencies of stft transformation
+
+    Parameters
+    ----------
+    wsize : int
+        Size of stft window
+    sfreq : float
+        Sampling frequency. If None, the frequencies are given between 0 and
+        pi; otherwise they are given in Hz.
+
+    Returns
+    -------
+    freqs : array
+        The positive frequencies returned by stft
+
+    See Also
+    --------
+    stft
+    istft
+    """
+    n_freq = wsize // 2 + 1
+    freqs = fftfreq(wsize)
+    freqs = np.abs(freqs[:n_freq])
+    if sfreq is not None:
+        freqs *= float(sfreq)
+    return freqs
+
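+
+# Round-trip sketch (an illustrative helper): with the tight sine-window
+# frame, istft(stft(x)) reproduces x up to numerical precision, and
+# stftfreq labels the rows of the coefficient array in Hz.
+def _example_stft_roundtrip():
+    x = np.random.randn(2, 1000)
+    X = stft(x, wsize=128, tstep=64)
+    x_rec = istft(X, tstep=64, Tx=1000)
+    assert np.allclose(x, x_rec, atol=1e-8)
+    freqs = stftfreq(128, sfreq=1000.)
+    assert len(freqs) == X.shape[1] == 65  # 0 ... 500 Hz
+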
+
+def stft_norm2(X):
+    """Compute L2 norm of STFT transform
+
+    It takes into account that stft only returns positive frequencies.
+    Since a tight frame is used, this quantity is conserved by the STFT.
+
+    Parameters
+    ----------
+    X : 3D complex array
+        The STFT transforms
+
+    Returns
+    -------
+    norms2 : array
+        The squared L2 norm of every row of X.
+    """
+    X2 = (X * X.conj()).real
+    # double all bins (conjugate symmetry), then count the DC and Nyquist
+    # rows only once
+    norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1) -
+              np.sum(X2[:, -1, :], axis=1))
+    return norms2
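+
+
+# A norm-conservation sketch matching the tight-frame claim above
+# (an illustrative helper, not part of the upstream API):
+def _example_stft_norm2():
+    x = np.random.randn(3, 500)
+    X = stft(x, wsize=64, tstep=32)
+    norms_time = np.sqrt((x ** 2).sum(axis=1))
+    norms_tf = np.sqrt(stft_norm2(X))
+    assert np.allclose(norms_time, norms_tf, atol=1e-8)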
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_ar.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_ar.py
new file mode 100644
index 0000000..01d49f4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_ar.py
@@ -0,0 +1,38 @@
+import os.path as op
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_equal
+
+from mne import io, pick_types
+from mne.time_frequency.ar import yule_walker, fit_iir_model_raw
+from mne.utils import requires_statsmodels, requires_patsy
+
+
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+
+
+@requires_patsy
+@requires_statsmodels
+def test_yule_walker():
+    """Test Yule-Walker against statsmodels
+    """
+    from statsmodels.regression.linear_model import yule_walker as sm_yw
+    d = np.random.randn(100)
+    sm_rho, sm_sigma = sm_yw(d, order=2)
+    rho, sigma = yule_walker(d, order=2)
+    assert_array_almost_equal(sm_sigma, sigma)
+    assert_array_almost_equal(sm_rho, rho)
+
+
+def test_ar_raw():
+    """Test fitting AR model on raw data
+    """
+    raw = io.Raw(raw_fname)
+    # pick MEG gradiometers
+    picks = pick_types(raw.info, meg='grad', exclude='bads')
+    picks = picks[:2]
+    tmin, tmax, order = 0, 10, 2
+    coefs = fit_iir_model_raw(raw, order, picks, tmin, tmax)[1][1:]
+    assert_equal(coefs.shape, (order,))
+    assert_true(0.9 < -coefs[0] < 1.1)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_csd.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_csd.py
new file mode 100644
index 0000000..753b191
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_csd.py
@@ -0,0 +1,163 @@
+import numpy as np
+from nose.tools import assert_raises, assert_equal, assert_true
+from numpy.testing import assert_array_equal
+from os import path as op
+import warnings
+
+import mne
+
+from mne.io import Raw
+from mne.utils import sum_squared
+from mne.time_frequency import compute_epochs_csd, tfr_morlet
+
+warnings.simplefilter('always')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
+
+
+def _get_data():
+    # Read raw data
+    raw = Raw(raw_fname)
+    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
+
+    # Set picks
+    picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
+                           stim=False, exclude='bads')
+
+    # Read several epochs
+    event_id, tmin, tmax = 1, -0.2, 0.5
+    events = mne.read_events(event_fname)[0:100]
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                        picks=picks, baseline=(None, 0), preload=True,
+                        reject=dict(grad=4000e-13, mag=4e-12))
+
+    # Create an epochs object with one epoch and one channel of artificial data
+    event_id, tmin, tmax = 1, 0.0, 1.0
+    epochs_sin = mne.Epochs(raw, events[0:5], event_id, tmin, tmax, proj=True,
+                            picks=[0], baseline=(None, 0), preload=True,
+                            reject=dict(grad=4000e-13))
+    freq = 10
+    epochs_sin._data = np.sin(2 * np.pi * freq *
+                              epochs_sin.times)[None, None, :]
+    return epochs, epochs_sin
+
+
+def test_compute_epochs_csd():
+    """Test computing cross-spectral density from epochs
+    """
+    epochs, epochs_sin = _get_data()
+    # Check that wrong parameters are recognized
+    assert_raises(ValueError, compute_epochs_csd, epochs, mode='notamode')
+    assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=10)
+    assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=20.1)
+    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0.15, tmax=0.1)
+    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0, tmax=10)
+    assert_raises(ValueError, compute_epochs_csd, epochs, tmin=10, tmax=11)
+
+    data_csd_mt = compute_epochs_csd(epochs, mode='multitaper', fmin=8,
+                                     fmax=12, tmin=0.04, tmax=0.15)
+    data_csd_fourier = compute_epochs_csd(epochs, mode='fourier', fmin=8,
+                                          fmax=12, tmin=0.04, tmax=0.15)
+
+    # Check shape of the CSD matrix
+    n_chan = len(data_csd_mt.ch_names)
+    assert_equal(data_csd_mt.data.shape, (n_chan, n_chan))
+    assert_equal(data_csd_fourier.data.shape, (n_chan, n_chan))
+
+    # Check if the CSD matrix is hermitian
+    assert_array_equal(np.tril(data_csd_mt.data).T.conj(),
+                       np.triu(data_csd_mt.data))
+    assert_array_equal(np.tril(data_csd_fourier.data).T.conj(),
+                       np.triu(data_csd_fourier.data))
+
+    # Computing induced power for comparison
+    epochs.crop(tmin=0.04, tmax=0.15)
+    tfr = tfr_morlet(epochs, freqs=[10], n_cycles=0.6, return_itc=False)
+    power = np.mean(tfr.data, 2)
+
+    # Maximum PSD should occur for specific channel
+    max_ch_power = power.argmax()
+    max_ch_mt = data_csd_mt.data.diagonal().argmax()
+    max_ch_fourier = data_csd_fourier.data.diagonal().argmax()
+    assert_equal(max_ch_mt, max_ch_power)
+    assert_equal(max_ch_fourier, max_ch_power)
+
+    # Maximum CSD should occur for specific channel
+    ch_csd_mt = [np.abs(data_csd_mt.data[max_ch_power][i])
+                 if i != max_ch_power else 0 for i in range(n_chan)]
+    max_ch_csd_mt = np.argmax(ch_csd_mt)
+    ch_csd_fourier = [np.abs(data_csd_fourier.data[max_ch_power][i])
+                      if i != max_ch_power else 0 for i in range(n_chan)]
+    max_ch_csd_fourier = np.argmax(ch_csd_fourier)
+    assert_equal(max_ch_csd_mt, max_ch_csd_fourier)
+
+    # Check a list of CSD matrices is returned for multiple frequencies within
+    # a given range when fsum=False
+    csd_fsum = compute_epochs_csd(epochs, mode='fourier', fmin=8, fmax=20,
+                                  fsum=True)
+    csds = compute_epochs_csd(epochs, mode='fourier', fmin=8, fmax=20,
+                              fsum=False)
+    freqs = [csd.frequencies[0] for csd in csds]
+
+    csd_sum = np.zeros_like(csd_fsum.data)
+    for csd in csds:
+        csd_sum += csd.data
+
+    assert(len(csds) == 2)
+    assert(len(csd_fsum.frequencies) == 2)
+    assert_array_equal(csd_fsum.frequencies, freqs)
+    assert_array_equal(csd_fsum.data, csd_sum)
+
+
+def test_compute_epochs_csd_on_artificial_data():
+    """Test computing CSD on artificial data
+    """
+    epochs, epochs_sin = _get_data()
+    sfreq = epochs_sin.info['sfreq']
+
+    # Computing signal power in the time domain
+    signal_power = sum_squared(epochs_sin._data)
+    signal_power_per_sample = signal_power / len(epochs_sin.times)
+
+    # Computing signal power in the frequency domain
+    data_csd_fourier = compute_epochs_csd(epochs_sin, mode='fourier')
+    data_csd_mt = compute_epochs_csd(epochs_sin, mode='multitaper')
+    fourier_power = np.abs(data_csd_fourier.data[0, 0]) * sfreq
+    mt_power = np.abs(data_csd_mt.data[0, 0]) * sfreq
+    assert_true(abs(fourier_power - signal_power) <= 0.5)
+    assert_true(abs(mt_power - signal_power) <= 1)
+
+    # Power per sample should not depend on time window length
+    for tmax in [0.2, 0.4, 0.6, 0.8]:
+        for add_n_fft in [30, 0, 30]:
+            t_mask = (epochs_sin.times >= 0) & (epochs_sin.times <= tmax)
+            n_samples = sum(t_mask)
+            n_fft = n_samples + add_n_fft
+
+            data_csd_fourier = compute_epochs_csd(epochs_sin, mode='fourier',
+                                                  tmin=None, tmax=tmax, fmin=0,
+                                                  fmax=np.inf, n_fft=n_fft)
+            fourier_power_per_sample = np.abs(data_csd_fourier.data[0, 0]) *\
+                sfreq / data_csd_fourier.n_fft
+            assert_true(abs(signal_power_per_sample -
+                            fourier_power_per_sample) < 0.003)
+        # Power per sample should not depend on number of tapers
+        for n_tapers in [1, 2, 3, 5]:
+            for add_n_fft in [30, 0, 30]:
+                mt_bandwidth = sfreq / float(n_samples) * (n_tapers + 1)
+                data_csd_mt = compute_epochs_csd(epochs_sin, mode='multitaper',
+                                                 tmin=None, tmax=tmax, fmin=0,
+                                                 fmax=np.inf,
+                                                 mt_bandwidth=mt_bandwidth,
+                                                 n_fft=n_fft)
+                mt_power_per_sample = np.abs(data_csd_mt.data[0, 0]) *\
+                    sfreq / data_csd_mt.n_fft
+                # The estimate of power gets worse for small time windows when
+                # more tapers are used
+                if n_tapers == 5 and tmax == 0.2:
+                    delta = 0.05
+                else:
+                    delta = 0.004
+                assert_true(abs(signal_power_per_sample -
+                                mt_power_per_sample) < delta)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_multitaper.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_multitaper.py
new file mode 100644
index 0000000..2c4bdbe
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_multitaper.py
@@ -0,0 +1,55 @@
+import numpy as np
+from nose.tools import assert_raises
+from numpy.testing import assert_array_almost_equal
+from distutils.version import LooseVersion
+
+from mne.time_frequency import dpss_windows, multitaper_psd
+from mne.utils import requires_nitime
+
+
+@requires_nitime
+def test_dpss_windows():
+    """ Test computation of DPSS windows """
+
+    import nitime as ni
+    N = 1000
+    half_nbw = 4
+    Kmax = int(2 * half_nbw)
+
+    dpss, eigs = dpss_windows(N, half_nbw, Kmax, low_bias=False)
+    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax)
+
+    assert_array_almost_equal(dpss, dpss_ni)
+    assert_array_almost_equal(eigs, eigs_ni)
+
+    dpss, eigs = dpss_windows(N, half_nbw, Kmax, interp_from=200,
+                              low_bias=False)
+    dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax,
+                                                  interp_from=200)
+
+    assert_array_almost_equal(dpss, dpss_ni)
+    assert_array_almost_equal(eigs, eigs_ni)
+
+
+@requires_nitime
+def test_multitaper_psd():
+    """ Test multi-taper PSD computation """
+
+    import nitime as ni
+    n_times = 1000
+    x = np.random.randn(5, n_times)
+    sfreq = 500
+    assert_raises(ValueError, multitaper_psd, x, sfreq, normalization='foo')
+    ni_5 = (LooseVersion(ni.__version__) >= LooseVersion('0.5'))
+    norm = 'full' if ni_5 else 'length'
+
+    for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
+        psd, freqs = multitaper_psd(x, sfreq, adaptive=adaptive, n_jobs=n_jobs,
+                                    normalization=norm)
+        freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(
+            x, sfreq, adaptive=adaptive, jackknife=False)
+
+        # for some reason nitime returns n_times + 1 frequency points
+        # causing the value at 0 to be different
+        assert_array_almost_equal(psd[:, 1:], psd_ni[:, 1:-1], decimal=3)
+        assert_array_almost_equal(freqs, freqs_ni[:-1])
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_psd.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_psd.py
new file mode 100644
index 0000000..ab90940
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_psd.py
@@ -0,0 +1,157 @@
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne import io, pick_types, Epochs, read_events
+from mne.utils import requires_version, slow_test
+from mne.time_frequency import compute_raw_psd, compute_epochs_psd
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
+
+
+@requires_version('scipy', '0.12')
+def test_psd():
+    """Test PSD estimation
+    """
+    raw = io.Raw(raw_fname)
+
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG magnetometers
+    picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
+                       exclude=exclude)
+
+    picks = picks[:2]
+
+    tmin, tmax = 0, 10  # use the first 10 s of data
+    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
+
+    n_fft = 128
+    psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, fmin=fmin,
+                                  fmax=fmax, proj=False, n_fft=n_fft,
+                                  picks=picks, n_jobs=1)
+    assert_true(psds.shape == (len(picks), len(freqs)))
+    assert_true(np.sum(freqs < 0) == 0)
+    assert_true(np.sum(psds < 0) == 0)
+
+    n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
+    psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                  fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1,
+                                  proj=False)
+    psds_proj, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                       fmin=fmin, fmax=fmax, n_fft=n_fft,
+                                       n_jobs=1, proj=True)
+
+    assert_array_almost_equal(psds, psds_proj)
+    assert_true(psds.shape == (len(picks), len(freqs)))
+    assert_true(np.sum(freqs < 0) == 0)
+    assert_true(np.sum(psds < 0) == 0)
+
+
+@requires_version('scipy', '0.12')
+def test_psd_epochs():
+    """Test PSD estimation on epochs
+    """
+    raw = io.Raw(raw_fname)
+
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG magnetometers
+    picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
+                       exclude=exclude)
+
+    picks = picks[:2]
+
+    n_fft = 512  # the FFT size (n_fft). Ideally a power of 2
+
+    tmin, tmax, event_id = -0.5, 0.5, 1
+    include = []
+    raw.info['bads'] += ['MEG 2443']  # bads
+
+    # picks MEG gradiometers
+    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
+
+    events = read_events(event_fname)
+
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0),
+                    reject=dict(grad=4000e-13, eog=150e-6), proj=False,
+                    preload=True)
+
+    tmin_full, tmax_full = -1, 1
+    epochs_full = Epochs(raw, events[:10], event_id, tmax=tmax_full,
+                         tmin=tmin_full, picks=picks,
+                         baseline=(None, 0),
+                         reject=dict(grad=4000e-13, eog=150e-6), proj=False,
+                         preload=True)
+
+    picks = pick_types(epochs.info, meg='grad', eeg=False, eog=True,
+                       stim=False, include=include, exclude='bads')
+    psds, freqs = compute_epochs_psd(epochs[:1], fmin=2, fmax=300,
+                                     n_fft=n_fft, picks=picks)
+
+    psds_t, freqs_t = compute_epochs_psd(epochs_full[:1], fmin=2, fmax=300,
+                                         tmin=tmin, tmax=tmax,
+                                         n_fft=n_fft, picks=picks)
+    # this one will fail if you add for example 0.1 to tmin
+    assert_array_almost_equal(psds, psds_t, 27)
+
+    psds_proj, _ = compute_epochs_psd(epochs[:1].apply_proj(), fmin=2,
+                                      fmax=300, n_fft=n_fft, picks=picks)
+
+    assert_array_almost_equal(psds, psds_proj)
+    assert_true(psds.shape == (1, len(picks), len(freqs)))
+    assert_true(np.sum(freqs < 0) == 0)
+    assert_true(np.sum(psds < 0) == 0)
+
+
+@slow_test
+@requires_version('scipy', '0.12')
+def test_compares_psd():
+    """Test PSD estimation on raw for plt.psd and scipy.signal.welch
+    """
+    raw = io.Raw(raw_fname)
+
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
+                       exclude=exclude)[:2]
+
+    tmin, tmax = 0, 10  # use the first 10 s of data
+    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
+    n_fft = 2048
+
+    # Compute psds with the new implementation using Welch
+    psds_welch, freqs_welch = compute_raw_psd(raw, tmin=tmin, tmax=tmax,
+                                              fmin=fmin, fmax=fmax,
+                                              proj=False, picks=picks,
+                                              n_fft=n_fft, n_jobs=1)
+
+    # Compute psds with plt.psd
+    start, stop = raw.time_as_index([tmin, tmax])
+    data, times = raw[picks, start:(stop + 1)]
+    from matplotlib.pyplot import psd
+    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
+    freqs_mpl = out[0][1]
+    psds_mpl = np.array([o[0] for o in out])
+
+    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
+    freqs_mpl = freqs_mpl[mask]
+    psds_mpl = psds_mpl[:, mask]
+
+    assert_array_almost_equal(psds_welch, psds_mpl)
+    assert_array_almost_equal(freqs_welch, freqs_mpl)
+
+    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
+    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
+
+    assert_true(np.sum(freqs_welch < 0) == 0)
+    assert_true(np.sum(freqs_mpl < 0) == 0)
+
+    assert_true(np.sum(psds_welch < 0) == 0)
+    assert_true(np.sum(psds_mpl < 0) == 0)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stft.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stft.py
new file mode 100644
index 0000000..bf91f39
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stft.py
@@ -0,0 +1,60 @@
+import numpy as np
+from scipy import linalg
+from numpy.testing import assert_almost_equal, assert_array_almost_equal
+from nose.tools import assert_true
+
+from mne.time_frequency.stft import stft, istft, stftfreq, stft_norm2
+
+
+def test_stft():
+    "Test stft and istft tight frame property"
+    sfreq = 1000.  # Hz
+    f = 7.  # Hz
+    for T in [253, 256]:  # try with even and odd numbers
+        # Test with low frequency signal
+        t = np.arange(T).astype(np.float)
+        x = np.sin(2 * np.pi * f * t / sfreq)
+        x = np.array([x, x + 1.])
+        wsize = 128
+        tstep = 4
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, Tx=T)
+
+        freqs = stftfreq(wsize, sfreq=1000)
+
+        max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
+
+        assert_true(X.shape[1] == len(freqs))
+        assert_true(np.all(freqs >= 0.))
+        assert_true(np.abs(max_freq - f) < 1.)
+        assert_array_almost_equal(x, xp, decimal=6)
+
+        # norm conservation thanks to tight frame property
+        assert_almost_equal(np.sqrt(stft_norm2(X)),
+                            [linalg.norm(xx) for xx in x], decimal=6)
+
+        # Test with random signal
+        x = np.random.randn(2, T)
+        wsize = 16
+        tstep = 8
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, Tx=T)
+
+        freqs = stftfreq(wsize, sfreq=1000)
+
+        max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
+
+        assert_true(X.shape[1] == len(freqs))
+        assert_true(np.all(freqs >= 0.))
+        assert_array_almost_equal(x, xp, decimal=6)
+
+        # norm conservation thanks to tight frame property
+        assert_almost_equal(np.sqrt(stft_norm2(X)),
+                            [linalg.norm(xx) for xx in x],
+                            decimal=6)
+
+        # Try with empty array
+        x = np.zeros((0, T))
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, T)
+        assert_true(xp.shape == x.shape)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stockwell.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stockwell.py
new file mode 100644
index 0000000..1d57963
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_stockwell.py
@@ -0,0 +1,96 @@
+# Authors : Denis A. Engemann <denis.engemann at gmail.com>
+#           Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License : BSD 3-clause
+
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal, assert_allclose
+from nose.tools import assert_true, assert_equal
+
+from scipy import fftpack
+
+from mne import io, read_events, Epochs, pick_types
+from mne.time_frequency._stockwell import (tfr_stockwell, _st,
+                                           _precompute_st_windows)
+from mne.time_frequency.tfr import AverageTFR
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+raw = io.Raw(raw_fname, add_eeg_ref=False)
+event_name = op.join(base_dir, 'test-eve.fif')
+events = read_events(event_name)
+picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                   ecg=True, eog=True, include=['STI 014'],
+                   exclude='bads')
+
+reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+flat = dict(grad=1e-15, mag=1e-15)
+
+
+def test_stockwell_core():
+    """Test stockwell transform"""
+    # adapted from
+    # http://vcs.ynic.york.ac.uk/docs/naf/intro/concepts/timefreq.html
+    sfreq = 1000.0  # make things easy to understand
+    dur = 0.5
+    onset, offset = 0.175, 0.275
+    n_samp = int(sfreq * dur)
+    t = np.arange(n_samp) / sfreq   # make an array for time
+    pulse_freq = 15.
+    pulse = np.cos(2. * np.pi * pulse_freq * t)
+    pulse[0:int(onset * sfreq)] = 0.        # Zero before our desired pulse
+    pulse[int(offset * sfreq):] = 0.         # and zero after our desired pulse
+
+    width = 0.5
+    freqs = fftpack.fftfreq(len(pulse), 1. / sfreq)
+    fmin, fmax = 1.0, 100.0
+    start_f, stop_f = [np.abs(freqs - f).argmin() for f in (fmin, fmax)]
+    W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
+
+    st_pulse = _st(pulse, start_f, W)
+    st_pulse = np.abs(st_pulse) ** 2
+    assert_equal(st_pulse.shape[-1], len(pulse))
+    st_max_freq = freqs[st_pulse.max(axis=1).argmax(axis=0)]  # max freq
+    assert_allclose(st_max_freq, pulse_freq, atol=1.0)
+    assert_true(onset < t[st_pulse.max(axis=0).argmax(axis=0)] < offset)
+
+    # test inversion to FFT, by averaging local spectra, see eq. 5 in
+    # Moukadem, A., Bouguila, Z., Ould Abdeslam, D. and Alain Dieterlen.
+    # "Stockwell transform optimization applied on the detection of split in
+    # heart sounds."
+
+    width = 1.0
+    start_f, stop_f = 0, len(pulse)
+    W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
+    y = _st(pulse, start_f, W)
+    # invert stockwell
+    y_inv = fftpack.ifft(np.sum(y, axis=1)).real
+    assert_array_almost_equal(pulse, y_inv)
+
+
+def test_stockwell_api():
+    """Test stockwell functions"""
+    epochs = Epochs(raw, events,  # XXX pick 2 has epochs of zeros.
+                    event_id, tmin, tmax, picks=[0, 1, 3], baseline=(None, 0))
+    for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
+        power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
+                                   return_itc=True)
+        if fmax is not None:
+            assert_true(power.freqs.max() <= fmax)
+        power_evoked = tfr_stockwell(epochs.average(), fmin=fmin, fmax=fmax,
+                                     return_itc=False)
+        # for multitaper these don't necessarily match, but they seem to
+        # for stockwell... if this fails, this maybe could be changed
+        # just to check the shape
+        assert_array_almost_equal(power_evoked.data, power.data)
+    assert_true(isinstance(power, AverageTFR))
+    assert_true(isinstance(itc, AverageTFR))
+    assert_equal(power.data.shape, itc.data.shape)
+    assert_true(itc.data.min() >= 0.0)
+    assert_true(itc.data.max() <= 1.0)
+    assert_true(np.log(power.data.max()) * 20 <= 0.0)
+    assert_true(np.log(power.data.max()) * 20 <= 0.0)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_tfr.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_tfr.py
new file mode 100644
index 0000000..ee7a734
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tests/test_tfr.py
@@ -0,0 +1,324 @@
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_false, assert_equal, assert_raises
+
+import mne
+from mne import io, Epochs, read_events, pick_types, create_info, EpochsArray
+from mne.utils import _TempDir, run_tests_if_main, slow_test, requires_h5py
+from mne.time_frequency import single_trial_power
+from mne.time_frequency.tfr import cwt_morlet, morlet, tfr_morlet
+from mne.time_frequency.tfr import _dpss_wavelet, tfr_multitaper
+from mne.time_frequency.tfr import AverageTFR, read_tfrs, write_tfrs
+
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                      'data', 'test-eve.fif')
+
+
+def test_morlet():
+    """Test morlet with and without zero mean"""
+    Wz = morlet(1000, [10], 2., zero_mean=True)
+    W = morlet(1000, [10], 2., zero_mean=False)
+
+    assert_true(np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
+    assert_true(np.abs(np.mean(np.real(W[0]))) > 1e-3)
+
+
+def test_time_frequency():
+    """Test time frequency transform (PSD and phase lock)
+    """
+    # Set parameters
+    event_id = 1
+    tmin = -0.2
+    tmax = 0.5
+
+    # Setup for reading the raw data
+    raw = io.Raw(raw_fname)
+    events = read_events(event_fname)
+
+    include = []
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = pick_types(raw.info, meg='grad', eeg=False,
+                       stim=False, include=include, exclude=exclude)
+
+    picks = picks[:2]
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    data = epochs.get_data()
+    times = epochs.times
+    nave = len(data)
+
+    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
+                            baseline=(None, 0))
+
+    freqs = np.arange(6, 20, 5)  # define frequencies of interest
+    n_cycles = freqs / 4.
+
+    # Test first with a single epoch
+    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
+                            use_fft=True, return_itc=True)
+    # Now compute evoked
+    evoked = epochs.average()
+    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
+                              return_itc=False)
+    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
+    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
+                            use_fft=True, return_itc=True)
+    # Test picks argument
+    power_picks, itc_picks = tfr_morlet(epochs_nopicks, freqs=freqs,
+                                        n_cycles=n_cycles, use_fft=True,
+                                        return_itc=True, picks=picks)
+    # the actual data arrays here are equivalent, too...
+    assert_array_almost_equal(power.data, power_picks.data)
+    assert_array_almost_equal(itc.data, itc_picks.data)
+    assert_array_almost_equal(power.data, power_evoked.data)
+
+    print(itc)  # test repr
+    print(itc.ch_names)  # test property
+    itc += power  # test add
+    itc -= power  # test sub
+
+    power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
+
+    assert_true('meg' in power)
+    assert_true('grad' in power)
+    assert_false('mag' in power)
+    assert_false('eeg' in power)
+
+    assert_equal(power.nave, nave)
+    assert_equal(itc.nave, nave)
+    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
+    assert_true(power.data.shape == itc.data.shape)
+    assert_true(np.sum(itc.data >= 1) == 0)
+    assert_true(np.sum(itc.data <= 0) == 0)
+
+    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
+                            return_itc=True)
+
+    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
+    assert_true(power.data.shape == itc.data.shape)
+    assert_true(np.sum(itc.data >= 1) == 0)
+    assert_true(np.sum(itc.data <= 0) == 0)
+
+    Fs = raw.info['sfreq']  # sampling in Hz
+    tfr = cwt_morlet(data[0], Fs, freqs, use_fft=True, n_cycles=2)
+    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
+
+    single_power = single_trial_power(data, Fs, freqs, use_fft=False,
+                                      n_cycles=2)
+
+    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
+
+    power_pick = power.pick_channels(power.ch_names[:10:2])
+    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
+    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
+    power_drop = power.drop_channels(power.ch_names[1:10:2])
+    assert_equal(power_drop.ch_names, power_pick.ch_names)
+    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
+
+    mne.equalize_channels([power_pick, power_drop])
+    assert_equal(power_pick.ch_names, power_drop.ch_names)
+    assert_equal(power_pick.data.shape, power_drop.data.shape)
+
+
+def test_dpsswavelet():
+    """Test DPSS wavelet"""
+    freqs = np.arange(5, 25, 3)
+    Ws = _dpss_wavelet(1000, freqs=freqs, n_cycles=freqs / 2.,
+                       time_bandwidth=4.0, zero_mean=True)
+
+    assert_true(len(Ws) == 3)  # 3 tapers expected
+
+    # Check that zero mean is true
+    assert_true(np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
+
+    assert_true(len(Ws[0]) == len(freqs))  # As many wavelets as asked for
+
+
+@slow_test
+def test_tfr_multitaper():
+    """Test tfr_multitaper"""
+    sfreq = 200.0
+    ch_names = ['SIM0001', 'SIM0002', 'SIM0003']
+    ch_types = ['grad', 'grad', 'grad']
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+
+    n_times = int(sfreq)  # one-second-long epochs
+    n_epochs = 3
+    seed = 42
+    rng = np.random.RandomState(seed)
+    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
+    t = np.arange(n_times, dtype=np.float) / sfreq
+    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
+    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
+    on_time = np.logical_and(t >= 0.45, t <= 0.55)
+    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
+    dat = noise + signal
+
+    reject = dict(grad=4000.)
+    events = np.empty((n_epochs, 3), int)
+    first_event_sample = 100
+    event_id = dict(sin50hz=1)
+    for k in range(n_epochs):
+        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
+
+    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
+                         reject=reject)
+
+    freqs = np.arange(5, 100, 3, dtype=np.float)
+    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
+                                time_bandwidth=4.0)
+    picks = np.arange(len(ch_names))
+    power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
+                                            n_cycles=freqs / 2.,
+                                            time_bandwidth=4.0, picks=picks)
+    power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
+                                  n_cycles=freqs / 2., time_bandwidth=4.0,
+                                  return_itc=False)
+    # test picks argument
+    assert_array_almost_equal(power.data, power_picks.data)
+    assert_array_almost_equal(itc.data, itc_picks.data)
+    # one is squared magnitude of the average (evoked) and
+    # the other is average of the squared magnitudes (epochs PSD)
+    # so values shouldn't match, but shapes should
+    assert_array_equal(power.data.shape, power_evoked.data.shape)
+    assert_raises(AssertionError, assert_array_almost_equal,
+                  power.data, power_evoked.data)
+
+    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
+    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
+    assert_true(tmax > 0.3 and tmax < 0.7)
+    assert_false(np.any(itc.data < 0.))
+    assert_true(fmax > 40 and fmax < 60)
+
+
+def test_crop():
+    """Test TFR cropping"""
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.crop(0.2, 0.3)
+    assert_array_equal(tfr.times, [0.2, 0.3])
+    assert_equal(tfr.data.shape[-1], 2)
+
+
+@requires_h5py
+def test_io():
+    """Test TFR IO capacities"""
+
+    tempdir = _TempDir()
+    fname = op.join(tempdir, 'test-tfr.h5')
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.save(fname)
+    tfr2 = read_tfrs(fname, condition='test')
+
+    assert_array_equal(tfr.data, tfr2.data)
+    assert_array_equal(tfr.times, tfr2.times)
+    assert_array_equal(tfr.freqs, tfr2.freqs)
+    assert_equal(tfr.comment, tfr2.comment)
+    assert_equal(tfr.nave, tfr2.nave)
+
+    assert_raises(IOError, tfr.save, fname)
+
+    tfr.comment = None
+    tfr.save(fname, overwrite=True)
+    assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
+    tfr.comment = 'test-A'
+    tfr2.comment = 'test-B'
+
+    fname = op.join(tempdir, 'test2-tfr.h5')
+    write_tfrs(fname, [tfr, tfr2])
+    tfr3 = read_tfrs(fname, condition='test-A')
+    assert_equal(tfr.comment, tfr3.comment)
+
+    assert_true(isinstance(tfr.info, io.meas_info.Info))
+
+    tfrs = read_tfrs(fname, condition=None)
+    assert_equal(len(tfrs), 2)
+    tfr4 = tfrs[1]
+    assert_equal(tfr2.comment, tfr4.comment)
+
+    assert_raises(ValueError, read_tfrs, fname, condition='nonono')
+
+
+def test_plot():
+    """Test TFR plotting."""
+    import matplotlib.pyplot as plt
+
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.plot([1, 2], title='title')
+    plt.close('all')
+    ax = plt.subplot2grid((2, 2), (0, 0))
+    ax2 = plt.subplot2grid((2, 2), (1, 1))
+    ax3 = plt.subplot2grid((2, 2), (0, 1))
+    tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
+    plt.close('all')
+
+    tfr.plot_topo(picks=[1, 2])
+    plt.close('all')
+
+
+def test_add_channels():
+    """Test tfr splitting / re-appending channel types
+    """
+    data = np.zeros((6, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(
+        ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
+        1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr_eeg = tfr.pick_types(meg=False, eeg=True, copy=True)
+    tfr_meg = tfr.pick_types(meg=True, copy=True)
+    tfr_stim = tfr.pick_types(meg=False, stim=True, copy=True)
+    tfr_eeg_meg = tfr.pick_types(meg=True, eeg=True, copy=True)
+    tfr_new = tfr_meg.add_channels([tfr_eeg, tfr_stim], copy=True)
+    assert_true(all(ch in tfr_new.ch_names
+                    for ch in tfr_stim.ch_names + tfr_meg.ch_names))
+    tfr_new = tfr_meg.add_channels([tfr_eeg], copy=True)
+
+    assert_true(all(ch in tfr_new.ch_names for ch in tfr.ch_names))
+    assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
+    assert_true(all(ch not in tfr_new.ch_names
+                    for ch in tfr_stim.ch_names))
+
+    # Now test errors
+    tfr_badsf = tfr_eeg.copy()
+    tfr_badsf.info['sfreq'] = 3.1415927
+    tfr_eeg = tfr_eeg.crop(-.1, .1)
+
+    assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
+    assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
+    assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
+    assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tfr.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tfr.py
new file mode 100644
index 0000000..4623877
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/time_frequency/tfr.py
@@ -0,0 +1,1376 @@
+"""A module which implements the time frequency estimation.
+
+Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
+"""
+# Authors : Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#           Hari Bharadwaj <hari at nmr.mgh.harvard.edu>
+#
+# License : BSD (3-clause)
+
+import warnings
+from math import sqrt
+from copy import deepcopy
+import numpy as np
+from scipy import linalg
+from scipy.fftpack import fftn, ifftn
+
+from ..fixes import partial
+from ..baseline import rescale
+from ..parallel import parallel_func
+from ..utils import logger, verbose, _time_mask
+from ..channels.channels import ContainsMixin, UpdateChannelsMixin
+from ..io.pick import pick_info, pick_types
+from ..io.meas_info import Info
+from ..utils import check_fname
+from .multitaper import dpss_windows
+from ..viz.utils import figure_nobar
+from ..externals.h5io import write_hdf5, read_hdf5
+
+
+def _get_data(inst, return_itc):
+    """Get data from Epochs or Evoked instance as epochs x ch x time"""
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+    if not isinstance(inst, (_BaseEpochs, Evoked)):
+        raise TypeError('inst must be Epochs or Evoked')
+    if isinstance(inst, _BaseEpochs):
+        data = inst.get_data()
+    else:
+        if return_itc:
+            raise ValueError('return_itc must be False for evoked data')
+        data = inst.data[np.newaxis, ...].copy()
+    return data
+
+
+def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
+    """Compute Wavelets for the given frequency range
+
+    Parameters
+    ----------
+    sfreq : float
+        Sampling frequency.
+    freqs : array
+        Frequencies of interest, shape (n_freqs,).
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    sigma : float | None
+        Controls the width of the wavelet, i.e., its temporal resolution.
+        If sigma is None the temporal resolution adapts with frequency, as
+        in any wavelet transform: the higher the frequency, the shorter
+        the wavelet. If sigma is fixed, the temporal resolution is fixed,
+        as in the short-time Fourier transform, and the number of
+        oscillations increases with frequency.
+    zero_mean : bool
+        Make sure the wavelet has zero mean.
+
+    Returns
+    -------
+    Ws : list of array
+        Wavelets time series
+
+    See Also
+    --------
+    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+                                    with Morlet wavelets
+    """
+    Ws = list()
+    n_cycles = np.atleast_1d(n_cycles)
+
+    if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
+        raise ValueError("n_cycles should be fixed or defined for "
+                         "each frequency.")
+    for k, f in enumerate(freqs):
+        if len(n_cycles) != 1:
+            this_n_cycles = n_cycles[k]
+        else:
+            this_n_cycles = n_cycles[0]
+        # fixed or scale-dependent window
+        if sigma is None:
+            sigma_t = this_n_cycles / (2.0 * np.pi * f)
+        else:
+            sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
+        # this scaling factor is proportional to (Tallon-Baudry 98):
+        # (sigma_t*sqrt(pi))^(-1/2);
+        t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
+        t = np.r_[-t[::-1], t[1:]]
+        oscillation = np.exp(2.0 * 1j * np.pi * f * t)
+        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
+        if zero_mean:  # to make it zero mean
+            real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
+            oscillation -= real_offset
+        W = oscillation * gaussian_envelope
+        W /= sqrt(0.5) * linalg.norm(W.ravel())
+        Ws.append(W)
+    return Ws
+
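+# Illustrative usage (editor's sketch, not part of the upstream module; the
+# 1000 Hz sampling rate and the frequency triplet are assumptions):
+#
+#     >>> Ws = morlet(1000., [3., 10., 30.], n_cycles=7)
+#     >>> len(Ws)  # one wavelet per requested frequency
+#     3
+#     >>> len(Ws[0]) > len(Ws[1]) > len(Ws[2])  # lower f -> longer wavelet
+#     True
+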
+
+def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
+                  zero_mean=False):
+    """Compute Wavelets for the given frequency range
+
+    Parameters
+    ----------
+    sfreq : float
+        Sampling Frequency.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+        Defaults to 7.
+    time_bandwidth : float
+        Time x Bandwidth product.
+        The number of good tapers (low-bias) is chosen automatically based on
+        this to equal floor(time_bandwidth - 1).
+        Default is 4.0, giving 3 good tapers.
+    zero_mean : bool
+        Make sure the wavelets are zero mean. Defaults to False.
+
+    Returns
+    -------
+    Ws : list of list of array
+        Wavelet time series, one inner list (one wavelet per frequency)
+        per taper.
+    """
+    Ws = list()
+    if time_bandwidth < 2.0:
+        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
+    n_taps = int(np.floor(time_bandwidth - 1))
+    n_cycles = np.atleast_1d(n_cycles)
+
+    if n_cycles.size != 1 and n_cycles.size != len(freqs):
+        raise ValueError("n_cycles should be fixed or defined for "
+                         "each frequency.")
+
+    for m in range(n_taps):
+        Wm = list()
+        for k, f in enumerate(freqs):
+            if len(n_cycles) != 1:
+                this_n_cycles = n_cycles[k]
+            else:
+                this_n_cycles = n_cycles[0]
+
+            t_win = this_n_cycles / float(f)
+            t = np.arange(0., t_win, 1.0 / sfreq)
+            # Making sure wavelets are centered before tapering
+            oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
+
+            # Get dpss tapers
+            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
+                                        n_taps)
+
+            Wk = oscillation * tapers[m]
+            if zero_mean:  # to make it zero mean
+                real_offset = Wk.mean()
+                Wk -= real_offset
+            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
+
+            Wm.append(Wk)
+
+        Ws.append(Wm)
+
+    return Ws
+
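+# Illustrative usage (editor's sketch, not upstream code): with the default
+# time_bandwidth=4.0 this returns floor(4.0 - 1) = 3 tapers, each holding one
+# wavelet per frequency (the sfreq and freqs below are assumptions):
+#
+#     >>> Ws = _dpss_wavelet(1000., [10., 20.], n_cycles=7, time_bandwidth=4.)
+#     >>> len(Ws), len(Ws[0])  # (n_tapers, n_freqs)
+#     (3, 2)
+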
+
+def _centered(arr, newsize):
+    """Aux Function to center data"""
+    # Return the center newsize portion of the array.
+    newsize = np.asarray(newsize)
+    currsize = np.array(arr.shape)
+    startind = (currsize - newsize) // 2
+    endind = startind + newsize
+    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
+    return arr[tuple(myslice)]
+
+
+def _cwt_fft(X, Ws, mode="same"):
+    """Compute cwt with fft based convolutions
+    Return a generator over signals.
+    """
+    X = np.asarray(X)
+
+    # Precompute wavelets for given frequency range to save time
+    n_signals, n_times = X.shape
+    n_freqs = len(Ws)
+
+    Ws_max_size = max(W.size for W in Ws)
+    size = n_times + Ws_max_size - 1
+    # Always use 2**n-sized FFT
+    fsize = 2 ** int(np.ceil(np.log2(size)))
+
+    # precompute FFTs of Ws
+    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
+    for i, W in enumerate(Ws):
+        if len(W) > n_times:
+            raise ValueError('Wavelet is too long for such a short signal. '
+                             'Reduce the number of cycles.')
+        fft_Ws[i] = fftn(W, [fsize])
+
+    for k, x in enumerate(X):
+        if mode == "full":
+            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
+        elif mode == "same" or mode == "valid":
+            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+
+        fft_x = fftn(x, [fsize])
+        for i, W in enumerate(Ws):
+            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
+            if mode == "valid":
+                sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) / 2
+                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
+            else:
+                tfr[i, :] = _centered(ret, n_times)
+        yield tfr
+
+
+def _cwt_convolve(X, Ws, mode='same'):
+    """Compute time freq decomposition with temporal convolutions
+    Return a generator over signals.
+    """
+    X = np.asarray(X)
+
+    n_signals, n_times = X.shape
+    n_freqs = len(Ws)
+
+    # Compute convolutions
+    for x in X:
+        tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+        for i, W in enumerate(Ws):
+            # Check the wavelet length before convolving; a too-long wavelet
+            # would otherwise yield an output longer than the signal.
+            if len(W) > len(x):
+                raise ValueError('Wavelet is too long for such a short '
+                                 'signal. Reduce the number of cycles.')
+            ret = np.convolve(x, W, mode=mode)
+            if mode == "valid":
+                sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) / 2
+                tfr[i, offset:(offset + sz)] = ret
+            else:
+                tfr[i] = ret
+        yield tfr
+
+
+def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
+    """Compute time freq decomposition with Morlet wavelets
+
+    This function operates directly on numpy arrays. Consider using
+    `tfr_morlet` to process `Epochs` or `Evoked` instances.
+
+    Parameters
+    ----------
+    X : array of shape [n_signals, n_times]
+        The signals (one per row).
+    sfreq : float
+        Sampling frequency.
+    freqs : array
+        Array of frequencies of interest.
+    use_fft : bool
+        If True, compute the convolution with FFT; otherwise use temporal
+        convolution.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+
+    Returns
+    -------
+    tfr : 3D array
+        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+    See Also
+    --------
+    tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
+    """
+    mode = 'same'
+    # mode = "valid"
+    n_signals, n_times = X.shape
+    n_frequencies = len(freqs)
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    if use_fft:
+        coefs = _cwt_fft(X, Ws, mode)
+    else:
+        coefs = _cwt_convolve(X, Ws, mode)
+
+    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+    for k, tfr in enumerate(coefs):
+        tfrs[k] = tfr
+
+    return tfrs
+
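+# Illustrative usage (editor's sketch; the toy 10 Hz sinusoid is an
+# assumption, not upstream code):
+#
+#     >>> import numpy as np
+#     >>> sfreq, freqs = 1000., np.array([10., 20.])
+#     >>> X = np.sin(2. * np.pi * 10. * np.arange(1000) / sfreq)[np.newaxis]
+#     >>> tfr = cwt_morlet(X, sfreq, freqs, n_cycles=2)
+#     >>> tfr.shape  # (n_signals, n_frequencies, n_times), complex dtype
+#     (1, 2, 1000)
+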
+
+def cwt(X, Ws, use_fft=True, mode='same', decim=1):
+    """Compute time freq decomposition with continuous wavelet transform
+
+    Parameters
+    ----------
+    X : array of shape [n_signals, n_times]
+        The signals (one per row).
+    Ws : list of array
+        Wavelets time series
+    use_fft : bool
+        Use FFT for convolutions
+    mode : 'same' | 'valid' | 'full'
+        Convention for convolution
+    decim : int
+        Temporal decimation factor
+
+    Returns
+    -------
+    tfr : 3D array
+        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+    See Also
+    --------
+    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+                                    with Morlet wavelets
+    """
+    n_signals, n_times = X[:, ::decim].shape
+    n_frequencies = len(Ws)
+
+    if use_fft:
+        coefs = _cwt_fft(X, Ws, mode)
+    else:
+        coefs = _cwt_convolve(X, Ws, mode)
+
+    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+    for k, tfr in enumerate(coefs):
+        tfrs[k] = tfr[..., ::decim]
+
+    return tfrs
+
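+# Illustrative usage (editor's sketch, reusing X, sfreq and freqs from the
+# sketch above): `cwt` takes precomputed wavelets, so the decomposition can
+# be split into two steps and the time axis decimated on the fly:
+#
+#     >>> Ws = morlet(sfreq, freqs, n_cycles=2)
+#     >>> cwt(X, Ws, use_fft=True, decim=2).shape
+#     (1, 2, 500)
+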
+
+def _time_frequency(X, Ws, use_fft, decim):
+    """Aux of time_frequency for parallel computing over channels
+    """
+    n_epochs, n_times = X.shape
+    n_times = n_times // decim + bool(n_times % decim)
+    n_frequencies = len(Ws)
+    psd = np.zeros((n_frequencies, n_times))  # PSD
+    plf = np.zeros((n_frequencies, n_times), np.complex)  # phase lock
+
+    mode = 'same'
+    if use_fft:
+        tfrs = _cwt_fft(X, Ws, mode)
+    else:
+        tfrs = _cwt_convolve(X, Ws, mode)
+
+    for tfr in tfrs:
+        tfr = tfr[:, ::decim]
+        tfr_abs = np.abs(tfr)
+        psd += tfr_abs ** 2
+        plf += tfr / tfr_abs
+    psd /= n_epochs
+    plf = np.abs(plf) / n_epochs
+    return psd, plf
+
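+# Note (editor's addition): for each epoch the unit phasor tfr / |tfr| is
+# accumulated, so `plf` ends up as |mean_k exp(1j * phase_k)|, the classic
+# inter-trial phase-locking value in [0, 1], while `psd` is the mean squared
+# magnitude across epochs.
+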
+
+@verbose
+def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
+                       baseline=None, baseline_mode='ratio', times=None,
+                       decim=1, n_jobs=1, zero_mean=False, verbose=None):
+    """Compute time-frequency power on single epochs
+
+    Parameters
+    ----------
+    data : array of shape [n_epochs, n_channels, n_times]
+        The epochs
+    sfreq : float
+        Sampling rate
+    frequencies : array-like
+        The frequencies
+    use_fft : bool
+        Use the FFT for convolutions or not.
+    n_cycles : float | array of float
+        Number of cycles  in the Morlet wavelet. Fixed number
+        or one per frequency.
+    baseline : None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    baseline_mode : None | 'ratio' | 'zscore'
+        Do baseline correction with ratio (power is divided by mean
+        power during baseline) or zscore (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline))
+    times : array
+        Required to define baseline
+    decim : int
+        Temporal decimation factor
+    n_jobs : int
+        The number of epochs to process at the same time
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : 4D array
+        Power estimate (Epochs x Channels x Frequencies x Timepoints).
+    """
+    mode = 'same'
+    n_frequencies = len(frequencies)
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
+
+    logger.info("Computing time-frequency power on single epochs...")
+
+    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
+                     dtype=np.float)
+
+    # Package arguments for `cwt` here to minimize omissions where only one of
+    # the two calls below is updated with new function arguments.
+    cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
+    if n_jobs == 1:
+        for k, e in enumerate(data):
+            x = cwt(e, **cwt_kw)
+            power[k] = (x * x.conj()).real
+    else:
+        # Precompute tf decompositions in parallel
+        tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
+        for k, tfr in enumerate(tfrs):
+            power[k] = (tfr * tfr.conj()).real
+
+    # Run baseline correction.  Be sure to decimate the times array as well if
+    # needed.
+    if times is not None:
+        times = times[::decim]
+    power = rescale(power, times, baseline, baseline_mode, copy=False)
+    return power
+
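+# Illustrative usage (editor's sketch; the random toy epochs are an
+# assumption):
+#
+#     >>> rng = np.random.RandomState(0)
+#     >>> data = rng.randn(5, 2, 1000)  # epochs x channels x times
+#     >>> power = single_trial_power(data, sfreq=1000.,
+#     ...                            frequencies=[10., 20.], n_cycles=2)
+#     >>> power.shape  # epochs x channels x frequencies x times
+#     (5, 2, 2, 1000)
+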
+
+def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
+                       decim=1, n_jobs=1, zero_mean=False):
+    """Compute time induced power and inter-trial phase-locking factor
+
+    The time frequency decomposition is done with Morlet wavelets
+
+    Parameters
+    ----------
+    data : array
+        3D array of shape [n_epochs, n_channels, n_times]
+    sfreq : float
+        Sampling frequency.
+    frequencies : array
+        Array of frequencies of interest
+    use_fft : bool
+        Compute transform with fft based convolutions or temporal
+        convolutions.
+    n_cycles : float | array of float
+        Number of cycles. Fixed number or one per frequency.
+    decim : int
+        Temporal decimation factor
+    n_jobs : int
+        The number of CPUs used in parallel. All CPUs are used in -1.
+        Requires joblib package.
+    zero_mean : bool
+        Make sure the wavelets are zero mean.
+
+    Returns
+    -------
+    power : 3D array
+        Induced power (Channels x Frequencies x Timepoints).
+        Squared amplitude of time-frequency coefficients.
+    phase_lock : 3D array
+        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
+    """
+    n_frequencies = len(frequencies)
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    psd = np.empty((n_channels, n_frequencies, n_times))
+    plf = np.empty((n_channels, n_frequencies, n_times))
+    # Separate to save memory for n_jobs=1
+    parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
+    psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
+                       for c in range(n_channels))
+    for c, (psd_c, plf_c) in enumerate(psd_plf):
+        psd[c, :, :], plf[c, :, :] = psd_c, plf_c
+    return psd, plf
+
+
+def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+                 baseline, vmin, vmax, dB):
+    """Aux Function to prepare tfr computation"""
+    from ..viz.utils import _setup_vmin_vmax
+
+    if mode is not None and baseline is not None:
+        logger.info("Applying baseline correction '%s' during %s" %
+                    (mode, baseline))
+        data = rescale(data.copy(), times, baseline, mode)
+
+    # crop time
+    itmin, itmax = None, None
+    idx = np.where(_time_mask(times, tmin, tmax))[0]
+    if tmin is not None:
+        itmin = idx[0]
+    if tmax is not None:
+        itmax = idx[-1] + 1
+
+    times = times[itmin:itmax]
+
+    # crop freqs
+    ifmin, ifmax = None, None
+    idx = np.where(_time_mask(freqs, fmin, fmax))[0]
+    if fmin is not None:
+        ifmin = idx[0]
+    if fmax is not None:
+        ifmax = idx[-1] + 1
+
+    freqs = freqs[ifmin:ifmax]
+
+    # crop data
+    data = data[:, ifmin:ifmax, itmin:itmax]
+
+    times *= 1e3
+    if dB:
+        data = 10 * np.log10((data * data.conj()).real)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+    return data, times, freqs, vmin, vmax
+
+
+class AverageTFR(ContainsMixin, UpdateChannelsMixin):
+    """Container for Time-Frequency data
+
+    Can, for example, store induced power at the sensor level or
+    inter-trial coherence.
+
+    Parameters
+    ----------
+    info : Info
+        The measurement info.
+    data : ndarray, shape (n_channels, n_freqs, n_times)
+        The data.
+    times : ndarray, shape (n_times,)
+        The time values in seconds.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    nave : int
+        The number of averaged TFRs.
+    comment : str | None
+        Comment on the data, e.g., the experimental condition.
+        Defaults to None.
+    method : str | None
+        Comment on the method used to compute the data, e.g., morlet wavelet.
+        Defaults to None.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    ch_names : list
+        The names of the channels.
+    """
+    @verbose
+    def __init__(self, info, data, times, freqs, nave, comment=None,
+                 method=None, verbose=None):
+        self.info = info
+        if data.ndim != 3:
+            raise ValueError('data should be 3d. Got %d.' % data.ndim)
+        n_channels, n_freqs, n_times = data.shape
+        if n_channels != len(info['chs']):
+            raise ValueError("Number of channels and data size don't match"
+                             " (%d != %d)." % (n_channels, len(info['chs'])))
+        if n_freqs != len(freqs):
+            raise ValueError("Number of frequencies and data size don't match"
+                             " (%d != %d)." % (n_freqs, len(freqs)))
+        if n_times != len(times):
+            raise ValueError("Number of times and data size don't match"
+                             " (%d != %d)." % (n_times, len(times)))
+        self.data = data
+        self.times = times
+        self.freqs = freqs
+        self.nave = nave
+        self.comment = comment
+        self.method = method
+
+    @property
+    def ch_names(self):
+        return self.info['ch_names']
+
+    def crop(self, tmin=None, tmax=None, copy=False):
+        """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        copy : bool
+            If False the TFR instance is cropped in place.
+        """
+        inst = self if not copy else self.copy()
+        mask = _time_mask(inst.times, tmin, tmax)
+        inst.times = inst.times[mask]
+        inst.data = inst.data[..., mask]
+        return inst
+
+    @verbose
+    def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
+             tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+             cmap='RdBu_r', dB=False, colorbar=True, show=True,
+             title=None, axes=None, layout=None, verbose=None):
+        """Plot TFRs in a topography with images
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of the channels to plot.
+        baseline : None (default) or tuple of length 2
+            The time interval to apply baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or zscore (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline)).
+            If None no baseline correction is applied.
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+        cmap : matplotlib colormap | str
+            The colormap to use. Defaults to 'RdBu_r'.
+        dB : bool
+            If True, 20*log10 is applied to the data to get dB.
+        colorbar : bool
+            If true, colorbar will be added to the plot. For user defined axes,
+            the colorbar cannot be drawn. Defaults to True.
+        show : bool
+            Call pyplot.show() at the end.
+        title : str | None
+            String for title. Defaults to None (blank/no title).
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channels. If instance of Axes,
+            there must be only one channel plotted.
+        layout : Layout | None
+            Layout instance specifying sensor positions. Used for interactive
+            plotting of topographies on rectangle selection. If possible, the
+            correct layout is inferred from the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
+        """
+        from ..viz.topo import _imshow_tfr
+        import matplotlib.pyplot as plt
+        times, freqs = self.times.copy(), self.freqs.copy()
+        info = self.info
+        data = self.data
+
+        info, data, picks = _prepare_picks(info, data, picks)
+        data = data[picks]
+        n_picks = len(data)  # count after picking so picks=None is handled
+
+        data, times, freqs, vmin, vmax = \
+            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+                         baseline, vmin, vmax, dB)
+
+        tmin, tmax = times[0], times[-1]
+        if isinstance(axes, plt.Axes):
+            axes = [axes]
+        if isinstance(axes, (list, np.ndarray)):
+            if len(axes) != n_picks:
+                raise RuntimeError('There must be an axes for each picked '
+                                   'channel.')
+
+        for idx in range(len(data)):
+            if axes is None:
+                fig = plt.figure()
+                ax = fig.add_subplot(111)
+            else:
+                ax = axes[idx]
+                fig = ax.get_figure()
+            onselect_callback = partial(self._onselect, baseline=baseline,
+                                        mode=mode, layout=layout)
+            _imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
+                        ylim=None, tfr=data[idx: idx + 1], freq=freqs,
+                        x_label='Time (ms)', y_label='Frequency (Hz)',
+                        colorbar=colorbar, picker=False, cmap=cmap)
+            if title:
+                fig.suptitle(title)
+            colorbar = False  # only one colorbar for multiple axes
+        if show:
+            plt.show()
+        return fig
+
+    def _onselect(self, eclick, erelease, baseline, mode, layout):
+        """Callback function called by rubber band selector in channel tfr."""
+        import matplotlib.pyplot as plt
+        from ..viz import plot_tfr_topomap
+        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
+            return
+        plt.ion()  # turn interactive mode on
+        tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5)  # ms to s
+        tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
+        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
+        fmax = round(max(eclick.ydata, erelease.ydata), 5)
+        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
+        tmax = min(self.times, key=lambda x: abs(x - tmax))
+        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
+        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
+        if tmin == tmax or fmin == fmax:
+            logger.info('The selected area is too small. '
+                        'Select a larger time-frequency window.')
+            return
+
+        types = list()
+        if 'eeg' in self:
+            types.append('eeg')
+        if 'mag' in self:
+            types.append('mag')
+        if 'grad' in self:
+            types.append('grad')
+        fig = figure_nobar()
+        fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
+                                                                         tmax,
+                                                                         fmin,
+                                                                         fmax),
+                     y=0.04)
+        for idx, ch_type in enumerate(types):
+            ax = plt.subplot(1, len(types), idx + 1)
+            plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
+                             fmin=fmin, fmax=fmax, layout=layout,
+                             baseline=baseline, mode=mode, cmap=None,
+                             title=ch_type, vmin=None, vmax=None,
+                             axes=ax)
+
+    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
+                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+                  layout=None, cmap='RdBu_r', title=None, dB=False,
+                  colorbar=True, layout_scale=0.945, show=True,
+                  border='none', fig_facecolor='k', font_color='w'):
+        """Plot TFRs in a topography with images
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            The indices of the channels to plot. If None all available
+            channels are displayed.
+        baseline : None (default) or tuple of length 2
+            The time interval to apply baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or zscore (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline)).
+            If None no baseline correction is applied.
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+        layout : Layout | None
+            Layout instance specifying sensor positions. If possible, the
+            correct layout is inferred from the data.
+        cmap : matplotlib colormap | str
+            The colormap to use. Defaults to 'RdBu_r'.
+        title : str
+            Title of the figure.
+        dB : bool
+            If True, 20*log10 is applied to the data to get dB.
+        colorbar : bool
+            If True, a colorbar will be added to the plot.
+        layout_scale : float
+            Scaling factor for adjusting the relative size of the layout
+            on the canvas.
+        show : bool
+            Call pyplot.show() at the end.
+        border : str
+            matplotlib borders style to be used for each sensor plot.
+        fig_facecolor : str | obj
+            The figure face color. Defaults to black.
+        font_color : str | obj
+            The color of tick labels in the colorbar. Defaults to white.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
+        """
+        from ..viz.topo import _imshow_tfr, _plot_topo
+        import matplotlib.pyplot as plt
+        times = self.times.copy()
+        freqs = self.freqs
+        data = self.data
+        info = self.info
+
+        info, data, picks = _prepare_picks(info, data, picks)
+        data = data[picks]
+
+        data, times, freqs, vmin, vmax = \
+            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
+                         mode, baseline, vmin, vmax, dB)
+
+        if layout is None:
+            from mne import find_layout
+            layout = find_layout(self.info)
+        onselect_callback = partial(self._onselect, baseline=baseline,
+                                    mode=mode, layout=layout)
+        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
+                         onselect=onselect_callback)
+
+        fig = _plot_topo(info=info, times=times, show_func=imshow,
+                         layout=layout, colorbar=colorbar, vmin=vmin,
+                         vmax=vmax, cmap=cmap, layout_scale=layout_scale,
+                         title=title, border=border, x_label='Time (ms)',
+                         y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
+                         font_color=font_color)
+
+        if show:
+            plt.show()
+
+        return fig
+
+    def _check_compat(self, tfr):
+        """checks that self and tfr have the same time-frequency ranges"""
+        assert np.all(tfr.times == self.times)
+        assert np.all(tfr.freqs == self.freqs)
+
+    def __add__(self, tfr):
+        self._check_compat(tfr)
+        out = self.copy()
+        out.data += tfr.data
+        return out
+
+    def __iadd__(self, tfr):
+        self._check_compat(tfr)
+        self.data += tfr.data
+        return self
+
+    def __sub__(self, tfr):
+        self._check_compat(tfr)
+        out = self.copy()
+        out.data -= tfr.data
+        return out
+
+    def __isub__(self, tfr):
+        self._check_compat(tfr)
+        self.data -= tfr.data
+        return self
+
+    def copy(self):
+        """Return a copy of the instance."""
+        return deepcopy(self)
+
+    def __repr__(self):
+        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
+        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
+        s += ", nave : %d" % self.nave
+        s += ', channels : %d' % self.data.shape[0]
+        return "<AverageTFR  |  %s>" % s
+
+    def apply_baseline(self, baseline, mode='mean'):
+        """Baseline correct the data
+
+        Parameters
+        ----------
+        baseline : tuple or list of length 2
+            The time interval to apply rescaling / baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or z-score (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+        """
+        self.data = rescale(self.data, self.times, baseline, mode, copy=False)
+
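+    # Illustrative usage (editor's note, not upstream code): with times
+    # starting before the stimulus, apply_baseline((None, 0), mode='logratio')
+    # expresses power as log10(power / mean prestimulus power) per channel.
+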
+    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
+                     ch_type=None, baseline=None, mode='mean',
+                     layout=None, vmin=None, vmax=None, cmap=None,
+                     sensors=True, colorbar=True, unit=None, res=64, size=2,
+                     cbar_fmt='%1.1e', show_names=False, title=None,
+                     axes=None, show=True, outlines='head', head_pos=None):
+        """Plot topographic maps of time-frequency intervals of TFR data
+
+        Parameters
+        ----------
+        tmin : None | float
+            The first time instant to display. If None the first time point
+            available is used.
+        tmax : None | float
+            The last time instant to display. If None the last time point
+            available is used.
+        fmin : None | float
+            The first frequency to display. If None the first frequency
+            available is used.
+        fmax : None | float
+            The last frequency to display. If None the last frequency
+            available is used.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        baseline : tuple or list of length 2
+            The time interval to apply rescaling / baseline correction.
+            If None do not apply it. If baseline is (a, b)
+            the interval is between "a (s)" and "b (s)".
+            If a is None the beginning of the data is used
+            and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) all the time
+            interval is used.
+        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+            Do baseline correction with ratio (power is divided by mean
+            power during baseline) or z-score (power is divided by standard
+            deviation of power during baseline after subtracting the mean,
+            power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout
+            file is inferred from the data; if no appropriate layout file was
+            found, the layout is automatically generated from the sensor
+            locations.
+        vmin : float | callable | None
+            The value specifying the lower bound of the color range. If None,
+            and vmax is None, -vmax is used. Otherwise np.min(data) is used,
+            or 0 if the data contain only positive values. If callable, the
+            output equals vmin(data). Defaults to None.
+        vmax : float | callable | None
+            The value specifying the upper bound of the color range. If None,
+            the maximum value is used. If callable, the output equals
+            vmax(data). Defaults to None.
+        cmap : matplotlib colormap | None
+            Colormap. If None and the plotted data is all positive, defaults to
+            'Reds'. If None and the data also contain negative values,
+            defaults to 'RdBu_r'. Defaults to None.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        axes : instance of Axes | None
+            The axes to plot to. If None the axes is defined automatically.
+        show : bool
+            Call pyplot.show() at the end.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
+        """
+        from ..viz import plot_tfr_topomap
+        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
+                                fmax=fmax, ch_type=ch_type, baseline=baseline,
+                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
+                                cmap=cmap, sensors=sensors, colorbar=colorbar,
+                                unit=unit, res=res, size=size,
+                                cbar_fmt=cbar_fmt, show_names=show_names,
+                                title=title, axes=axes, show=show,
+                                outlines=outlines, head_pos=head_pos)
+
+    def save(self, fname, overwrite=False):
+        """Save TFR object to hdf5 file
+
+        Parameters
+        ----------
+        fname : str
+            The file name, which should end with -tfr.h5.
+        overwrite : bool
+            If True, overwrite file (if it exists). Defaults to False.
+        """
+        write_tfrs(fname, self, overwrite=overwrite)
+
+
+def _prepare_write_tfr(tfr, condition):
+    """Aux function"""
+    return (condition, dict(times=tfr.times, freqs=tfr.freqs,
+                            data=tfr.data, info=tfr.info, nave=tfr.nave,
+                            comment=tfr.comment, method=tfr.method))
+
+
+def write_tfrs(fname, tfr, overwrite=False):
+    """Write a TFR dataset to hdf5.
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -tfr.h5.
+    tfr : AverageTFR instance, or list of AverageTFR instances
+        The TFR dataset, or list of TFR datasets, to save in one file.
+        Note. If .comment is None, a name will be generated on the fly,
+        based on the order in which the TFR objects are passed.
+    overwrite : bool
+        If True, overwrite file (if it exists). Defaults to False.
+
+    See Also
+    --------
+    read_tfrs
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    out = []
+    if not isinstance(tfr, (list, tuple)):
+        tfr = [tfr]
+    for ii, tfr_ in enumerate(tfr):
+        comment = ii if tfr_.comment is None else tfr_.comment
+        out.append(_prepare_write_tfr(tfr_, condition=comment))
+    write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
+
+
+def read_tfrs(fname, condition=None):
+    """
+    Read TFR datasets from hdf5 file.
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -tfr.h5 .
+    condition : int or str | list of int or str | None
+        The condition to load. If None, all conditions will be returned.
+        Defaults to None.
+
+    See Also
+    --------
+    write_tfrs
+
+    Returns
+    -------
+    tfrs : list of instances of AverageTFR | instance of AverageTFR
+        Depending on `condition` either the TFR object or a list of multiple
+        TFR objects.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+
+    check_fname(fname, 'tfr', ('-tfr.h5',))
+
+    logger.info('Reading %s ...' % fname)
+    tfr_data = read_hdf5(fname, title='mnepython')
+    for k, tfr in tfr_data:
+        tfr['info'] = Info(tfr['info'])
+
+    if condition is not None:
+        tfr_dict = dict(tfr_data)
+        if condition not in tfr_dict:
+            keys = ['%s' % k for k in tfr_dict]
+            raise ValueError('Cannot find condition ("{0}") in this file. '
+                             'I can give you "{1}""'
+                             .format(condition, " or ".join(keys)))
+        out = AverageTFR(**tfr_dict[condition])
+    else:
+        out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
+    return out
+
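+# Illustrative round trip (editor's sketch; `tfr` is assumed to be an
+# AverageTFR whose .comment is None, so its condition key is its index, 0):
+#
+#     >>> write_tfrs('power-tfr.h5', tfr, overwrite=True)
+#     >>> tfr2 = read_tfrs('power-tfr.h5', condition=0)
+#     >>> len(read_tfrs('power-tfr.h5'))  # condition=None -> list
+#     1
+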
+
+@verbose
+def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
+               return_itc=True, decim=1, n_jobs=1, picks=None, verbose=None):
+    """Compute Time-Frequency Representation (TFR) using Morlet wavelets
+
+    Parameters
+    ----------
+    inst : Epochs | Evoked
+        The epochs or evoked object.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+    use_fft : bool
+        If True, use FFT-based convolution; otherwise use temporal
+        convolution.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+        Must be ``False`` for evoked data.
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+    n_jobs : int
+        The number of jobs to run in parallel.
+    picks : array-like of int | None
+        The indices of the channels to decompose. If None all available
+        data channels are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : instance of AverageTFR
+        The averaged power.
+    itc : instance of AverageTFR
+        The intertrial coherence (ITC). Only returned if return_itc
+        is True.
+
+    See Also
+    --------
+    tfr_multitaper, tfr_stockwell
+    """
+    data = _get_data(inst, return_itc)
+    info = inst.info
+
+    info, data, picks = _prepare_picks(info, data, picks)
+    data = data[:, picks, :]
+
+    power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
+                                    frequencies=freqs,
+                                    n_cycles=n_cycles, n_jobs=n_jobs,
+                                    use_fft=use_fft, decim=decim,
+                                    zero_mean=True)
+    times = inst.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
+    if return_itc:
+        out = (out, AverageTFR(info, itc, times, freqs, nave,
+                               method='morlet-itc'))
+    return out
+
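+# Illustrative usage (editor's sketch; `epochs` is assumed to be an Epochs
+# instance such as the ones built in the tests above):
+#
+#     >>> freqs = np.arange(6., 30., 3.)
+#     >>> power, itc = tfr_morlet(epochs, freqs, n_cycles=freqs / 2.,
+#     ...                         use_fft=True, return_itc=True, decim=2)
+#     >>> power.data.shape == itc.data.shape
+#     True
+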
+
+def _prepare_picks(info, data, picks):
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+    if np.array_equal(picks, np.arange(len(data))):
+        picks = slice(None)
+    else:
+        info = pick_info(info, picks)
+
+    return info, data, picks
+
+
+@verbose
+def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
+                       use_fft=True, n_cycles=7, decim=1, n_jobs=1,
+                       zero_mean=True, verbose=None):
+    """Compute time induced power and inter-trial phase-locking factor
+
+    The time frequency decomposition is done with DPSS wavelets
+
+    Parameters
+    ----------
+    data : np.ndarray, shape (n_epochs, n_channels, n_times)
+        The input data.
+    sfreq : float
+        The sampling frequency in Hz.
+    frequencies : np.ndarray, shape (n_frequencies,)
+        Array of frequencies of interest.
+    time_bandwidth : float
+        Time x (Full) Bandwidth product.
+        The number of good tapers (low-bias) is chosen automatically based on
+        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
+    use_fft : bool
+        Compute transform with fft based convolutions or temporal
+        convolutions. Defaults to True.
+    n_cycles : float | np.ndarray, shape (n_frequencies,)
+        Number of cycles. Fixed number or one per frequency. Defaults to 7.
+    decim : int
+        Temporal decimation factor. Defaults to 1.
+    n_jobs : int
+        The number of CPUs used in parallel. If -1, all CPUs are used.
+        Requires the joblib package. Defaults to 1.
+    zero_mean : bool
+        Make sure the wavelets are zero mean. Defaults to True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
+        Induced power. Squared amplitude of time-frequency coefficients.
+    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
+        Phase locking value.
+    """
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
+    n_frequencies = len(frequencies)
+    logger.info('Multitaper time-frequency analysis for %d frequencies',
+                n_frequencies)
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
+                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
+    n_taps = len(Ws)
+    logger.info('Using %d tapers', n_taps)
+    n_times_wavelets = Ws[0][0].shape[0]
+    if n_times <= n_times_wavelets:
+        warnings.warn("Time windows are as long or longer than the epoch. "
+                      "Consider reducing n_cycles.")
+    psd = np.zeros((n_channels, n_frequencies, n_times))
+    itc = np.zeros((n_channels, n_frequencies, n_times))
+    parallel, my_time_frequency, _ = parallel_func(_time_frequency,
+                                                   n_jobs)
+    for m in range(n_taps):
+        psd_itc = parallel(my_time_frequency(data[:, c, :],
+                                             Ws[m], use_fft, decim)
+                           for c in range(n_channels))
+        for c, (psd_c, itc_c) in enumerate(psd_itc):
+            psd[c, :, :] += psd_c
+            itc[c, :, :] += itc_c
+    psd /= n_taps
+    itc /= n_taps
+    return psd, itc
+
+
+@verbose
+def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
+                   use_fft=True, return_itc=True, decim=1, n_jobs=1,
+                   picks=None, verbose=None):
+    """Compute Time-Frequency Representation (TFR) using DPSS wavelets
+
+    Parameters
+    ----------
+    inst : Epochs | Evoked
+        The epochs or evoked object.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+        The time-window length is thus T = n_cycles / freq.
+    time_bandwidth : float (optional)
+        Time x (Full) Bandwidth product. Should be >= 2.0.
+        Choose this along with n_cycles to get the desired frequency
+        resolution. The number of good tapers (least leakage from far-away
+        frequencies) is chosen automatically based on this to
+        floor(time_bandwidth - 1). Default is 4.0 (3 good tapers).
+        E.g., with freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
+        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
+    use_fft : bool
+        Whether to use FFT-based convolution. Defaults to True.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+        Defaults to True.
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+        Note that this is brute-force decimation; no anti-aliasing is done.
+        Defaults to 1.
+    n_jobs : int
+        The number of jobs to run in parallel. Defaults to 1.
+    picks : array-like of int | None
+        The indices of the channels to use. If None, all available
+        channels are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : AverageTFR
+        The averaged power.
+    itc : AverageTFR
+        The intertrial coherence (ITC). Only returned if return_itc
+        is True.
+
+    See Also
+    --------
+    tfr_morlet, tfr_stockwell
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+
+    data = _get_data(inst, return_itc)
+    info = inst.info
+
+    info, data, picks = _prepare_picks(info, data, picks)
+    data = data[:, picks, :]
+
+    power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
+                                    frequencies=freqs, n_cycles=n_cycles,
+                                    time_bandwidth=time_bandwidth,
+                                    use_fft=use_fft, decim=decim,
+                                    n_jobs=n_jobs, zero_mean=True,
+                                    verbose='INFO')
+    times = inst.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave,
+                     method='multitaper-power')
+    if return_itc:
+        out = (out, AverageTFR(info, itc, times, freqs, nave,
+                               method='multitaper-itc'))
+    return out
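+
+# A minimal usage sketch (``epochs`` stands for any loaded Epochs instance;
+# values are illustrative). With n_cycles = freqs / 2. every analysis window
+# lasts 0.5 s, so time_bandwidth=4.0 yields ~8 Hz of frequency smoothing:
+#
+#     freqs = np.arange(5., 50., 2.)
+#     power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
+#                                 time_bandwidth=4.0)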
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/transforms.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/transforms.py
new file mode 100644
index 0000000..fdc405c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/transforms.py
@@ -0,0 +1,689 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#          Christian Brodbeck <christianbrodbeck@nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+import glob
+import numpy as np
+from numpy import sin, cos
+from scipy import linalg
+
+from .io.constants import FIFF
+from .io.open import fiff_open
+from .io.tag import read_tag
+from .io.write import start_file, end_file, write_coord_trans
+from .utils import check_fname, logger, deprecated
+from .externals.six import string_types
+
+
+# transformation from anterior/left/superior coordinate system to
+# right/anterior/superior:
+als_ras_trans = np.array([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0],
+                          [0, 0, 0, 1]])
+# simultaneously convert [m] to [mm]:
+als_ras_trans_mm = als_ras_trans * [0.001, 0.001, 0.001, 1]
+
+
+_str_to_frame = dict(meg=FIFF.FIFFV_COORD_DEVICE,
+                     mri=FIFF.FIFFV_COORD_MRI,
+                     mri_voxel=FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
+                     head=FIFF.FIFFV_COORD_HEAD,
+                     mni_tal=FIFF.FIFFV_MNE_COORD_MNI_TAL,
+                     ras=FIFF.FIFFV_MNE_COORD_RAS,
+                     fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL,
+                     ctf_head=FIFF.FIFFV_MNE_COORD_CTF_HEAD,
+                     ctf_meg=FIFF.FIFFV_MNE_COORD_CTF_DEVICE)
+_frame_to_str = dict((val, key) for key, val in _str_to_frame.items())
+
+_verbose_frames = {FIFF.FIFFV_COORD_UNKNOWN: 'unknown',
+                   FIFF.FIFFV_COORD_DEVICE: 'MEG device',
+                   FIFF.FIFFV_COORD_ISOTRAK: 'isotrak',
+                   FIFF.FIFFV_COORD_HPI: 'hpi',
+                   FIFF.FIFFV_COORD_HEAD: 'head',
+                   FIFF.FIFFV_COORD_MRI: 'MRI (surface RAS)',
+                   FIFF.FIFFV_MNE_COORD_MRI_VOXEL: 'MRI voxel',
+                   FIFF.FIFFV_COORD_MRI_SLICE: 'MRI slice',
+                   FIFF.FIFFV_COORD_MRI_DISPLAY: 'MRI display',
+                   FIFF.FIFFV_MNE_COORD_CTF_DEVICE: 'CTF MEG device',
+                   FIFF.FIFFV_MNE_COORD_CTF_HEAD: 'CTF/4D/KIT head',
+                   FIFF.FIFFV_MNE_COORD_RAS: 'RAS (non-zero origin)',
+                   FIFF.FIFFV_MNE_COORD_MNI_TAL: 'MNI Talairach',
+                   FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ: 'Talairach (MNI z > 0)',
+                   FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ: 'Talairach (MNI z < 0)',
+                   -1: 'unknown'}
+
+
+def _to_const(cf):
+    """Helper to convert string or int coord frame into int"""
+    if isinstance(cf, string_types):
+        if cf not in _str_to_frame:
+            raise ValueError('Unknown cf %s' % cf)
+        cf = _str_to_frame[cf]
+    elif not isinstance(cf, int):
+        raise TypeError('cf must be str or int, not %s' % type(cf))
+    return cf
+
+
+class Transform(dict):
+    """A transform
+
+    Parameters
+    ----------
+    fro : str | int
+        The starting coordinate frame.
+    to : str | int
+        The ending coordinate frame.
+    trans : array-like, shape (4, 4)
+        The transformation matrix.
+    """
+    def __init__(self, fro, to, trans):
+        super(Transform, self).__init__()
+        # we could add some better sanity checks here
+        fro = _to_const(fro)
+        to = _to_const(to)
+        trans = np.asarray(trans, dtype=np.float64)
+        if trans.shape != (4, 4):
+            raise ValueError('Transformation must be shape (4, 4) not %s'
+                             % (trans.shape,))
+        self['from'] = fro
+        self['to'] = to
+        self['trans'] = trans
+
+    def __repr__(self):
+        return ('<Transform  |  %s->%s>\n%s'
+                % (_coord_frame_name(self['from']),
+                   _coord_frame_name(self['to']), self['trans']))
+
+    @property
+    def from_str(self):
+        return _coord_frame_name(self['from'])
+
+    @property
+    def to_str(self):
+        return _coord_frame_name(self['to'])
+
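+# A minimal sketch of building and inspecting a Transform (an identity
+# mri -> head mapping, purely illustrative):
+#
+#     t = Transform('mri', 'head', np.eye(4))
+#     t.from_str, t.to_str   # -> ('MRI (surface RAS)', 'head')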
+
+def _coord_frame_name(cframe):
+    """Map integers to human-readable (verbose) names"""
+    return _verbose_frames.get(int(cframe), 'unknown')
+
+
+def _print_coord_trans(t, prefix='Coordinate transformation: '):
+    logger.info(prefix + '%s -> %s'
+                % (_coord_frame_name(t['from']), _coord_frame_name(t['to'])))
+    for ti, tt in enumerate(t['trans']):
+        scale = 1000. if ti != 3 else 1.
+        text = ' mm' if ti != 3 else ''
+        logger.info('    % 8.6f % 8.6f % 8.6f    %7.2f%s' %
+                    (tt[0], tt[1], tt[2], scale * tt[3], text))
+
+
+def _find_trans(subject, subjects_dir=None):
+    if subject is None:
+        if 'SUBJECT' in os.environ:
+            subject = os.environ['SUBJECT']
+        else:
+            raise ValueError('SUBJECT environment variable not set')
+
+    trans_fnames = glob.glob(os.path.join(subjects_dir, subject,
+                                          '*-trans.fif'))
+    if len(trans_fnames) < 1:
+        raise RuntimeError('Could not find the transformation for '
+                           '{subject}'.format(subject=subject))
+    elif len(trans_fnames) > 1:
+        raise RuntimeError('Found multiple transformations for '
+                           '{subject}'.format(subject=subject))
+    return trans_fnames[0]
+
+
+def apply_trans(trans, pts, move=True):
+    """Apply a transform matrix to an array of points
+
+    Parameters
+    ----------
+    trans : array, shape = (4, 4) | instance of Transform
+        Transform matrix.
+    pts : array, shape = (3,) | (n, 3)
+        Array with coordinates for one or n points.
+    move : bool
+        If True (default), apply translation.
+
+    Returns
+    -------
+    transformed_pts : shape = (3,) | (n, 3)
+        Transformed point(s).
+    """
+    if isinstance(trans, dict):
+        trans = trans['trans']
+    trans = np.asarray(trans)
+    pts = np.asarray(pts)
+    if pts.size == 0:
+        return pts.copy()
+
+    # apply rotation & scale
+    out_pts = np.dot(pts, trans[:3, :3].T)
+    # apply translation
+    if move is True:
+        transl = trans[:3, 3]
+        if np.any(transl != 0):
+            out_pts += transl
+
+    return out_pts
+
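+# A quick sketch: translate one point by 5 cm along x, using the
+# ``translation`` helper defined further down in this module:
+#
+#     apply_trans(translation(x=0.05), [0.01, 0., 0.])
+#     # -> array([ 0.06,  0.  ,  0.  ])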
+
+def rotation(x=0, y=0, z=0):
+    """Create an array with a 4 dimensional rotation matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Rotation around the origin (in rad).
+
+    Returns
+    -------
+    r : array, shape = (4, 4)
+        The rotation matrix.
+    """
+    cos_x = cos(x)
+    cos_y = cos(y)
+    cos_z = cos(z)
+    sin_x = sin(x)
+    sin_y = sin(y)
+    sin_z = sin(z)
+    r = np.array([[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z,
+                   sin_x * sin_z + cos_x * sin_y * cos_z, 0],
+                  [cos_y * sin_z, cos_x * cos_z + sin_x * sin_y * sin_z,
+                   - sin_x * cos_z + cos_x * sin_y * sin_z, 0],
+                  [-sin_y, sin_x * cos_y, cos_x * cos_y, 0],
+                  [0, 0, 0, 1]], dtype=float)
+    return r
+
+
+def rotation3d(x=0, y=0, z=0):
+    """Create an array with a 3 dimensional rotation matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Rotation around the origin (in rad).
+
+    Returns
+    -------
+    r : array, shape = (3, 3)
+        The rotation matrix.
+    """
+    cos_x = cos(x)
+    cos_y = cos(y)
+    cos_z = cos(z)
+    sin_x = sin(x)
+    sin_y = sin(y)
+    sin_z = sin(z)
+    r = np.array([[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z,
+                   sin_x * sin_z + cos_x * sin_y * cos_z],
+                  [cos_y * sin_z, cos_x * cos_z + sin_x * sin_y * sin_z,
+                   - sin_x * cos_z + cos_x * sin_y * sin_z],
+                  [-sin_y, sin_x * cos_y, cos_x * cos_y]], dtype=float)
+    return r
+
+
+def rotation_angles(m):
+    """Find rotation angles from a transformation matrix
+
+    Parameters
+    ----------
+    m : array, shape >= (3, 3)
+        Rotation matrix. Only the top left 3 x 3 partition is accessed.
+
+    Returns
+    -------
+    x, y, z : float
+        Rotation around x, y and z axes.
+    """
+    x = np.arctan2(m[2, 1], m[2, 2])
+    c2 = np.sqrt(m[0, 0] ** 2 + m[1, 0] ** 2)
+    y = np.arctan2(-m[2, 0], c2)
+    s1 = np.sin(x)
+    c1 = np.cos(x)
+    z = np.arctan2(s1 * m[0, 2] - c1 * m[0, 1], c1 * m[1, 1] - s1 * m[1, 2])
+    return x, y, z
+
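+# A round-trip sketch: the angles used to build a rotation are recovered
+# (values in radians, illustrative):
+#
+#     m = rotation3d(x=0.1, y=0.2, z=0.3)
+#     rotation_angles(m)   # -> approximately (0.1, 0.2, 0.3)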
+
+def scaling(x=1, y=1, z=1):
+    """Create an array with a scaling matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Scaling factors.
+
+    Returns
+    -------
+    s : array, shape = (4, 4)
+        The scaling matrix.
+    """
+    s = np.array([[x, 0, 0, 0],
+                  [0, y, 0, 0],
+                  [0, 0, z, 0],
+                  [0, 0, 0, 1]], dtype=float)
+    return s
+
+
+def translation(x=0, y=0, z=0):
+    """Create an array with a translation matrix
+
+    Parameters
+    ----------
+    x, y, z : scalar
+        Translation parameters.
+
+    Returns
+    -------
+    m : array, shape = (4, 4)
+        The translation matrix.
+    """
+    m = np.array([[1, 0, 0, x],
+                  [0, 1, 0, y],
+                  [0, 0, 1, z],
+                  [0, 0, 0, 1]], dtype=float)
+    return m
+
+
+def _ensure_trans(trans, fro='mri', to='head'):
+    """Helper to ensure we have the proper transform"""
+    if isinstance(fro, string_types):
+        from_str = fro
+        from_const = _str_to_frame[fro]
+    else:
+        from_str = _frame_to_str[fro]
+        from_const = fro
+    del fro
+    if isinstance(to, string_types):
+        to_str = to
+        to_const = _str_to_frame[to]
+    else:
+        to_str = _frame_to_str[to]
+        to_const = to
+    del to
+    err_str = 'trans must go %s<->%s, provided' % (from_str, to_str)
+    if trans is None:
+        raise ValueError('%s None' % err_str)
+    if set([trans['from'], trans['to']]) != set([from_const, to_const]):
+        raise ValueError('%s trans is %s->%s' % (err_str,
+                                                 _frame_to_str[trans['from']],
+                                                 _frame_to_str[trans['to']]))
+    if trans['from'] != from_const:
+        trans = invert_transform(trans)
+    return trans
+
+
+def _get_mri_head_t(trans):
+    """Get mri_head_t (from=mri, to=head) from mri filename"""
+    if isinstance(trans, string_types):
+        if not op.isfile(trans):
+            raise IOError('trans file "%s" not found' % trans)
+        if op.splitext(trans)[1] in ['.fif', '.gz']:
+            mri_head_t = read_trans(trans)
+        else:
+            # convert "-trans.txt" to "-trans.fif" mri-type equivalent
+            t = np.genfromtxt(trans)
+            if t.ndim != 2 or t.shape != (4, 4):
+                raise RuntimeError('File "%s" did not have 4x4 entries'
+                                   % trans)
+            mri_head_t = Transform('head', 'mri', t)
+    elif isinstance(trans, dict):
+        mri_head_t = trans
+        trans = 'dict'
+    elif trans is None:
+        mri_head_t = Transform('head', 'mri', np.eye(4))
+        trans = 'identity'
+    else:
+        raise ValueError('trans type %s not known, must be str, dict, or None'
+                         % type(trans))
+    # it's usually a head->MRI transform, so we probably need to invert it
+    mri_head_t = _ensure_trans(mri_head_t, 'mri', 'head')
+    return mri_head_t, trans
+
+
+def combine_transforms(t_first, t_second, fro, to):
+    """Combine two transforms
+
+    Parameters
+    ----------
+    t_first : dict
+        First transform.
+    t_second : dict
+        Second transform.
+    fro : int
+        From coordinate frame.
+    to : int
+        To coordinate frame.
+
+    Returns
+    -------
+    trans : dict
+        Combined transformation.
+    """
+    fro = _to_const(fro)
+    to = _to_const(to)
+    if t_first['from'] != fro:
+        raise RuntimeError('From mismatch: %s ("%s") != %s ("%s")'
+                           % (t_first['from'],
+                              _coord_frame_name(t_first['from']),
+                              fro, _coord_frame_name(fro)))
+    if t_first['to'] != t_second['from']:
+        raise RuntimeError('Transform mismatch: t1["to"] = %s ("%s"), '
+                           't2["from"] = %s ("%s")'
+                           % (t_first['to'], _coord_frame_name(t_first['to']),
+                              t_second['from'],
+                              _coord_frame_name(t_second['from'])))
+    if t_second['to'] != to:
+        raise RuntimeError('To mismatch: %s ("%s") != %s ("%s")'
+                           % (t_second['to'],
+                              _coord_frame_name(t_second['to']),
+                              to, _coord_frame_name(to)))
+    return Transform(fro, to, np.dot(t_second['trans'], t_first['trans']))
+
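+# A minimal sketch: chain an mri -> head transform with a head -> meg one
+# (identity matrices, illustrative only):
+#
+#     t1 = Transform('mri', 'head', np.eye(4))
+#     t2 = Transform('head', 'meg', np.eye(4))
+#     t_mri_meg = combine_transforms(t1, t2, 'mri', 'meg')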
+
+def read_trans(fname):
+    """Read a -trans.fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file.
+
+    Returns
+    -------
+    trans : dict
+        The transformation dictionary from the fif file.
+
+    See Also
+    --------
+    write_trans
+    Transform
+    """
+    fid, tree, directory = fiff_open(fname)
+
+    with fid:
+        for t in directory:
+            if t.kind == FIFF.FIFF_COORD_TRANS:
+                tag = read_tag(fid, t.pos)
+                break
+        else:
+            raise IOError('This does not seem to be a -trans.fif file.')
+
+    trans = tag.data
+    return trans
+
+
+def write_trans(fname, trans):
+    """Write a -trans.fif file
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end in '-trans.fif'.
+    trans : dict
+        Trans file data, as returned by read_trans.
+
+    See Also
+    --------
+    read_trans
+    """
+    check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz'))
+    fid = start_file(fname)
+    write_coord_trans(fid, trans)
+    end_file(fid)
+
+
+def invert_transform(trans):
+    """Invert a transformation between coordinate systems
+
+    Parameters
+    ----------
+    trans : dict
+        Transform to invert.
+
+    Returns
+    -------
+    inv_trans : dict
+        Inverse transform.
+    """
+    return Transform(trans['to'], trans['from'], linalg.inv(trans['trans']))
+
+
+def transform_surface_to(surf, dest, trans):
+    """Transform surface to the desired coordinate system
+
+    Parameters
+    ----------
+    surf : dict
+        Surface.
+    dest : 'meg' | 'mri' | 'head' | int
+        Destination coordinate system. Can be an integer for using
+        FIFF types.
+    trans : dict
+        Transformation.
+
+    Returns
+    -------
+    res : dict
+        Transformed source space. Data are modified in-place.
+    """
+    if isinstance(dest, string_types):
+        if dest not in _str_to_frame:
+            raise KeyError('dest must be one of %s, not "%s"'
+                           % (list(_str_to_frame.keys()), dest))
+        dest = _str_to_frame[dest]  # convert to integer
+    if surf['coord_frame'] == dest:
+        return surf
+
+    trans = _ensure_trans(trans, int(surf['coord_frame']), dest)
+    surf['coord_frame'] = dest
+    surf['rr'] = apply_trans(trans, surf['rr'])
+    surf['nn'] = apply_trans(trans, surf['nn'], move=False)
+    return surf
+
+
+@deprecated('transform_coordinates is deprecated and will be removed in v0.11')
+def transform_coordinates(filename, pos, orig, dest):
+    """Transform coordinates between various MRI-related coordinate frames
+
+    Parameters
+    ----------
+    filename : str
+        Name of a fif file containing the coordinate transformations.
+        This file can be conveniently created with mne_collect_transforms
+        or ``collect_transforms``.
+    pos : array, shape (N, 3)
+        Array of locations to transform (in meters).
+    orig : 'meg' | 'mri'
+        Coordinate frame of the above locations.
+        'meg' is MEG head coordinates
+        'mri' surface RAS coordinates
+    dest : 'meg' | 'mri' | 'fs_tal' | 'mni_tal'
+        Coordinate frame of the result.
+        'mni_tal' is MNI Talairach
+        'fs_tal' is FreeSurfer Talairach
+
+    Returns
+    -------
+    trans_pos : array, shape (N, 3)
+        The transformed locations.
+
+    Examples
+    --------
+    transform_coordinates('all-trans.fif', np.eye(3), 'meg', 'fs_tal')
+    transform_coordinates('all-trans.fif', np.eye(3), 'mri', 'mni_tal')
+    """
+    #   Read the fif file containing all necessary transformations
+    fid, tree, directory = fiff_open(filename)
+
+    coord_names = dict(mri=FIFF.FIFFV_COORD_MRI,
+                       meg=FIFF.FIFFV_COORD_HEAD,
+                       mni_tal=FIFF.FIFFV_MNE_COORD_MNI_TAL,
+                       fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL)
+
+    orig = coord_names[orig]
+    dest = coord_names[dest]
+
+    T0 = T1 = T2 = T3plus = T3minus = None
+    for d in directory:
+        if d.kind == FIFF.FIFF_COORD_TRANS:
+            tag = read_tag(fid, d.pos)
+            trans = tag.data
+            if (trans['from'] == FIFF.FIFFV_COORD_MRI and
+                    trans['to'] == FIFF.FIFFV_COORD_HEAD):
+                T0 = invert_transform(trans)
+            elif (trans['from'] == FIFF.FIFFV_COORD_MRI and
+                  trans['to'] == FIFF.FIFFV_MNE_COORD_RAS):
+                T1 = trans
+            elif (trans['from'] == FIFF.FIFFV_MNE_COORD_RAS and
+                  trans['to'] == FIFF.FIFFV_MNE_COORD_MNI_TAL):
+                T2 = trans
+            elif trans['from'] == FIFF.FIFFV_MNE_COORD_MNI_TAL:
+                if trans['to'] == FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ:
+                    T3plus = trans
+                elif trans['to'] == FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ:
+                    T3minus = trans
+    fid.close()
+    #
+    #   Check we have everything we need
+    #
+    if ((orig == FIFF.FIFFV_COORD_HEAD and T0 is None) or (T1 is None) or
+            (T2 is None) or (dest == FIFF.FIFFV_MNE_COORD_FS_TAL and
+                             ((T3plus is None) or (T3minus is None)))):
+        raise ValueError('All required coordinate transforms not found')
+
+    #
+    #   Go ahead and transform the data
+    #
+    if pos.shape[1] != 3:
+        raise ValueError('Coordinates must be given in a N x 3 array')
+
+    if dest == orig:
+        trans_pos = pos.copy()
+    else:
+        n_points = pos.shape[0]
+        pos = np.c_[pos, np.ones(n_points)].T
+        if orig == FIFF.FIFFV_COORD_HEAD:
+            pos = np.dot(T0['trans'], pos)
+        elif orig != FIFF.FIFFV_COORD_MRI:
+            raise ValueError('Input data must be in MEG head or surface RAS '
+                             'coordinates')
+
+        if dest == FIFF.FIFFV_COORD_HEAD:
+            pos = np.dot(linalg.inv(T0['trans']), pos)
+        elif dest != FIFF.FIFFV_COORD_MRI:
+            pos = np.dot(np.dot(T2['trans'], T1['trans']), pos)
+            if dest != FIFF.FIFFV_MNE_COORD_MNI_TAL:
+                if dest == FIFF.FIFFV_MNE_COORD_FS_TAL:
+                    for k in range(n_points):
+                        if pos[2, k] > 0:
+                            pos[:, k] = np.dot(T3plus['trans'], pos[:, k])
+                        else:
+                            pos[:, k] = np.dot(T3minus['trans'], pos[:, k])
+                else:
+                    raise ValueError('Illegal choice for the output '
+                                     'coordinates')
+
+        trans_pos = pos[:3, :].T
+
+    return trans_pos
+
+
+def get_ras_to_neuromag_trans(nasion, lpa, rpa):
+    """Construct a transformation matrix to the MNE head coordinate system
+
+    Construct a transformation matrix from an arbitrary RAS coordinate system
+    to the MNE head coordinate system, in which the x axis passes through the
+    two preauricular points, and the y axis passes through the nasion and is
+    normal to the x axis. (see mne manual, pg. 97)
+
+    Parameters
+    ----------
+    nasion : array_like, shape (3,)
+        Nasion point coordinate.
+    lpa : array_like, shape (3,)
+        Left peri-auricular point coordinate.
+    rpa : array_like, shape (3,)
+        Right peri-auricular point coordinate.
+
+    Returns
+    -------
+    trans : numpy.array, shape = (4, 4)
+        Transformation matrix to MNE head space.
+    """
+    # check input args
+    nasion = np.asarray(nasion)
+    lpa = np.asarray(lpa)
+    rpa = np.asarray(rpa)
+    for pt in (nasion, lpa, rpa):
+        if pt.ndim != 1 or len(pt) != 3:
+            raise ValueError("Points have to be provided as one dimensional "
+                             "arrays of length 3.")
+
+    right = rpa - lpa
+    right_unit = right / linalg.norm(right)
+
+    origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
+
+    anterior = nasion - origin
+    anterior_unit = anterior / linalg.norm(anterior)
+
+    superior_unit = np.cross(right_unit, anterior_unit)
+
+    x, y, z = -origin
+    origin_trans = translation(x, y, z)
+
+    trans_l = np.vstack((right_unit, anterior_unit, superior_unit, [0, 0, 0]))
+    trans_r = np.reshape([0, 0, 0, 1], (4, 1))
+    rot_trans = np.hstack((trans_l, trans_r))
+
+    trans = np.dot(rot_trans, origin_trans)
+    return trans
+
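+# A minimal sketch with idealized fiducials that already lie on the target
+# axes (coordinates in meters, purely illustrative), which should yield a
+# transform close to the 4 x 4 identity:
+#
+#     get_ras_to_neuromag_trans(nasion=[0., 0.1, 0.],
+#                               lpa=[-0.08, 0., 0.],
+#                               rpa=[0.08, 0., 0.])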
+
+@deprecated('collect_transforms is deprecated and will be removed in v0.11')
+def collect_transforms(fname, xforms):
+    """Collect a set of transforms in a single FIFF file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to save to.
+    xforms : list of dict
+        List of transformations.
+    """
+    check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz'))
+    with start_file(fname) as fid:
+        for xform in xforms:
+            write_coord_trans(fid, xform)
+        end_file(fid)
+
+
+def _sphere_to_cartesian(theta, phi, r):
+    """Transform spherical coordinates to cartesian"""
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    return x, y, z
+
+
+def _polar_to_cartesian(theta, r):
+    """Transform polar coordinates to cartesian"""
+    x = r * np.cos(theta)
+    y = r * np.sin(theta)
+    return x, y
+
+
+def _cartesian_to_sphere(x, y, z):
+    """Transform cartesian coordinates to spherical"""
+    hypotxy = np.hypot(x, y)
+    r = np.hypot(hypotxy, z)
+    elev = np.arctan2(z, hypotxy)
+    az = np.arctan2(y, x)
+    return az, elev, r
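+
+# A round-trip sketch for the spherical helpers (angles in radians):
+#
+#     az, elev, r = _cartesian_to_sphere(1., 1., 1.)
+#     _sphere_to_cartesian(az, elev, r)   # -> (~1.0, ~1.0, ~1.0)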
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/utils.py
new file mode 100644
index 0000000..43bcb3a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/utils.py
@@ -0,0 +1,1892 @@
+# -*- coding: utf-8 -*-
+"""Some utility functions"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import warnings
+import logging
+import time
+from distutils.version import LooseVersion
+import os
+import os.path as op
+from functools import wraps
+import inspect
+from string import Formatter
+import subprocess
+import sys
+import tempfile
+import shutil
+from shutil import rmtree
+from math import log, ceil
+import json
+import ftplib
+import hashlib
+from functools import partial
+import atexit
+
+import numpy as np
+from scipy import linalg, sparse
+
+from .externals.six.moves import urllib
+from .externals.six import string_types, StringIO, BytesIO
+from .externals.decorator import decorator
+
+from .fixes import isclose
+
+logger = logging.getLogger('mne')  # one selection here used across mne-python
+logger.propagate = False  # don't propagate (in case of multiple imports)
+
+
+def _memory_usage(*args, **kwargs):
+    if isinstance(args[0], tuple):
+        args[0][0](*args[0][1], **args[0][2])
+    elif not isinstance(args[0], int):  # can be -1 for current use
+        args[0]()
+    return [-1]
+
+try:
+    from memory_profiler import memory_usage
+except ImportError:
+    memory_usage = _memory_usage
+
+
+def nottest(f):
+    """Decorator to mark a function as not a test"""
+    f.__test__ = False
+    return f
+
+
+###############################################################################
+# RANDOM UTILITIES
+
+def _get_call_line(in_verbose=False):
+    """Helper to get the call line from within a function"""
+    # XXX Eventually we could auto-triage whether in a `verbose` decorated
+    # function or not.
+    # NB This probably only works for functions that are undecorated,
+    # or decorated by `verbose`.
+    back = 2 if not in_verbose else 4
+    call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
+    return inspect.getframeinfo(call_frame).code_context[0].strip()
+
+
+def _sort_keys(x):
+    """Sort and return keys of dict"""
+    keys = list(x.keys())  # note: not thread-safe
+    idx = np.argsort([str(k) for k in keys])
+    keys = [keys[ii] for ii in idx]
+    return keys
+
+
+def object_hash(x, h=None):
+    """Hash a reasonable python object
+
+    Parameters
+    ----------
+    x : object
+        Object to hash. Can be anything comprised of nested versions of:
+        {dict, list, tuple, ndarray, str, bytes, float, int, None}.
+    h : hashlib HASH object | None
+        Optional, object to add the hash to. None creates an MD5 hash.
+
+    Returns
+    -------
+    digest : int
+        The digest resulting from the hash.
+    """
+    if h is None:
+        h = hashlib.md5()
+    if isinstance(x, dict):
+        keys = _sort_keys(x)
+        for key in keys:
+            object_hash(key, h)
+            object_hash(x[key], h)
+    elif isinstance(x, (list, tuple)):
+        h.update(str(type(x)).encode('utf-8'))
+        for xx in x:
+            object_hash(xx, h)
+    elif isinstance(x, bytes):
+        # must come before "str" below
+        h.update(x)
+    elif isinstance(x, (string_types, float, int, type(None))):
+        h.update(str(type(x)).encode('utf-8'))
+        h.update(str(x).encode('utf-8'))
+    elif isinstance(x, np.ndarray):
+        x = np.asarray(x)
+        h.update(str(x.shape).encode('utf-8'))
+        h.update(str(x.dtype).encode('utf-8'))
+        h.update(x.tostring())
+    else:
+        raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
+    return int(h.hexdigest(), 16)
+
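+# A quick sketch: the hash is insensitive to dict key order but sensitive
+# to values:
+#
+#     object_hash({'a': 1, 'b': [1, 2]}) == object_hash({'b': [1, 2], 'a': 1})
+#     # -> True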
+
+def object_diff(a, b, pre=''):
+    """Compute all differences between two python variables
+
+    Parameters
+    ----------
+    a : object
+        Currently supported: dict, list, tuple, ndarray, int, str, bytes,
+        float, StringIO, BytesIO.
+    b : object
+        Must be the same type as ``a``.
+    pre : str
+        String to prepend to each line.
+
+    Returns
+    -------
+    diffs : str
+        A string representation of the differences.
+    """
+    out = ''
+    if type(a) != type(b):
+        out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
+    elif isinstance(a, dict):
+        k1s = _sort_keys(a)
+        k2s = _sort_keys(b)
+        m1 = set(k2s) - set(k1s)
+        if len(m1):
+            out += pre + ' x1 missing keys %s\n' % (m1)
+        for key in k1s:
+            if key not in k2s:
+                out += pre + ' x2 missing key %s\n' % key
+            else:
+                out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
+    elif isinstance(a, (list, tuple)):
+        if len(a) != len(b):
+            out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
+        else:
+            for xx1, xx2 in zip(a, b):
+                out += object_diff(xx1, xx2, pre='')
+    elif isinstance(a, (string_types, int, float, bytes)):
+        if a != b:
+            out += pre + ' value mismatch (%s, %s)\n' % (a, b)
+    elif a is None:
+        if b is not None:
+            out += pre + ' a is None, b is not (%s)\n' % (b)
+    elif isinstance(a, np.ndarray):
+        if not np.array_equal(a, b):
+            out += pre + ' array mismatch\n'
+    elif isinstance(a, (StringIO, BytesIO)):
+        if a.getvalue() != b.getvalue():
+            out += pre + ' StringIO mismatch\n'
+    elif sparse.isspmatrix(a):
+        # sparsity and sparse type of b vs a already checked above by type()
+        if b.shape != a.shape:
+            out += pre + (' sparse matrix a and b shape mismatch'
+                          '(%s vs %s)' % (a.shape, b.shape))
+        else:
+            c = a - b
+            c.eliminate_zeros()
+            if c.nnz > 0:
+                out += pre + (' sparse matrix a and b differ on %s '
+                              'elements' % c.nnz)
+    else:
+        raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
+    return out
+
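+# A quick sketch: an empty string means no differences were found:
+#
+#     object_diff(dict(a=1), dict(a=1))   # -> ''
+#     object_diff(dict(a=1), dict(a=2))   # -> "d1['a'] value mismatch (1, 2)\n"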
+
+def check_random_state(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None, return the RandomState singleton used by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (int, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
+
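+# A quick sketch of the three accepted forms:
+#
+#     rng = check_random_state(None)   # numpy's global RandomState
+#     rng = check_random_state(42)     # fresh instance seeded with 42
+#     rng = check_random_state(rng)    # an existing instance passes through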
+
+def split_list(l, n):
+    """split list in n (approx) equal pieces"""
+    n = int(n)
+    sz = len(l) // n
+    for i in range(n - 1):
+        yield l[i * sz:(i + 1) * sz]
+    yield l[(n - 1) * sz:]
+
+
+def create_chunks(sequence, size):
+    """Generate chunks from a sequence
+
+    Parameters
+    ----------
+    sequence : iterable
+        Any iterable object
+    size : int
+        The chunksize to be returned
+    """
+    return (sequence[p:p + size] for p in range(0, len(sequence), size))
+
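+# A quick sketch (the final chunk may be shorter than ``size``):
+#
+#     list(create_chunks([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]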
+
+def sum_squared(X):
+    """Compute norm of an array
+
+    Parameters
+    ----------
+    X : array
+        Data whose norm must be found
+
+    Returns
+    -------
+    value : float
+        Sum of squares of the input array X
+    """
+    X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
+    return np.dot(X_flat, X_flat)
+
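+# A worked example: 0**2 + 1**2 + 2**2 = 5:
+#
+#     sum_squared(np.arange(3))   # -> 5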
+
+def check_fname(fname, filetype, endings):
+    """Enforce MNE filename conventions
+
+    Parameters
+    ----------
+    fname : str
+        Name of the file.
+    filetype : str
+        Type of file. e.g., ICA, Epochs etc.
+    endings : tuple
+        Acceptable endings for the filename.
+    """
+    print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
+    if not fname.endswith(endings):
+        warnings.warn('This filename (%s) does not conform to MNE naming '
+                      'conventions. All %s files should end with '
+                      '%s' % (fname, filetype, print_endings))
+
+
+class WrapStdOut(object):
+    """Ridiculous class to work around how doctest captures stdout"""
+    def __getattr__(self, name):
+        # Even more ridiculous than this class, this must be sys.stdout (not
+        # just stdout) in order for this to work (tested on OSX and Linux)
+        return getattr(sys.stdout, name)
+
+
+class _TempDir(str):
+    """Class for creating and auto-destroying temp dir
+
+    This is designed to be used with testing modules. Instances should be
+    defined inside test functions. Instances defined at module level can not
+    guarantee proper destruction of the temporary directory.
+
+    When used at module level, the current use of the __del__() method for
+    cleanup can fail because the rmtree function may be cleaned up before this
+    object (an alternative could be using the atexit module instead).
+    """
+    def __new__(self):
+        new = str.__new__(self, tempfile.mkdtemp())
+        return new
+
+    def __init__(self):
+        self._path = self.__str__()
+
+    def __del__(self):
+        rmtree(self._path, ignore_errors=True)
+
+
+def estimate_rank(data, tol=1e-4, return_singular=False,
+                  norm=True, copy=True):
+    """Helper to estimate the rank of data
+
+    This function will normalize the rows of the data (typically
+    channels or vertices) such that non-zero singular values
+    should be close to one.
+
+    Parameters
+    ----------
+    data : array
+        Data to estimate the rank of (should be 2-dimensional).
+    tol : float
+        Tolerance for singular values to consider non-zero in
+        calculating the rank. The singular values are calculated
+        in this method such that independent data are expected to
+        have singular value around one.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    norm : bool
+        If True, data will be scaled by their estimated row-wise norm.
+        Else data are assumed to be scaled. Defaults to True.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    if copy is True:
+        data = data.copy()
+    if norm is True:
+        norms = _compute_row_norms(data)
+        data /= norms[:, np.newaxis]
+    s = linalg.svd(data, compute_uv=False, overwrite_a=True)
+    rank = np.sum(s >= tol)
+    if return_singular is True:
+        return rank, s
+    else:
+        return rank
+
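+# A minimal sketch: a matrix built from a single outer product is detected
+# as rank one (illustrative data):
+#
+#     a = np.random.RandomState(0).randn(5)
+#     estimate_rank(np.outer(a, a))   # -> 1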
+
+def _compute_row_norms(data):
+    """Compute scaling based on estimated norm"""
+    norms = np.sqrt(np.sum(data ** 2, axis=1))
+    norms[norms == 0] = 1.0
+    return norms
+
+
+def _reject_data_segments(data, reject, flat, decim, info, tstep):
+    """Reject data segments using peak-to-peak amplitude
+    """
+    from .epochs import _is_good
+    from .io.pick import channel_indices_by_type
+
+    data_clean = np.empty_like(data)
+    idx_by_type = channel_indices_by_type(info)
+    step = int(ceil(tstep * info['sfreq']))
+    if decim is not None:
+        step = int(ceil(step / float(decim)))
+    this_start = 0
+    this_stop = 0
+    drop_inds = []
+    for first in range(0, data.shape[1], step):
+        last = first + step
+        data_buffer = data[:, first:last]
+        if data_buffer.shape[1] < (last - first):
+            break  # end of the time segment
+        if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
+                    flat, ignore_chs=info['bads']):
+            this_stop = this_start + data_buffer.shape[1]
+            data_clean[:, this_start:this_stop] = data_buffer
+            this_start += data_buffer.shape[1]
+        else:
+            logger.info("Artifact detected in [%d, %d]" % (first, last))
+            drop_inds.append((first, last))
+    data = data_clean[:, :this_stop]
+    if not data.any():
+        raise RuntimeError('No clean segment found. Please '
+                           'consider updating your rejection '
+                           'thresholds.')
+    return data, drop_inds
+
+
+class _FormatDict(dict):
+    """Helper for pformat()"""
+    def __missing__(self, key):
+        return "{" + key + "}"
+
+
+def pformat(temp, **fmt):
+    """Partially format a template string.
+
+    Examples
+    --------
+    >>> pformat("{a}_{b}", a='x')
+    'x_{b}'
+    """
+    formatter = Formatter()
+    mapping = _FormatDict(fmt)
+    return formatter.vformat(temp, (), mapping)
+
+
+def trait_wraith(*args, **kwargs):
+    # Stand in for traits to allow importing traits based modules when the
+    # traits library is not installed
+    return lambda x: x
+
+
+###############################################################################
+# DECORATORS
+
+# Following deprecated class copied from scikit-learn
+
+# force show of DeprecationWarning even on python 2.7
+warnings.simplefilter('default')
+
+
+class deprecated(object):
+    """Decorator to mark a function or class as deprecated.
+
+    Issue a warning when the function is called/the class is instantiated and
+    adds a warning to the docstring.
+
+    The optional extra argument will be appended to the deprecation message
+    and the docstring. Note: to use this with the default value for extra,
+    put in an empty set of parentheses::
+
+        >>> from mne.utils import deprecated
+        >>> deprecated() # doctest: +ELLIPSIS
+        <mne.utils.deprecated object at ...>
+
+        >>> @deprecated()
+        ... def some_function(): pass
+
+
+    Parameters
+    ----------
+    extra : str
+        To be added to the deprecation messages.
+    """
+    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
+    # but with many changes.
+
+    # scikit-learn will not import on all platforms b/c it can be
+    # sklearn or scikits.learn, so a self-contained example is used above
+
+    def __init__(self, extra=''):
+        self.extra = extra
+
+    def __call__(self, obj):
+        """Call
+
+        Parameters
+        ----------
+        obj : object
+            Object to call.
+        """
+        if isinstance(obj, type):
+            return self._decorate_class(obj)
+        else:
+            return self._decorate_fun(obj)
+
+    def _decorate_class(self, cls):
+        msg = "Class %s is deprecated" % cls.__name__
+        if self.extra:
+            msg += "; %s" % self.extra
+
+        # FIXME: we should probably reset __new__ for full generality
+        init = cls.__init__
+
+        def deprecation_wrapped(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning)
+            return init(*args, **kwargs)
+        cls.__init__ = deprecation_wrapped
+
+        deprecation_wrapped.__name__ = '__init__'
+        deprecation_wrapped.__doc__ = self._update_doc(init.__doc__)
+        deprecation_wrapped.deprecated_original = init
+
+        return cls
+
+    def _decorate_fun(self, fun):
+        """Decorate function fun"""
+
+        msg = "Function %s is deprecated" % fun.__name__
+        if self.extra:
+            msg += "; %s" % self.extra
+
+        def deprecation_wrapped(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning)
+            return fun(*args, **kwargs)
+
+        deprecation_wrapped.__name__ = fun.__name__
+        deprecation_wrapped.__dict__ = fun.__dict__
+        deprecation_wrapped.__doc__ = self._update_doc(fun.__doc__)
+
+        return deprecation_wrapped
+
+    def _update_doc(self, olddoc):
+        newdoc = "DEPRECATED"
+        if self.extra:
+            newdoc = "%s: %s" % (newdoc, self.extra)
+        if olddoc:
+            newdoc = "%s\n\n%s" % (newdoc, olddoc)
+        return newdoc
+
+
+@decorator
+def verbose(function, *args, **kwargs):
+    """Improved verbose decorator to allow functions to override log-level
+
+    Do not call this directly to set global verbosity level, instead use
+    set_log_level().
+
+    Parameters
+    ----------
+    function : function
+        Function to be decorated by setting the verbosity level.
+
+    Returns
+    -------
+    dec : function
+        The decorated function
+    """
+    arg_names = inspect.getargspec(function).args
+    default_level = verbose_level = None
+    if len(arg_names) > 0 and arg_names[0] == 'self':
+        default_level = getattr(args[0], 'verbose', None)
+    if 'verbose' in arg_names:
+        verbose_level = args[arg_names.index('verbose')]
+    elif 'verbose' in kwargs:
+        verbose_level = kwargs.pop('verbose')
+
+    # This ensures that object.method(verbose=None) will use object.verbose
+    verbose_level = default_level if verbose_level is None else verbose_level
+
+    if verbose_level is not None:
+        old_level = set_log_level(verbose_level, True)
+        # set it back if we get an exception
+        try:
+            return function(*args, **kwargs)
+        finally:
+            set_log_level(old_level)
+    return function(*args, **kwargs)
+
+
+@nottest
+def slow_test(f):
+    """Decorator for slow tests"""
+    f.slow_test = True
+    return f
+
+
+@nottest
+def ultra_slow_test(f):
+    """Decorator for ultra slow tests"""
+    f.ultra_slow_test = True
+    f.slow_test = True
+    return f
+
+
+def has_nibabel(vox2ras_tkr=False):
+    """Determine if nibabel is installed
+
+    Parameters
+    ----------
+    vox2ras_tkr : bool
+        If True, require nibabel has vox2ras_tkr support.
+
+    Returns
+    -------
+    has : bool
+        True if the user has nibabel.
+    """
+    try:
+        import nibabel
+        out = True
+        if vox2ras_tkr:  # we need MGHHeader to have vox2ras_tkr param
+            out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),
+                                   'header_class', 0),
+                           'get_vox2ras_tkr', None) is not None)
+        return out
+    except ImportError:
+        return False
+
+
+def has_mne_c():
+    """Aux function"""
+    return 'MNE_ROOT' in os.environ
+
+
+def has_freesurfer():
+    """Aux function"""
+    return 'FREESURFER_HOME' in os.environ
+
+
+def requires_nibabel(vox2ras_tkr=False):
+    """Aux function"""
+    extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''
+    return np.testing.dec.skipif(not has_nibabel(vox2ras_tkr),
+                                 'Requires nibabel%s' % extra)
+
+
+def requires_version(library, min_version):
+    """Helper for testing"""
+    return np.testing.dec.skipif(not check_version(library, min_version),
+                                 'Requires %s version >= %s'
+                                 % (library, min_version))
+
+
+def requires_module(function, name, call):
+    """Decorator to skip test if package is not available"""
+    try:
+        from nose.plugins.skip import SkipTest
+    except ImportError:
+        SkipTest = AssertionError
+
+    @wraps(function)
+    def dec(*args, **kwargs):
+        skip = False
+        try:
+            exec(call) in globals(), locals()
+        except Exception:
+            skip = True
+        if skip is True:
+            raise SkipTest('Test %s skipped, requires %s'
+                           % (function.__name__, name))
+        return function(*args, **kwargs)
+    return dec
+
+
+_pandas_call = """
+import pandas
+version = LooseVersion(pandas.__version__)
+if version < '0.8.0':
+    raise ImportError
+"""
+
+_sklearn_call = """
+required_version = '0.14'
+import sklearn
+version = LooseVersion(sklearn.__version__)
+if version < required_version:
+    raise ImportError
+"""
+
+_sklearn_0_15_call = """
+required_version = '0.15'
+import sklearn
+version = LooseVersion(sklearn.__version__)
+if version < required_version:
+    raise ImportError
+"""
+
+_mayavi_call = """
+from mayavi import mlab
+mlab.options.backend = 'test'
+"""
+
+_mne_call = """
+if not has_mne_c():
+    raise ImportError
+"""
+
+_fs_call = """
+if not has_freesurfer():
+    raise ImportError
+"""
+
+_n2ft_call = """
+if 'NEUROMAG2FT_ROOT' not in os.environ:
+    raise ImportError
+"""
+
+_fs_or_ni_call = """
+if not has_nibabel() and not has_freesurfer():
+    raise ImportError
+"""
+
+requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
+requires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)
+requires_sklearn_0_15 = partial(requires_module, name='sklearn',
+                                call=_sklearn_0_15_call)
+requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
+requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
+requires_freesurfer = partial(requires_module, name='Freesurfer',
+                              call=_fs_call)
+requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
+                               call=_n2ft_call)
+requires_fs_or_nibabel = partial(requires_module, name='nibabel or Freesurfer',
+                                 call=_fs_or_ni_call)
+
+requires_tvtk = partial(requires_module, name='TVTK',
+                        call='from tvtk.api import tvtk')
+requires_statsmodels = partial(requires_module, name='statsmodels',
+                               call='import statsmodels')
+requires_patsy = partial(requires_module, name='patsy',
+                         call='import patsy')
+requires_pysurfer = partial(requires_module, name='PySurfer',
+                            call='from surfer import Brain')
+requires_PIL = partial(requires_module, name='PIL',
+                       call='from PIL import Image')
+requires_good_network = partial(
+    requires_module, name='good network connection',
+    call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
+         '    raise ImportError')
+requires_nitime = partial(requires_module, name='nitime',
+                          call='import nitime')
+requires_traits = partial(requires_module, name='traits',
+                          call='import traits')
+requires_h5py = partial(requires_module, name='h5py', call='import h5py')
+
+
+def check_version(library, min_version):
+    """Check minimum library version required
+
+    Parameters
+    ----------
+    library : str
+        The library name to import. Must have a ``__version__`` property.
+    min_version : str
+        The minimum version string. Anything that matches
+        ``'(\\d+ | [a-z]+ | \\.)'``
+
+    Returns
+    -------
+    ok : bool
+        True if the library exists with at least the specified version.
+    """
+    ok = True
+    try:
+        library = __import__(library)
+    except ImportError:
+        ok = False
+    else:
+        this_version = LooseVersion(library.__version__)
+        if this_version < min_version:
+            ok = False
+    return ok
+
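+# A quick sketch (numpy is a hard dependency of mne, so the first call
+# should return True on any working install):
+#
+#     check_version('numpy', '1.0')             # -> True
+#     check_version('not_a_real_module', '1')   # -> False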
+
+def _check_mayavi_version(min_version='4.3.0'):
+    """Helper for mayavi"""
+    if not check_version('mayavi', min_version):
+        raise RuntimeError("Need mayavi >= %s" % min_version)
+
+
+@verbose
+def run_subprocess(command, verbose=None, *args, **kwargs):
+    """Run command using subprocess.Popen
+
+    Run command and wait for command to complete. If the return code was zero
+    then return, otherwise raise CalledProcessError.
+    By default, this will also add stdout= and stderr=subprocess.PIPE
+    to the call to Popen to suppress printing to the terminal.
+
+    Parameters
+    ----------
+    command : list of str
+        Command to run as subprocess (see subprocess.Popen documentation).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    *args, **kwargs : arguments
+        Additional arguments to pass to subprocess.Popen.
+
+    Returns
+    -------
+    stdout : str
+        Stdout returned by the process.
+    stderr : str
+        Stderr returned by the process.
+    """
+    for stdxxx, sys_stdxxx in (['stderr', sys.stderr],
+                               ['stdout', sys.stdout]):
+        if stdxxx not in kwargs:
+            kwargs[stdxxx] = subprocess.PIPE
+        elif kwargs[stdxxx] is sys_stdxxx:
+            if isinstance(sys_stdxxx, StringIO):
+                # nose monkey patches sys.stderr and sys.stdout to StringIO
+                kwargs[stdxxx] = subprocess.PIPE
+            else:
+                kwargs[stdxxx] = sys_stdxxx
+
+    # Check the PATH environment variable. If run_subprocess() is to be called
+    # frequently this should be refactored so as to only check the path once.
+    env = kwargs.get('env', os.environ)
+    if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
+        msg = ("Your PATH environment variable contains at least one path "
+               "starting with a tilde ('~') character. Such paths are not "
+               "interpreted correctly from within Python. It is recommended "
+               "that you use '$HOME' instead of '~'.")
+        warnings.warn(msg)
+
+    logger.info("Running subprocess: %s" % ' '.join(command))
+    try:
+        p = subprocess.Popen(command, *args, **kwargs)
+    except Exception:
+        logger.error('Command not found: %s' % (command[0],))
+        raise
+    stdout_, stderr = p.communicate()
+    stdout_ = '' if stdout_ is None else stdout_.decode('utf-8')
+    stderr = '' if stderr is None else stderr.decode('utf-8')
+
+    if stdout_.strip():
+        logger.info("stdout:\n%s" % stdout_)
+    if stderr.strip():
+        logger.info("stderr:\n%s" % stderr)
+
+    output = (stdout_, stderr)
+    if p.returncode:
+        print(output)
+        err_fun = subprocess.CalledProcessError.__init__
+        if 'output' in inspect.getargspec(err_fun).args:
+            raise subprocess.CalledProcessError(p.returncode, command, output)
+        else:
+            raise subprocess.CalledProcessError(p.returncode, command)
+
+    return output
+
+
+###############################################################################
+# LOGGING
+
+def set_log_level(verbose=None, return_old_level=False):
+    """Convenience function for setting the logging level
+
+    Parameters
+    ----------
+    verbose : bool, str, int, or None
+        The verbosity of messages to print. If a str, it can be either DEBUG,
+        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
+        convenience and are equivalent to passing in logging.DEBUG, etc.
+        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
+        If None, the environment variable MNE_LOGGING_LEVEL is read, and if
+        it doesn't exist, defaults to INFO.
+    return_old_level : bool
+        If True, return the old verbosity level.
+    """
+    if verbose is None:
+        verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
+    elif isinstance(verbose, bool):
+        if verbose is True:
+            verbose = 'INFO'
+        else:
+            verbose = 'WARNING'
+    if isinstance(verbose, string_types):
+        verbose = verbose.upper()
+        logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
+                             WARNING=logging.WARNING, ERROR=logging.ERROR,
+                             CRITICAL=logging.CRITICAL)
+        if verbose not in logging_types:
+            raise ValueError('verbose must be one of DEBUG, INFO, WARNING, '
+                             'ERROR, or CRITICAL')
+        verbose = logging_types[verbose]
+    logger = logging.getLogger('mne')
+    old_verbose = logger.level
+    logger.setLevel(verbose)
+    return (old_verbose if return_old_level else None)
+
+
+def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
+    """Convenience function for setting the log to print to a file
+
+    Parameters
+    ----------
+    fname : str, or None
+        Filename of the log to print to. If None, stdout is used.
+        To suppress log outputs, use set_log_level('WARNING').
+    output_format : str
+        Format of the output messages. See the following for examples:
+
+            https://docs.python.org/dev/howto/logging.html
+
+        e.g., "%(asctime)s - %(levelname)s - %(message)s".
+    overwrite : bool, or None
+        Overwrite the log file (if it exists). Otherwise, statements
+        will be appended to the log (default). None is the same as False,
+        but additionally raises a warning to notify the user that log
+        entries will be appended.
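+
+    Example
+    -------
+    A minimal sketch, using a hypothetical log file name:
+
+    >>> set_log_file('mne_run.log', overwrite=True)  # doctest: +SKIP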
+    """
+    logger = logging.getLogger('mne')
+    # Iterate over a copy, since removeHandler() mutates logger.handlers
+    for h in list(logger.handlers):
+        if isinstance(h, logging.FileHandler):
+            h.close()
+        logger.removeHandler(h)
+    if fname is not None:
+        if op.isfile(fname) and overwrite is None:
+            warnings.warn('Log entries will be appended to the file. Use '
+                          'overwrite=False to avoid this message in the '
+                          'future.')
+        mode = 'w' if overwrite is True else 'a'
+        lh = logging.FileHandler(fname, mode=mode)
+    else:
+        """ we should just be able to do:
+                lh = logging.StreamHandler(sys.stdout)
+            but because doctests uses some magic on stdout, we have to do this:
+        """
+        lh = logging.StreamHandler(WrapStdOut())
+
+    lh.setFormatter(logging.Formatter(output_format))
+    # actually add the stream handler
+    logger.addHandler(lh)
+
+
+###############################################################################
+# CONFIG / PREFS
+
+def get_subjects_dir(subjects_dir=None, raise_error=False):
+    """Safely use subjects_dir input to return SUBJECTS_DIR
+
+    Parameters
+    ----------
+    subjects_dir : str | None
+        If a value is provided, return subjects_dir. Otherwise, look for
+        SUBJECTS_DIR config and return the result.
+    raise_error : bool
+        If True, raise a KeyError if no value for SUBJECTS_DIR can be found
+        (instead of returning None).
+
+    Returns
+    -------
+    value : str | None
+        The SUBJECTS_DIR value.
+    """
+    if subjects_dir is None:
+        subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
+    return subjects_dir
+
+
+_temp_home_dir = None
+
+
+def _get_extra_data_path(home_dir=None):
+    """Get path to extra data (config, tables, etc.)"""
+    global _temp_home_dir
+    if home_dir is None:
+        # this has been checked on OSX64, Linux64, and Win32
+        if os.name == 'nt':
+            home_dir = os.getenv('APPDATA')
+        else:
+            # This is a more robust way of getting the user's home folder on
+            # Linux platforms (not sure about OSX, Unix or BSD) than checking
+            # the HOME environment variable. If the user is running some sort
+            # of script that isn't launched via the command line (e.g. a script
+            # launched via Upstart) then the HOME environment variable will
+            # not be set.
+            if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
+                if _temp_home_dir is None:
+                    _temp_home_dir = tempfile.mkdtemp()
+                    atexit.register(partial(shutil.rmtree, _temp_home_dir,
+                                            ignore_errors=True))
+                home_dir = _temp_home_dir
+            else:
+                home_dir = os.path.expanduser('~')
+
+        if home_dir is None:
+            raise ValueError('mne-python config file path could '
+                             'not be determined, please report this '
+                             'error to mne-python developers')
+
+    return op.join(home_dir, '.mne')
+
+
+def get_config_path(home_dir=None):
+    """Get path to standard mne-python config file
+
+    Parameters
+    ----------
+    home_dir : str | None
+        The folder that contains the .mne config folder.
+        If None, it is found automatically.
+
+    Returns
+    -------
+    config_path : str
+        The path to the mne-python configuration file. On Windows, this
+        will be '%APPDATA%\\.mne\\mne-python.json'. On every other
+        system, this will be ~/.mne/mne-python.json.
+    """
+    val = op.join(_get_extra_data_path(home_dir=home_dir),
+                  'mne-python.json')
+    return val
+
+
+def set_cache_dir(cache_dir):
+    """Set the directory to be used for temporary file storage.
+
+    This directory is used by joblib to store memmapped arrays,
+    which reduces memory requirements and speeds up parallel
+    computation.
+
+    Parameters
+    ----------
+    cache_dir : str | None
+        Directory to use for temporary file storage. None disables
+        temporary file storage.
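+
+    Example
+    -------
+    A minimal sketch; the directory must already exist:
+
+    >>> set_cache_dir('/tmp/mne_cache')  # doctest: +SKIP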
+    """
+    if cache_dir is not None and not op.exists(cache_dir):
+        raise IOError('Directory %s does not exist' % cache_dir)
+
+    set_config('MNE_CACHE_DIR', cache_dir)
+
+
+def set_memmap_min_size(memmap_min_size):
+    """Set the minimum size for memmaping of arrays for parallel processing
+
+    Parameters
+    ----------
+    memmap_min_size : str | None
+        Threshold on the minimum size of arrays that triggers automated memory
+        mapping for parallel processing, e.g., '1M' for 1 megabyte.
+        Use None to disable memmapping of large arrays.
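+
+    Example
+    -------
+    A minimal sketch using a 10 megabyte threshold:
+
+    >>> set_memmap_min_size('10M')  # doctest: +SKIP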
+    """
+    if memmap_min_size is not None:
+        if not isinstance(memmap_min_size, string_types):
+            raise ValueError('\'memmap_min_size\' has to be a string.')
+        if memmap_min_size[-1] not in ['K', 'M', 'G']:
+            raise ValueError('The size has to be given in kilo-, mega-, or '
+                             'gigabytes, e.g., 100K, 500M, 1G.')
+
+    set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size)
+
+
+# List the known configuration values
+known_config_types = [
+    'MNE_BROWSE_RAW_SIZE',
+    'MNE_CUDA_IGNORE_PRECISION',
+    'MNE_DATA',
+    'MNE_DATASETS_MEGSIM_PATH',
+    'MNE_DATASETS_SAMPLE_PATH',
+    'MNE_DATASETS_SOMATO_PATH',
+    'MNE_DATASETS_SPM_FACE_PATH',
+    'MNE_DATASETS_EEGBCI_PATH',
+    'MNE_DATASETS_BRAINSTORM_PATH',
+    'MNE_DATASETS_TESTING_PATH',
+    'MNE_LOGGING_LEVEL',
+    'MNE_USE_CUDA',
+    'SUBJECTS_DIR',
+    'MNE_CACHE_DIR',
+    'MNE_MEMMAP_MIN_SIZE',
+    'MNE_SKIP_TESTING_DATASET_TESTS',
+    'MNE_DATASETS_SPM_FACE_DATASETS_TESTS'
+]
+
+# These allow for partial matches: e.g., 'MNE_STIM_CHANNEL_1' is a valid key
+known_config_wildcards = [
+    'MNE_STIM_CHANNEL',
+]
+
+
+def get_config(key=None, default=None, raise_error=False, home_dir=None):
+    """Read mne(-python) preference from env, then mne-python config
+
+    Parameters
+    ----------
+    key : None | str
+        The preference key to look for. The OS environment is searched first,
+        then the mne-python config file is parsed.
+        If None, all the config parameters from the config file are returned.
+    default : str | None
+        Value to return if the key is not found.
+    raise_error : bool
+        If True, raise an error if the key is not found (instead of returning
+        default).
+    home_dir : str | None
+        The folder that contains the .mne config folder.
+        If None, it is found automatically.
+
+    Returns
+    -------
+    value : dict | str | None
+        The preference key value.
+
+    See Also
+    --------
+    set_config
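+
+    Example
+    -------
+    A minimal sketch, falling back to a default when the key is unset:
+
+    >>> get_config('MNE_LOGGING_LEVEL', default='INFO')  # doctest: +SKIP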
+    """
+
+    if key is not None and not isinstance(key, string_types):
+        raise TypeError('key must be a string')
+
+    # first, check to see if key is in env
+    if key is not None and key in os.environ:
+        return os.environ[key]
+
+    # second, look for it in mne-python config file
+    config_path = get_config_path(home_dir=home_dir)
+    if not op.isfile(config_path):
+        key_found = False
+        val = default
+    else:
+        with open(config_path, 'r') as fid:
+            config = json.load(fid)
+            if key is None:
+                return config
+        key_found = key in config
+        val = config.get(key, default)
+
+    if not key_found and raise_error is True:
+        meth_1 = 'os.environ["%s"] = VALUE' % key
+        meth_2 = 'mne.utils.set_config("%s", VALUE)' % key
+        raise KeyError('Key "%s" not found in environment or in the '
+                       'mne-python config file: %s '
+                       'Try either:'
+                       ' %s for a temporary solution, or:'
+                       ' %s for a permanent one. You can also '
+                       'set the environment variable before '
+                       'running python.'
+                       % (key, config_path, meth_1, meth_2))
+    return val
+
+
+def set_config(key, value, home_dir=None):
+    """Set mne-python preference in config
+
+    Parameters
+    ----------
+    key : str
+        The preference key to set.
+    value : str |  None
+        The value to assign to the preference key. If None, the key is
+        deleted.
+    home_dir : str | None
+        The folder that contains the .mne config folder.
+        If None, it is found automatically.
+
+    See Also
+    --------
+    get_config
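+
+    Example
+    -------
+    A minimal sketch; passing None as the value would instead delete the key:
+
+    >>> set_config('MNE_LOGGING_LEVEL', 'WARNING')  # doctest: +SKIP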
+    """
+    if not isinstance(key, string_types):
+        raise TypeError('key must be a string')
+    # While JSON allows non-string types, we allow users to override config
+    # settings using environment variables, which are strings, so we enforce
+    # that here
+    if not isinstance(value, string_types) and value is not None:
+        raise TypeError('value must be a string or None')
+    if key not in known_config_types and not \
+            any(k in key for k in known_config_wildcards):
+        warnings.warn('Setting non-standard config type: "%s"' % key)
+
+    # Read all previous values
+    config_path = get_config_path(home_dir=home_dir)
+    if op.isfile(config_path):
+        with open(config_path, 'r') as fid:
+            config = json.load(fid)
+    else:
+        config = dict()
+        logger.info('Attempting to create new mne-python configuration '
+                    'file:\n%s' % config_path)
+    if value is None:
+        config.pop(key, None)
+    else:
+        config[key] = value
+
+    # Write all values. This may fail if the default directory is not
+    # writeable.
+    directory = op.dirname(config_path)
+    if not op.isdir(directory):
+        os.mkdir(directory)
+    with open(config_path, 'w') as fid:
+        json.dump(config, fid, sort_keys=True, indent=0)
+
+
+class ProgressBar(object):
+    """Class for generating a command-line progressbar
+
+    Parameters
+    ----------
+    max_value : int
+        Maximum value of process (e.g. number of samples to process, bytes to
+        download, etc.).
+    initial_value : int
+        Initial value of process, useful when resuming process from a specific
+        value, defaults to 0.
+    mesg : str
+        Message to include at end of progress bar.
+    max_chars : int
+        Number of characters to use for progress bar (be sure to save some room
+        for the message and % complete as well).
+    progress_character : char
+        Character in the progress bar that indicates the portion completed.
+    spinner : bool
+        Show a spinner.  Useful for long-running processes that may not
+        increment the progress bar very often.  This provides the user with
+        feedback that the progress has not stalled.
+
+    Example
+    -------
+    >>> progress = ProgressBar(13000)
+    >>> progress.update(3000) # doctest: +SKIP
+    [.........                               ] 23.07692 |
+    >>> progress.update(6000) # doctest: +SKIP
+    [..................                      ] 46.15385 |
+
+    >>> progress = ProgressBar(13000, spinner=True)
+    >>> progress.update(3000) # doctest: +SKIP
+    [.........                               ] 23.07692 |
+    >>> progress.update(6000) # doctest: +SKIP
+    [..................                      ] 46.15385 /
+    """
+
+    spinner_symbols = ['|', '/', '-', '\\']
+    template = '\r[{0}{1}] {2:.05f} {3} {4}   '
+
+    def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,
+                 progress_character='.', spinner=False, verbose_bool=True):
+        self.cur_value = initial_value
+        self.max_value = float(max_value)
+        self.mesg = mesg
+        self.max_chars = max_chars
+        self.progress_character = progress_character
+        self.spinner = spinner
+        self.spinner_index = 0
+        self.n_spinner = len(self.spinner_symbols)
+        self._do_print = verbose_bool
+
+    def update(self, cur_value, mesg=None):
+        """Update progressbar with current value of process
+
+        Parameters
+        ----------
+        cur_value : number
+            Current value of process.  Should be <= max_value (but this is not
+            enforced).  The percent of the progressbar will be computed as
+            (cur_value / max_value) * 100
+        mesg : str
+            Message to display to the right of the progressbar.  If None, the
+            last message provided will be used.  To clear the current message,
+            pass a null string, ''.
+        """
+        # Ensure floating-point division so we can get fractions of a percent
+        # for the progressbar.
+        self.cur_value = cur_value
+        progress = min(float(self.cur_value) / self.max_value, 1.)
+        num_chars = int(progress * self.max_chars)
+        num_left = self.max_chars - num_chars
+
+        # Update the message
+        if mesg is not None:
+            self.mesg = mesg
+
+        # The \r tells the cursor to return to the beginning of the line rather
+        # than starting a new line.  This allows us to have a progressbar-style
+        # display in the console window.
+        bar = self.template.format(self.progress_character * num_chars,
+                                   ' ' * num_left,
+                                   progress * 100,
+                                   self.spinner_symbols[self.spinner_index],
+                                   self.mesg)
+        # Force a flush because sometimes when using bash scripts and pipes,
+        # the output is not printed until after the program exits.
+        if self._do_print:
+            sys.stdout.write(bar)
+            sys.stdout.flush()
+        # Increment the spinner
+        if self.spinner:
+            self.spinner_index = (self.spinner_index + 1) % self.n_spinner
+
+    def update_with_increment_value(self, increment_value, mesg=None):
+        """Update progressbar with the value of the increment instead of the
+        current value of process as in update()
+
+        Parameters
+        ----------
+        increment_value : int
+            Value of the increment of process.  The percent of the progressbar
+            will be computed as
+            ((self.cur_value + increment_value) / max_value) * 100
+        mesg : str
+            Message to display to the right of the progressbar.  If None, the
+            last message provided will be used.  To clear the current message,
+            pass a null string, ''.
+        """
+        self.cur_value += increment_value
+        self.update(self.cur_value, mesg)
+
+
+def _chunk_read(response, local_file, initial_size=0, verbose_bool=True):
+    """Download a file chunk by chunk and show advancement
+
+    Can also be used when resuming downloads over http.
+
+    Parameters
+    ----------
+    response : urllib.response.addinfourl
+        Response to the download request in order to get file size.
+    local_file : file
+        Hard disk file where data should be written.
+    initial_size : int, optional
+        If resuming, indicate the initial size of the file.
+
+    Notes
+    -----
+    The chunk size will be automatically adapted based on the connection
+    speed.
+    """
+    # Adapted from NISL:
+    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
+
+    # Returns only amount left to download when resuming, not the size of the
+    # entire file
+    total_size = int(response.headers.get('Content-Length', '1').strip())
+    total_size += initial_size
+
+    progress = ProgressBar(total_size, initial_value=initial_size,
+                           max_chars=40, spinner=True, mesg='downloading',
+                           verbose_bool=verbose_bool)
+    chunk_size = 8192  # 2 ** 13
+    while True:
+        t0 = time.time()
+        chunk = response.read(chunk_size)
+        dt = time.time() - t0
+        if dt < 0.001:
+            chunk_size *= 2
+        elif dt > 0.5 and chunk_size > 8192:
+            chunk_size = chunk_size // 2
+        if not chunk:
+            if verbose_bool:
+                sys.stdout.write('\n')
+                sys.stdout.flush()
+            break
+        _chunk_write(chunk, local_file, progress)
+
+
+def _chunk_read_ftp_resume(url, temp_file_name, local_file, verbose_bool=True):
+    """Resume downloading of a file from an FTP server"""
+    # Adapted from: https://pypi.python.org/pypi/fileDownloader.py
+    # but with changes
+
+    parsed_url = urllib.parse.urlparse(url)
+    file_name = os.path.basename(parsed_url.path)
+    server_path = parsed_url.path.replace(file_name, "")
+    unquoted_server_path = urllib.parse.unquote(server_path)
+    local_file_size = os.path.getsize(temp_file_name)
+
+    data = ftplib.FTP()
+    if parsed_url.port is not None:
+        data.connect(parsed_url.hostname, parsed_url.port)
+    else:
+        data.connect(parsed_url.hostname)
+    data.login()
+    if len(server_path) > 1:
+        data.cwd(unquoted_server_path)
+    data.sendcmd("TYPE I")
+    data.sendcmd("REST " + str(local_file_size))
+    down_cmd = "RETR " + file_name
+    file_size = data.size(file_name)
+    progress = ProgressBar(file_size, initial_value=local_file_size,
+                           max_chars=40, spinner=True, mesg='downloading',
+                           verbose_bool=verbose_bool)
+
+    # Callback lambda function that will be passed the downloaded data
+    # chunk and will write it to file and update the progress bar
+    def chunk_write(chunk):
+        return _chunk_write(chunk, local_file, progress)
+    data.retrbinary(down_cmd, chunk_write)
+    data.close()
+    sys.stdout.write('\n')
+    sys.stdout.flush()
+
+
+def _chunk_write(chunk, local_file, progress):
+    """Write a chunk to file and update the progress bar"""
+    local_file.write(chunk)
+    progress.update_with_increment_value(len(chunk))
+
+
+ at verbose
+def _fetch_file(url, file_name, print_destination=True, resume=True,
+                hash_=None, verbose=None):
+    """Load requested file, downloading it if needed or requested
+
+    Parameters
+    ----------
+    url : str
+        The url of file to be downloaded.
+    file_name : str
+        Name, along with the path, of where downloaded file will be saved.
+    print_destination : bool, optional
+        If True, destination of where file was saved will be printed after
+        download finishes.
+    resume : bool, optional
+        If True, try to resume partially downloaded files.
+    hash_ : str | None
+        The hash of the file to check. If None, no checking is
+        performed.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
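+
+    Example
+    -------
+    A minimal sketch with a hypothetical URL and destination:
+
+    >>> _fetch_file('http://example.com/f.fif', 'f.fif')  # doctest: +SKIP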
+    """
+    # Adapted from NISL:
+    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
+    if hash_ is not None and (not isinstance(hash_, string_types) or
+                              len(hash_) != 32):
+        raise ValueError('Bad hash value given, should be a 32-character '
+                         'string:\n%s' % (hash_,))
+    temp_file_name = file_name + ".part"
+    local_file = None
+    initial_size = 0
+    verbose_bool = (logger.level <= 20)  # 20 is info
+    try:
+        # Checking file size and displaying it alongside the download url
+        u = urllib.request.urlopen(url, timeout=10.)
+        try:
+            file_size = int(u.headers.get('Content-Length', '1').strip())
+        finally:
+            u.close()
+            del u
+        logger.info('Downloading data from %s (%s)\n'
+                    % (url, sizeof_fmt(file_size)))
+        # Downloading data
+        if resume and os.path.exists(temp_file_name):
+            local_file = open(temp_file_name, "ab")
+            # Resuming HTTP and FTP downloads requires different procedures
+            scheme = urllib.parse.urlparse(url).scheme
+            if scheme in ('http', 'https'):
+                local_file_size = os.path.getsize(temp_file_name)
+                # If the file exists, then only download the remainder
+                req = urllib.request.Request(url)
+                req.headers["Range"] = "bytes=%s-" % local_file_size
+                try:
+                    data = urllib.request.urlopen(req)
+                except Exception:
+                    # There is a problem that may be due to resuming, some
+                    # servers may not support the "Range" header. Switch back
+                    # to complete download method
+                    logger.info('Resuming download failed. Attempting to '
+                                'restart downloading the entire file.')
+                    local_file.close()
+                    _fetch_file(url, file_name, resume=False)
+                else:
+                    _chunk_read(data, local_file, initial_size=local_file_size,
+                                verbose_bool=verbose_bool)
+                    data.close()
+                    del data  # should auto-close
+            else:
+                _chunk_read_ftp_resume(url, temp_file_name, local_file,
+                                       verbose_bool=verbose_bool)
+        else:
+            local_file = open(temp_file_name, "wb")
+            data = urllib.request.urlopen(url)
+            try:
+                _chunk_read(data, local_file, initial_size=initial_size,
+                            verbose_bool=verbose_bool)
+            finally:
+                data.close()
+                del data  # should auto-close
+        # temp file must be closed prior to the move
+        if not local_file.closed:
+            local_file.close()
+        # check md5sum
+        if hash_ is not None:
+            logger.info('Verifying download hash.')
+            md5 = md5sum(temp_file_name)
+            if hash_ != md5:
+                raise RuntimeError('Hash mismatch for downloaded file %s, '
+                                   'expected %s but got %s'
+                                   % (temp_file_name, hash_, md5))
+        shutil.move(temp_file_name, file_name)
+        if print_destination is True:
+            logger.info('File saved as %s.\n' % file_name)
+    except Exception as e:
+        logger.error('Error while fetching file %s.'
+                     ' Dataset fetching aborted.' % url)
+        logger.error("Error: %s", e)
+        raise
+    finally:
+        if local_file is not None:
+            if not local_file.closed:
+                local_file.close()
+
+
+def sizeof_fmt(num):
+    """Turn number of bytes into a human-readable string"""
+    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
+    decimals = [0, 0, 1, 2, 2, 2]
+    if num > 1:
+        exponent = min(int(log(num, 1024)), len(units) - 1)
+        quotient = float(num) / 1024 ** exponent
+        unit = units[exponent]
+        num_decimals = decimals[exponent]
+        format_string = '{0:.%sf} {1}' % (num_decimals)
+        return format_string.format(quotient, unit)
+    if num == 0:
+        return '0 bytes'
+    if num == 1:
+        return '1 byte'
+
+
+def _url_to_local_path(url, path):
+    """Mirror a url path in a local destination (keeping folder structure)"""
+    destination = urllib.parse.urlparse(url).path
+    # First char should be '/', and it needs to be discarded
+    if len(destination) < 2 or destination[0] != '/':
+        raise ValueError('Invalid URL')
+    destination = os.path.join(path,
+                               urllib.request.url2pathname(destination)[1:])
+    return destination
+
+
+def _get_stim_channel(stim_channel, info):
+    """Helper to determine the appropriate stim_channel
+
+    First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
+    are read. If these are not found, it will fall back to 'STI 014' if
+    present, then fall back to the first channel of type 'stim', if present.
+
+    Parameters
+    ----------
+    stim_channel : str | list of str | None
+        The stim channel selected by the user.
+    info : instance of Info
+        An information structure containing information about the channels.
+
+    Returns
+    -------
+    stim_channel : str | list of str
+        The name of the stim channel(s) to use
+    """
+    if stim_channel is not None:
+        if not isinstance(stim_channel, list):
+            if not isinstance(stim_channel, string_types):
+                raise TypeError('stim_channel must be a str, list, or None')
+            stim_channel = [stim_channel]
+        if not all(isinstance(s, string_types) for s in stim_channel):
+            raise TypeError('stim_channel list must contain all strings')
+        return stim_channel
+
+    stim_channel = list()
+    ch_count = 0
+    ch = get_config('MNE_STIM_CHANNEL')
+    while ch is not None and ch in info['ch_names']:
+        stim_channel.append(ch)
+        ch_count += 1
+        ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
+    if ch_count > 0:
+        return stim_channel
+
+    if 'STI 014' in info['ch_names']:
+        return ['STI 014']
+
+    from .io.pick import pick_types
+    stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
+    if len(stim_channel) > 0:
+        stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
+        return stim_channel
+
+    raise ValueError("No stim channels found. Consider specifying them "
+                     "manually using the 'stim_channel' parameter.")
+
+
+def _check_fname(fname, overwrite):
+    """Helper to check for file existence"""
+    if not isinstance(fname, string_types):
+        raise TypeError('file name is not a string')
+    if op.isfile(fname):
+        if not overwrite:
+            raise IOError('Destination file exists. Please use option '
+                          '"overwrite=True" to force overwriting.')
+        else:
+            logger.info('Overwriting existing file.')
+
+
+def _check_subject(class_subject, input_subject, raise_error=True):
+    """Helper to get subject name from class"""
+    if input_subject is not None:
+        if not isinstance(input_subject, string_types):
+            raise ValueError('subject input must be a string')
+        else:
+            return input_subject
+    elif class_subject is not None:
+        if not isinstance(class_subject, string_types):
+            raise ValueError('Neither subject input nor class subject '
+                             'attribute was a string')
+        else:
+            return class_subject
+    else:
+        if raise_error is True:
+            raise ValueError('Neither subject input nor class subject '
+                             'attribute was provided')
+        return None
+
+
+def _check_pandas_installed():
+    """Aux function"""
+    try:
+        import pandas as pd
+        return pd
+    except ImportError:
+        raise RuntimeError('For this method to work the Pandas library is'
+                           ' required.')
+
+
+def _check_pandas_index_arguments(index, defaults):
+    """ Helper function to check pandas index arguments """
+    if not any(isinstance(index, k) for k in (list, tuple)):
+        index = [index]
+    invalid_choices = [e for e in index if e not in defaults]
+    if invalid_choices:
+        options = [', '.join(e) for e in [invalid_choices, defaults]]
+        raise ValueError('[%s] is not an valid option. Valid index'
+                         'values are \'None\' or %s' % tuple(options))
+
+
+def _clean_names(names, remove_whitespace=False, before_dash=True):
+    """ Remove white-space on topo matching
+
+    This function handles different naming
+    conventions for old VS new VectorView systems (`remove_whitespace`).
+    Also it allows to remove system specific parts in CTF channel names
+    (`before_dash`).
+
+    Usage
+    -----
+    # for new VectorView (only inside layout)
+    ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
+
+    # for CTF
+    ch_names = _clean_names(epochs.ch_names, before_dash=True)
+
+    """
+    cleaned = []
+    for name in names:
+        if ' ' in name and remove_whitespace:
+            name = name.replace(' ', '')
+        if '-' in name and before_dash:
+            name = name.split('-')[0]
+        if name.endswith('_virtual'):
+            name = name[:-8]
+        cleaned.append(name)
+
+    return cleaned
+
+
+def clean_warning_registry():
+    """Safe way to reset warnings """
+    warnings.resetwarnings()
+    reg = "__warningregistry__"
+    bad_names = ['MovedModule']  # this is in six.py, and causes bad things
+    for mod in list(sys.modules.values()):
+        if mod.__class__.__name__ not in bad_names and hasattr(mod, reg):
+            getattr(mod, reg).clear()
+    # hack to deal with old scipy/numpy in tests
+    if os.getenv('TRAVIS') == 'true' and sys.version.startswith('2.6'):
+        warnings.simplefilter('default')
+        try:
+            np.rank([])
+        except Exception:
+            pass
+        warnings.simplefilter('always')
+
+
+def _check_type_picks(picks):
+    """helper to guarantee type integrity of picks"""
+    err_msg = 'picks must be None, a list or an array of integers'
+    if picks is None:
+        pass
+    elif isinstance(picks, list):
+        if not all(isinstance(i, int) for i in picks):
+            raise ValueError(err_msg)
+        picks = np.array(picks)
+    elif isinstance(picks, np.ndarray):
+        if not picks.dtype.kind == 'i':
+            raise ValueError(err_msg)
+    else:
+        raise ValueError(err_msg)
+    return picks
+
+
+ at nottest
+def run_tests_if_main(measure_mem=False):
+    """Run tests in a given file if it is run as a script"""
+    local_vars = inspect.currentframe().f_back.f_locals
+    if not local_vars.get('__name__', '') == '__main__':
+        return
+    # we are in a "__main__"
+    try:
+        import faulthandler
+        faulthandler.enable()
+    except Exception:
+        pass
+    with warnings.catch_warnings(record=True):  # memory_usage internal dep.
+        mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
+    if mem >= 0:
+        print('Memory consumption after import: %s' % mem)
+    t0 = time.time()
+    peak_mem, peak_name = mem, 'import'
+    max_elapsed, elapsed_name = 0, 'N/A'
+    count = 0
+    for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
+        val = local_vars[name]
+        if name.startswith('_'):
+            continue
+        elif callable(val) and name.startswith('test'):
+            count += 1
+            doc = val.__doc__.strip() if val.__doc__ else name
+            sys.stdout.write('%s ... ' % doc)
+            sys.stdout.flush()
+            try:
+                t1 = time.time()
+                if measure_mem:
+                    with warnings.catch_warnings(record=True):  # dep warn
+                        mem = int(round(max(memory_usage((val, (), {})))))
+                else:
+                    val()
+                    mem = -1
+                if mem >= peak_mem:
+                    peak_mem, peak_name = mem, name
+                mem = (', mem: %s MB' % mem) if mem >= 0 else ''
+                elapsed = int(round(time.time() - t1))
+                if elapsed >= max_elapsed:
+                    max_elapsed, elapsed_name = elapsed, name
+                sys.stdout.write('time: %s sec%s\n' % (elapsed, mem))
+                sys.stdout.flush()
+            except Exception as err:
+                if 'skiptest' in err.__class__.__name__.lower():
+                    sys.stdout.write('SKIP (%s)\n' % str(err))
+                    sys.stdout.flush()
+                else:
+                    raise
+    elapsed = int(round(time.time() - t0))
+    sys.stdout.write('Total: %s tests\n• %s sec (%s sec for %s)\n• Peak memory'
+                     ' %s MB (%s)\n' % (count, elapsed, max_elapsed,
+                                        elapsed_name, peak_mem, peak_name))
+
+
+class ArgvSetter(object):
+    """Temporarily set sys.argv"""
+    def __init__(self, args=(), disable_stdout=True, disable_stderr=True):
+        self.argv = list(('python',) + args)
+        self.stdout = StringIO() if disable_stdout else sys.stdout
+        self.stderr = StringIO() if disable_stderr else sys.stderr
+
+    def __enter__(self):
+        self.orig_argv = sys.argv
+        sys.argv = self.argv
+        self.orig_stdout = sys.stdout
+        sys.stdout = self.stdout
+        self.orig_stderr = sys.stderr
+        sys.stderr = self.stderr
+        return self
+
+    def __exit__(self, *args):
+        sys.argv = self.orig_argv
+        sys.stdout = self.orig_stdout
+        sys.stderr = self.orig_stderr
+
+
+def md5sum(fname, block_size=1048576):  # 2 ** 20
+    """Calculate the md5sum for a file
+
+    Parameters
+    ----------
+    fname : str
+        Filename.
+    block_size : int
+        Block size to use when reading.
+
+    Returns
+    -------
+    hash_ : str
+        The hexadecimal digest of the hash.
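+
+    Example
+    -------
+    A minimal sketch with a hypothetical file name:
+
+    >>> md5sum('sample_audvis_raw.fif')  # doctest: +SKIP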
+    """
+    md5 = hashlib.md5()
+    with open(fname, 'rb') as fid:
+        while True:
+            data = fid.read(block_size)
+            if not data:
+                break
+            md5.update(data)
+    return md5.hexdigest()
+
+
+def _sphere_to_cartesian(theta, phi, r):
+    """Transform spherical coordinates to cartesian"""
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    return x, y, z
+
+
+def create_slices(start, stop, step=None, length=1):
+    """ Generate slices of time indexes
+
+    Parameters
+    ----------
+    start : int
+        Index where first slice should start.
+    stop : int
+        Index where last slice should maximally end.
+    length : int
+        Number of time sample included in a given slice.
+    step: int | None
+        Number of time samples separating two slices.
+        If step = None, step = length.
+
+    Returns
+    -------
+    slices : list
+        List of slice objects.
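+
+    Example
+    -------
+    Overlapping slices of length 4, starting every 2 samples:
+
+    >>> create_slices(0, 10, step=2, length=4)
+    [slice(0, 4, 1), slice(2, 6, 1), slice(4, 8, 1), slice(6, 10, 1)]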
+    """
+
+    # default parameters
+    if step is None:
+        step = length
+
+    # slicing
+    slices = [slice(t, t + length, 1) for t in
+              range(start, stop - length + 1, step)]
+    return slices
+
+
+def _time_mask(times, tmin=None, tmax=None, strict=False):
+    """Helper to safely find sample boundaries"""
+    tmin = -np.inf if tmin is None else tmin
+    tmax = np.inf if tmax is None else tmax
+    mask = (times >= tmin)
+    mask &= (times <= tmax)
+    if not strict:
+        mask |= isclose(times, tmin)
+        mask |= isclose(times, tmax)
+    return mask
+
+
+def _get_fast_dot():
+    """"Helper to get fast dot"""
+    try:
+        from sklearn.utils.extmath import fast_dot
+    except ImportError:
+        fast_dot = np.dot
+    return fast_dot
+
+
+def random_permutation(n_samples, random_state=None):
+    """Helper to emulate the randperm matlab function.
+
+    It returns a vector containing a random permutation of the
+    integers between 0 and n_samples-1. It returns the same random numbers
+    than randperm matlab function whenever the random_state is the same
+    as the matlab's random seed.
+
+    This function is useful for comparing against matlab scripts
+    which use the randperm function.
+
+    Note: the randperm(n_samples) matlab function generates a random
+    sequence between 1 and n_samples, whereas
+    random_permutation(n_samples, random_state) function generates
+    a random sequence between 0 and n_samples-1, that is:
+    randperm(n_samples) = random_permutation(n_samples, random_state) - 1
+
+    Parameters
+    ----------
+    n_samples : int
+        Length of the sequence to be permuted; the permuted values run
+        from 0 to n_samples-1 inclusive.
+    random_state : int | None
+        Random seed for initializing the pseudo-random number generator.
+
+    Returns
+    -------
+    randperm : ndarray, int
+        Randomly permuted sequence between 0 and n-1.
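+
+    Example
+    -------
+    A minimal sketch with a fixed seed for reproducibility:
+
+    >>> random_permutation(5, random_state=0)  # doctest: +SKIP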
+    """
+    rng = check_random_state(random_state)
+    idx = rng.rand(n_samples)
+
+    randperm = np.argsort(idx)
+
+    return randperm
+
+
+def compute_corr(x, y):
+    """Compute pearson correlations between a vector and a matrix"""
+    if len(x) == 0 or len(y) == 0:
+        raise ValueError('x or y has zero length')
+    fast_dot = _get_fast_dot()
+    X = np.array(x, float)
+    Y = np.array(y, float)
+    X -= X.mean(0)
+    Y -= Y.mean(0)
+    x_sd = X.std(0, ddof=1)
+    # if covariance matrix is fully expanded, Y needs a
+    # transpose / broadcasting else Y is correct
+    y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
+    return (fast_dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/_3d.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/_3d.py
new file mode 100644
index 0000000..044bbe9
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/_3d.py
@@ -0,0 +1,925 @@
+"""Functions to make 3D plots with M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#          Mark Wronkiewicz <wronk.mark at gmail.com>
+#
+# License: Simplified BSD
+
+from ..externals.six import string_types, advance_iterator
+
+import os.path as op
+import inspect
+import warnings
+from itertools import cycle
+import base64
+
+import numpy as np
+from scipy import linalg
+
+from ..io.pick import pick_types
+from ..io.constants import FIFF
+from ..surface import (get_head_surf, get_meg_helmet_surf, read_surface,
+                       transform_surface_to)
+from ..transforms import (read_trans, _find_trans, apply_trans,
+                          combine_transforms, _get_mri_head_t, _ensure_trans,
+                          invert_transform)
+from ..utils import get_subjects_dir, logger, _check_subject, verbose
+from ..defaults import _handle_default
+from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
+from ..externals.six import BytesIO
+
+
+def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
+                      n_jobs=1):
+    """Plot MEG/EEG fields on head surface and helmet in 3D
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked object.
+    surf_maps : list
+        The surface mapping information obtained with make_field_map.
+    time : float | None
+        The time point at which the field map shall be displayed. If None,
+        the average peak latency (across sensor types) is used.
+    time_label : str
+        How to print info about the time instant visualized.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
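+
+    Example
+    -------
+    A minimal sketch, assuming `evoked` and `surf_maps` (as returned by
+    make_field_map) already exist:
+
+    >>> fig = plot_evoked_field(evoked, surf_maps, time=0.1)  # doctest: +SKIP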
+    """
+    types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
+
+    if time is None:
+        time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
+
+    if not evoked.times[0] <= time <= evoked.times[-1]:
+        raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
+    time_idx = np.argmin(np.abs(evoked.times - time))
+
+    # Plot them
+    from mayavi import mlab
+    alphas = [1.0, 0.5]
+    colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
+    colormap = mne_analyze_colormap(format='mayavi')
+    colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
+                                     np.tile([0., 0., 0., 255.], (2, 1)),
+                                     np.tile([255., 0., 0., 255.], (127, 1))])
+
+    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
+
+    for ii, this_map in enumerate(surf_maps):
+        surf = this_map['surf']
+        map_data = this_map['data']
+        map_type = this_map['kind']
+        map_ch_names = this_map['ch_names']
+
+        if map_type == 'eeg':
+            pick = pick_types(evoked.info, meg=False, eeg=True)
+        else:
+            pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
+
+        ch_names = [evoked.ch_names[k] for k in pick]
+
+        set_ch_names = set(ch_names)
+        set_map_ch_names = set(map_ch_names)
+        if set_ch_names != set_map_ch_names:
+            message = ['Channels in map and data do not match.']
+            diff = set_map_ch_names - set_ch_names
+            if len(diff):
+                message += ['%s not in data file. ' % list(diff)]
+            diff = set_ch_names - set_map_ch_names
+            if len(diff):
+                message += ['%s not in map file.' % list(diff)]
+            raise RuntimeError(' '.join(message))
+
+        data = np.dot(map_data, evoked.data[pick, time_idx])
+
+        x, y, z = surf['rr'].T
+        nn = surf['nn']
+        # make absolutely sure these are normalized for Mayavi
+        nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
+
+        # Make a solid surface
+        vlim = np.max(np.abs(data))
+        alpha = alphas[ii]
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
+
+        # Now show our field pattern
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
+                                                        scalars=data)
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        with warnings.catch_warnings(record=True):  # traits
+            fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
+        fsurf.module_manager.scalar_lut_manager.lut.table = colormap
+
+        # And the field lines on top
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
+                                                        scalars=data)
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        with warnings.catch_warnings(record=True):  # traits
+            cont = mlab.pipeline.contour_surface(mesh, contours=21,
+                                                 line_width=1.0,
+                                                 vmin=-vlim, vmax=vlim,
+                                                 opacity=alpha)
+        cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
+
+    if '%' in time_label:
+        time_label %= (1e3 * evoked.times[time_idx])
+    mlab.text(0.01, 0.01, time_label, width=0.4)
+    mlab.view(10, 60)
+    return fig
+
+
+def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
+                       slices=None, show=True, img_output=None):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    mri_fname : str
+        The name of the file containing anatomical data.
+    surf_fnames : list of str
+        The filenames for the BEM surfaces in the format
+        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'.
+    slices : list of int
+        Slice indices.
+    show : bool
+        Call pyplot.show() at the end.
+    img_output : None | tuple
+        If tuple (width and height), images will be produced instead of a
+        single figure with many axes. This mode is designed to reduce the
+        (substantial) overhead associated with making tens to hundreds
+        of matplotlib axes, instead opting to re-use a single Axes instance.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure | list
+        The figure. Will instead be a list of png images if
+        img_output is a tuple.
+    """
+    import matplotlib.pyplot as plt
+    import nibabel as nib
+
+    if orientation not in ['coronal', 'axial', 'sagittal']:
+        raise ValueError("Orientation must be 'coronal', 'axial' or "
+                         "'sagittal'. Got %s." % orientation)
+
+    # Load the T1 data
+    nim = nib.load(mri_fname)
+    data = nim.get_data()
+    affine = nim.get_affine()
+
+    n_sag, n_axi, n_cor = data.shape
+    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+    orientation_axis = orientation_name2axis[orientation]
+
+    if slices is None:
+        n_slices = data.shape[orientation_axis]
+        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
+
+    # create of list of surfaces
+    surfs = list()
+
+    trans = linalg.inv(affine)
+    # XXX : the next line is a hack, don't ask why
+    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
+
+    for surf_fname in surf_fnames:
+        surf = dict()
+        surf['rr'], surf['tris'] = read_surface(surf_fname)
+        # move back surface to MRI coordinate system
+        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
+        surfs.append(surf)
+
+    if img_output is None:
+        fig, axs = _prepare_trellis(len(slices), 4)
+    else:
+        fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
+        axs = [ax] * len(slices)
+
+        fig_size = fig.get_size_inches()
+        w, h = img_output[0], img_output[1]
+        w2 = fig_size[0]
+        fig.set_size_inches([(w2 / float(w)) * w, (w2 / float(w)) * h])
+        plt.close(fig)
+
+    inds = dict(coronal=[0, 1, 2], axial=[2, 0, 1],
+                sagittal=[2, 1, 0])[orientation]
+    outs = []
+    for ax, sl in zip(axs, slices):
+        # adjust the orientations for good view
+        if orientation == 'coronal':
+            dat = data[:, :, sl].transpose()
+        elif orientation == 'axial':
+            dat = data[:, sl, :]
+        elif orientation == 'sagittal':
+            dat = data[sl, :, :]
+
+        # First plot the anatomical data
+        if img_output is not None:
+            ax.clear()
+        ax.imshow(dat, cmap=plt.cm.gray)
+        ax.axis('off')
+
+        # and then plot the contours on top
+        for surf in surfs:
+            ax.tricontour(surf['rr'][:, inds[0]], surf['rr'][:, inds[1]],
+                          surf['tris'], surf['rr'][:, inds[2]],
+                          levels=[sl], colors='yellow', linewidths=2.0)
+        if img_output is not None:
+            ax.set_xticks([])
+            ax.set_yticks([])
+            ax.set_xlim(0, img_output[1])
+            ax.set_ylim(img_output[0], 0)
+            output = BytesIO()
+            fig.savefig(output, bbox_inches='tight',
+                        pad_inches=0, format='png')
+            outs.append(base64.b64encode(output.getvalue()).decode('ascii'))
+    if show:
+        plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
+                            hspace=0.)
+        plt.show()
+
+    return fig if img_output is None else outs
+
+
+ at verbose
+def plot_trans(info, trans='auto', subject=None, subjects_dir=None,
+               ch_type=None, source=('bem', 'head'), coord_frame='head',
+               meg_sensors=False, dig=False, verbose=None):
+    """Plot MEG/EEG head surface and helmet in 3D.
+
+    Parameters
+    ----------
+    info : dict
+        The measurement info.
+    trans : str | 'auto' | dict
+        The full path to the head<->MRI transform ``*-trans.fif`` file
+        produced during coregistration.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT.
+    subjects_dir : str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+    ch_type : None | 'eeg' | 'meg'
+        If None, both the MEG helmet and EEG electrodes will be shown.
+        If 'meg', only the MEG helmet will be shown. If 'eeg', only the
+        EEG electrodes will be shown.
+    source : str
+        Type to load. Common choices would be `'bem'` or `'head'`. We first
+        try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
+        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
+        to 'bem'. Note: for single-layer BEMs it is recommended to use 'head'.
+    coord_frame : str
+        Coordinate frame to use, 'head', 'meg', or 'mri'.
+    meg_sensors : bool
+        If True, plot MEG sensors as points in addition to showing the helmet.
+    dig : bool
+        If True, plot the digitization points.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
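+
+    Example
+    -------
+    A minimal sketch, assuming `info` from a loaded measurement file and a
+    hypothetical subject name:
+
+    >>> plot_trans(info, trans='auto', subject='sample')  # doctest: +SKIP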
+    """
+    if coord_frame not in ['head', 'meg', 'mri']:
+        raise ValueError('coord_frame must be "head", "meg", or "mri"')
+    if ch_type not in [None, 'eeg', 'meg']:
+        raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
+                         % ch_type)
+
+    if isinstance(trans, string_types):
+        if trans == 'auto':
+            # let's try to do this in MRI coordinates so they're easy to plot
+            trans = _find_trans(subject, subjects_dir)
+        trans = read_trans(trans)
+    elif not isinstance(trans, dict):
+        raise TypeError('trans must be str or dict')
+    head_mri_t = _ensure_trans(trans, 'head', 'mri')
+    del trans
+
+    # both the head and helmet will be in MRI coordinates after this
+    surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
+    if ch_type is None or ch_type == 'meg':
+        surfs.append(get_meg_helmet_surf(info, head_mri_t))
+    if coord_frame == 'meg':
+        surf_trans = combine_transforms(info['dev_head_t'], head_mri_t,
+                                        'meg', 'mri')
+    elif coord_frame == 'head':
+        surf_trans = head_mri_t
+    else:  # coord_frame == 'mri'
+        surf_trans = None
+    surfs = [transform_surface_to(surf, coord_frame, surf_trans)
+             for surf in surfs]
+    del surf_trans
+
+    # determine points
+    meg_loc = list()
+    ext_loc = list()
+    car_loc = list()
+    if ch_type is None or ch_type == 'eeg':
+        eeg_loc = np.array([info['chs'][k]['loc'][:3]
+                           for k in pick_types(info, meg=False, eeg=True)])
+        if len(eeg_loc) > 0:
+            # Transform EEG electrodes from head coordinates if necessary
+            if coord_frame == 'meg':
+                eeg_loc = apply_trans(invert_transform(info['dev_head_t']),
+                                      eeg_loc)
+            elif coord_frame == 'mri':
+                eeg_loc = apply_trans(invert_transform(head_mri_t), eeg_loc)
+        else:
+            # only warn if EEG explicitly requested, or EEG channels exist but
+            # no locations are provided
+            if (ch_type is not None or
+                    len(pick_types(info, meg=False, eeg=True)) > 0):
+                warnings.warn('EEG electrode locations not found. '
+                              'Cannot plot EEG electrodes.')
+    if meg_sensors:
+        meg_loc = np.array([info['chs'][k]['loc'][:3]
+                           for k in pick_types(info)])
+        if len(meg_loc) > 0:
+            # Transform MEG coordinates from meg if necessary
+            if coord_frame == 'head':
+                meg_loc = apply_trans(info['dev_head_t'], meg_loc)
+            elif coord_frame == 'mri':
+                t = combine_transforms(info['dev_head_t'], head_mri_t,
+                                       'meg', 'mri')
+                meg_loc = apply_trans(t, meg_loc)
+        else:
+            warnings.warn('MEG sensors not found. '
+                          'Cannot plot MEG locations.')
+    if dig:
+        ext_loc = np.array([d['r'] for d in info['dig']
+                           if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
+        car_loc = np.array([d['r'] for d in info['dig']
+                            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL])
+        if coord_frame == 'meg':
+            t = invert_transform(info['dev_head_t'])
+            ext_loc = apply_trans(t, ext_loc)
+            car_loc = apply_trans(t, car_loc)
+        elif coord_frame == 'mri':
+            ext_loc = apply_trans(head_mri_t, ext_loc)
+            car_loc = apply_trans(head_mri_t, car_loc)
+        if len(car_loc) == len(ext_loc) == 0:
+            warnings.warn('Digitization points not found. '
+                          'Cannot plot digitization.')
+
+    # do the plotting, surfaces then points
+    from mayavi import mlab
+    fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
+
+    alphas = [1.0, 0.5]  # head, helmet
+    colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
+    for surf, alpha, color in zip(surfs, alphas, colors):
+        x, y, z = surf['rr'].T
+        nn = surf['nn']
+        # make absolutely sure these are normalized for Mayavi
+        nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
+
+        # Make a solid surface
+        with warnings.catch_warnings(record=True):  # traits
+            mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
+        mesh.data.point_data.normals = nn
+        mesh.data.cell_data.normals = None
+        mlab.pipeline.surface(mesh, color=color, opacity=alpha)
+
+    datas = (eeg_loc, meg_loc, car_loc, ext_loc)
+    colors = ((1., 0., 0.), (0., 0.25, 0.5), (1., 1., 0.), (1., 0.5, 0.))
+    alphas = (1.0, 0.25, 0.5, 0.25)
+    scales = (0.005, 0.0025, 0.015, 0.0075)
+    for data, color, alpha, scale in zip(datas, colors, alphas, scales):
+        if len(data) > 0:
+            with warnings.catch_warnings(record=True):  # traits
+                mlab.points3d(data[:, 0], data[:, 1], data[:, 2],
+                              color=color, scale_factor=scale, opacity=alpha)
+    mlab.view(90, 90)
+    return fig
+
+
+def _limits_to_control_points(clim, stc_data, colormap):
+    """Private helper function to convert limits (values or percentiles)
+    to control points.
+
+    Note: If using 'mne', generate cmap control points for a directly
+    mirrored cmap for simplicity (i.e., no normalization is computed to account
+    for a 2-tailed mne cmap).
+
+    Parameters
+    ----------
+    clim : str | dict
+        Desired limits used to set cmap control points.
+    stc_data : ndarray
+        The source estimate data, used when limits are given as percentiles.
+    colormap : str
+        The colormap name, or 'auto' to choose based on the data and clim.
+
+    Returns
+    -------
+    ctrl_pts : list (length 3)
+        Array of floats corresponding to values to use as cmap control points.
+    colormap : str
+        The colormap.
+    """
+
+    # Based on type of limits specified, get cmap control points
+    if colormap == 'auto':
+        if clim == 'auto':
+            colormap = 'mne' if (stc_data < 0).any() else 'hot'
+        else:
+            if 'lims' in clim:
+                colormap = 'hot'
+            else:  # 'pos_lims' in clim
+                colormap = 'mne'
+    if clim == 'auto':
+        # Set upper and lower bound based on percent, and get average between
+        ctrl_pts = np.percentile(np.abs(stc_data), [96, 97.5, 99.95])
+    elif isinstance(clim, dict):
+        # Get appropriate key for clim if it's a dict
+        limit_key = ['lims', 'pos_lims'][colormap in ('mne', 'mne_analyze')]
+        if colormap != 'auto' and limit_key not in clim.keys():
+            raise KeyError('"%s" must be specified in clim when using the '
+                           '"%s" colormap' % (limit_key, colormap))
+        clim['kind'] = clim.get('kind', 'percent')
+        if clim['kind'] == 'percent':
+            ctrl_pts = np.percentile(np.abs(stc_data),
+                                     list(np.abs(clim[limit_key])))
+        elif clim['kind'] == 'value':
+            ctrl_pts = np.array(clim[limit_key])
+            if (np.diff(ctrl_pts) < 0).any():
+                raise ValueError('value colormap limits must be '
+                                 'nondecreasing')
+        else:
+            raise ValueError('If clim is a dict, clim["kind"] must be '
+                             '"value" or "percent"')
+    else:
+        raise ValueError('"clim" must be "auto" or dict')
+    if len(ctrl_pts) != 3:
+        raise ValueError('"lims" or "pos_lims" is length %i. It must be length'
+                         ' 3' % len(ctrl_pts))
+    ctrl_pts = np.array(ctrl_pts, float)
+    if len(set(ctrl_pts)) != 3:
+        if len(set(ctrl_pts)) == 1:  # three points match
+            if ctrl_pts[0] == 0:  # all are zero
+                warnings.warn('All data were zero')
+                ctrl_pts = np.arange(3, dtype=float)
+            else:
+                ctrl_pts *= [0., 0.5, 1]  # all nonzero pts == max
+        else:  # two points match
+            # if points one and two are identical, add a tiny bit to the
+            # control point two; if points two and three are identical,
+            # subtract a tiny bit from point two.
+            bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
+            ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
+
+    return ctrl_pts, colormap
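+
+# Example (editor's sketch, not part of the module): how a clim dict maps to
+# control points; ``stc_data`` stands for any array of source amplitudes.
+#
+#     >>> clim = dict(kind='value', lims=[2e-11, 4e-11, 8e-11])
+#     >>> ctrl_pts, cmap = _limits_to_control_points(clim, stc_data, 'auto')
+#     >>> cmap  # 'lims' was given, so the one-tailed 'hot' map is chosen
+#     'hot'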
+
+
+def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
+                          colormap='auto', time_label='time=%0.2f ms',
+                          smoothing_steps=10, transparent=None, alpha=1.0,
+                          time_viewer=False, config_opts=None,
+                          subjects_dir=None, figure=None, views='lat',
+                          colorbar=True, clim='auto'):
+    """Plot SourceEstimates with PySurfer
+
+    Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+    which will automatically be set by this function. Plotting multiple
+    SourceEstimates with different values for subjects_dir will cause
+    PySurfer to use the wrong FreeSurfer surfaces when using methods of
+    the returned Brain object. It is therefore recommended to set the
+    SUBJECTS_DIR environment variable or always use the same value for
+    subjects_dir (within the same Python session).
+
+    Parameters
+    ----------
+    stc : SourceEstimates
+        The source estimates to plot.
+    subject : str | None
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT. If None stc.subject will be used. If that
+        is None, the environment will be used.
+    surface : str
+        The type of surface (inflated, white etc.).
+    hemi : str, 'lh' | 'rh' | 'split' | 'both'
+        The hemisphere to display.
+    colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
+        Name of colormap to use or a custom look up table. If array, must
+        be an (n x 3) or (n x 4) array with RGB or RGBA values between
+        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
+        based on whether 'lims' or 'pos_lims' are specified in `clim`.
+    time_label : str
+        How to print info about the time instant visualized.
+    smoothing_steps : int
+        The amount of smoothing
+    transparent : bool | None
+        If True, use a linear transparency between fmin and fmid.
+        None will choose automatically based on colormap type.
+    alpha : float
+        Alpha value to apply globally to the overlay.
+    time_viewer : bool
+        Display time viewer GUI.
+    config_opts : dict
+        Keyword arguments for Brain initialization.
+        See pysurfer.viz.Brain.
+    subjects_dir : str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+    figure : instance of mayavi.core.scene.Scene | list | int | None
+        If None, a new figure will be created. If multiple views or a
+        split view is requested, this must be a list of the appropriate
+        length. If int is provided it will be used to identify the Mayavi
+        figure by its id or create a new figure with the given id.
+    views : str | list
+        View to use. See surfer.Brain().
+    colorbar : bool
+        If True, display colorbar on scene.
+    clim : str | dict
+        Colorbar properties specification. If 'auto', set clim automatically
+        based on data percentiles. If dict, should contain:
+
+            ``kind`` : str
+                Flag to specify type of limits. 'value' or 'percent'.
+            ``lims`` : list | np.ndarray | tuple of float, 3 elements
+                Note: Only use this if 'colormap' is not 'mne'.
+                Left, middle, and right bound for colormap.
+            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
+                Note: Only use this if 'colormap' is 'mne'.
+                Left, middle, and right bound for colormap. Positive values
+                will be mirrored directly across zero during colormap
+                construction to obtain negative control points.
+
+
+    Returns
+    -------
+    brain : Brain
+        An instance of surfer.viz.Brain from PySurfer.
+    """
+    from surfer import Brain, TimeViewer
+    config_opts = _handle_default('config_opts', config_opts)
+
+    import mayavi
+    from mayavi import mlab
+
+    # import here to avoid circular import problem
+    from ..source_estimate import SourceEstimate
+
+    if not isinstance(stc, SourceEstimate):
+        raise ValueError('stc has to be a surface source estimate')
+
+    if hemi not in ['lh', 'rh', 'split', 'both']:
+        raise ValueError('hemi has to be either "lh", "rh", "split", '
+                         'or "both"')
+
+    n_split = 2 if hemi == 'split' else 1
+    n_views = 1 if isinstance(views, string_types) else len(views)
+    if figure is not None:
+        # use figure with specified id or create new figure
+        if isinstance(figure, int):
+            figure = mlab.figure(figure, size=(600, 600))
+        # make sure it is of the correct type
+        if not isinstance(figure, list):
+            figure = [figure]
+        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
+            raise TypeError('figure must be a mayavi scene or list of scenes')
+        # make sure we have the right number of figures
+        n_fig = len(figure)
+        if not n_fig == n_split * n_views:
+            raise RuntimeError('`figure` must be a list with the same '
+                               'number of elements as PySurfer plots that '
+                               'will be created (%s)' % (n_split * n_views))
+
+    # convert limits (clim) to colormap control points
+    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)
+
+    # Construct cmap manually if 'mne' and get cmap bounds
+    # and triage transparent argument
+    if colormap in ('mne', 'mne_analyze'):
+        colormap = mne_analyze_colormap(ctrl_pts)
+        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
+        transparent = False if transparent is None else transparent
+    else:
+        scale_pts = ctrl_pts
+        transparent = True if transparent is None else transparent
+
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
+                                    raise_error=True)
+    subject = _check_subject(stc.subject, subject, True)
+    if hemi in ['both', 'split']:
+        hemis = ['lh', 'rh']
+    else:
+        hemis = [hemi]
+
+    title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
+    args = inspect.getargspec(Brain.__init__)[0]
+    kwargs = dict(title=title, figure=figure, config_opts=config_opts,
+                  subjects_dir=subjects_dir)
+    if 'views' in args:
+        kwargs['views'] = views
+    with warnings.catch_warnings(record=True):  # traits warnings
+        brain = Brain(subject, hemi, surface, **kwargs)
+    for hemi in hemis:
+        hemi_idx = 0 if hemi == 'lh' else 1
+        if hemi_idx == 0:
+            data = stc.data[:len(stc.vertices[0])]
+        else:
+            data = stc.data[len(stc.vertices[0]):]
+        vertices = stc.vertices[hemi_idx]
+        time = 1e3 * stc.times
+        with warnings.catch_warnings(record=True):  # traits warnings
+            brain.add_data(data, colormap=colormap, vertices=vertices,
+                           smoothing_steps=smoothing_steps, time=time,
+                           time_label=time_label, alpha=alpha, hemi=hemi,
+                           colorbar=colorbar)
+
+        # scale colormap and set time (index) to display
+        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
+                                  fmax=scale_pts[2], transparent=transparent)
+
+    if time_viewer:
+        TimeViewer(brain)
+    return brain
+
+
+def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
+                                 fontsize=18, bgcolor=(.05, 0, .1),
+                                 opacity=0.2, brain_color=(0.7,) * 3,
+                                 show=True, high_resolution=False,
+                                 fig_name=None, fig_number=None, labels=None,
+                                 modes=('cone', 'sphere'),
+                                 scale_factors=(1, 0.6),
+                                 verbose=None, **kwargs):
+    """Plot source estimates obtained with sparse solver
+
+    Active dipoles are represented in a "Glass" brain.
+    If the same source is active in multiple source estimates, it is
+    displayed with a sphere; otherwise it is shown with a cone in 3D.
+
+    Parameters
+    ----------
+    src : dict
+        The source space.
+    stcs : instance of SourceEstimate or list of instances of SourceEstimate
+        The source estimates (up to 3).
+    colors : list
+        List of colors
+    linewidth : int
+        Line width in 2D plot.
+    fontsize : int
+        Font size.
+    bgcolor : tuple of length 3
+        Background color in 3D.
+    opacity : float in [0, 1]
+        Opacity of brain mesh.
+    brain_color : tuple of length 3
+        Brain color.
+    show : bool
+        Show figures if True.
+    high_resolution : bool
+        If True, plot on the original (non-downsampled) cortical mesh.
+    fig_name : str | None
+        Mayavi figure name.
+    fig_number : int | None
+        Matplotlib figure number.
+    labels : ndarray or list of ndarrays
+        Labels to show sources in clusters. Sources with the same
+        label and the waveforms within each cluster are presented in
+        the same color. labels should be a list of ndarrays when
+        stcs is a list, i.e., one label for each stc.
+    modes : list
+        Should be a list, with each entry being ``'cone'`` or ``'sphere'``
+        to specify how the dipoles should be shown.
+    scale_factors : list
+        List of floating point scale factors for the markers.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    **kwargs : kwargs
+        Keyword arguments to pass to mlab.triangular_mesh.
+    """
+    known_modes = ['cone', 'sphere']
+    if not isinstance(modes, (list, tuple)) or \
+            not all(mode in known_modes for mode in modes):
+        raise ValueError('modes must be a list containing only '
+                         '"cone" or "sphere"')
+    if not isinstance(stcs, list):
+        stcs = [stcs]
+    if labels is not None and not isinstance(labels, list):
+        labels = [labels]
+
+    if colors is None:
+        colors = COLORS
+
+    linestyles = ['-', '--', ':']
+
+    # Show 3D
+    lh_points = src[0]['rr']
+    rh_points = src[1]['rr']
+    points = np.r_[lh_points, rh_points]
+
+    lh_normals = src[0]['nn']
+    rh_normals = src[1]['nn']
+    normals = np.r_[lh_normals, rh_normals]
+
+    if high_resolution:
+        use_lh_faces = src[0]['tris']
+        use_rh_faces = src[1]['tris']
+    else:
+        use_lh_faces = src[0]['use_tris']
+        use_rh_faces = src[1]['use_tris']
+
+    use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
+
+    points *= 170  # scale up the mesh coordinates for display
+
+    vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
+               for stc in stcs]
+    unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
+
+    from mayavi import mlab
+    from matplotlib.colors import ColorConverter
+    color_converter = ColorConverter()
+
+    f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
+    mlab.clf()
+    if mlab.options.backend != 'test':
+        f.scene.disable_render = True
+    with warnings.catch_warnings(record=True):  # traits warnings
+        surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
+                                       points[:, 2], use_faces,
+                                       color=brain_color,
+                                       opacity=opacity, **kwargs)
+
+    import matplotlib.pyplot as plt
+    # Show time courses
+    plt.figure(fig_number)
+    plt.clf()
+
+    colors = cycle(colors)
+
+    logger.info("Total number of active sources: %d" % len(unique_vertnos))
+
+    if labels is not None:
+        colors = [advance_iterator(colors) for _ in
+                  range(np.unique(np.concatenate(labels).ravel()).size)]
+
+    for idx, v in enumerate(unique_vertnos):
+        # get indices of stcs it belongs to
+        ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
+        is_common = len(ind) > 1
+
+        if labels is None:
+            c = advance_iterator(colors)
+        else:
+            # if the vertex is in several stcs, take the label from the first
+            c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
+
+        mode = modes[1] if is_common else modes[0]
+        scale_factor = scale_factors[1] if is_common else scale_factors[0]
+
+        if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
+                len(unique_vertnos) == len(scale_factor)):
+            scale_factor = scale_factor[idx]
+
+        x, y, z = points[v]
+        nx, ny, nz = normals[v]
+        with warnings.catch_warnings(record=True):  # traits
+            mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
+                          mode=mode, scale_factor=scale_factor)
+
+        for k in ind:
+            vertno = vertnos[k]
+            mask = (vertno == v)
+            assert np.sum(mask) == 1
+            linestyle = linestyles[k]
+            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
+                     c=c, linewidth=linewidth, linestyle=linestyle)
+
+    plt.xlabel('Time (ms)', fontsize=fontsize)
+    plt.ylabel('Source amplitude (nAm)', fontsize=fontsize)
+
+    if fig_name is not None:
+        plt.title(fig_name)
+
+    if show:
+        plt.show()
+
+    surface.actor.property.backface_culling = True
+    surface.actor.property.shading = True
+
+    return surface
+
+
+def plot_dipole_locations(dipoles, trans, subject, subjects_dir=None,
+                          bgcolor=(1, 1, 1), opacity=0.3,
+                          brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
+                          fig_name=None, fig_size=(600, 600), mode='cone',
+                          scale_factor=0.1e-1, colors=None, verbose=None):
+    """Plot dipole locations
+
+    Only the location of the first time point of each dipole is shown.
+
+    Parameters
+    ----------
+    dipoles : list of instances of Dipole | Dipole
+        The dipoles to plot.
+    trans : dict
+        The mri to head trans.
+    subject : str
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT.
+    subjects_dir : None | str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+        The default is None.
+    bgcolor : tuple of length 3
+        Background color in 3D.
+    opacity : float in [0, 1]
+        Opacity of brain mesh.
+    brain_color : tuple of length 3
+        Brain color.
+    mesh_color : tuple of length 3
+        Mesh color.
+    fig_name : str
+        Mayavi figure name.
+    fig_size : tuple of length 2
+        Mayavi figure size.
+    mode : str
+        Should be ``'cone'`` or ``'sphere'`` to specify how the
+        dipoles should be shown.
+    scale_factor : float
+        The scaling applied to amplitudes for the plot.
+    colors : list of colors | None
+        Color to plot with each dipole. If None default colors are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    from mayavi import mlab
+    from matplotlib.colors import ColorConverter
+    color_converter = ColorConverter()
+
+    trans = _get_mri_head_t(trans)[0]
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
+                                    raise_error=True)
+    fname = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
+    points, faces = read_surface(fname)
+    points = apply_trans(trans['trans'], points * 1e-3)
+
+    from .. import Dipole
+    if isinstance(dipoles, Dipole):
+        dipoles = [dipoles]
+
+    if mode not in ['cone', 'sphere']:
+        raise ValueError('mode must be "cone" or "sphere"')
+
+    if colors is None:
+        colors = cycle(COLORS)
+
+    fig = mlab.figure(size=fig_size, bgcolor=bgcolor, fgcolor=(0, 0, 0))
+    with warnings.catch_warnings(record=True):  # FutureWarning in traits
+        mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                             faces, color=mesh_color, opacity=opacity)
+
+    for dip, color in zip(dipoles, colors):
+        rgb_color = color_converter.to_rgb(color)
+        with warnings.catch_warnings(record=True):  # FutureWarning in traits
+            mlab.quiver3d(dip.pos[0, 0], dip.pos[0, 1], dip.pos[0, 2],
+                          dip.ori[0, 0], dip.ori[0, 1], dip.ori[0, 2],
+                          opacity=1., mode=mode, color=rgb_color,
+                          scalars=dip.amplitude.max(),
+                          scale_factor=scale_factor)
+    if fig_name is not None:
+        mlab.title(fig_name)
+    if fig.scene is not None:  # safe for Travis
+        fig.scene.x_plus_view()
+
+    return fig
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/__init__.py
new file mode 100644
index 0000000..cc3f0bf
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/__init__.py
@@ -0,0 +1,24 @@
+"""Visualization routines
+"""
+
+from .topomap import (plot_evoked_topomap, plot_projs_topomap,
+                      plot_ica_components, plot_tfr_topomap, plot_topomap,
+                      plot_epochs_psd_topomap)
+from .topo import (plot_topo, plot_topo_image_epochs,
+                   iter_topography)
+from .utils import (tight_layout, mne_analyze_colormap, compare_fiff,
+                    ClickableImage, add_background_image)
+from ._3d import (plot_sparse_source_estimates, plot_source_estimates,
+                  plot_trans, plot_evoked_field, plot_dipole_locations)
+from .misc import (plot_cov, plot_bem, plot_events, plot_source_spectrogram,
+                   _get_presser, plot_dipole_amplitudes)
+from .evoked import (plot_evoked, plot_evoked_image, plot_evoked_white,
+                     plot_snr_estimate, plot_evoked_topo)
+from .circle import plot_connectivity_circle, circular_layout
+from .epochs import (plot_image_epochs, plot_drop_log, plot_epochs,
+                     _drop_log_stats, plot_epochs_psd, plot_epochs_image)
+from .raw import plot_raw, plot_raw_psd
+from .ica import plot_ica_scores, plot_ica_sources, plot_ica_overlay
+from .ica import _plot_sources_raw, _plot_sources_epochs
+from .montage import plot_montage
+from .decoding import plot_gat_matrix, plot_gat_times
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/circle.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/circle.py
new file mode 100644
index 0000000..7662b14
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/circle.py
@@ -0,0 +1,414 @@
+"""Functions to plot on circle as for connectivity
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+
+from itertools import cycle
+from functools import partial
+
+import numpy as np
+
+from ..externals.six import string_types
+from ..fixes import tril_indices, normalize_colors
+
+
+def circular_layout(node_names, node_order, start_pos=90, start_between=True,
+                    group_boundaries=None, group_sep=10):
+    """Create layout arranging nodes on a circle.
+
+    Parameters
+    ----------
+    node_names : list of str
+        Node names.
+    node_order : list of str
+        List with node names defining the order in which the nodes are
+        arranged. Must have the elements as node_names but the order can be
+        different. The nodes are arranged clockwise starting at "start_pos"
+        degrees.
+    start_pos : float
+        Angle in degrees that defines where the first node is plotted.
+    start_between : bool
+        If True, the layout starts with the position between the nodes. This is
+        the same as adding "180. / len(node_names)" to start_pos.
+    group_boundaries : None | array-like
+        List of boundaries between groups at which point a "group_sep" will
+        be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
+    group_sep : float
+        Group separation angle in degrees. See "group_boundaries".
+
+    Returns
+    -------
+    node_angles : array, shape=(len(node_names),)
+        Node angles in degrees.
+    """
+    n_nodes = len(node_names)
+
+    if len(node_order) != n_nodes:
+        raise ValueError('node_order has to be the same length as node_names')
+
+    if group_boundaries is not None:
+        boundaries = np.array(group_boundaries, dtype=np.int)
+        if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
+            raise ValueError('"group_boundaries" has to be between 0 and '
+                             'n_nodes - 1.')
+        if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
+            raise ValueError('"group_boundaries" must have non-decreasing '
+                             'values.')
+        n_group_sep = len(group_boundaries)
+    else:
+        n_group_sep = 0
+        boundaries = None
+
+    # convert it to a list with indices
+    node_order = [node_order.index(name) for name in node_names]
+    node_order = np.array(node_order)
+    if len(np.unique(node_order)) != n_nodes:
+        raise ValueError('node_order has repeated entries')
+
+    node_sep = (360. - n_group_sep * group_sep) / n_nodes
+
+    if start_between:
+        start_pos += node_sep / 2
+
+        if boundaries is not None and boundaries[0] == 0:
+            # special case when a group separator is at the start
+            start_pos += group_sep / 2
+            boundaries = boundaries[1:] if n_group_sep > 1 else None
+
+    node_angles = np.ones(n_nodes, dtype=np.float) * node_sep
+    node_angles[0] = start_pos
+    if boundaries is not None:
+        node_angles[boundaries] += group_sep
+
+    node_angles = np.cumsum(node_angles)[node_order]
+
+    return node_angles
+
+
+def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
+                                     n_nodes=0, node_angles=None,
+                                     ylim=[9, 10]):
+    """Isolates connections around a single node when user left clicks a node.
+
+    On right click, resets all connections."""
+    if event.inaxes != axes:
+        return
+
+    if event.button == 1:  # left click
+        # click must be near node radius
+        if not ylim[0] <= event.ydata <= ylim[1]:
+            return
+
+        # all angles in range [0, 2*pi]
+        node_angles = node_angles % (np.pi * 2)
+        node = np.argmin(np.abs(event.xdata - node_angles))
+
+        patches = event.inaxes.patches
+        for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
+            patches[ii].set_visible(node in [x, y])
+        fig.canvas.draw()
+    elif event.button == 3:  # right click
+        patches = event.inaxes.patches
+        for ii in range(np.size(indices, axis=1)):
+            patches[ii].set_visible(True)
+        fig.canvas.draw()
+
+
+def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
+                             node_angles=None, node_width=None,
+                             node_colors=None, facecolor='black',
+                             textcolor='white', node_edgecolor='black',
+                             linewidth=1.5, colormap='hot', vmin=None,
+                             vmax=None, colorbar=True, title=None,
+                             colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
+                             fontsize_title=12, fontsize_names=8,
+                             fontsize_colorbar=8, padding=6.,
+                             fig=None, subplot=111, interactive=True,
+                             node_linewidth=2., show=True):
+    """Visualize connectivity as a circular graph.
+
+    Note: This code is based on the circle graph example by Nicolas P. Rougier
+    http://www.labri.fr/perso/nrougier/coding/.
+
+    Parameters
+    ----------
+    con : array
+        Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
+        array is provided, "indices" has to be used to define the connection
+        indices.
+    node_names : list of str
+        Node names. The order corresponds to the order in con.
+    indices : tuple of arrays | None
+        Two arrays with indices of connections for which the connection
+        strengths are defined in con. Only needed if con is a 1D array.
+    n_lines : int | None
+        If not None, only the n_lines strongest connections (strength=abs(con))
+        are drawn.
+    node_angles : array, shape=(len(node_names),) | None
+        Array with node positions in degrees. If None, the nodes are equally
+        spaced on the circle. See mne.viz.circular_layout.
+    node_width : float | None
+        Width of each node in degrees. If None, the minimum angle between any
+        two nodes is used as the width.
+    node_colors : list of tuples | list of str
+        List with the color to use for each node. If fewer colors than nodes
+        are provided, the colors will be repeated. Any color supported by
+        matplotlib can be used, e.g., RGBA tuples, named colors.
+    facecolor : str
+        Color to use for background. See matplotlib.colors.
+    textcolor : str
+        Color to use for text. See matplotlib.colors.
+    node_edgecolor : str
+        Color to use for lines around nodes. See matplotlib.colors.
+    linewidth : float
+        Line width to use for connections.
+    colormap : str
+        Colormap to use for coloring the connections.
+    vmin : float | None
+        Minimum value for colormap. If None, it is determined automatically.
+    vmax : float | None
+        Maximum value for colormap. If None, it is determined automatically.
+    colorbar : bool
+        Display a colorbar or not.
+    title : str
+        The figure title.
+    colorbar_size : float
+        Size of the colorbar.
+    colorbar_pos : 2-tuple
+        Position of the colorbar.
+    fontsize_title : int
+        Font size to use for title.
+    fontsize_names : int
+        Font size to use for node names.
+    fontsize_colorbar : int
+        Font size to use for colorbar.
+    padding : float
+        Space to add around figure to accommodate long labels.
+    fig : None | instance of matplotlib.pyplot.Figure
+        The figure to use. If None, a new figure with the specified background
+        color will be created.
+    subplot : int | 3-tuple
+        Location of the subplot when creating figures with multiple plots. E.g.
+        121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
+        matplotlib.pyplot.subplot.
+    interactive : bool
+        When enabled, left-click on a node to show only connections to that
+        node. Right-click shows all connections.
+    node_linewidth : float
+        Line width for nodes.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.pyplot.Figure
+        The figure handle.
+    axes : instance of matplotlib.axes.PolarAxesSubplot
+        The subplot handle.
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib.path as m_path
+    import matplotlib.patches as m_patches
+
+    n_nodes = len(node_names)
+
+    if node_angles is not None:
+        if len(node_angles) != n_nodes:
+            raise ValueError('node_angles has to be the same length '
+                             'as node_names')
+        # convert it to radians
+        node_angles = node_angles * np.pi / 180
+    else:
+        # uniform layout on unit circle
+        node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
+
+    if node_width is None:
+        # widths correspond to the minimum angle between two nodes
+        dist_mat = node_angles[None, :] - node_angles[:, None]
+        dist_mat[np.diag_indices(n_nodes)] = 1e9
+        node_width = np.min(np.abs(dist_mat))
+    else:
+        node_width = node_width * np.pi / 180
+
+    if node_colors is not None:
+        if len(node_colors) < n_nodes:
+            node_colors = cycle(node_colors)
+    else:
+        # assign colors using colormap
+        node_colors = [plt.cm.spectral(i / float(n_nodes))
+                       for i in range(n_nodes)]
+
+    # handle 1D and 2D connectivity information
+    if con.ndim == 1:
+        if indices is None:
+            raise ValueError('indices has to be provided if con.ndim == 1')
+    elif con.ndim == 2:
+        if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
+            raise ValueError('con has to be 1D or a square matrix')
+        # we use the lower-triangular part
+        indices = tril_indices(n_nodes, -1)
+        con = con[indices]
+    else:
+        raise ValueError('con has to be 1D or a square matrix')
+
+    # get the colormap
+    if isinstance(colormap, string_types):
+        colormap = plt.get_cmap(colormap)
+
+    # Make figure background the same colors as axes
+    if fig is None:
+        fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
+
+    # Use a polar axes
+    if not isinstance(subplot, tuple):
+        subplot = (subplot,)
+    axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)
+
+    # No ticks, we'll put our own
+    plt.xticks([])
+    plt.yticks([])
+
+    # Set y axes limit, add additional space if requested
+    plt.ylim(0, 10 + padding)
+
+    # Remove the black axes border which may obscure the labels
+    axes.spines['polar'].set_visible(False)
+
+    # Draw lines between connected nodes, only draw the strongest connections
+    if n_lines is not None and len(con) > n_lines:
+        con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
+    else:
+        con_thresh = 0.
+
+    # get the connections which we are drawing and sort by connection strength
+    # this will allow us to draw the strongest connections first
+    con_abs = np.abs(con)
+    con_draw_idx = np.where(con_abs >= con_thresh)[0]
+
+    con = con[con_draw_idx]
+    con_abs = con_abs[con_draw_idx]
+    indices = [ind[con_draw_idx] for ind in indices]
+
+    # now sort them
+    sort_idx = np.argsort(con_abs)
+    con_abs = con_abs[sort_idx]
+    con = con[sort_idx]
+    indices = [ind[sort_idx] for ind in indices]
+
+    # Get vmin vmax for color scaling
+    if vmin is None:
+        vmin = np.min(con[np.abs(con) >= con_thresh])
+    if vmax is None:
+        vmax = np.max(con)
+    vrange = vmax - vmin
+
+    # We want to add some "noise" to the start and end position of the
+    # edges: We modulate the noise with the number of connections of the
+    # node and the connection strength, such that the strongest connections
+    # are closer to the node center
+    nodes_n_con = np.zeros((n_nodes), dtype=np.int)
+    for i, j in zip(indices[0], indices[1]):
+        nodes_n_con[i] += 1
+        nodes_n_con[j] += 1
+
+    # initialize random number generator so plot is reproducible
+    rng = np.random.mtrand.RandomState(seed=0)
+
+    n_con = len(indices[0])
+    noise_max = 0.25 * node_width
+    start_noise = rng.uniform(-noise_max, noise_max, n_con)
+    end_noise = rng.uniform(-noise_max, noise_max, n_con)
+
+    nodes_n_con_seen = np.zeros_like(nodes_n_con)
+    for i, (start, end) in enumerate(zip(indices[0], indices[1])):
+        nodes_n_con_seen[start] += 1
+        nodes_n_con_seen[end] += 1
+
+        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
+                           float(nodes_n_con[start]))
+        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
+                         float(nodes_n_con[end]))
+
+    # scale connectivity for colormap (vmin<=>0, vmax<=>1)
+    con_val_scaled = (con - vmin) / vrange
+
+    # Finally, we draw the connections
+    for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
+        # Start point
+        t0, r0 = node_angles[i], 10
+
+        # End point
+        t1, r1 = node_angles[j], 10
+
+        # Some noise in start and end point
+        t0 += start_noise[pos]
+        t1 += end_noise[pos]
+
+        verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
+        codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
+                 m_path.Path.LINETO]
+        path = m_path.Path(verts, codes)
+
+        color = colormap(con_val_scaled[pos])
+
+        # Actual line
+        patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
+                                    linewidth=linewidth, alpha=1.)
+        axes.add_patch(patch)
+
+    # Draw ring with colored nodes
+    height = np.ones(n_nodes) * 1.0
+    bars = axes.bar(node_angles, height, width=node_width, bottom=9,
+                    edgecolor=node_edgecolor, lw=node_linewidth,
+                    facecolor='.9', align='center')
+
+    for bar, color in zip(bars, node_colors):
+        bar.set_facecolor(color)
+
+    # Draw node labels
+    angles_deg = 180 * node_angles / np.pi
+    for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
+        if angle_deg >= 270:
+            ha = 'left'
+        else:
+            # Flip the label, so text is always upright
+            angle_deg += 180
+            ha = 'right'
+
+        axes.text(angle_rad, 10.4, name, size=fontsize_names,
+                  rotation=angle_deg, rotation_mode='anchor',
+                  horizontalalignment=ha, verticalalignment='center',
+                  color=textcolor)
+
+    if title is not None:
+        plt.title(title, color=textcolor, fontsize=fontsize_title,
+                  axes=axes)
+
+    if colorbar:
+        norm = normalize_colors(vmin=vmin, vmax=vmax)
+        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
+        sm.set_array(np.linspace(vmin, vmax))
+        cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
+                          shrink=colorbar_size,
+                          anchor=colorbar_pos)
+        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
+        cb.ax.tick_params(labelsize=fontsize_colorbar)
+        plt.setp(cb_yticks, color=textcolor)
+
+    # Add callback for interaction
+    if interactive:
+        callback = partial(_plot_connectivity_circle_onpick, fig=fig,
+                           axes=axes, indices=indices, n_nodes=n_nodes,
+                           node_angles=node_angles)
+
+        fig.canvas.mpl_connect('button_press_event', callback)
+
+    if show:
+        plt.show()
+    return fig, axes
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/decoding.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/decoding.py
new file mode 100644
index 0000000..9d88f15
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/decoding.py
@@ -0,0 +1,236 @@
+"""Functions to plot decoding results
+"""
+from __future__ import print_function
+
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Clement Moutard <clement.moutard at gmail.com>
+#          Jean-Remi King <jeanremi.king at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+import warnings
+
+
+def plot_gat_matrix(gat, title=None, vmin=None, vmax=None, tlim=None,
+                    ax=None, cmap='RdBu_r', show=True, colorbar=True,
+                    xlabel=True, ylabel=True):
+    """Plotting function of GeneralizationAcrossTime object
+
+    Plot the scores of the GeneralizationAcrossTime object as an image:
+    training times on the y-axis, testing times on the x-axis.
+
+    Parameters
+    ----------
+    gat : instance of mne.decoding.GeneralizationAcrossTime
+        The gat object.
+    title : str | None
+        Figure title. Defaults to None.
+    vmin : float | None
+        Min color value for scores. If None, sets to min(gat.scores_).
+        Defaults to None.
+    vmax : float | None
+        Max color value for scores. If None, sets to max(gat.scores_).
+        Defaults to None.
+    tlim : array-like, (4,) | None
+        The temporal boundaries. If None, expands to
+        [tmin_train, tmax_train, tmin_test, tmax_test]. Defaults to None.
+    ax : object | None
+        Plot pointer. If None, generate new figure. Defaults to None.
+    cmap : str | cmap object
+        The color map to be used. Defaults to 'RdBu_r'.
+    show : bool
+        If True, the figure will be shown. Defaults to True.
+    colorbar : bool
+        If True, the colorbar of the figure is displayed. Defaults to True.
+    xlabel : bool
+        If True, the xlabel is displayed. Defaults to True.
+    ylabel : bool
+        If True, the ylabel is displayed. Defaults to True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
+    """
+    if not hasattr(gat, 'scores_'):
+        raise RuntimeError('Please score your data before trying to plot '
+                           'scores')
+    import matplotlib.pyplot as plt
+    if ax is None:
+        fig, ax = plt.subplots(1, 1)
+
+    # Define time limits
+    if tlim is None:
+        tt_times = gat.train_times_['times']
+        tn_times = gat.test_times_['times']
+        tlim = [tn_times[0][0], tn_times[-1][-1], tt_times[0], tt_times[-1]]
+
+    # Plot scores
+    im = ax.imshow(gat.scores_, interpolation='nearest', origin='lower',
+                   extent=tlim, vmin=vmin, vmax=vmax, cmap=cmap)
+    if xlabel is True:
+        ax.set_xlabel('Testing Time (s)')
+    if ylabel is True:
+        ax.set_ylabel('Training Time (s)')
+    if title is not None:
+        ax.set_title(title)
+    ax.axvline(0, color='k')
+    ax.axhline(0, color='k')
+    ax.set_xlim(tlim[:2])
+    ax.set_ylim(tlim[2:])
+    if colorbar is True:
+        plt.colorbar(im, ax=ax)
+    if show is True:
+        plt.show()
+    return fig if ax is None else ax.get_figure()
+
+
+def plot_gat_times(gat, train_time='diagonal', title=None, xmin=None,
+                   xmax=None, ymin=None, ymax=None, ax=None, show=True,
+                   color=None, xlabel=True, ylabel=True, legend=True,
+                   chance=True, label='Classif. score'):
+    """Plotting function of GeneralizationAcrossTime object
+
+    Plot the scores of the classifier trained at 'train_time'.
+
+    Parameters
+    ----------
+    gat : instance of mne.decoding.GeneralizationAcrossTime
+        The gat object.
+    train_time : 'diagonal' | float | list or array of float
+        Plot a 1d array of a portion of gat.scores_.
+        If set to 'diagonal', plots the gat.scores_ of classifiers
+        trained and tested at identical times.
+        If set to a float, list, or array of float, plots scores of the
+        classifier(s) trained at the specified training time(s).
+        Defaults to 'diagonal'.
+    title : str | None
+        Figure title. Defaults to None.
+    xmin : float | None, optional
+        Min time value. Defaults to None.
+    xmax : float | None, optional
+        Max time value. Defaults to None.
+    ymin : float | None, optional
+        Min score value. If None, sets to min(scores). Defaults to None.
+    ymax : float | None, optional
+        Max score value. If None, sets to max(scores). Defaults to None.
+    ax : object | None
+        Plot pointer. If None, generate new figure. Defaults to None.
+    show : bool, optional
+        If True, the figure will be shown. Defaults to True.
+    color : str | None
+        Score line color. If None, the matplotlib default color is used.
+    xlabel : bool
+        If True, the xlabel is displayed. Defaults to True.
+    ylabel : bool
+        If True, the ylabel is displayed. Defaults to True.
+    legend : bool
+        If True, a legend is displayed. Defaults to True.
+    chance : bool | float
+        Plot chance level. If True, chance level is estimated from the type
+        of scorer. Defaults to True.
+    label : str
+        Score label used in the legend. Defaults to 'Classif. score'.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
+    """
+    if not hasattr(gat, 'scores_'):
+        raise RuntimeError('Please score your data before trying to plot '
+                           'scores')
+    import matplotlib.pyplot as plt
+    if ax is None:
+        fig, ax = plt.subplots(1, 1)
+
+    # Find and plot chance level
+    if chance is not False:
+        if chance is True:
+            chance = _get_chance_level(gat.scorer_, gat.y_train_)
+        ax.axhline(float(chance), color='k', linestyle='--',
+                   label="Chance level")
+    ax.axvline(0, color='k', label='')
+
+    if isinstance(train_time, (str, float)):
+        train_time = [train_time]
+        label = [label]
+    elif isinstance(train_time, (list, np.ndarray)):
+        label = train_time
+    else:
+        raise ValueError("train_time must be 'diagonal' | float | list or "
+                         "array of float.")
+
+    if color is None or isinstance(color, str):
+        color = np.tile(color, len(train_time))
+
+    for _train_time, _color, _label in zip(train_time, color, label):
+        _plot_gat_time(gat, _train_time, ax, _color, _label)
+
+    if title is not None:
+        ax.set_title(title)
+    if ymin is not None and ymax is not None:
+        ax.set_ylim(ymin, ymax)
+    if xmin is not None and xmax is not None:
+        ax.set_xlim(xmin, xmax)
+    if xlabel is True:
+        ax.set_xlabel('Time (s)')
+    if ylabel is True:
+        ax.set_ylabel('Classif. score ({0})'.format(
+                      'AUC' if 'roc' in repr(gat.scorer_) else r'%'))
+    if legend is True:
+        ax.legend(loc='best')
+    if show is True:
+        plt.show()
+    return fig if ax is None else ax.get_figure()
+
+
+def _plot_gat_time(gat, train_time, ax, color, label):
+    """Aux function of plot_gat_time
+
+    Plots a unique score 1d array"""
+    # Detect whether gat is a full matrix or just its diagonal
+    if np.all(np.unique([len(t) for t in gat.test_times_['times']]) == 1):
+        scores = gat.scores_
+    elif train_time == 'diagonal':
+        # Get scores from identical training and testing times even if GAT
+        # is not square.
+        scores = np.zeros(len(gat.scores_))
+        for train_idx, train_time in enumerate(gat.train_times_['times']):
+            # use the testing times of this particular classifier
+            test_times = gat.test_times_['times'][train_idx]
+            # find the testing time closest to train_time
+            lag = test_times - train_time
+            test_idx = np.abs(lag).argmin()
+            # check that it is not more than one time step away
+            if np.abs(lag[test_idx]) > gat.train_times_['step']:
+                score = np.nan
+            else:
+                score = gat.scores_[train_idx][test_idx]
+            scores[train_idx] = score
+    elif isinstance(train_time, float):
+        train_times = gat.train_times_['times']
+        idx = np.abs(train_times - train_time).argmin()
+        if np.abs(train_times[idx] - train_time) > gat.train_times_['step']:
+            raise ValueError("No classifier trained at %s " % train_time)
+        scores = gat.scores_[idx]
+    else:
+        raise ValueError("train_time must be 'diagonal' or a float.")
+    kwargs = dict()
+    if color is not None:
+        kwargs['color'] = color
+    ax.plot(gat.train_times_['times'], scores, label=str(label), **kwargs)
+
+
+def _get_chance_level(scorer, y_train):
+    # XXX JRK This should probably be solved within sklearn?
+    if scorer.__name__ == 'accuracy_score':
+        chance = np.max([np.mean(y_train == c) for c in np.unique(y_train)])
+    elif scorer.__name__ == 'roc_auc_score':
+        chance = 0.5
+    else:
+        chance = np.nan
+        warnings.warn('Cannot find chance level from %s, specify chance'
+                      ' level' % scorer.__name__)
+    return chance
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/epochs.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/epochs.py
new file mode 100644
index 0000000..4e4e830
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/epochs.py
@@ -0,0 +1,1517 @@
+"""Functions to plot epochs data
+"""
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+#
+# License: Simplified BSD
+
+from functools import partial
+import copy
+
+import numpy as np
+
+from ..utils import verbose, get_config, set_config, deprecated
+from ..utils import logger
+from ..io.pick import pick_types, channel_type
+from ..io.proj import setup_proj
+from ..fixes import Counter, _in1d
+from ..time_frequency import compute_epochs_psd
+from .utils import tight_layout, figure_nobar, _toggle_proj
+from .utils import _toggle_options, _layout_figure, _setup_vmin_vmax
+from .utils import _channels_changed, _plot_raw_onscroll, _onclick_help
+from ..defaults import _handle_default
+
+
+def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
+                      vmax=None, colorbar=True, order=None, show=True,
+                      units=None, scalings=None, cmap='RdBu_r', fig=None):
+    """Plot Event Related Potential / Fields image
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs
+    picks : int | array-like of int | None
+        The indices of the channels to consider. If None, all good
+        data channels are plotted.
+    sigma : float
+        The standard deviation of the Gaussian smoothing to apply along
+        the epoch axis of the image. If 0., no smoothing is applied.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers
+    colorbar : bool
+        Whether to display a colorbar.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs on the y-axis
+        of the image. If it's an array of int, its length should equal
+        the number of good epochs. If it's a callable, the arguments
+        passed are the times vector and the data as a 2d array
+        (data.shape[1] == len(times)).
+    show : bool
+        Show figure if True.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting.
+        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
+        eog=1e6)`
+    cmap : matplotlib colormap
+        Colormap.
+    fig : matplotlib figure | None
+        Figure instance to draw the image to. Figure must contain two axes for
+        drawing the single trials and evoked responses. If None a new figure is
+        created. Defaults to None.
+
+    Returns
+    -------
+    figs : list of matplotlib figures
+        One figure per channel displayed.
+    """
+    from scipy import ndimage
+    units = _handle_default('units', units)
+    scalings = _handle_default('scalings', scalings)
+
+    import matplotlib.pyplot as plt
+    if picks is None:
+        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    if set(units.keys()) != set(scalings.keys()):
+        raise ValueError('Scalings and units must have the same keys.')
+
+    picks = np.atleast_1d(picks)
+    if fig is not None and len(picks) > 1:
+        raise ValueError('Only single pick can be drawn to a figure.')
+    evoked = epochs.average(picks)
+    data = epochs.get_data()[:, picks, :]
+    scale_vmin = vmin is None
+    scale_vmax = vmax is None
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    figs = list()
+    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
+        if fig is None:
+            this_fig = plt.figure()
+        else:
+            this_fig = fig
+        figs.append(this_fig)
+
+        ch_type = channel_type(epochs.info, idx)
+        if ch_type not in scalings:
+            # We know it's not in either scalings or units since keys match
+            raise KeyError('%s type not in scalings and units' % ch_type)
+        this_data *= scalings[ch_type]
+
+        this_order = order
+        if callable(order):
+            this_order = order(epochs.times, this_data)
+
+        if this_order is not None:
+            this_data = this_data[this_order]
+
+        if sigma > 0.:
+            this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
+                                                  axis=0)
+        plt.figure(this_fig.number)
+        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
+        if scale_vmin:
+            vmin *= scalings[ch_type]
+        if scale_vmax:
+            vmax *= scalings[ch_type]
+        im = ax1.imshow(this_data,
+                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
+                                0, len(data)],
+                        aspect='auto', origin='lower', interpolation='nearest',
+                        vmin=vmin, vmax=vmax, cmap=cmap)
+        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
+        if colorbar:
+            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
+        ax1.set_title(epochs.ch_names[idx])
+        ax1.set_ylabel('Epochs')
+        ax1.axis('auto')
+        ax1.axis('tight')
+        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
+        evoked_data = scalings[ch_type] * evoked.data[i]
+        ax2.plot(1e3 * evoked.times, evoked_data)
+        ax2.set_xlabel('Time (ms)')
+        ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
+        ax2.set_ylabel(units[ch_type])
+        evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
+        evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
+        if scale_vmin or scale_vmax:
+            evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
+            evoked_vmin = -evoked_vmax
+        ax2.set_ylim([evoked_vmin, evoked_vmax])
+        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
+        if colorbar:
+            plt.colorbar(im, cax=ax3)
+            tight_layout(fig=this_fig)
+
+    if show:
+        plt.show()
+
+    return figs
+
+
+@deprecated('`plot_image_epochs` is deprecated and will be removed in '
+            'MNE 0.11. Please use plot_epochs_image instead')
+def plot_image_epochs(epochs, picks=None, sigma=0., vmin=None,
+                      vmax=None, colorbar=True, order=None, show=True,
+                      units=None, scalings=None, cmap='RdBu_r', fig=None):
+    # pass all arguments through so the deprecated alias behaves identically
+    return plot_epochs_image(epochs=epochs, picks=picks, sigma=sigma,
+                             vmin=vmin, vmax=vmax, colorbar=colorbar,
+                             order=order, show=show, units=units,
+                             scalings=scalings, cmap=cmap, fig=fig)
+
+
+def _drop_log_stats(drop_log, ignore=['IGNORED']):
+    """
+    Parameters
+    ----------
+    drop_log : list of lists
+        Epoch drop log from Epochs.drop_log.
+    ignore : list
+        The drop reasons to ignore.
+
+    Returns
+    -------
+    perc : float
+        Total percentage of epochs dropped.
+    """
+    # XXX: This function should be moved to epochs.py after
+    # removal of perc return parameter in plot_drop_log()
+
+    if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
+        raise ValueError('drop_log must be a list of lists')
+
+    perc = 100 * np.mean([len(d) > 0 for d in drop_log
+                          if not any(r in ignore for r in d)])
+
+    return perc
+
+
+def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
+                  color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
+                  show=True):
+    """Show the channel stats based on a drop_log from Epochs
+
+    Parameters
+    ----------
+    drop_log : list of lists
+        Epoch drop log from Epochs.drop_log.
+    threshold : float
+        The percentage threshold to use to decide whether or not to
+        plot. Default is zero (always plot).
+    n_max_plot : int
+        Maximum number of channels to show stats for.
+    subject : str
+        The subject name to use in the title of the plot.
+    color : tuple | str
+        Color to use for the bars.
+    width : float
+        Width of the bars.
+    ignore : list
+        The drop reasons to ignore.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    perc = _drop_log_stats(drop_log, ignore)
+    scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
+    ch_names = np.array(list(scores.keys()))
+    fig = plt.figure()
+    if perc < threshold or len(ch_names) == 0:
+        plt.text(0, 0, 'No drops')
+        return fig
+    counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
+    n_plot = min(n_max_plot, len(ch_names))
+    order = np.flipud(np.argsort(counts))
+    plt.title('%s: %0.1f%%' % (subject, perc))
+    x = np.arange(n_plot)
+    plt.bar(x, counts[order[:n_plot]], color=color, width=width)
+    plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
+               horizontalalignment='right')
+    plt.tick_params(axis='x', which='major', labelsize=10)
+    plt.ylabel('% of epochs rejected')
+    plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
+    plt.grid(True, axis='y')
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
+                      title_str, axes_handler):
+    """Aux functioin"""
+    this = axes_handler[0]
+    for ii, data_, ax in zip(epoch_idx, data, axes):
+        for l, d in zip(ax.lines, data_[good_ch_idx]):
+            l.set_data(times, d)
+        if bad_ch_idx is not None:
+            bad_lines = [ax.lines[k] for k in bad_ch_idx]
+            for l, d in zip(bad_lines, data_[bad_ch_idx]):
+                l.set_data(times, d)
+        if title_str is not None:
+            ax.set_title(title_str % ii, fontsize=12)
+        ax.set_ylim(data.min(), data.max())
+        ax.set_yticks(list())
+        ax.set_xticks(list())
+        if vars(ax)[this]['reject'] is True:
+            #  memorizing reject
+            for l in ax.lines:
+                l.set_color((0.8, 0.8, 0.8))
+            ax.get_figure().canvas.draw()
+        else:
+            #  forgetting previous reject
+            for k in axes_handler:
+                if k == this:
+                    continue
+                if vars(ax).get(k, {}).get('reject', None) is True:
+                    for l in ax.lines[:len(good_ch_idx)]:
+                        l.set_color('k')
+                    if bad_ch_idx is not None:
+                        for l in ax.lines[-len(bad_ch_idx):]:
+                            l.set_color('r')
+                    ax.get_figure().canvas.draw()
+                    break
+
+
+def _epochs_navigation_onclick(event, params):
+    """Aux function"""
+    import matplotlib.pyplot as plt
+    p = params
+    here = None
+    if event.inaxes == p['back'].ax:
+        here = 1
+    elif event.inaxes == p['next'].ax:
+        here = -1
+    elif event.inaxes == p['reject-quit'].ax:
+        if p['reject_idx']:
+            p['epochs'].drop_epochs(p['reject_idx'])
+        plt.close(p['fig'])
+        plt.close(event.inaxes.get_figure())
+
+    if here is not None:
+        p['idx_handler'].rotate(here)
+        p['axes_handler'].rotate(here)
+        this_idx = p['idx_handler'][0]
+        _draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
+                          p['data'][this_idx],
+                          p['times'], p['axes'], p['title_str'],
+                          p['axes_handler'])
+        # XXX don't ask me why
+        p['axes'][0].get_figure().canvas.draw()
+
+
+def _epochs_axes_onclick(event, params):
+    """Aux function"""
+    reject_color = (0.8, 0.8, 0.8)
+    ax = event.inaxes
+    if event.inaxes is None:
+        return
+    p = params
+    here = vars(ax)[p['axes_handler'][0]]
+    if here.get('reject', None) is False:
+        idx = here['idx']
+        if idx not in p['reject_idx']:
+            p['reject_idx'].append(idx)
+            for l in ax.lines:
+                l.set_color(reject_color)
+            here['reject'] = True
+    elif here.get('reject', None) is True:
+        idx = here['idx']
+        if idx in p['reject_idx']:
+            p['reject_idx'].pop(p['reject_idx'].index(idx))
+            good_lines = [ax.lines[k] for k in p['good_ch_idx']]
+            for l in good_lines:
+                l.set_color('k')
+            if p['bad_ch_idx'] is not None:
+                bad_lines = ax.lines[-len(p['bad_ch_idx']):]
+                for l in bad_lines:
+                    l.set_color('r')
+            here['reject'] = False
+    ax.get_figure().canvas.draw()
+
+
+def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
+                n_channels=20, title=None, show=True, block=False):
+    """ Visualize epochs
+
+    Bad epochs can be marked with a left click on top of the epoch. Bad
+    channels can be selected by clicking the channel name on the left side of
+    the main axes. When the figure is closed, the selected bad epochs are
+    dropped, as are bad epochs marked beforehand with rejection parameters.
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object.
+    picks : array-like of int | None
+        Channels to be included. If None, only good data channels are used.
+        Defaults to None.
+    scalings : dict | None
+        Scale factors for the traces. If None, defaults to::
+
+            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)
+
+    n_epochs : int
+        The number of epochs per view. Defaults to 20.
+    n_channels : int
+        The number of channels per view. Defaults to 20.
+    title : str | None
+        The title of the window. If None, the name of the epochs object is
+        displayed. Defaults to None.
+    show : bool
+        Show figure if True. Defaults to True.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for rejecting bad trials on the fly by clicking on an epoch.
+        Defaults to False.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        The figure.
+
+    Notes
+    -----
+    The arrow keys (up/down/left/right) can be used to navigate between
+    channels and epochs, and the scaling can be adjusted with the - and +
+    (or =) keys, but this depends on the backend matplotlib is configured
+    to use (e.g., mpl.use(``TkAgg``) should work). Full screen mode can be
+    toggled with the f11 key. The number of epochs and channels per view
+    can be adjusted with the home/end and page down/page up keys. The
+    butterfly plot can be toggled with the ``b`` key. A right mouse click
+    adds a vertical line to the plot.
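+
+    Examples
+    --------
+    A minimal sketch, assuming ``epochs`` is an existing Epochs instance
+    (the variable is illustrative, not created here):
+
+    >>> from mne.viz import plot_epochs  # doctest: +SKIP
+    >>> fig = plot_epochs(epochs, n_epochs=10, block=True)  # doctest: +SKIP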
+    """
+    import matplotlib.pyplot as plt
+    scalings = _handle_default('scalings_plot_raw', scalings)
+
+    projs = epochs.info['projs']
+
+    params = {'epochs': epochs,
+              'orig_data': np.concatenate(epochs.get_data(), axis=1),
+              'info': copy.deepcopy(epochs.info),
+              'bad_color': (0.8, 0.8, 0.8),
+              't_start': 0}
+    params['label_click_fun'] = partial(_pick_bad_channels, params=params)
+    _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
+                               title, picks)
+
+    callback_close = partial(_close_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
+
+
+@verbose
+def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
+                    proj=False, n_fft=256,
+                    picks=None, ax=None, color='black', area_mode='std',
+                    area_alpha=0.33, n_overlap=0,
+                    dB=True, n_jobs=1, show=True, verbose=None):
+    """Plot the power spectral density across epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object.
+    fmin : float
+        Start frequency to consider.
+    fmax : float
+        End frequency to consider.
+    tmin : float | None
+        Start time to consider.
+    tmax : float | None
+        End time to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    picks : array-like of int | None
+        List of channels to use.
+    ax : instance of matplotlib Axes | None
+        Axes to plot into. If None, axes will be created.
+    color : str | tuple
+        A matplotlib-compatible color to use.
+    area_mode : str | None
+        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
+        will be plotted. If 'range', the min and max (across channels) will be
+        plotted. Bad channels will be excluded from these calculations.
+        If None, no area will be plotted.
+    area_alpha : float
+        Alpha for the area.
+    n_overlap : int
+        The number of points of overlap between blocks.
+    dB : bool
+        If True, transform data to decibels.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    show : bool
+        Show figure if True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure with one PSD subplot per channel type.
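+
+    Examples
+    --------
+    A minimal sketch, assuming ``epochs`` is an existing Epochs instance
+    (the variable is illustrative, not created here):
+
+    >>> from mne.viz import plot_epochs_psd  # doctest: +SKIP
+    >>> fig = plot_epochs_psd(epochs, fmin=2., fmax=40.)  # doctest: +SKIP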
+    """
+    import matplotlib.pyplot as plt
+    from .raw import _set_psd_plot_params
+    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
+        epochs.info, proj, picks, ax, area_mode)
+
+    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
+                                                ax_list)):
+        psds, freqs = compute_epochs_psd(epochs, picks=picks, fmin=fmin,
+                                         fmax=fmax, tmin=tmin, tmax=tmax,
+                                         n_fft=n_fft,
+                                         n_overlap=n_overlap, proj=proj,
+                                         n_jobs=n_jobs)
+
+        # Convert PSDs to dB
+        if dB:
+            psds = 10 * np.log10(psds)
+            unit = 'dB'
+        else:
+            unit = 'power'
+        # mean across epochs and channels
+        psd_mean = np.mean(psds, axis=0).mean(axis=0)
+        if area_mode == 'std':
+            # std across channels
+            psd_std = np.std(np.mean(psds, axis=0), axis=0)
+            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
+        elif area_mode == 'range':
+            hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
+                          np.max(np.mean(psds, axis=0), axis=0))
+        else:  # area_mode is None
+            hyp_limits = None
+
+        ax.plot(freqs, psd_mean, color=color)
+        if hyp_limits is not None:
+            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
+                            color=color, alpha=area_alpha)
+        if make_label:
+            if ii == len(picks_list) - 1:
+                ax.set_xlabel('Freq (Hz)')
+            if ii == len(picks_list) // 2:
+                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
+            ax.set_title(title)
+            ax.set_xlim(freqs[0], freqs[-1])
+    if make_label:
+        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
+    if show:
+        plt.show()
+    return fig
+
+
+def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
+                               title, picks, order=None):
+    """Helper for setting up the mne_browse_epochs window."""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    from matplotlib.collections import LineCollection
+    from matplotlib.colors import colorConverter
+    epochs = params['epochs']
+
+    if picks is None:
+        picks = _handle_picks(epochs)
+    if len(picks) < 1:
+        raise RuntimeError('No appropriate channels found. Please'
+                           ' check your picks')
+    picks = sorted(picks)
+    # Reorganize channels
+    inds = list()
+    types = list()
+    for t in ['grad', 'mag']:
+        idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
+        if len(idxs) < 1:
+            continue
+        mask = _in1d(idxs, picks, assume_unique=True)
+        inds.append(idxs[mask])
+        types += [t] * len(inds[-1])
+    pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
+    if order is None:
+        order = ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp', 'misc',
+                 'chpi', 'syst', 'ias', 'exci']
+    for ch_type in order:
+        pick_kwargs[ch_type] = True
+        idxs = pick_types(params['info'], **pick_kwargs)
+        if len(idxs) < 1:
+            continue
+        mask = _in1d(idxs, picks, assume_unique=True)
+        inds.append(idxs[mask])
+        types += [ch_type] * len(inds[-1])
+        pick_kwargs[ch_type] = False
+    inds = np.concatenate(inds).astype(int)
+    if not len(inds) == len(picks):
+        raise RuntimeError('Some channels not classified. Please'
+                           ' check your picks')
+    ch_names = [params['info']['ch_names'][x] for x in inds]
+
+    # set up plotting
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    n_epochs = min(n_epochs, len(epochs.events))
+    duration = len(epochs.times) * n_epochs
+    n_channels = min(n_channels, len(picks))
+    if size is not None:
+        size = size.split(',')
+        size = tuple(float(s) for s in size)
+    if title is None:
+        title = epochs.name
+        if epochs.name is None or len(title) == 0:
+            title = ''
+    fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
+    fig.canvas.set_window_title('mne_browse_epochs')
+    ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
+
+    ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
+                ha='center', va='bottom', size=12, xycoords='axes fraction',
+                textcoords='offset points')
+    color = _handle_default('color', None)
+
+    ax.axis([0, duration, 0, 200])
+    ax2 = ax.twiny()
+    ax2.set_zorder(-1)
+    ax2.axis([0, duration, 0, 200])
+    ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Epochs')
+    ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
+                                               facecolor='w', zorder=2))
+
+    ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
+    help_button = mpl.widgets.Button(ax_help_button, 'Help')
+    help_button.on_clicked(partial(_onclick_help, params=params))
+
+    # populate vertical and horizontal scrollbars
+    for ci in range(len(picks)):
+        if ch_names[ci] in params['info']['bads']:
+            this_color = params['bad_color']
+        else:
+            this_color = color[types[ci]]
+        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
+                                                   facecolor=this_color,
+                                                   edgecolor=this_color,
+                                                   zorder=3))
+
+    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
+                                       edgecolor='w', facecolor='w', zorder=4)
+    ax_vscroll.add_patch(vsel_patch)
+
+    ax_vscroll.set_ylim(len(types), 0)
+    ax_vscroll.set_title('Ch.')
+
+    # populate colors list
+    type_colors = [colorConverter.to_rgba(color[c]) for c in types]
+    colors = list()
+    for color_idx in range(len(type_colors)):
+        colors.append([type_colors[color_idx]] * len(epochs.events))
+    lines = list()
+    n_times = len(epochs.times)
+
+    for ch_idx in range(n_channels):
+        if len(colors) - 1 < ch_idx:
+            break
+        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                            zorder=2, picker=3.)
+        ax.add_collection(lc)
+        lines.append(lc)
+
+    times = epochs.times
+    data = np.zeros((params['info']['nchan'], len(times) * len(epochs.events)))
+
+    ylim = (25., 0.)  # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
+    # make shells for plotting traces
+    offset = ylim[0] / n_channels
+    offsets = np.arange(n_channels) * offset + (offset / 2.)
+
+    times = np.arange(len(data[0]))
+    epoch_times = np.arange(0, len(times), n_times)
+
+    ax.set_yticks(offsets)
+    ax.set_ylim(ylim)
+    ticks = epoch_times + 0.5 * n_times
+    ax.set_xticks(ticks)
+    ax2.set_xticks(ticks[:n_epochs])
+    labels = list(range(1, len(ticks) + 1))  # epoch numbers
+    ax.set_xticklabels(labels)
+    ax2.set_xticklabels(labels)
+    xlim = epoch_times[-1] + len(epochs.times)
+    ax_hscroll.set_xlim(0, xlim)
+    vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
+
+    # fit horizontal scroll bar ticks
+    hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
+    hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
+    hticks = list()
+    for tick in hscroll_ticks:
+        hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
+    hlabels = [x / n_times + 1 for x in hticks]
+    ax_hscroll.set_xticks(hticks)
+    ax_hscroll.set_xticklabels(hlabels)
+
+    for epoch_idx in range(len(epoch_times)):
+        ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
+                                                   n_times, 1, facecolor='w',
+                                                   edgecolor='w', alpha=0.6))
+    hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
+                                       edgecolor='k',
+                                       facecolor=(0.75, 0.75, 0.75),
+                                       alpha=0.25, linewidth=1, clip_on=False)
+    ax_hscroll.add_patch(hsel_patch)
+    text = ax.text(0, 0, 'blank', zorder=2, verticalalignment='baseline',
+                   ha='left', fontweight='bold')
+    text.set_visible(False)
+
+    params.update({'fig': fig,
+                   'ax': ax,
+                   'ax2': ax2,
+                   'ax_hscroll': ax_hscroll,
+                   'ax_vscroll': ax_vscroll,
+                   'vsel_patch': vsel_patch,
+                   'hsel_patch': hsel_patch,
+                   'lines': lines,
+                   'projs': projs,
+                   'ch_names': ch_names,
+                   'n_channels': n_channels,
+                   'n_epochs': n_epochs,
+                   'scalings': scalings,
+                   'duration': duration,
+                   'ch_start': 0,
+                   'colors': colors,
+                   'def_colors': type_colors,  # don't change at runtime
+                   'picks': picks,
+                   'bads': np.array(list(), dtype=int),
+                   'data': data,
+                   'times': times,
+                   'epoch_times': epoch_times,
+                   'offsets': offsets,
+                   'labels': labels,
+                   'scale_factor': 1.0,
+                   'butterfly_scale': 1.0,
+                   'fig_proj': None,
+                   'types': np.array(types),
+                   'inds': inds,
+                   'vert_lines': list(),
+                   'vertline_t': vertline_t,
+                   'butterfly': False,
+                   'text': text,
+                   'ax_help_button': ax_help_button,  # needed for positioning
+                   'help_button': help_button,  # reference needed for clicks
+                   'fig_options': None,
+                   'settings': [True, True, True, True],
+                   'image_plot': None})
+
+    params['plot_fun'] = partial(_plot_traces, params=params)
+
+    if len(projs) > 0 and not epochs.proj:
+        ax_button = plt.subplot2grid((10, 15), (9, 14))
+        opt_button = mpl.widgets.Button(ax_button, 'Proj')
+        callback_option = partial(_toggle_options, params=params)
+        opt_button.on_clicked(callback_option)
+        params['opt_button'] = opt_button
+        params['ax_button'] = ax_button
+
+    # callbacks
+    callback_scroll = partial(_plot_onscroll, params=params)
+    fig.canvas.mpl_connect('scroll_event', callback_scroll)
+    callback_click = partial(_mouse_click, params=params)
+    fig.canvas.mpl_connect('button_press_event', callback_click)
+    callback_key = partial(_plot_onkey, params=params)
+    fig.canvas.mpl_connect('key_press_event', callback_key)
+    callback_resize = partial(_resize_event, params=params)
+    fig.canvas.mpl_connect('resize_event', callback_resize)
+    fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
+
+    # Draw event lines for the first time.
+    _plot_vert_lines(params)
+
+    # As this code is shared with plot_evoked, some extra steps are needed:
+    # first the actual plot update function
+    params['plot_update_proj_callback'] = _plot_update_epochs_proj
+    # then the toggle handler
+    callback_proj = partial(_toggle_proj, params=params)
+    # store these for use by callbacks in the options figure
+    params['callback_proj'] = callback_proj
+    params['callback_key'] = callback_key
+
+    callback_proj('none')
+    _layout_figure(params)
+
+
+def _plot_traces(params):
+    """ Helper for plotting concatenated epochs """
+    params['text'].set_visible(False)
+    ax = params['ax']
+    butterfly = params['butterfly']
+    if butterfly:
+        ch_start = 0
+        n_channels = len(params['picks'])
+        data = params['data'] * params['butterfly_scale']
+    else:
+        ch_start = params['ch_start']
+        n_channels = params['n_channels']
+        data = params['data'] * params['scale_factor']
+    offsets = params['offsets']
+    lines = params['lines']
+    epochs = params['epochs']
+
+    n_times = len(epochs.times)
+    tick_list = list()
+    start_idx = int(params['t_start'] / n_times)
+    end = params['t_start'] + params['duration']
+    end_idx = int(end / n_times)
+    xlabels = params['labels'][start_idx:]
+    event_ids = params['epochs'].events[:, 2]
+    params['ax2'].set_xticklabels(event_ids[start_idx:])
+    ax.set_xticklabels(xlabels)
+    ylabels = ax.yaxis.get_ticklabels()
+    # do the plotting
+    for line_idx in range(n_channels):
+        ch_idx = line_idx + ch_start
+        if line_idx >= len(lines):
+            break
+        elif ch_idx < len(params['ch_names']):
+            if butterfly:
+                ch_type = params['types'][ch_idx]
+                if ch_type == 'grad':
+                    offset = offsets[0]
+                elif ch_type == 'mag':
+                    offset = offsets[1]
+                elif ch_type == 'eeg':
+                    offset = offsets[2]
+                elif ch_type == 'eog':
+                    offset = offsets[3]
+                elif ch_type == 'ecg':
+                    offset = offsets[4]
+                else:
+                    lines[line_idx].set_segments(list())
+            else:
+                tick_list += [params['ch_names'][ch_idx]]
+                offset = offsets[line_idx]
+            this_data = data[ch_idx][params['t_start']:end]
+
+            # subtraction here gets correct orientation for flipped ylim
+            ydata = offset - this_data
+            xdata = params['times'][:params['duration']]
+            num_epochs = np.min([params['n_epochs'],
+                                len(epochs.events)])
+            segments = np.split(np.array((xdata, ydata)).T, num_epochs)
+
+            ch_name = params['ch_names'][ch_idx]
+            if ch_name in params['info']['bads']:
+                if not butterfly:
+                    this_color = params['bad_color']
+                    ylabels[line_idx].set_color(this_color)
+                this_color = np.tile((params['bad_color']), (num_epochs, 1))
+                for bad_idx in params['bads']:
+                    if bad_idx < start_idx or bad_idx > end_idx:
+                        continue
+                    this_color[bad_idx - start_idx] = (1., 0., 0.)
+                lines[line_idx].set_zorder(1)
+            else:
+                this_color = params['colors'][ch_idx][start_idx:end_idx]
+                lines[line_idx].set_zorder(2)
+                if not butterfly:
+                    ylabels[line_idx].set_color('black')
+            lines[line_idx].set_segments(segments)
+            lines[line_idx].set_color(this_color)
+        else:
+            lines[line_idx].set_segments(list())
+
+    # finalize plot
+    ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
+                False)
+    params['ax2'].set_xlim(params['times'][0],
+                           params['times'][0] + params['duration'], False)
+    if butterfly:
+        factor = -1. / params['butterfly_scale']
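+        # 20 tick-label slots: 4 ticks for each of the at most 5 channel
+        # types shown in butterfly mode (grad, mag, eeg, eog, ecg).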
+        labels = np.empty(20, dtype='S15')
+        labels.fill('')
+        ticks = ax.get_yticks()
+        idx_offset = 1
+        if 'grad' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
+                                               params['scalings']['grad'] *
+                                               1e13 * factor)
+            idx_offset += 4
+        if 'mag' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
+                                               params['scalings']['mag'] *
+                                               1e15 * factor)
+            idx_offset += 4
+        if 'eeg' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
+                                               params['scalings']['eeg'] *
+                                               1e6 * factor)
+            idx_offset += 4
+        if 'eog' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
+                                               params['scalings']['eog'] *
+                                               1e6 * factor)
+            idx_offset += 4
+        if 'ecg' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
+                                               params['scalings']['ecg'] *
+                                               1e6 * factor)
+        ax.set_yticklabels(labels, fontsize=12, color='black')
+    else:
+        ax.set_yticklabels(tick_list, fontsize=12)
+    params['vsel_patch'].set_y(ch_start)
+    params['fig'].canvas.draw()
+    # XXX This is a hack to make sure this figure gets drawn last
+    # so that when matplotlib goes to calculate bounds we don't get a
+    # CGContextRef error on the MacOSX backend :(
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _plot_update_epochs_proj(params, bools):
+    """Helper only needs to be called when proj is changed"""
+    if bools is not None:
+        inds = np.where(bools)[0]
+        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                                   for ii in inds]
+        params['proj_bools'] = bools
+    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
+                                        verbose=False)
+
+    data = params['orig_data']
+    if params['projector'] is not None:
+        data = np.dot(params['projector'], data)
+    types = params['types']
+    for pick, ind in enumerate(params['inds']):
+        params['data'][pick] = data[ind] / params['scalings'][types[pick]]
+    params['plot_fun']()
+
+
+def _handle_picks(epochs):
+    """Aux function to handle picks."""
+    if any('ICA' in k for k in epochs.ch_names):
+        picks = pick_types(epochs.info, misc=True, ref_meg=False,
+                           exclude=[])
+    else:
+        picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
+                           ref_meg=False, exclude=[])
+    return picks
+
+
+def _plot_window(value, params):
+    """Deal with horizontal shift of the viewport."""
+    max_times = len(params['times']) - params['duration']
+    if value > max_times:
+        value = len(params['times']) - params['duration']
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+        params['plot_fun']()
+
+
+def _plot_vert_lines(params):
+    """ Helper function for plotting vertical lines."""
+    ax = params['ax']
+    while len(ax.lines) > 0:
+        ax.lines.pop()
+    params['vert_lines'] = list()
+    params['vertline_t'].set_text('')
+
+    epochs = params['epochs']
+    if params['settings'][3]:  # if zeroline visible
+        t_zero = np.where(epochs.times == 0.)[0]
+        if len(t_zero) == 1:
+            for event_idx in range(len(epochs.events)):
+                pos = [event_idx * len(epochs.times) + t_zero[0],
+                       event_idx * len(epochs.times) + t_zero[0]]
+                ax.plot(pos, ax.get_ylim(), 'g', zorder=3, alpha=0.4)
+    for epoch_idx in range(len(epochs.events)):
+        pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
+        ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=1)
+
+
+def _pick_bad_epochs(event, params):
+    """Helper for selecting / dropping bad epochs"""
+    if 'ica' in params:
+        pos = (event.xdata, event.ydata)
+        _pick_bad_channels(pos, params)
+        return
+    n_times = len(params['epochs'].times)
+    start_idx = int(params['t_start'] / n_times)
+    xdata = event.xdata
+    xlim = event.inaxes.get_xlim()
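+    # Map the click's x-position to an epoch index within the current view.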
+    epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
+    total_epochs = len(params['epochs'].events)
+    if epoch_idx > total_epochs - 1:
+        return
+    # remove bad epoch
+    if epoch_idx in params['bads']:
+        params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
+        for ch_idx in range(len(params['ch_names'])):
+            params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
+        params['ax_hscroll'].patches[epoch_idx].set_color('w')
+        params['ax_hscroll'].patches[epoch_idx].set_zorder(1)
+        params['plot_fun']()
+        return
+    # add bad epoch
+    params['bads'] = np.append(params['bads'], epoch_idx)
+    params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
+    params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
+    params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
+    for ch_idx in range(len(params['ch_names'])):
+        params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
+    params['plot_fun']()
+
+
+def _pick_bad_channels(pos, params):
+    """Helper function for selecting bad channels."""
+    text, ch_idx = _label2idx(params, pos)
+    if text is None:
+        return
+    if text in params['info']['bads']:
+        while text in params['info']['bads']:
+            params['info']['bads'].remove(text)
+        color = params['def_colors'][ch_idx]
+        params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
+    else:
+        params['info']['bads'].append(text)
+        color = params['bad_color']
+        params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
+    if 'ica' in params:
+        params['plot_fun']()
+    else:
+        params['plot_update_proj_callback'](params, None)
+
+
+def _plot_onscroll(event, params):
+    """Function to handle scroll events."""
+    if event.key == 'control':
+        if event.step < 0:
+            event.key = '-'
+        else:
+            event.key = '+'
+        _plot_onkey(event, params)
+        return
+    if params['butterfly']:
+        return
+    _plot_raw_onscroll(event, params, len(params['ch_names']))
+
+
+def _mouse_click(event, params):
+    """Function to handle mouse click events."""
+    if event.inaxes is None:
+        if params['butterfly'] or not params['settings'][0]:
+            return
+        ax = params['ax']
+        ylim = ax.get_ylim()
+        pos = ax.transData.inverted().transform((event.x, event.y))
+        if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
+            return
+        if event.button == 1:  # left click
+            params['label_click_fun'](pos)
+        elif event.button == 3:  # right click
+            if 'ica' not in params:
+                _, ch_idx = _label2idx(params, pos)
+                if ch_idx is None:
+                    return
+                if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
+                                                                'eeg', 'eog']:
+                    logger.info('Event related fields / potentials only '
+                                'available for MEG and EEG channels.')
+                    return
+                fig = plot_epochs_image(params['epochs'],
+                                        picks=params['inds'][ch_idx],
+                                        fig=params['image_plot'])[0]
+                params['image_plot'] = fig
+    elif event.button == 1:  # left click
+        # vertical scroll bar changed
+        if event.inaxes == params['ax_vscroll']:
+            if params['butterfly']:
+                return
+            ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+            if params['ch_start'] != ch_start:
+                params['ch_start'] = ch_start
+                params['plot_fun']()
+        # horizontal scroll bar changed
+        elif event.inaxes == params['ax_hscroll']:
+            # find the closest epoch time
+            times = params['epoch_times']
+            offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
+            xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
+            _plot_window(xdata, params)
+        # main axes
+        elif event.inaxes == params['ax']:
+            _pick_bad_epochs(event, params)
+
+    elif event.inaxes == params['ax'] and event.button == 2:  # middle click
+        params['fig'].canvas.draw()
+        if params['fig_proj'] is not None:
+            params['fig_proj'].canvas.draw()
+    elif event.inaxes == params['ax'] and event.button == 3:  # right click
+        n_times = len(params['epochs'].times)
+        xdata = int(event.xdata % n_times)
+        prev_xdata = 0
+        if len(params['vert_lines']) > 0:
+            prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
+            while len(params['vert_lines']) > 0:
+                params['ax'].lines.remove(params['vert_lines'][0][0])
+                params['vert_lines'].pop(0)
+        if prev_xdata == xdata:  # lines removed
+            params['vertline_t'].set_text('')
+            params['plot_fun']()
+            return
+        ylim = params['ax'].get_ylim()
+        for epoch_idx in range(params['n_epochs']):  # plot lines
+            pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
+            params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
+                                                          zorder=4))
+        params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
+        params['plot_fun']()
+
+
+def _plot_onkey(event, params):
+    """Function to handle key presses."""
+    import matplotlib.pyplot as plt
+    if event.key == 'down':
+        if params['butterfly']:
+            return
+        params['ch_start'] += params['n_channels']
+        _channels_changed(params, len(params['ch_names']))
+    elif event.key == 'up':
+        if params['butterfly']:
+            return
+        params['ch_start'] -= params['n_channels']
+        _channels_changed(params, len(params['ch_names']))
+    elif event.key == 'left':
+        sample = params['t_start'] - params['duration']
+        sample = np.max([0, sample])
+        _plot_window(sample, params)
+    elif event.key == 'right':
+        sample = params['t_start'] + params['duration']
+        sample = np.min([sample, params['times'][-1] - params['duration']])
+        times = params['epoch_times']
+        xdata = times.flat[np.abs(times - sample).argmin()]
+        _plot_window(xdata, params)
+    elif event.key == '-':
+        if params['butterfly']:
+            params['butterfly_scale'] /= 1.1
+        else:
+            params['scale_factor'] /= 1.1
+        params['plot_fun']()
+    elif event.key in ['+', '=']:
+        if params['butterfly']:
+            params['butterfly_scale'] *= 1.1
+        else:
+            params['scale_factor'] *= 1.1
+        params['plot_fun']()
+    elif event.key == 'f11':
+        mng = plt.get_current_fig_manager()
+        mng.full_screen_toggle()
+    elif event.key == 'pagedown':
+        if params['n_channels'] == 1 or params['butterfly']:
+            return
+        n_channels = params['n_channels'] - 1
+        ylim = params['ax'].get_ylim()
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].collections.pop()
+        params['ax'].set_yticks(params['offsets'])
+        params['lines'].pop()
+        params['vsel_patch'].set_height(n_channels)
+        params['plot_fun']()
+    elif event.key == 'pageup':
+        if params['butterfly']:
+            return
+        from matplotlib.collections import LineCollection
+        n_channels = params['n_channels'] + 1
+        ylim = params['ax'].get_ylim()
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                            zorder=2, picker=3.)
+        params['ax'].add_collection(lc)
+        params['ax'].set_yticks(params['offsets'])
+        params['lines'].append(lc)
+        params['vsel_patch'].set_height(n_channels)
+        params['plot_fun']()
+    elif event.key == 'home':
+        n_epochs = params['n_epochs'] - 1
+        if n_epochs <= 0:
+            return
+        n_times = len(params['epochs'].times)
+        ticks = params['epoch_times'] + 0.5 * n_times
+        params['ax2'].set_xticks(ticks[:n_epochs])
+        params['n_epochs'] = n_epochs
+        params['duration'] -= n_times
+        params['hsel_patch'].set_width(params['duration'])
+        params['plot_fun']()
+    elif event.key == 'end':
+        n_epochs = params['n_epochs'] + 1
+        n_times = len(params['epochs'].times)
+        if n_times * n_epochs > len(params['data'][0]):
+            return
+        if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
+            params['t_start'] -= n_times
+            params['hsel_patch'].set_x(params['t_start'])
+        ticks = params['epoch_times'] + 0.5 * n_times
+        params['ax2'].set_xticks(ticks[:n_epochs])
+        params['n_epochs'] = n_epochs
+        if len(params['vert_lines']) > 0:
+            ax = params['ax']
+            pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
+            params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
+                                                zorder=3))
+        params['duration'] += n_times
+        if params['t_start'] + params['duration'] > len(params['data'][0]):
+            params['t_start'] -= n_times
+            params['hsel_patch'].set_x(params['t_start'])
+        params['hsel_patch'].set_width(params['duration'])
+        params['plot_fun']()
+    elif event.key == 'b':
+        if params['fig_options'] is not None:
+            plt.close(params['fig_options'])
+            params['fig_options'] = None
+        _prepare_butterfly(params)
+        _plot_traces(params)
+    elif event.key == 'o':
+        if not params['butterfly']:
+            _open_options(params)
+    elif event.key == 'h':
+        _plot_histogram(params)
+    elif event.key == '?':
+        _onclick_help(event, params)
+    elif event.key == 'escape':
+        plt.close(params['fig'])
+
+
+def _prepare_butterfly(params):
+    """Helper function for setting up butterfly plot."""
+    from matplotlib.collections import LineCollection
+    butterfly = not params['butterfly']
+    if butterfly:
+        types = set(['grad', 'mag', 'eeg', 'eog',
+                     'ecg']) & set(params['types'])
+        if len(types) < 1:
+            return
+        params['ax_vscroll'].set_visible(False)
+        ax = params['ax']
+        labels = ax.yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(True)
+        ylim = (5. * len(types), 0.)
+        ax.set_ylim(ylim)
+        offset = ylim[0] / (4. * len(types))
+        ticks = np.arange(0, ylim[0], offset)
+        ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
+        ax.set_yticks(ticks)
+        used_types = 0
+        params['offsets'] = [ticks[2]]
+        if 'grad' in types:
+            pos = (0, 1 - (ticks[2] / ylim[0]))
+            params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'mag' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'eeg' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'eog' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'ecg' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+
+        while len(params['lines']) < len(params['picks']):
+            lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                                zorder=2, picker=3.)
+            ax.add_collection(lc)
+            params['lines'].append(lc)
+    else:  # change back to default view
+        labels = params['ax'].yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][0])
+        params['ax_vscroll'].set_visible(True)
+        while len(params['ax2'].texts) > 0:
+            params['ax2'].texts.pop()
+        n_channels = params['n_channels']
+        while len(params['lines']) > n_channels:
+            params['ax'].collections.pop()
+            params['lines'].pop()
+        ylim = (25., 0.)
+        params['ax'].set_ylim(ylim)
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['ax'].set_yticks(params['offsets'])
+    params['butterfly'] = butterfly
+
+
+def _onpick(event, params):
+    """Helper to add a channel name on click"""
+    if event.mouseevent.button != 2 or not params['butterfly']:
+        return  # text label added with a middle mouse button
+    lidx = np.where([l is event.artist for l in params['lines']])[0][0]
+    text = params['text']
+    text.set_x(event.mouseevent.xdata)
+    text.set_y(event.mouseevent.ydata)
+    text.set_text(params['ch_names'][lidx])
+    text.set_visible(True)
+    # do NOT redraw here, since for butterfly plots hundreds of lines could
+    # potentially be picked -- use _mouse_click (happens once per click)
+    # to do the drawing
+
+
+def _close_event(event, params):
+    """Function to drop selected bad epochs. Called on closing of the plot."""
+    params['epochs'].drop_epochs(params['bads'])
+    logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
+    params['epochs'].info['bads'] = params['info']['bads']
+
+
+def _resize_event(event, params):
+    """Function to handle resize event"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_figure(params)
+
+
+def _update_channels_epochs(event, params):
+    """Function for changing the amount of channels and epochs per view."""
+    from matplotlib.collections import LineCollection
+    # Channels
+    n_channels = int(np.around(params['channel_slider'].val))
+    offset = params['ax'].get_ylim()[0] / n_channels
+    params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+    while len(params['lines']) > n_channels:
+        params['ax'].collections.pop()
+        params['lines'].pop()
+    while len(params['lines']) < n_channels:
+        lc = LineCollection(list(), linewidths=0.5, antialiased=False,
+                            zorder=2, picker=3.)
+        params['ax'].add_collection(lc)
+        params['lines'].append(lc)
+    params['ax'].set_yticks(params['offsets'])
+    params['vsel_patch'].set_height(n_channels)
+    params['n_channels'] = n_channels
+
+    # Epochs
+    n_epochs = int(np.around(params['epoch_slider'].val))
+    n_times = len(params['epochs'].times)
+    ticks = params['epoch_times'] + 0.5 * n_times
+    params['ax2'].set_xticks(ticks[:n_epochs])
+    params['n_epochs'] = n_epochs
+    params['duration'] = n_times * n_epochs
+    params['hsel_patch'].set_width(params['duration'])
+    if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
+        params['t_start'] = len(params['data'][0]) - n_times * n_epochs
+        params['hsel_patch'].set_x(params['t_start'])
+    _plot_traces(params)
+
+
+def _toggle_labels(label, params):
+    """Function for toggling axis labels on/off."""
+    if label == 'Channel names visible':
+        params['settings'][0] = not params['settings'][0]
+        labels = params['ax'].yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][0])
+    elif label == 'Event-id visible':
+        params['settings'][1] = not params['settings'][1]
+        labels = params['ax2'].xaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][1])
+    elif label == 'Epoch-id visible':
+        params['settings'][2] = not params['settings'][2]
+        labels = params['ax'].xaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][2])
+    elif label == 'Zeroline visible':
+        params['settings'][3] = not params['settings'][3]
+        _plot_vert_lines(params)
+    params['fig'].canvas.draw()
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _open_options(params):
+    """Function for opening the option window."""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    if params['fig_options'] is not None:
+        # turn off options dialog
+        plt.close(params['fig_options'])
+        params['fig_options'] = None
+        return
+    width = 10
+    height = 3
+    fig_options = figure_nobar(figsize=(width, height), dpi=80)
+    fig_options.canvas.set_window_title('View settings')
+    params['fig_options'] = fig_options
+    ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
+    ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
+    ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
+    ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
+    plt.axis('off')
+    params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
+    params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
+                                                  len(params['ch_names']),
+                                                  valfmt='%0.0f',
+                                                  valinit=params['n_channels'])
+    params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
+                                                len(params['epoch_times']),
+                                                valfmt='%0.0f',
+                                                valinit=params['n_epochs'])
+    params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
+                                                  ['Channel names visible',
+                                                   'Event-id visible',
+                                                   'Epoch-id visible',
+                                                   'Zeroline visible'],
+                                                  actives=params['settings'])
+    update = partial(_update_channels_epochs, params=params)
+    params['update_button'].on_clicked(update)
+    labels_callback = partial(_toggle_labels, params=params)
+    params['checkbox'].on_clicked(labels_callback)
+    close_callback = partial(_settings_closed, params=params)
+    params['fig_options'].canvas.mpl_connect('close_event', close_callback)
+    try:
+        params['fig_options'].canvas.draw()
+        params['fig_options'].show()
+        if params['fig_proj'] is not None:
+            params['fig_proj'].canvas.draw()
+    except Exception:
+        pass
+
+
+def _settings_closed(events, params):
+    """Function to handle close event from settings dialog."""
+    params['fig_options'] = None
+
+
+def _plot_histogram(params):
+    """Function for plotting histogram of peak-to-peak values."""
+    import matplotlib.pyplot as plt
+    epochs = params['epochs']
+    p2p = np.ptp(epochs.get_data(), axis=2)
+    types = list()
+    data = list()
+    if 'eeg' in params['types']:
+        eegs = np.array([p2p.T[i] for i,
+                         x in enumerate(params['types']) if x == 'eeg'])
+        data.append(eegs.ravel())
+        types.append('eeg')
+    if 'mag' in params['types']:
+        mags = np.array([p2p.T[i] for i,
+                         x in enumerate(params['types']) if x == 'mag'])
+        data.append(mags.ravel())
+        types.append('mag')
+    if 'grad' in params['types']:
+        grads = np.array([p2p.T[i] for i,
+                          x in enumerate(params['types']) if x == 'grad'])
+        data.append(grads.ravel())
+        types.append('grad')
+    fig = plt.figure(len(types))
+    fig.clf()
+    scalings = _handle_default('scalings')
+    units = _handle_default('units')
+    titles = _handle_default('titles')
+    colors = _handle_default('color')
+    for idx in range(len(types)):
+        ax = plt.subplot(len(types), 1, idx + 1)
+        plt.xlabel(units[types[idx]])
+        plt.ylabel('count')
+        color = colors[types[idx]]
+        rej = None
+        if epochs.reject is not None and types[idx] in epochs.reject.keys():
+            rej = epochs.reject[types[idx]] * scalings[types[idx]]
+            rng = [0., rej * 1.1]
+        else:
+            rng = None
+        plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
+                 range=rng)
+        if rej is not None:
+            ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
+        plt.title(titles[types[idx]])
+    fig.suptitle('Peak-to-peak histogram', y=0.99)
+    fig.subplots_adjust(hspace=0.6)
+    try:
+        fig.show()
+    except Exception:
+        pass
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _label2idx(params, pos):
+    """Aux function for click on labels. Returns channel name and idx."""
+    labels = params['ax'].yaxis.get_ticklabels()
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1])
+    text = labels[line_idx].get_text()
+    if len(text) == 0:
+        return None, None
+    ch_idx = params['ch_start'] + line_idx
+    return text, ch_idx
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/evoked.py
new file mode 100644
index 0000000..f929fd5
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/evoked.py
@@ -0,0 +1,809 @@
+"""Functions to make simple plot on evoked M/EEG data (besides topographies)
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+from itertools import cycle
+
+import numpy as np
+
+from ..io.pick import channel_type, pick_types, _picks_by_type
+from ..externals.six import string_types
+from ..defaults import _handle_default
+from .utils import _draw_proj_checkbox, tight_layout, _check_delayed_ssp
+from ..utils import logger
+from ..fixes import partial
+from ..io.pick import pick_info
+from .topo import _plot_evoked_topo
+from .topomap import _prepare_topo_plot, plot_topomap
+
+
+def _butterfly_onpick(event, params):
+    """Helper to add a channel name on click"""
+    params['need_draw'] = True
+    ax = event.artist.get_axes()
+    ax_idx = np.where([ax is a for a in params['axes']])[0][0]
+    lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
+    ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
+    text = params['texts'][ax_idx]
+    x = event.artist.get_xdata()[event.ind[0]]
+    y = event.artist.get_ydata()[event.ind[0]]
+    text.set_x(x)
+    text.set_y(y)
+    text.set_text(ch_name)
+    text.set_color(event.artist.get_color())
+    text.set_alpha(1.)
+    text.set_path_effects(params['path_effects'])
+    # do NOT redraw here, since for butterfly plots hundreds of lines could
+    # potentially be picked -- use on_button_press (happens once per click)
+    # to do the drawing
+
+
+def _butterfly_on_button_press(event, params):
+    """Helper to only draw once for picking"""
+    if params['need_draw']:
+        event.canvas.draw()
+    else:
+        idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
+        if len(idx) == 1:
+            text = params['texts'][idx[0]]
+            text.set_alpha(0.)
+            text.set_path_effects([])
+            event.canvas.draw()
+    params['need_draw'] = False
+
+
+def _butterfly_onselect(xmin, xmax, ch_types, evoked, text=None):
+    """Function for drawing topomaps from the selected area."""
+    import matplotlib.pyplot as plt
+    vert_lines = list()
+    if text is not None:
+        text.set_visible(True)
+        ax = text.axes
+        ylim = ax.get_ylim()
+        vert_lines.append(ax.plot([xmin, xmin], ylim, zorder=0, color='red'))
+        vert_lines.append(ax.plot([xmax, xmax], ylim, zorder=0, color='red'))
+        fill = ax.fill_betweenx(ylim, x1=xmin, x2=xmax, alpha=0.2,
+                                color='green')
+        evoked_fig = plt.gcf()
+        evoked_fig.canvas.draw()
+        evoked_fig.canvas.flush_events()
+    times = evoked.times
+    xmin *= 0.001
+    minidx = np.abs(times - xmin).argmin()
+    xmax *= 0.001
+    maxidx = np.abs(times - xmax).argmin()
+    fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
+                              figsize=(3 * len(ch_types), 3))
+    for idx, ch_type in enumerate(ch_types):
+        picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(evoked,
+                                                                 ch_type,
+                                                                 layout=None)
+        data = evoked.data[picks, minidx:maxidx]
+        if merge_grads:
+            from ..channels.layout import _merge_grad_data
+            data = _merge_grad_data(data)
+            title = '%s RMS' % ch_type
+        else:
+            title = ch_type
+        data = np.average(data, axis=1)
+        axarr[0][idx].set_title(title)
+        plot_topomap(data, pos, axis=axarr[0][idx], show=False)
+
+    fig.suptitle('Average over %.2fs - %.2fs' % (xmin, xmax), fontsize=15,
+                 y=0.1)
+    tight_layout(pad=2.0, fig=fig)
+    plt.show()
+    if text is not None:
+        text.set_visible(False)
+        close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
+                                 fill=fill)
+        fig.canvas.mpl_connect('close_event', close_callback)
+        evoked_fig.canvas.draw()
+        evoked_fig.canvas.flush_events()
+
+
+def _topo_closed(events, ax, lines, fill):
+    """Callback for removing lines from evoked plot as topomap is closed."""
+    for line in lines:
+        ax.lines.remove(line[0])
+    ax.collections.remove(fill)
+    ax.get_figure().canvas.draw()
+
+
+def _plot_evoked(evoked, picks, exclude, unit, show,
+                 ylim, proj, xlim, hline, units,
+                 scalings, titles, axes, plot_type,
+                 cmap=None, gfp=False):
+    """Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
+
+    The extra parameter is:
+
+    plot_type : str ('butterfly' | 'image')
+        The type of graph to plot: 'butterfly' plots each channel as a line
+        (x axis: time, y axis: amplitude). 'image' plots a 2D image where
+        color depicts the amplitude of each channel at a given time point
+        (x axis: time, y axis: channel). In 'image' mode, the plot is not
+        interactive.
+    """
+    import matplotlib.pyplot as plt
+    from matplotlib import patheffects
+    from matplotlib.widgets import SpanSelector
+    if axes is not None and proj == 'interactive':
+        raise RuntimeError('Currently only single axis figures are supported'
+                           ' for interactive SSP selection.')
+    if isinstance(gfp, string_types) and gfp != 'only':
+        raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
+
+    scalings = _handle_default('scalings', scalings)
+    titles = _handle_default('titles', titles)
+    units = _handle_default('units', units)
+    # Valid data types ordered for consistency
+    channel_types = ['eeg', 'grad', 'mag', 'seeg']
+
+    if picks is None:
+        picks = list(range(evoked.info['nchan']))
+
+    bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
+                  if ch in evoked.ch_names]
+    if len(exclude) > 0:
+        if isinstance(exclude, string_types) and exclude == 'bads':
+            exclude = bad_ch_idx
+        elif (isinstance(exclude, list) and
+              all(isinstance(ch, string_types) for ch in exclude)):
+            exclude = [evoked.ch_names.index(ch) for ch in exclude]
+        else:
+            raise ValueError('exclude has to be a list of channel names or '
+                             '"bads"')
+
+        picks = list(set(picks).difference(exclude))
+    picks = np.array(picks)
+
+    types = np.array([channel_type(evoked.info, idx) for idx in picks])
+    n_channel_types = 0
+    ch_types_used = []
+    for t in channel_types:
+        if t in types:
+            n_channel_types += 1
+            ch_types_used.append(t)
+
+    axes_init = axes  # remember if axes were given as input
+
+    fig = None
+    if axes is None:
+        fig, axes = plt.subplots(n_channel_types, 1)
+
+    if isinstance(axes, plt.Axes):
+        axes = [axes]
+    elif isinstance(axes, np.ndarray):
+        axes = list(axes)
+
+    if axes_init is not None:
+        fig = axes[0].get_figure()
+
+    if not len(axes) == n_channel_types:
+        raise ValueError('Number of axes (%g) must match number of channel '
+                         'types (%g)' % (len(axes), n_channel_types))
+
+    # instead of projecting during each iteration let's use the mixin here.
+    if proj is True and evoked.proj is not True:
+        evoked = evoked.copy()
+        evoked.apply_proj()
+
+    times = 1e3 * evoked.times  # time in milliseconds
+    texts = list()
+    idxs = list()
+    lines = list()
+    selectors = list()  # for keeping reference to span_selectors
+    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
+                                           alpha=0.75)]
+    gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
+                                               alpha=0.75)]
+    for ax, t in zip(axes, ch_types_used):
+        ch_unit = units[t]
+        this_scaling = scalings[t]
+        if unit is False:
+            this_scaling = 1.0
+            ch_unit = 'NA'  # no unit
+        idx = list(picks[types == t])
+        idxs.append(idx)
+        if len(idx) > 0:
+            # Parameters for butterfly interactive plots
+            if plot_type == 'butterfly':
+                if any(i in bad_ch_idx for i in idx):
+                    colors = ['k'] * len(idx)
+                    for i in bad_ch_idx:
+                        if i in idx:
+                            colors[idx.index(i)] = 'r'
+
+                    ax._get_lines.color_cycle = iter(colors)
+                else:
+                    ax._get_lines.color_cycle = cycle(['k'])
+                text = ax.annotate('Loading...', xy=(0.01, 0.1),
+                                   xycoords='axes fraction', fontsize=20,
+                                   color='green')
+                text.set_visible(False)
+                callback_onselect = partial(_butterfly_onselect,
+                                            ch_types=ch_types_used,
+                                            evoked=evoked, text=text)
+                blit = False if plt.get_backend() == 'MacOSX' else True
+                selectors.append(SpanSelector(ax, callback_onselect,
+                                              'horizontal', minspan=10,
+                                              useblit=blit,
+                                              rectprops=dict(alpha=0.5,
+                                                             facecolor='red')))
+            # Set amplitude scaling
+            D = this_scaling * evoked.data[idx, :]
+            if plot_type == 'butterfly':
+                gfp_only = (isinstance(gfp, string_types) and gfp == 'only')
+                if not gfp_only:
+                    lines.append(ax.plot(times, D.T, picker=3., zorder=0))
+                    for ii, line in zip(idx, lines[-1]):
+                        if ii in bad_ch_idx:
+                            line.set_zorder(1)
+                if gfp:  # 'only' or boolean True
+                    gfp_color = (0., 1., 0.)
+                    this_gfp = np.sqrt((D * D).mean(axis=0))
+                    this_ylim = ax.get_ylim()
+                    if not gfp_only:
+                        y_offset = this_ylim[0]
+                    else:
+                        y_offset = 0.
+                    this_gfp += y_offset
+                    ax.fill_between(times, y_offset, this_gfp, color='none',
+                                    facecolor=gfp_color, zorder=0, alpha=0.25)
+                    ax.plot(times, this_gfp, color=gfp_color, zorder=2)
+                    ax.text(times[0] + 0.01 * (times[-1] - times[0]),
+                            this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
+                            'GFP', zorder=3, color=gfp_color,
+                            path_effects=gfp_path_effects)
+                ax.set_ylabel('data (%s)' % ch_unit)
+                # for old matplotlib, we actually need this to have a bounding
+                # box (!), so we have to put some valid text here, change
+                # alpha and path effects later
+                texts.append(ax.text(0, 0, 'blank', zorder=2,
+                                     verticalalignment='baseline',
+                                     horizontalalignment='left',
+                                     fontweight='bold', alpha=0))
+            elif plot_type == 'image':
+                im = ax.imshow(D, interpolation='nearest', origin='lower',
+                               extent=[times[0], times[-1], 0, D.shape[0]],
+                               aspect='auto', cmap=cmap)
+                cbar = plt.colorbar(im, ax=ax)
+                cbar.ax.set_title(ch_unit)
+                ax.set_ylabel('channels (index)')
+            else:
+                raise ValueError("plot_type has to be 'butterfly' or 'image'."
+                                 "Got %s." % plot_type)
+            if xlim is not None:
+                if xlim == 'tight':
+                    xlim = (times[0], times[-1])
+                ax.set_xlim(xlim)
+            if ylim is not None and t in ylim:
+                if plot_type == 'butterfly':
+                    ax.set_ylim(ylim[t])
+                elif plot_type == 'image':
+                    im.set_clim(ylim[t])
+            ax.set_title(titles[t] + ' (%d channel%s)' % (
+                         len(D), 's' if len(D) > 1 else ''))
+            ax.set_xlabel('time (ms)')
+
+            if (plot_type == 'butterfly') and (hline is not None):
+                for h in hline:
+                    ax.axhline(h, color='r', linestyle='--', linewidth=2)
+    if plot_type == 'butterfly':
+        params = dict(axes=axes, texts=texts, lines=lines,
+                      ch_names=evoked.ch_names, idxs=idxs, need_draw=False,
+                      path_effects=path_effects, selectors=selectors)
+        fig.canvas.mpl_connect('pick_event',
+                               partial(_butterfly_onpick, params=params))
+        fig.canvas.mpl_connect('button_press_event',
+                               partial(_butterfly_on_button_press,
+                                       params=params))
+
+    if axes_init is None:
+        plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      axes=axes, types=types, units=units, scalings=scalings,
+                      unit=unit, ch_types_used=ch_types_used, picks=picks,
+                      plot_update_proj_callback=_plot_update_evoked,
+                      plot_type=plot_type)
+        _draw_proj_checkbox(None, params)
+
+    if show and plt.get_backend() != 'agg':
+        plt.show()
+        fig.canvas.draw()  # for axes plots update axes.
+    tight_layout(fig=fig)
+
+    return fig
+
+
+def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
+                ylim=None, xlim='tight', proj=False, hline=None, units=None,
+                scalings=None, titles=None, axes=None, gfp=False):
+    """Plot evoked data
+
+    Left-clicking a line shows the channel name. Selecting an area by
+    clicking and holding the left mouse button plots a topographic map of
+    the selected area.
+
+    Note: If bad channels are not excluded they are shown in red.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    picks : array-like of int | None
+        The indices of channels to plot. If None show all.
+    exclude : list of str | 'bads'
+        Channels names to exclude from being shown. If 'bads', the
+        bad channels are excluded.
+    unit : bool
+        Scale plot with channel (SI) unit.
+    show : bool
+        Show figure if True.
+    ylim : dict | None
+        ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
+        Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
+        for each channel equals the pyplot default.
+    xlim : 'tight' | tuple | None
+        xlim for plots.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    hline : list of floats | None
+        The values at which to show a horizontal line.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    titles : dict | None
+        The titles associated with the channels. If None, defaults to
+        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+    axes : instance of Axis | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of channel types. If instance of
+        Axes, there must be only one channel type plotted.
+    gfp : bool | 'only'
+        Plot GFP in green if True or "only". If "only", then the individual
+        channel traces will not be shown.
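+
+    Examples
+    --------
+    A minimal usage sketch (assumes ``evoked`` was loaded elsewhere, e.g.
+    with ``mne.read_evokeds``)::
+
+        >>> fig = plot_evoked(evoked, gfp=True)  # doctest: +SKIP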
+    """
+    return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
+                        show=show, ylim=ylim, proj=proj, xlim=xlim,
+                        hline=hline, units=units, scalings=scalings,
+                        titles=titles, axes=axes, plot_type="butterfly",
+                        gfp=gfp)
+
+
+def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
+                     border='none', ylim=None, scalings=None, title=None,
+                     proj=False, vline=[0.0], fig_facecolor='k',
+                     fig_background=None, axis_facecolor='k', font_color='w',
+                     show=True):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
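+
+    Examples
+    --------
+    A minimal sketch (assumes two conditions ``evoked_l`` and ``evoked_r``
+    were loaded elsewhere, e.g. with ``mne.read_evokeds``)::
+
+        >>> fig = plot_evoked_topo([evoked_l, evoked_r],
+        ...                        color=['yellow', 'green'])  # doctest: +SKIP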
+    """
+    return _plot_evoked_topo(evoked=evoked, layout=layout,
+                             layout_scale=layout_scale, color=color,
+                             border=border, ylim=ylim, scalings=scalings,
+                             title=title, proj=proj, vline=vline,
+                             fig_facecolor=fig_facecolor,
+                             fig_background=fig_background,
+                             axis_facecolor=axis_facecolor,
+                             font_color=font_color, show=show)
+
+
+def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
+                      clim=None, xlim='tight', proj=False, units=None,
+                      scalings=None, titles=None, axes=None, cmap='RdBu_r'):
+    """Plot evoked data as images
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked data
+    picks : array-like of int | None
+        The indices of channels to plot. If None show all.
+    exclude : list of str | 'bads'
+        Channels names to exclude from being shown. If 'bads', the
+        bad channels are excluded.
+    unit : bool
+        Scale plot with channel (SI) unit.
+    show : bool
+        Show figure if True.
+    clim : dict | None
+        clim for plots. e.g. clim = dict(eeg=[-200e-6, 200e-6])
+        Valid keys are eeg, mag, grad, misc. If None, the clim parameter
+        for each channel equals the pyplot default.
+    xlim : 'tight' | tuple | None
+        xlim for plots.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    units : dict | None
+        The units of the channel types used for axes labels. If None,
+        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    titles : dict | None
+        The titles associated with the channels. If None, defaults to
+        `dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
+    axes : instance of Axis | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of channel types. If instance of
+        Axes, there must be only one channel type plotted.
+    cmap : matplotlib colormap
+        Colormap.
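+
+    Examples
+    --------
+    A minimal sketch (assumes ``evoked`` was loaded elsewhere)::
+
+        >>> fig = plot_evoked_image(evoked, cmap='RdBu_r')  # doctest: +SKIP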
+    """
+    return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
+                        show=show, ylim=clim, proj=proj, xlim=xlim,
+                        hline=None, units=units, scalings=scalings,
+                        titles=titles, axes=axes, plot_type="image",
+                        cmap=cmap)
+
+
+def _plot_update_evoked(params, bools):
+    """ update the plot evoked lines
+    """
+    picks, evoked = [params[k] for k in ('picks', 'evoked')]
+    times = evoked.times * 1e3
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+    params['proj_bools'] = bools
+    new_evoked = evoked.copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+    for ax, t in zip(params['axes'], params['ch_types_used']):
+        this_scaling = params['scalings'][t]
+        idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
+        D = this_scaling * new_evoked.data[idx, :]
+        if params['plot_type'] == 'butterfly':
+            for line, di in zip(ax.lines, D):
+                line.set_data(times, di)
+        else:
+            ax.images[0].set_data(D)
+    params['fig'].canvas.draw()
+
+
+def plot_evoked_white(evoked, noise_cov, show=True):
+    """Plot whitened evoked response
+
+    Plots the whitened evoked response and the whitened GFP as described in
+    [1]. If a single covariance object is passed, the GFP panel (bottom)
+    will depict different sensor types. If multiple covariance objects are
+    passed as a list, the left column will display the whitened evoked
+    responses for each channel based on the whitener from the noise covariance
+    that has the highest log-likelihood. The right column will depict the
+    whitened GFPs based on each estimator separately for each sensor type.
+    Instead of the number of channels, the GFP display shows the estimated
+    rank.
+    Note. The rank estimation will be printed by the logger for each noise
+    covariance estimator that is passed.
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked response.
+    noise_cov : list | instance of Covariance | str
+        The noise covariance as computed by ``mne.cov.compute_covariance``.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    References
+    ----------
+    [1] Engemann D. and Gramfort A. (2015) Automated model selection in
+        covariance estimation and spatial whitening of MEG and EEG signals,
+        vol. 108, 328-342, NeuroImage.
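+
+    Examples
+    --------
+    A minimal sketch (assumes ``epochs`` and ``evoked`` exist; the list of
+    estimators is illustrative)::
+
+        >>> covs = mne.compute_covariance(
+        ...     epochs, tmax=0., method=['empirical', 'shrunk'],
+        ...     return_estimators=True)  # doctest: +SKIP
+        >>> fig = plot_evoked_white(evoked, noise_cov=covs)  # doctest: +SKIP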
+    """
+    return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
+                              scalings=None, rank=None, show=show)
+
+
+def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True):
+    """helper to plot_evoked_white
+
+    Additional Paramter
+    -------------------
+    scalings : dict | None
+        The rescaling method to be applied to improve the accuracy of rank
+        estimaiton. If dict, it will override the following default values
+        (used if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+        Note. Theses values were tested on different datests across various
+        conditions. You should not need to update them.
+
+    rank : dict of int | None
+        Dict of ints where keys are 'eeg', 'mag' or 'grad'. If None,
+        the rank is detected automatically. Defaults to None. Note.
+        The rank estimation will be printed by the logger for each noise
+        covariance estimator that is passed.
+
+    """
+
+    from ..cov import whiten_evoked, read_cov  # recursive import
+    from ..cov import _estimate_rank_meeg_cov
+    import matplotlib.pyplot as plt
+    if scalings is None:
+        scalings = dict(mag=1e12, grad=1e11, eeg=1e5)
+
+    ch_used = [ch for ch in ['eeg', 'grad', 'mag'] if ch in evoked]
+    has_meg = 'mag' in ch_used and 'grad' in ch_used
+
+    if isinstance(noise_cov, string_types):
+        noise_cov = read_cov(noise_cov)
+    if not isinstance(noise_cov, (list, tuple)):
+        noise_cov = [noise_cov]
+
+    proc_history = evoked.info.get('proc_history', [])
+    has_sss = False
+    if len(proc_history) > 0:
+        # if SSS has been applied, mags and grads are no longer independent
+        # for correct display of the whitening we will drop the cross-terms
+        # (the gradiometer * magnetometer covariance)
+        has_sss = 'max_info' in proc_history[0] and has_meg
+    if has_sss:
+        logger.info('SSS has been applied to data. Showing mag and grad '
+                    'whitening jointly.')
+
+    evoked = evoked.copy()  # handle ref meg
+    evoked.info['projs'] = []  # either already applied or unwanted here
+
+    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
+                       exclude='bads')
+    evoked.pick_channels([evoked.ch_names[k] for k in picks], copy=False)
+    # important to re-pick. will otherwise crash on systems with ref channels
+    # as first sensor block
+    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
+                       exclude='bads')
+
+    picks_list = _picks_by_type(evoked.info, meg_combined=has_sss)
+    if has_meg and has_sss:
+        # reduce ch_used to combined mag grad
+        ch_used = list(zip(*picks_list))[0]
+    # order pick list by ch_used (required for compat with plot_evoked)
+    picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
+    n_ch_used = len(ch_used)
+
+    # make sure we use the same rank estimates for GFP and whitening
+    rank_list = []
+    for cov in noise_cov:
+        rank_ = {}
+        C = cov['data'].copy()
+        picks_list2 = [k for k in picks_list]
+        if rank is None:
+            if has_meg and not has_sss:
+                picks_list2 += _picks_by_type(evoked.info,
+                                              meg_combined=True)
+            for ch_type, this_picks in picks_list2:
+                this_info = pick_info(evoked.info, this_picks)
+                idx = np.ix_(this_picks, this_picks)
+                this_rank = _estimate_rank_meeg_cov(C[idx], this_info,
+                                                    scalings)
+                rank_[ch_type] = this_rank
+        if rank is not None:
+            rank_.update(rank)
+        rank_list.append(rank_)
+    evokeds_white = [whiten_evoked(evoked, n, picks, rank=r)
+                     for n, r in zip(noise_cov, rank_list)]
+
+    axes_evoked = None
+
+    def whitened_gfp(x, rank=None):
+        """Whitened Global Field Power
+
+        The MNE inverse solver assumes zero mean whitened data as input.
+        Therefore, a chi^2 statistic will be best to detect model violations.
+        """
+        return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
+
+    # prepare plot
+    if len(noise_cov) > 1:
+        n_columns = 2
+        n_extra_row = 0
+    else:
+        n_columns = 1
+        n_extra_row = 1
+
+    n_rows = n_ch_used + n_extra_row
+    fig, axes = plt.subplots(n_rows,
+                             n_columns, sharex=True, sharey=False,
+                             figsize=(8.8, 2.2 * n_rows))
+    if n_columns > 1:
+        suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
+                    'and global field power '
+                    '(right, comparison of estimators)' %
+                    noise_cov[0].get('method', 'empirical'))
+        fig.suptitle(suptitle)
+
+    ax_gfp = None
+    if any(((n_columns == 1 and n_ch_used == 1),
+            (n_columns == 1 and n_ch_used > 1),
+            (n_columns == 2 and n_ch_used == 1))):
+        axes_evoked = axes[:n_ch_used]
+        ax_gfp = axes[-1:]
+    elif n_columns == 2 and n_ch_used > 1:
+        axes_evoked = axes[:n_ch_used, 0]
+        ax_gfp = axes[:, 1]
+    else:
+        raise RuntimeError('Wrong axes inputs')
+
+    times = evoked.times * 1e3
+    titles_ = _handle_default('titles')
+    if has_sss:
+        titles_['meg'] = 'MEG (combined)'
+
+    colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
+    ch_colors = {'eeg': 'black', 'mag': 'blue', 'grad': 'cyan',
+                 'meg': 'steelblue'}
+    iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
+
+    if not has_sss:
+        evokeds_white[0].plot(unit=False, axes=axes_evoked,
+                              hline=[-1.96, 1.96], show=False)
+    else:
+        for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
+            ax.plot(times, evokeds_white[0].data[picks].T, color='k')
+            for hline in [-1.96, 1.96]:
+                ax.axhline(hline, color='red', linestyle='--')
+
+    # Now plot the GFP
+    for evoked_white, noise_cov, rank_, color in iter_gfp:
+        i = 0
+        for ch, sub_picks in picks_list:
+            this_rank = rank_[ch]
+            title = '{0} ({2}{1})'.format(
+                    titles_[ch] if n_columns > 1 else ch,
+                    this_rank, 'rank ' if n_columns > 1 else '')
+            label = noise_cov.get('method', 'empirical')
+
+            ax_gfp[i].set_title(title if n_columns > 1 else
+                                'whitened global field power (GFP),'
+                                ' method = "%s"' % label)
+
+            data = evoked_white.data[sub_picks]
+            gfp = whitened_gfp(data, rank=this_rank)
+            ax_gfp[i].plot(times, gfp,
+                           label=(label if n_columns > 1 else title),
+                           color=color if n_columns > 1 else ch_colors[ch])
+            ax_gfp[i].set_xlabel('time [ms]')
+            ax_gfp[i].set_ylabel('GFP [chi^2]')
+            ax_gfp[i].set_xlim(times[0], times[-1])
+            ax_gfp[i].set_ylim(0, 10)
+            ax_gfp[i].axhline(1, color='red', linestyle='--')
+            if n_columns > 1:
+                i += 1
+
+    ax = ax_gfp[0]
+    if n_columns == 1:
+        ax.legend(  # mpl < 1.2.1 compatibility: use prop instead of fontsize
+            loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
+    else:
+        ax.legend(loc='upper right', prop=dict(size=10))
+        params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
+                      bottom=[0.22, 0.13, 0.09][n_rows - 1])
+        if has_sss:
+            params['hspace'] = 0.49
+        fig.subplots_adjust(**params)
+    fig.canvas.draw()
+
+    if show is True:
+        plt.show()
+    return fig
+
+
+def plot_snr_estimate(evoked, inv, show=True):
+    """Plot a data SNR estimate
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked instance. This should probably be baseline-corrected.
+    inv : instance of InverseOperator
+        The minimum-norm inverse operator.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
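+
+    Examples
+    --------
+    A minimal sketch (assumes ``evoked`` exists; the inverse operator file
+    name is illustrative)::
+
+        >>> from mne.minimum_norm import read_inverse_operator  # doctest: +SKIP
+        >>> inv = read_inverse_operator('sample-inv.fif')  # doctest: +SKIP
+        >>> fig = plot_snr_estimate(evoked, inv)  # doctest: +SKIP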
+    """
+    import matplotlib.pyplot as plt
+    from ..minimum_norm import estimate_snr
+    snr, snr_est = estimate_snr(evoked, inv, verbose=True)
+    fig, ax = plt.subplots(1, 1)
+    lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
+    ax.plot([0, 0], lims[2:], 'k:')
+    ax.plot(lims[:2], [0, 0], 'k:')
+    # Colors are "bluish green" and "vermillion" taken from:
+    #  http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
+    ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])
+    ax.plot(evoked.times, snr, color=[0.8, 0.4, 0.0])
+    ax.set_xlim(lims[:2])
+    ax.set_ylim(lims[2:])
+    ax.set_ylabel('SNR')
+    ax.set_xlabel('Time (sec)')
+    if evoked.comment is not None:
+        ax.set_title(evoked.comment)
+    plt.draw()
+    if show:
+        plt.show()
+    return fig
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/ica.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/ica.py
new file mode 100644
index 0000000..122fd7c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/ica.py
@@ -0,0 +1,761 @@
+"""Functions to plot ICA specific data (besides topographies)
+"""
+from __future__ import print_function
+
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: Simplified BSD
+
+from functools import partial
+
+import numpy as np
+
+from .utils import tight_layout, _prepare_trellis, _select_bads
+from .utils import _layout_figure, _plot_raw_onscroll, _mouse_click
+from .utils import _helper_raw_resize, _plot_raw_onkey
+from .raw import _prepare_mne_browse_raw, _plot_raw_traces
+from .epochs import _prepare_mne_browse_epochs
+from .evoked import _butterfly_on_button_press, _butterfly_onpick
+from .topomap import _prepare_topo_plot, plot_topomap
+from ..utils import logger
+from ..defaults import _handle_default
+from ..io.meas_info import create_info
+from ..io.pick import pick_types
+
+
+def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
+    """Onpick callback for plot_ica_panel"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    if event.mouseevent.inaxes is None or event.mouseevent.button != 1:
+        return
+
+    artist = event.artist
+    try:
+        import matplotlib.pyplot as plt
+        plt.figure()
+        src_idx = artist._mne_src_idx
+        component = artist._mne_component
+        plt.plot(sources[src_idx], 'r' if artist._mne_is_bad else 'k')
+        plt.ylim(ylims)
+        plt.grid(linestyle='-', color='gray', linewidth=.25)
+        plt.title('ICA #%i' % component)
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so we print
+        # it here to know what went wrong
+        print(err)
+        raise err
+
+
+def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
+                     stop=None, show=True, title=None, block=False):
+    """Plot estimated latent sources given the unmixing matrix.
+
+    Typical use cases:
+
+    1. plot the evolution of latent sources over time (Raw input)
+    2. plot latent sources around event-related time windows (Epochs input)
+    3. plot time-locking in ICA space (Evoked input)
+
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA solution.
+    inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
+        The object to plot the sources from.
+    picks : int | array_like of int | None.
+        The components to be displayed. If None, plot will show the
+        sources in the order as fitted.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    start : int
+        X-axis start index. If None, from the beginning.
+    stop : int
+        X-axis stop index. If None, 20 units (seconds for Raw, epochs for
+        Epochs) from ``start`` are shown; for Evoked input, data are shown
+        to the end.
+    show : bool
+        Show figure if True.
+    title : str | None
+        The figure title. If None a default is provided.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for interactive selection of components in raw and epoch
+        plotter. For evoked, this parameter has no effect. Defaults to False.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+        The figure.
+
+    Notes
+    -----
+    For raw and epoch instances, it is possible to select components for
+    exclusion by clicking on the line. The selected components are added to
+    ``ica.exclude`` on close.
+
+    .. versionadded:: 0.10.0
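+
+    Examples
+    --------
+    A minimal sketch (assumes ``raw`` and a fitted ``ica``, e.g. from
+    ``mne.preprocessing.ICA(n_components=20).fit(raw)``)::
+
+        >>> fig = plot_ica_sources(ica, raw, start=0, stop=10)  # doctest: +SKIP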
+    """
+
+    from ..io.base import _BaseRaw
+    from ..evoked import Evoked
+    from ..epochs import _BaseEpochs
+
+    if exclude is None:
+        exclude = ica.exclude
+    elif len(ica.exclude) > 0:
+        exclude = np.union1d(ica.exclude, exclude)
+    if isinstance(inst, _BaseRaw):
+        fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
+                                stop=stop, show=show, title=title,
+                                block=block)
+    elif isinstance(inst, _BaseEpochs):
+        fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
+                                   stop=stop, show=show, title=title,
+                                   block=block)
+    elif isinstance(inst, Evoked):
+        sources = ica.get_sources(inst)
+        if start is not None or stop is not None:
+            inst = inst.crop(start, stop, copy=True)
+        fig = _plot_ica_sources_evoked(evoked=sources,
+                                       picks=picks,
+                                       exclude=exclude,
+                                       title=title, show=show)
+    else:
+        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
+
+    return fig
+
+
+def _plot_ica_grid(sources, start, stop,
+                   source_idx, ncol, exclude,
+                   title, show):
+    """Create panel plots of ICA sources
+
+    Clicking on the plot of an individual source opens a new figure showing
+    the source.
+
+    Parameters
+    ----------
+    sources : ndarray
+        Sources as drawn from ica.get_sources.
+    start : int
+        x-axis start index. If None, from the beginning.
+    stop : int
+        x-axis stop index. If None, to the end.
+    source_idx : array-like
+        Indices for subsetting the sources.
+    ncol : int
+        Number of panel-columns.
+    exclude : array_like of int
+        The components marked for exclusion.
+    title : str
+        The figure title. If None a default is provided.
+    show : bool
+        If True, all open plots will be shown.
+    """
+    import matplotlib.pyplot as plt
+
+    if source_idx is None:
+        source_idx = np.arange(len(sources))
+    elif isinstance(source_idx, list):
+        source_idx = np.array(source_idx)
+    if exclude is None:
+        exclude = []
+
+    n_components = len(sources)
+    ylims = sources.min(), sources.max()
+    xlims = np.arange(sources.shape[-1])[[0, -1]]
+    fig, axes = _prepare_trellis(n_components, ncol)
+    if title is None:
+        fig.suptitle('Reconstructed latent sources', size=16)
+    elif title:
+        fig.suptitle(title, size=16)
+
+    plt.subplots_adjust(wspace=0.05, hspace=0.05)
+    my_iter = enumerate(zip(source_idx, axes, sources))
+    for i_source, (i_selection, ax, source) in my_iter:
+        component = '[%i]' % i_selection
+        # plot + embed idx and component name to use in callback
+        color = 'r' if i_selection in exclude else 'k'
+        line = ax.plot(source, linewidth=0.5, color=color, picker=1e9)[0]
+        vars(line)['_mne_src_idx'] = i_source
+        vars(line)['_mne_component'] = i_selection
+        vars(line)['_mne_is_bad'] = i_selection in exclude
+        ax.set_xlim(xlims)
+        ax.set_ylim(ylims)
+        ax.text(0.05, .95, component, transform=ax.transAxes,
+                verticalalignment='top')
+        plt.setp(ax.get_xticklabels(), visible=False)
+        plt.setp(ax.get_yticklabels(), visible=False)
+    # register callback
+    callback = partial(_ica_plot_sources_onpick_, sources=sources, ylims=ylims)
+    fig.canvas.mpl_connect('pick_event', callback)
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_ica_sources_evoked(evoked, picks, exclude, title, show):
+    """Plot average over epochs in ICA space
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The Evoked to be used.
+    picks : int | array_like of int | None.
+        The components to be displayed. If None, plot will show the
+        sources in the order as fitted.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    title : str
+        The figure title.
+    show : bool
+        Show figure if True.
+    """
+    import matplotlib.pyplot as plt
+    if title is None:
+        title = 'Reconstructed latent sources, time-locked'
+
+    fig, axes = plt.subplots(1)
+    ax = axes
+    axes = [axes]
+    times = evoked.times * 1e3
+
+    # plot unclassified sources and label excluded ones
+    lines = list()
+    texts = list()
+    if picks is None:
+        picks = np.arange(evoked.data.shape[0])
+    idxs = [picks]
+    for ii in picks:
+        if ii in exclude:
+            label = 'ICA %03d' % (ii + 1)
+            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
+                                 zorder=1, color='r', label=label))
+        else:
+            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
+                                 color='k', zorder=0))
+
+    ax.set_title(title)
+    ax.set_xlim(times[[0, -1]])
+    ax.set_xlabel('Time (ms)')
+    ax.set_ylabel('(NA)')
+    if len(exclude) > 0:
+        plt.legend(loc='best')
+    tight_layout(fig=fig)
+
+    # for old matplotlib, we actually need this to have a bounding
+    # box (!), so we have to put some valid text here, change
+    # alpha and path effects later
+    texts.append(ax.text(0, 0, 'blank', zorder=2,
+                         verticalalignment='baseline',
+                         horizontalalignment='left',
+                         fontweight='bold', alpha=0))
+    # wrap in a list of lists (one group of lines per subplot) to match the
+    # structure the butterfly callbacks expect
+    lines = [lines]
+    ch_names = evoked.ch_names
+
+    from matplotlib import patheffects
+    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
+                                           alpha=0.75)]
+    params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
+                  ch_names=ch_names, need_draw=False,
+                  path_effects=path_effects)
+    fig.canvas.mpl_connect('pick_event',
+                           partial(_butterfly_onpick, params=params))
+    fig.canvas.mpl_connect('button_press_event',
+                           partial(_butterfly_on_button_press,
+                                   params=params))
+    if show:
+        plt.show()
+
+    return fig
+
+
+def plot_ica_scores(ica, scores, exclude=None, axhline=None,
+                    title='ICA component scores',
+                    figsize=(12, 6), show=True):
+    """Plot scores related to detected components.
+
+    Use this function to assess how well your scores describe outlier
+    sources and how well they were detected.
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    scores : array_like of float, shape (n_ica_components,) | list of arrays
+        Scores based on arbitrary metric to characterize ICA components.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    axhline : float
+        Draw a horizontal line, e.g. to visualize a rejection threshold.
+    title : str
+        The figure title.
+    figsize : tuple of int
+        The figure size. Defaults to (12, 6).
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.pyplot.Figure
+        The figure object
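+
+    Examples
+    --------
+    A minimal sketch (assumes ``raw`` and a fitted ``ica``; the EOG channel
+    name is illustrative)::
+
+        >>> scores = ica.score_sources(raw, target='EOG 061')  # doctest: +SKIP
+        >>> fig = plot_ica_scores(ica, scores)  # doctest: +SKIP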
+    """
+    import matplotlib.pyplot as plt
+    my_range = np.arange(ica.n_components_)
+    if exclude is None:
+        exclude = ica.exclude
+    exclude = np.unique(exclude)
+    if not isinstance(scores[0], (list, np.ndarray)):
+        scores = [scores]
+    n_rows = len(scores)
+    figsize = (12, 6) if figsize is None else figsize
+    fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
+    if isinstance(axes, np.ndarray):
+        axes = axes.flatten()
+    else:
+        axes = [axes]
+    plt.suptitle(title)
+    for this_scores, ax in zip(scores, axes):
+        if len(my_range) != len(this_scores):
+            raise ValueError('The length of `scores` must equal the '
+                             'number of ICA components.')
+        ax.bar(my_range, this_scores, color='w')
+        for excl in exclude:
+            ax.bar(my_range[excl], this_scores[excl], color='r')
+        if axhline is not None:
+            if np.isscalar(axhline):
+                axhline = [axhline]
+            for axl in axhline:
+                ax.axhline(axl, color='r', linestyle='--')
+        ax.set_ylabel('score')
+        ax.set_xlabel('ICA components')
+        ax.set_xlim(0, len(this_scores))
+
+    tight_layout(fig=fig)
+    if len(axes) > 1:
+        plt.subplots_adjust(top=0.9)
+
+    if show:
+        plt.show()
+    return fig
+
+
+def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
+                     stop=None, title=None, show=True):
+    """Overlay of raw and cleaned signals given the unmixing matrix.
+
+    This method helps visualizing signal quality and artifact rejection.
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    inst : instance of mne.io.Raw or mne.Evoked
+        The signals to be compared given the ICA solution. If Raw input,
+        the raw data are displayed before and after cleaning. In a second
+        panel, the cross-channel average will be displayed. Since dipolar
+        sources will be canceled out, this display is sensitive to
+        artifacts. If Evoked input, butterfly plots for clean and raw
+        signals will be superimposed.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels
+        are used that were included on fitting).
+    start : int
+        X-axis start index. If None, from the beginning.
+    stop : int
+        X-axis stop index. If None, to the end.
+    title : str
+        The figure title.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+        The figure.
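+
+    Examples
+    --------
+    A minimal sketch (assumes ``raw`` and a fitted ``ica`` with components
+    already marked in ``ica.exclude``)::
+
+        >>> fig = plot_ica_overlay(ica, raw, start=0., stop=3.)  # doctest: +SKIP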
+    """
+    # avoid circular imports
+    from ..io.base import _BaseRaw
+    from ..evoked import Evoked
+    from ..preprocessing.ica import _check_start_stop
+
+    if not isinstance(inst, (_BaseRaw, Evoked)):
+        raise ValueError('Data input must be of Raw or Evoked type')
+    if title is None:
+        title = 'Signals before (red) and after (black) cleaning'
+    if picks is None:
+        picks = [inst.ch_names.index(k) for k in ica.ch_names]
+    if exclude is None:
+        exclude = ica.exclude
+    if isinstance(inst, _BaseRaw):
+        if start is None:
+            start = 0.0
+        if stop is None:
+            stop = 3.0
+        ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
+        start_compare, stop_compare = _check_start_stop(inst, start, stop)
+        data, times = inst[picks, start_compare:stop_compare]
+
+        raw_cln = ica.apply(inst, exclude=exclude, start=start, stop=stop,
+                            copy=True)
+        data_cln, _ = raw_cln[picks, start_compare:stop_compare]
+        fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
+                                    times=times * 1e3, title=title,
+                                    ch_types_used=ch_types_used, show=show)
+    elif isinstance(inst, Evoked):
+        if start is not None and stop is not None:
+            inst = inst.crop(start, stop, copy=True)
+        if picks is not None:
+            inst.pick_channels([inst.ch_names[p] for p in picks])
+        evoked_cln = ica.apply(inst, exclude=exclude, copy=True)
+        fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
+                                       title=title, show=show)
+
+    return fig
+
+
+def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
+    """Plot evoked after and before ICA cleaning
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    epochs : instance of mne.Epochs
+        The Epochs to be regarded.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+    """
+    import matplotlib.pyplot as plt
+    # Restore sensor space data and keep all PCA components
+    # let's now compare the data before and after cleaning.
+    # first the raw data
+    assert data.shape == data_cln.shape
+    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    plt.suptitle(title)
+    ax1.plot(times, data.T, color='r')
+    ax1.plot(times, data_cln.T, color='k')
+    ax1.set_xlabel('time (ms)')
+    ax1.set_xlim(times[0], times[-1])
+    ax1.set_title('Raw data')
+
+    _ch_types = {'mag': 'Magnetometers',
+                 'grad': 'Gradiometers',
+                 'eeg': 'EEG'}
+    ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
+    ax2.set_title('Average across channels ({0})'.format(ch_types))
+    ax2.plot(times, data.mean(0), color='r')
+    ax2.plot(times, data_cln.mean(0), color='k')
+    ax2.set_xlabel('time (ms)')
+    ax2.set_xlim(times[0], times[-1])
+    tight_layout(fig=fig)
+
+    fig.subplots_adjust(top=0.90)
+    fig.canvas.draw()
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
+    """Plot evoked after and before ICA cleaning
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
+    epochs : instance of mne.Epochs
+        The Epochs to be regarded.
+    show : bool
+        If True, all open plots will be shown.
+
+    Returns
+    -------
+    fig : instance of pyplot.Figure
+    """
+    import matplotlib.pyplot as plt
+    ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
+    n_rows = len(ch_types_used)
+    ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
+                         c in evoked_cln]
+
+    if len(ch_types_used) != len(ch_types_used_cln):
+        raise ValueError('Raw and clean evokeds must match. '
+                         'Found different channels.')
+
+    fig, axes = plt.subplots(n_rows, 1)
+    fig.suptitle('Average signal before (red) and after (black) ICA')
+    axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
+
+    evoked.plot(axes=axes, show=show)
+    for ax in fig.axes:
+        for l in ax.get_lines():
+            l.set_color('r')
+    fig.canvas.draw()
+    evoked_cln.plot(axes=axes, show=show)
+    tight_layout(fig=fig)
+
+    fig.subplots_adjust(top=0.90)
+    fig.canvas.draw()
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
+                      block):
+    """Function for plotting the ICA components as raw array."""
+    import matplotlib.pyplot as plt
+    color = _handle_default('color', (0., 0., 0.))
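+    # note: the 0.2 factor below appears to be an empirical scaling chosen so
+    # that the 'misc'-typed ICA traces fit the browser's default channel
+    # spacing (assumption based on context, not documented here)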
+    orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
+    if picks is None:
+        picks = range(len(orig_data))
+    types = ['misc' for _ in picks]
+    picks = list(sorted(picks))
+    eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
+    ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
+    data = [orig_data[pick] for pick in picks]
+    c_names = ['ICA %03d' % x for x in range(len(orig_data))]
+    for eog_idx in eog_chs:
+        c_names.append(raw.ch_names[eog_idx])
+        types.append('eog')
+    for ecg_idx in ecg_chs:
+        c_names.append(raw.ch_names[ecg_idx])
+        types.append('ecg')
+    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
+    if len(extra_picks) > 0:
+        eog_ecg_data, _ = raw[extra_picks, :]
+        for idx in range(len(eog_ecg_data)):
+            if idx < len(eog_chs):
+                eog_ecg_data[idx] /= 150e-6  # scaling for eog
+            else:
+                eog_ecg_data[idx] /= 5e-4  # scaling for ecg
+        data = np.append(data, eog_ecg_data, axis=0)
+
+    for idx in range(len(extra_picks)):
+        picks = np.append(picks, ica.n_components_ + idx)
+    if title is None:
+        title = 'ICA components'
+    info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
+
+    info['bads'] = [c_names[x] for x in exclude]
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = start + 20
+        stop = min(stop, raw.times[-1])
+    duration = stop - start
+    if duration <= 0:
+        raise RuntimeError('Stop must be larger than start.')
+    t_end = int(duration * raw.info['sfreq'])
+    times = raw.times[0:t_end]
+    bad_color = (1., 0., 0.)
+    inds = list(range(len(picks)))
+    data = np.array(data)
+    n_channels = min([20, len(picks)])
+    params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end],
+                  ch_start=0, t_start=start, info=info, duration=duration,
+                  ica=ica, n_channels=n_channels, times=times, types=types,
+                  n_times=raw.n_times, bad_color=bad_color, picks=picks)
+    _prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
+                            n_channels)
+    params['scale_factor'] = 1.0
+    params['plot_fun'] = partial(_plot_raw_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color)
+    params['update_fun'] = partial(_update_data, params)
+    params['pick_bads_fun'] = partial(_pick_bads, params=params)
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    _layout_figure(params)
+    # callbacks
+    callback_key = partial(_plot_raw_onkey, params=params)
+    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
+    callback_scroll = partial(_plot_raw_onscroll, params=params)
+    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
+    callback_pick = partial(_mouse_click, params=params)
+    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_raw_resize, params=params)
+    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
+    callback_close = partial(_close_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    params['fig_proj'] = None
+    params['event_times'] = None
+    params['update_fun']()
+    params['plot_fun']()
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
+
+
+def _update_data(params):
+    """Function for preparing the data on horizontal shift of the viewport."""
+    sfreq = params['info']['sfreq']
+    start = int(params['t_start'] * sfreq)
+    end = int((params['t_start'] + params['duration']) * sfreq)
+    params['data'] = params['orig_data'][:, start:end]
+    params['times'] = params['raw'].times[start:end]
+
+
+def _pick_bads(event, params):
+    """Function for selecting components on click."""
+    bads = params['info']['bads']
+    params['info']['bads'] = _select_bads(event, params, bads)
+    params['update_fun']()
+    params['plot_fun']()
+
+
+def _close_event(events, params):
+    """Function for excluding the selected components on close."""
+    info = params['info']
+    c_names = ['ICA %03d' % x for x in range(params['ica'].n_components_)]
+    exclude = [c_names.index(x) for x in info['bads'] if x.startswith('ICA')]
+    params['ica'].exclude = exclude
+
+
+def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
+                         title, block):
+    """Function for plotting the components as epochs."""
+    import matplotlib.pyplot as plt
+    data = ica._transform_epochs(epochs, concatenate=True)
+    eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
+    ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
+    c_names = ['ICA %03d' % x for x in range(ica.n_components_)]
+    ch_types = np.repeat('misc', ica.n_components_)
+    for eog_idx in eog_chs:
+        c_names.append(epochs.ch_names[eog_idx])
+        ch_types = np.append(ch_types, 'eog')
+    for ecg_idx in ecg_chs:
+        c_names.append(epochs.ch_names[ecg_idx])
+        ch_types = np.append(ch_types, 'ecg')
+    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
+    if len(extra_picks) > 0:
+        eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
+                                      axis=1)
+        data = np.append(data, eog_ecg_data, axis=0)
+    scalings = _handle_default('scalings_plot_raw')
+    scalings['misc'] = 5.0
+    info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
+                       ch_types=ch_types)
+    info['projs'] = list()
+    info['bads'] = [c_names[x] for x in exclude]
+    if title is None:
+        title = 'ICA components'
+    if picks is None:
+        picks = list(range(ica.n_components_))
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = start + 20
+        stop = min(stop, len(epochs.events))
+    for idx in range(len(extra_picks)):
+        picks = np.append(picks, ica.n_components_ + idx)
+    n_epochs = stop - start
+    if n_epochs <= 0:
+        raise RuntimeError('Stop must be larger than start.')
+    params = {'ica': ica,
+              'epochs': epochs,
+              'info': info,
+              'orig_data': data,
+              'bads': list(),
+              'bad_color': (1., 0., 0.),
+              't_start': start * len(epochs.times)}
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    _prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
+                               n_epochs=n_epochs, scalings=scalings,
+                               title=title, picks=picks,
+                               order=['misc', 'eog', 'ecg'])
+    params['hsel_patch'].set_x(params['t_start'])
+    callback_close = partial(_close_epochs_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
+
+
+def _close_epochs_event(events, params):
+    """Function for excluding the selected components on close."""
+    info = params['info']
+    exclude = [info['ch_names'].index(x) for x in info['bads']
+               if x.startswith('ICA')]
+    params['ica'].exclude = exclude
+
+
+def _label_clicked(pos, params):
+    """Function for plotting independent components on click to label."""
+    import matplotlib.pyplot as plt
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
+    if line_idx >= len(params['picks']):
+        return
+    ic_idx = [params['picks'][line_idx]]
+    types = list()
+    info = params['ica'].info
+    if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
+        types.append('eeg')
+    if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
+        types.append('mag')
+    if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
+        types.append('grad')
+
+    ica = params['ica']
+    data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
+                  ica.pca_components_[:ica.n_components_])
+    data = np.atleast_2d(data)
+    fig, axes = _prepare_trellis(len(types), max_col=3)
+    for ch_idx, ch_type in enumerate(types):
+        try:
+            data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
+                                                                    ch_type,
+                                                                    None)
+        except Exception as exc:
+            logger.warning(exc)
+            plt.close(fig)
+            return
+        this_data = data[:, data_picks]
+        ax = axes[ch_idx]
+        if merge_grads:
+            from ..channels.layout import _merge_grad_data
+        for ii, data_ in zip(ic_idx, this_data):
+            ax.set_title('IC #%03d ' % ii + ch_type, fontsize=12)
+            data_ = _merge_grad_data(data_) if merge_grads else data_
+            plot_topomap(data_.flatten(), pos, axis=ax, show=False)
+            ax.set_yticks([])
+            ax.set_xticks([])
+            ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.95)
+    fig.canvas.draw()
+
+    plt.show()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/misc.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/misc.py
new file mode 100644
index 0000000..abcff98
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/misc.py
@@ -0,0 +1,580 @@
+"""Functions to make simple plots with M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import copy
+import warnings
+from glob import glob
+import os.path as op
+from itertools import cycle
+
+import numpy as np
+from scipy import linalg
+
+from ..surface import read_surface
+from ..io.proj import make_projector
+from ..utils import logger, verbose, get_subjects_dir
+from ..io.pick import pick_types
+from .utils import tight_layout, COLORS, _prepare_trellis
+
+
+@verbose
+def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
+             show=True, verbose=None):
+    """Plot Covariance data
+
+    Parameters
+    ----------
+    cov : instance of Covariance
+        The covariance matrix.
+    info : dict
+        Measurement info.
+    exclude : list of string | str
+        List of channels to exclude. If empty, do not exclude any channel.
+        If 'bads', exclude info['bads'].
+    colorbar : bool
+        Show colorbar or not.
+    proj : bool
+        Apply projections or not.
+    show_svd : bool
+        Also plot the singular values of the noise covariance for each
+        sensor type. Square roots, i.e. standard deviations, are shown.
+    show : bool
+        Show figure if True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig_cov : instance of matplotlib.pyplot.Figure
+        The covariance plot.
+    fig_svd : instance of matplotlib.pyplot.Figure | None
+        The SVD spectra plot of the covariance.
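+
+    Examples
+    --------
+    A minimal usage sketch, not from the upstream docs; the filenames are
+    hypothetical and a noise covariance is assumed to have been computed
+    beforehand::
+
+        import mne
+        raw = mne.io.Raw('sample_raw.fif')
+        cov = mne.read_cov('sample-cov.fif')
+        fig_cov, fig_svd = plot_cov(cov, raw.info, show_svd=True)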
+    """
+    if exclude == 'bads':
+        exclude = info['bads']
+    ch_names = [n for n in cov.ch_names if n not in exclude]
+    ch_idx = [cov.ch_names.index(n) for n in ch_names]
+    info_ch_names = info['ch_names']
+    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude=exclude)
+    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
+                         exclude=exclude)
+    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
+                          exclude=exclude)
+    idx_eeg = [ch_names.index(info_ch_names[c])
+               for c in sel_eeg if info_ch_names[c] in ch_names]
+    idx_mag = [ch_names.index(info_ch_names[c])
+               for c in sel_mag if info_ch_names[c] in ch_names]
+    idx_grad = [ch_names.index(info_ch_names[c])
+                for c in sel_grad if info_ch_names[c] in ch_names]
+
+    idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
+                 (idx_grad, 'Gradiometers', 'fT/cm', 1e13),
+                 (idx_mag, 'Magnetometers', 'fT', 1e15)]
+    idx_names = [(idx, name, unit, scaling)
+                 for idx, name, unit, scaling in idx_names if len(idx) > 0]
+
+    C = cov.data[ch_idx][:, ch_idx]
+
+    if proj:
+        projs = copy.deepcopy(info['projs'])
+
+        #   Activate the projection items
+        for p in projs:
+            p['active'] = True
+
+        P, ncomp, _ = make_projector(projs, ch_names)
+        if ncomp > 0:
+            logger.info('    Created an SSP operator (subspace dimension'
+                        ' = %d)' % ncomp)
+            C = np.dot(P, np.dot(C, P.T))
+        else:
+            logger.info('    The projection vectors do not apply to these '
+                        'channels.')
+
+    import matplotlib.pyplot as plt
+
+    fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
+    for k, (idx, name, _, _) in enumerate(idx_names):
+        plt.subplot(1, len(idx_names), k + 1)
+        plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
+        plt.title(name)
+    plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
+    tight_layout(fig=fig_cov)
+
+    fig_svd = None
+    if show_svd:
+        fig_svd = plt.figure()
+        for k, (idx, name, unit, scaling) in enumerate(idx_names):
+            s = linalg.svd(C[idx][:, idx], compute_uv=False)
+            plt.subplot(1, len(idx_names), k + 1)
+            plt.ylabel('Noise std (%s)' % unit)
+            plt.xlabel('Eigenvalue index')
+            plt.semilogy(np.sqrt(s) * scaling)
+            plt.title(name)
+            tight_layout(fig=fig_svd)
+
+    if show:
+        plt.show()
+
+    return fig_cov, fig_svd
+
+
+def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
+                            source_index=None, colorbar=False, show=True):
+    """Plot source power in time-freqency grid.
+
+    Parameters
+    ----------
+    stcs : list of SourceEstimate
+        Source power for consecutive time windows, one SourceEstimate object
+        should be provided for each frequency bin.
+    freq_bins : list of tuples of float
+        Start and end points of frequency bins of interest.
+    tmin : float
+        Minimum time instant to show.
+    tmax : float
+        Maximum time instant to show.
+    source_index : int | None
+        Index of source for which the spectrogram will be plotted. If None,
+        the source with the largest activation will be selected.
+    colorbar : bool
+        If true, a colorbar will be added to the plot.
+    show : bool
+        Show figure if True.
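+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Examples
+    --------
+    A minimal sketch, assuming ``stcs`` already holds one SourceEstimate
+    per frequency bin (e.g. the output of a time-frequency beamformer)::
+
+        freq_bins = [(4, 12), (12, 30), (30, 55)]  # Hz
+        fig = plot_source_spectrogram(stcs, freq_bins, colorbar=True)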
+    """
+    import matplotlib.pyplot as plt
+
+    # Input checks
+    if len(stcs) == 0:
+        raise ValueError('cannot plot spectrogram if len(stcs) == 0')
+
+    stc = stcs[0]
+    if tmin is not None and tmin < stc.times[0]:
+        raise ValueError('tmin cannot be smaller than the first time point '
+                         'provided in stcs')
+    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
+        raise ValueError('tmax cannot be larger than the last time point '
+                         'plus the time step of the stcs')
+
+    # Preparing time-frequency cell boundaries for plotting
+    if tmin is None:
+        tmin = stc.times[0]
+    if tmax is None:
+        tmax = stc.times[-1] + stc.tstep
+    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
+    freq_bounds = sorted(set(np.ravel(freq_bins)))
+    freq_ticks = copy.deepcopy(freq_bounds)
+
+    # Reject time points that will not be plotted and gather results
+    source_power = []
+    for stc in stcs:
+        stc = stc.copy()  # copy since crop modifies inplace
+        stc.crop(tmin, tmax - stc.tstep)
+        source_power.append(stc.data)
+    source_power = np.array(source_power)
+
+    # Finding the source with maximum source power
+    if source_index is None:
+        source_index = np.unravel_index(source_power.argmax(),
+                                        source_power.shape)[1]
+
+    # If there is a gap in the frequency bins record its locations so that it
+    # can be covered with a gray horizontal bar
+    gap_bounds = []
+    for i in range(len(freq_bins) - 1):
+        lower_bound = freq_bins[i][1]
+        upper_bound = freq_bins[i + 1][0]
+        if lower_bound != upper_bound:
+            freq_bounds.remove(lower_bound)
+            gap_bounds.append((lower_bound, upper_bound))
+
+    # Preparing time-frequency grid for plotting
+    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
+
+    # Plotting the results
+    fig = plt.figure(figsize=(9, 6))
+    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
+               cmap='Reds')
+    ax = plt.gca()
+
+    plt.title('Time-frequency source power')
+    plt.xlabel('Time (s)')
+    plt.ylabel('Frequency (Hz)')
+
+    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
+    n_skip = 1 + len(time_bounds) // 10
+    for i in range(len(time_bounds)):
+        if i % n_skip != 0:
+            time_tick_labels[i] = ''
+
+    ax.set_xticks(time_bounds)
+    ax.set_xticklabels(time_tick_labels)
+    plt.xlim(time_bounds[0], time_bounds[-1])
+    plt.yscale('log')
+    ax.set_yticks(freq_ticks)
+    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
+    plt.ylim(freq_bounds[0], freq_bounds[-1])
+
+    plt.grid(True, ls='-')
+    if colorbar:
+        plt.colorbar()
+    tight_layout(fig=fig)
+
+    # Covering frequency gaps with horizontal bars
+    for lower_bound, upper_bound in gap_bounds:
+        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
+                 lower_bound, time_bounds[0], color='#666666')
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
+                       slices=None, show=True):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    mri_fname : str
+        The name of the file containing anatomical data.
+    surf_fnames : list of str
+        The filenames for the BEM surfaces in the format
+        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'
+    slices : list of int
+        Slice indices.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
+    """
+    import matplotlib.pyplot as plt
+    import nibabel as nib
+
+    if orientation not in ['coronal', 'axial', 'sagittal']:
+        raise ValueError("Orientation must be 'coronal', 'axial' or "
+                         "'sagittal'. Got %s." % orientation)
+
+    # Load the T1 data
+    nim = nib.load(mri_fname)
+    data = nim.get_data()
+    affine = nim.get_affine()
+
+    n_sag, n_axi, n_cor = data.shape
+    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
+    orientation_axis = orientation_name2axis[orientation]
+
+    if slices is None:
+        n_slices = data.shape[orientation_axis]
+        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
+
+    # create a list of surfaces
+    surfs = list()
+
+    trans = linalg.inv(affine)
+    # XXX : next line is a hack, don't ask why
+    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
+
+    for surf_fname in surf_fnames:
+        surf = dict()
+        surf['rr'], surf['tris'] = read_surface(surf_fname)
+        # move back surface to MRI coordinate system
+        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
+        surfs.append(surf)
+
+    fig, axs = _prepare_trellis(len(slices), 4)
+
+    for ax, sl in zip(axs, slices):
+
+        # adjust the orientations for good view
+        if orientation == 'coronal':
+            dat = data[:, :, sl].transpose()
+        elif orientation == 'axial':
+            dat = data[:, sl, :]
+        elif orientation == 'sagittal':
+            dat = data[sl, :, :]
+
+        # First plot the anatomical data
+        ax.imshow(dat, cmap=plt.cm.gray)
+        ax.axis('off')
+
+        # and then plot the contours on top
+        for surf in surfs:
+            if orientation == 'coronal':
+                ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 2],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'axial':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
+                              surf['tris'], surf['rr'][:, 1],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+            elif orientation == 'sagittal':
+                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
+                              surf['tris'], surf['rr'][:, 0],
+                              levels=[sl], colors='yellow', linewidths=2.0)
+
+    plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
+                        hspace=0.)
+    if show:
+        plt.show()
+
+    return fig
+
+
+def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
+             slices=None, show=True):
+    """Plot BEM contours on anatomical slices.
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    subjects_dir : str | None
+        Path to the SUBJECTS_DIR. If None, the path is obtained by using
+        the environment variable SUBJECTS_DIR.
+    orientation : str
+        'coronal' or 'axial' or 'sagittal'.
+    slices : list of int
+        Slice indices.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
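+
+    Examples
+    --------
+    A minimal sketch, assuming a FreeSurfer subjects directory in which
+    the BEM surfaces have already been created; paths are hypothetical::
+
+        fig = plot_bem(subject='sample', subjects_dir='/data/subjects',
+                       orientation='axial')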
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    # Get the MRI filename
+    mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    if not op.isfile(mri_fname):
+        raise IOError('MRI file "%s" does not exist' % mri_fname)
+
+    # Get the BEM surface filenames
+    bem_path = op.join(subjects_dir, subject, 'bem')
+
+    if not op.isdir(bem_path):
+        raise IOError('Subject bem directory "%s" does not exist' % bem_path)
+
+    surf_fnames = []
+    for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
+        surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
+        if len(surf_fname) > 0:
+            surf_fname = surf_fname[0]
+            logger.info("Using surface: %s" % surf_fname)
+            surf_fnames.append(surf_fname)
+
+    if len(surf_fnames) == 0:
+        raise IOError('No surface files found. Surface files must end with '
+                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')
+
+    # Plot the contours
+    return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
+                              slices=slices, show=show)
+
+
+def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
+                axes=None, equal_spacing=True, show=True):
+    """Plot events to get a visual display of the paradigm
+
+    Parameters
+    ----------
+    events : array, shape (n_events, 3)
+        The events.
+    sfreq : float | None
+        The sample frequency. If None, data will be displayed in samples (not
+        seconds).
+    first_samp : int
+        The index of the first sample. Typically the raw.first_samp
+        attribute. It is needed for recordings on a Neuromag
+        system as the events are defined relative to the system
+        start and not to the beginning of the recording.
+    color : dict | None
+        Dictionary of event_id value and its associated color. If None,
+        colors are automatically drawn from a default list (cycled through
+        if there are more events than default colors).
+    event_id : dict | None
+        Dictionary of event label (e.g. 'aud_l') and its associated
+        event_id value. Labels are used to plot a legend. If None, no
+        legend is drawn.
+    axes : instance of matplotlib.axes.AxesSubplot
+        The subplot handle.
+    equal_spacing : bool
+        Use equal spacing between events on the y-axis.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
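+
+    Examples
+    --------
+    A minimal sketch with a hypothetical events file and an ``event_id``
+    mapping defined by the experiment::
+
+        import mne
+        events = mne.read_events('sample_raw-eve.fif')
+        event_id = dict(aud_l=1, aud_r=2)
+        fig = plot_events(events, sfreq=600.0, event_id=event_id)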
+    """
+
+    if sfreq is None:
+        sfreq = 1.0
+        xlabel = 'samples'
+    else:
+        xlabel = 'Time (s)'
+
+    events = np.asarray(events)
+    unique_events = np.unique(events[:, 2])
+
+    if event_id is not None:
+        # get labels and unique event ids from event_id dict,
+        # sorted by value
+        event_id_rev = dict((v, k) for k, v in event_id.items())
+        conditions, unique_events_id = zip(*sorted(event_id.items(),
+                                                   key=lambda x: x[1]))
+
+        for this_event in unique_events_id:
+            if this_event not in unique_events:
+                raise ValueError('%s from event_id is not present in events.'
+                                 % this_event)
+
+        for this_event in unique_events:
+            if this_event not in unique_events_id:
+                warnings.warn('event %s is missing from event_id and will '
+                              'be ignored.' % this_event)
+    else:
+        unique_events_id = unique_events
+
+    if color is None:
+        if len(unique_events) > len(COLORS):
+            warnings.warn('More events than colors available. '
+                          'You should pass a list of unique colors.')
+        colors = cycle(COLORS)
+        color = dict()
+        for this_event, this_color in zip(unique_events_id, colors):
+            color[this_event] = this_color
+    else:
+        for this_event in color:
+            if this_event not in unique_events_id:
+                raise ValueError('%s from color is not present in events '
+                                 'or event_id.' % this_event)
+
+        for this_event in unique_events_id:
+            if this_event not in color:
+                warnings.warn('Color is not available for event %d. Default '
+                              'colors will be used.' % this_event)
+
+    import matplotlib.pyplot as plt
+
+    fig = None
+    if axes is None:
+        fig = plt.figure()
+    ax = axes if axes else plt.gca()
+
+    unique_events_id = np.array(unique_events_id)
+    min_event = np.min(unique_events_id)
+    max_event = np.max(unique_events_id)
+
+    for idx, ev in enumerate(unique_events_id):
+        ev_mask = events[:, 2] == ev
+        kwargs = {}
+        if event_id is not None:
+            event_label = '{0} ({1})'.format(event_id_rev[ev],
+                                             np.sum(ev_mask))
+            kwargs['label'] = event_label
+        if ev in color:
+            kwargs['color'] = color[ev]
+        if equal_spacing:
+            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
+                    (idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
+        else:
+            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
+                    events[ev_mask, 2], '.', **kwargs)
+
+    if equal_spacing:
+        ax.set_ylim(0, unique_events_id.size + 1)
+        ax.set_yticks(1 + np.arange(unique_events_id.size))
+        ax.set_yticklabels(unique_events_id)
+    else:
+        ax.set_ylim([min_event - 1, max_event + 1])
+
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel('Event id')
+
+    ax.grid('on')
+
+    fig = fig if fig is not None else plt.gcf()
+    if event_id is not None:
+        box = ax.get_position()
+        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
+        fig.canvas.draw()
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _get_presser(fig):
+    """Helper to get our press callback"""
+    callbacks = fig.canvas.callbacks.callbacks['button_press_event']
+    func = None
+    for key, val in callbacks.items():
+        if val.func.__class__.__name__ == 'partial':
+            func = val.func
+            break
+    assert func is not None
+    return func
+
+
+def plot_dipole_amplitudes(dipoles, colors=None, show=True):
+    """Plot the amplitude traces of a set of dipoles
+
+    Parameters
+    ----------
+    dipoles : list of instances of Dipole
+        The dipoles whose amplitudes should be shown.
+    colors : list of color | None
+        Color to plot with each dipole. If None default colors are used.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
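+
+    Examples
+    --------
+    A minimal sketch, reading dipoles from a hypothetical ``.dip`` file::
+
+        import mne
+        dip = mne.read_dipole('sample_set1.dip')
+        fig = plot_dipole_amplitudes([dip])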
+    """
+    import matplotlib.pyplot as plt
+    if colors is None:
+        colors = cycle(COLORS)
+    fig, ax = plt.subplots(1, 1)
+    xlim = [np.inf, -np.inf]
+    for dip, color in zip(dipoles, colors):
+        ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5)
+        xlim[0] = min(xlim[0], dip.times[0])
+        xlim[1] = max(xlim[1], dip.times[-1])
+    ax.set_xlim(xlim)
+    ax.set_xlabel('Time (sec)')
+    ax.set_ylabel('Amplitude (nAm)')
+    if show:
+        fig.show()
+    return fig
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/montage.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/montage.py
new file mode 100644
index 0000000..184029a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/montage.py
@@ -0,0 +1,58 @@
+"""Functions to plot EEG sensor montages or digitizer montages
+"""
+import numpy as np
+
+
+def plot_montage(montage, scale_factor=1.5, show_names=False, show=True):
+    """Plot a montage
+
+    Parameters
+    ----------
+    montage : instance of Montage
+        The montage to visualize.
+    scale_factor : float
+        Determines the size of the points. Defaults to 1.5.
+    show_names : bool
+        Whether to show the channel names. Defaults to False.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object.
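+
+    Examples
+    --------
+    A minimal sketch using one of the montage kinds shipped with MNE::
+
+        from mne.channels import read_montage
+        montage = read_montage('biosemi128')
+        fig = plot_montage(montage, show_names=True)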
+    """
+    from ..channels.montage import Montage, DigMontage
+
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.mplot3d import Axes3D  # noqa
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')
+
+    if isinstance(montage, Montage):
+        pos = montage.pos
+        ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
+        if show_names:
+            ch_names = montage.ch_names
+            for ch_name, x, y, z in zip(ch_names, pos[:, 0],
+                                        pos[:, 1], pos[:, 2]):
+                ax.text(x, y, z, ch_name)
+    elif isinstance(montage, DigMontage):
+        pos = np.vstack((montage.hsp, montage.elp))
+        ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
+        if show_names:
+            if montage.point_names:
+                hpi_names = montage.point_names
+                for hpi_name, x, y, z in zip(hpi_names, montage.elp[:, 0],
+                                             montage.elp[:, 1],
+                                             montage.elp[:, 2]):
+                    ax.text(x, y, z, hpi_name)
+
+    ax.set_xlabel('x')
+    ax.set_ylabel('y')
+    ax.set_zlabel('z')
+
+    if show:
+        plt.show()
+
+    return fig
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/raw.py
new file mode 100644
index 0000000..a5a3934
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/raw.py
@@ -0,0 +1,672 @@
+"""Functions to plot raw M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#          Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+#
+# License: Simplified BSD
+
+import copy
+from functools import partial
+
+import numpy as np
+
+from ..externals.six import string_types
+from ..io.pick import pick_types
+from ..io.proj import setup_proj
+from ..utils import verbose, get_config
+from ..time_frequency import compute_raw_psd
+from .utils import _toggle_options, _toggle_proj, tight_layout
+from .utils import _layout_figure, _plot_raw_onkey, figure_nobar
+from .utils import _plot_raw_onscroll, _mouse_click
+from .utils import _helper_raw_resize, _select_bads, _onclick_help
+from ..defaults import _handle_default
+
+
+def _plot_update_raw_proj(params, bools):
+    """Helper only needs to be called when proj is changed"""
+    if bools is not None:
+        inds = np.where(bools)[0]
+        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                                   for ii in inds]
+        params['proj_bools'] = bools
+    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
+                                        verbose=False)
+    params['update_fun']()
+    params['plot_fun']()
+
+
+def _update_raw_data(params):
+    """Helper only needs to be called when time or proj is changed"""
+    from scipy.signal import filtfilt
+    start = params['t_start']
+    stop = params['raw'].time_as_index(start + params['duration'])[0]
+    start = params['raw'].time_as_index(start)[0]
+    data_picks = pick_types(params['raw'].info, meg=True, eeg=True)
+    data, times = params['raw'][:, start:stop]
+    if params['projector'] is not None:
+        data = np.dot(params['projector'], data)
+    # remove DC
+    if params['remove_dc'] is True:
+        data -= np.mean(data, axis=1)[:, np.newaxis]
+    if params['ba'] is not None:
+        data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],
+                                    data[data_picks], axis=1, padlen=0)
+    # scale
+    for di in range(data.shape[0]):
+        data[di] /= params['scalings'][params['types'][di]]
+        # stim channels should be hard limited
+        if params['types'][di] == 'stim':
+            data[di] = np.minimum(data[di], 1.0)
+    # clip
+    if params['clipping'] == 'transparent':
+        data[np.logical_or(data > 1, data < -1)] = np.nan
+    elif params['clipping'] == 'clamp':
+        data = np.clip(data, -1, 1, data)
+    params['data'] = data
+    params['times'] = times
+
+
+def _pick_bad_channels(event, params):
+    """Helper for selecting / dropping bad channels onpick"""
+    # Both bad lists are updated. params['info'] used for colors.
+    bads = params['raw'].info['bads']
+    params['info']['bads'] = _select_bads(event, params, bads)
+    _plot_update_raw_proj(params, None)
+
+
+def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
+             bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
+             event_color='cyan', scalings=None, remove_dc=True, order='type',
+             show_options=False, title=None, show=True, block=False,
+             highpass=None, lowpass=None, filtorder=4, clipping=None):
+    """Plot raw data
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data to plot.
+    events : array | None
+        Events to show with vertical bars.
+    duration : float
+        Time window (sec) to plot. The lesser of this value and the duration
+        of the raw file will be used.
+    start : float
+        Initial time to show (can be changed dynamically once plotted).
+    n_channels : int
+        Number of channels to plot at once.
+    bgcolor : color object
+        Color of the background.
+    color : dict | color object | None
+        Color for the data traces. If None, defaults to::
+
+            dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
+                 emg='k', ref_meg='steelblue', misc='k', stim='k',
+                 resp='k', chpi='k')
+
+    bad_color : color object
+        Color to make bad channels.
+    event_color : color object | dict
+        Color to use for events. Can also be a dict with
+        ``{event_number: color}`` pairings. Use ``event_number==-1`` for
+        any event numbers in the events list that are not in the dictionary.
+    scalings : dict | None
+        Scale factors for the traces. If None, defaults to::
+
+            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
+                 resp=1, chpi=1e-4)
+
+    remove_dc : bool
+        If True remove DC component when plotting data.
+    order : 'type' | 'original' | array
+        Order in which to plot data. 'type' groups by channel type,
+        'original' plots in the order of ch_names, array gives the
+        indices to use in plotting.
+    show_options : bool
+        If True, a dialog for options related to projection is shown.
+    title : str | None
+        The title of the window. If None, the filename of the raw object
+        (or '<unknown>' if no filename is available) is displayed as title.
+    show : bool
+        Show figure if True.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for setting bad channels on the fly by clicking on a line.
+        May not work on all systems / platforms.
+    highpass : float | None
+        Highpass to apply when displaying data.
+    lowpass : float | None
+        Lowpass to apply when displaying data.
+    filtorder : int
+        Filtering order. Note that for efficiency and simplicity,
+        filtering during plotting uses forward-backward IIR filtering,
+        so the effective filter order will be twice ``filtorder``.
+        Filtering the lines for display may also produce some edge
+        artifacts (at the left and right edges) of the signals
+        during display. Filtering requires scipy >= 0.10.
+    clipping : str | None
+        If None, channels are allowed to exceed their designated bounds in
+        the plot. If "clamp", then values are clamped to the appropriate
+        range for display, creating step-like artifacts. If "transparent",
+        then excessive values are not shown, creating gaps in the traces.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        Raw traces.
+
+    Notes
+    -----
+    The arrow keys (up/down/left/right) can typically be used to navigate
+    between channels and time ranges, but this depends on the backend
+    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The
+    scaling can be adjusted with - and + (or =) keys. The viewport dimensions
+    can be adjusted with page up/page down and home/end keys. Full screen mode
+    can be toggled with the f11 key. To mark or un-mark a channel as bad, click
+    on the rather flat segments of a channel's time series. The changes will be
+    reflected immediately in the raw object's ``raw.info['bads']`` entry.
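+
+    Examples
+    --------
+    A minimal sketch with a hypothetical raw file; the duration, channel
+    count, and filter settings are arbitrary choices::
+
+        import mne
+        raw = mne.io.Raw('sample_raw.fif')
+        fig = plot_raw(raw, duration=20.0, n_channels=30,
+                       lowpass=40., clipping='clamp', block=True)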
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    from scipy.signal import butter
+    color = _handle_default('color', color)
+    scalings = _handle_default('scalings_plot_raw', scalings)
+
+    if clipping is not None and clipping not in ('clamp', 'transparent'):
+        raise ValueError('clipping must be None, "clamp", or "transparent", '
+                         'not %s' % clipping)
+    # figure out the IIR filtering parameters
+    nyq = raw.info['sfreq'] / 2.
+    if highpass is None and lowpass is None:
+        ba = None
+    else:
+        filtorder = int(filtorder)
+        if filtorder <= 0:
+            raise ValueError('filtorder (%s) must be >= 1' % filtorder)
+        if highpass is not None and highpass <= 0:
+            raise ValueError('highpass must be > 0, not %s' % highpass)
+        if lowpass is not None and lowpass >= nyq:
+            raise ValueError('lowpass must be < nyquist (%s), not %s'
+                             % (nyq, lowpass))
+        if highpass is None:
+            ba = butter(filtorder, lowpass / nyq, 'lowpass', analog=False)
+        elif lowpass is None:
+            ba = butter(filtorder, highpass / nyq, 'highpass', analog=False)
+        else:
+            if lowpass <= highpass:
+                raise ValueError('lowpass (%s) must be > highpass (%s)'
+                                 % (lowpass, highpass))
+            ba = butter(filtorder, [highpass / nyq, lowpass / nyq], 'bandpass',
+                        analog=False)
+
+    # make a copy of info, remove projection (for now)
+    info = copy.deepcopy(raw.info)
+    projs = info['projs']
+    info['projs'] = []
+    n_times = raw.n_times
+
+    # allow for raw objects without filename, e.g., ICA
+    if title is None:
+        title = raw._filenames
+        if len(title) == 0:  # empty list or absent key
+            title = '<unknown>'
+        elif len(title) == 1:
+            title = title[0]
+        else:  # if len(title) > 1:
+            title = '%s ... (+ %d more) ' % (title[0], len(title) - 1)
+            if len(title) > 60:
+                title = '...' + title[-60:]
+    elif not isinstance(title, string_types):
+        raise TypeError('title must be None or a string')
+    if events is not None:
+        event_times = events[:, 0].astype(float) - raw.first_samp
+        event_times /= info['sfreq']
+        event_nums = events[:, 2]
+    else:
+        event_times = event_nums = None
+
+    # reorganize the data in plotting order
+    inds = list()
+    types = list()
+    for t in ['grad', 'mag']:
+        inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])]
+        types += [t] * len(inds[-1])
+    pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
+    for t in ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp',
+              'misc', 'chpi', 'syst', 'ias', 'exci']:
+        pick_kwargs[t] = True
+        inds += [pick_types(raw.info, **pick_kwargs)]
+        types += [t] * len(inds[-1])
+        pick_kwargs[t] = False
+    inds = np.concatenate(inds).astype(int)
+    if not len(inds) == len(info['ch_names']):
+        raise RuntimeError('Some channels not classified, please report '
+                           'this problem')
+
+    # put them back to original or modified order for natural plotting
+    reord = np.argsort(inds)
+    types = [types[ri] for ri in reord]
+    if isinstance(order, str):
+        if order == 'original':
+            inds = inds[reord]
+        elif order != 'type':
+            raise ValueError('Unknown order type %s' % order)
+    elif isinstance(order, np.ndarray):
+        if not np.array_equal(np.sort(order),
+                              np.arange(len(info['ch_names']))):
+            raise ValueError('order, if array, must have integers from '
+                             '0 to n_channels - 1')
+        # put back to original order first, then use new order
+        inds = inds[reord][order]
+
+    if not isinstance(event_color, dict):
+        event_color = {-1: event_color}
+    else:
+        event_color = copy.deepcopy(event_color)  # we might modify it
+    for key in event_color:
+        if not isinstance(key, int):
+            raise TypeError('event_color key "%s" was a %s not an int'
+                            % (key, type(key)))
+        if key <= 0 and key != -1:
+            raise KeyError('the only key <= 0 allowed is -1 (cannot use '
+                           '%s)' % key)
+
+    # set up projection and data parameters
+    duration = min(raw.times[-1], float(duration))
+    params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
+                  info=info, projs=projs, remove_dc=remove_dc, ba=ba,
+                  n_channels=n_channels, scalings=scalings, types=types,
+                  n_times=n_times, event_times=event_times,
+                  event_nums=event_nums, clipping=clipping, fig_proj=None)
+
+    _prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
+                            n_channels)
+
+    # plot event_line first so it's in the back
+    event_lines = [params['ax'].plot([np.nan], color=event_color[ev_num])[0]
+                   for ev_num in sorted(event_color.keys())]
+    params['plot_fun'] = partial(_plot_raw_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color,
+                                 event_lines=event_lines,
+                                 event_color=event_color)
+    params['update_fun'] = partial(_update_raw_data, params=params)
+    params['pick_bads_fun'] = partial(_pick_bad_channels, params=params)
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    params['scale_factor'] = 1.0
+    # set up callbacks
+    opt_button = None
+    if len(raw.info['projs']) > 0 and not raw.proj:
+        ax_button = plt.subplot2grid((10, 10), (9, 9))
+        params['ax_button'] = ax_button
+        opt_button = mpl.widgets.Button(ax_button, 'Proj')
+        callback_option = partial(_toggle_options, params=params)
+        opt_button.on_clicked(callback_option)
+    callback_key = partial(_plot_raw_onkey, params=params)
+    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
+    callback_scroll = partial(_plot_raw_onscroll, params=params)
+    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
+    callback_pick = partial(_mouse_click, params=params)
+    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_raw_resize, params=params)
+    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
+
+    # As this code is shared with plot_evoked, some extra steps are needed:
+    # first the actual plot update function
+    params['plot_update_proj_callback'] = _plot_update_raw_proj
+    # then the toggle handler
+    callback_proj = partial(_toggle_proj, params=params)
+    # store these for use by callbacks in the options figure
+    params['callback_proj'] = callback_proj
+    params['callback_key'] = callback_key
+    # have to store this, or it could get garbage-collected
+    params['opt_button'] = opt_button
+
+    # do initial plots
+    callback_proj('none')
+    _layout_figure(params)
+
+    # deal with projectors
+    if show_options is True:
+        _toggle_options(None, params)
+
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
+
+
+def _label_clicked(pos, params):
+    """Helper function for selecting bad channels."""
+    labels = params['ax'].yaxis.get_ticklabels()
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1])
+    text = labels[line_idx].get_text()
+    if len(text) == 0:
+        return
+    ch_idx = params['ch_start'] + line_idx
+    bads = params['info']['bads']
+    if text in bads:
+        while text in bads:  # to make sure duplicates are removed
+            bads.remove(text)
+        color = vars(params['lines'][line_idx])['def_color']
+        params['ax_vscroll'].patches[ch_idx].set_color(color)
+    else:
+        bads.append(text)
+        color = params['bad_color']
+        params['ax_vscroll'].patches[ch_idx].set_color(color)
+    params['raw'].info['bads'] = bads
+    _plot_update_raw_proj(params, None)
+
+
+def _set_psd_plot_params(info, proj, picks, ax, area_mode):
+    """Aux function"""
+    import matplotlib.pyplot as plt
+    if area_mode not in [None, 'std', 'range']:
+        raise ValueError('"area_mode" must be "std", "range", or None')
+    if picks is None:
+        if ax is not None:
+            raise ValueError('If "ax" is not supplied (None), then "picks" '
+                             'must also be supplied')
+        megs = ['mag', 'grad', False]
+        eegs = [False, False, True]
+        names = ['Magnetometers', 'Gradiometers', 'EEG']
+        picks_list = list()
+        titles_list = list()
+        for meg, eeg, name in zip(megs, eegs, names):
+            picks = pick_types(info, meg=meg, eeg=eeg, ref_meg=False)
+            if len(picks) > 0:
+                picks_list.append(picks)
+                titles_list.append(name)
+        if len(picks_list) == 0:
+            raise RuntimeError('No MEG or EEG channels found')
+    else:
+        picks_list = [picks]
+        titles_list = ['Selected channels']
+        ax_list = [ax]
+
+    make_label = False
+    fig = None
+    if ax is None:
+        fig = plt.figure()
+        ax_list = list()
+        for ii in range(len(picks_list)):
+            # Make x-axes change together
+            if ii > 0:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
+                                           sharex=ax_list[0]))
+            else:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
+        make_label = True
+    else:
+        fig = ax_list[0].get_figure()
+
+    return fig, picks_list, titles_list, ax_list, make_label
+
+
+@verbose
+def plot_raw_psd(raw, tmin=0., tmax=np.inf, fmin=0, fmax=np.inf, proj=False,
+                 n_fft=2048, picks=None, ax=None, color='black',
+                 area_mode='std', area_alpha=0.33,
+                 n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
+    """Plot the power spectral density across channels
+
+    Parameters
+    ----------
+    raw : instance of io.Raw
+        The raw instance to use.
+    tmin : float
+        Start time for calculations.
+    tmax : float
+        End time for calculations.
+    fmin : float
+        Start frequency to consider.
+    fmax : float
+        End frequency to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    picks : array-like of int | None
+        List of channels to use. Cannot be None if `ax` is supplied. If both
+        `picks` and `ax` are None, separate subplots will be created for
+        each standard channel type (`mag`, `grad`, and `eeg`).
+    ax : instance of matplotlib Axes | None
+        Axes to plot into. If None, axes will be created.
+    color : str | tuple
+        A matplotlib-compatible color to use.
+    area_mode : str | None
+        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
+        will be plotted. If 'range', the min and max (across channels) will be
+        plotted. Bad channels will be excluded from these calculations.
+        If None, no area will be plotted.
+    area_alpha : float
+        Alpha for the area.
+    n_overlap : int
+        The number of points of overlap between blocks. The default value
+        is 0 (no overlap).
+    dB : bool
+        If True, transform data to decibels.
+    show : bool
+        Show figure if True.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        Figure with one PSD subplot per channel type (or a single subplot
+        for the selected channels).
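+
+    Examples
+    --------
+    A minimal sketch with a hypothetical raw file::
+
+        import mne
+        raw = mne.io.Raw('sample_raw.fif')
+        fig = plot_raw_psd(raw, fmin=1., fmax=80., n_fft=2048)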
+    """
+    import matplotlib.pyplot as plt
+    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
+        raw.info, proj, picks, ax, area_mode)
+
+    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
+                                                ax_list)):
+        psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
+                                      fmin=fmin, fmax=fmax, proj=proj,
+                                      n_fft=n_fft, n_overlap=n_overlap,
+                                      n_jobs=n_jobs, verbose=None)
+
+        # Convert PSDs to dB
+        if dB:
+            psds = 10 * np.log10(psds)
+            unit = 'dB'
+        else:
+            unit = 'power'
+        psd_mean = np.mean(psds, axis=0)
+        if area_mode == 'std':
+            psd_std = np.std(psds, axis=0)
+            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
+        elif area_mode == 'range':
+            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
+        else:  # area_mode is None
+            hyp_limits = None
+
+        ax.plot(freqs, psd_mean, color=color)
+        if hyp_limits is not None:
+            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
+                            color=color, alpha=area_alpha)
+        if make_label:
+            if ii == len(picks_list) - 1:
+                ax.set_xlabel('Freq (Hz)')
+            if ii == len(picks_list) // 2:
+                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
+            ax.set_title(title)
+            ax.set_xlim(freqs[0], freqs[-1])
+    if make_label:
+        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
+    if show is True:
+        plt.show()
+    return fig
+
+
+def _prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
+                            n_channels):
+    """Helper for setting up the mne_browse_raw window."""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    if size is not None:
+        size = size.split(',')
+        size = tuple([float(s) for s in size])
+
+    fig = figure_nobar(facecolor=bgcolor, figsize=size)
+    fig.canvas.set_window_title('mne_browse_raw')
+    ax = plt.subplot2grid((10, 10), (0, 1), colspan=8, rowspan=9)
+    ax.set_title(title, fontsize=12)
+    ax_hscroll = plt.subplot2grid((10, 10), (9, 1), colspan=8)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Time (s)')
+    ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_help_button = plt.subplot2grid((10, 10), (0, 0), colspan=1)
+    help_button = mpl.widgets.Button(ax_help_button, 'Help')
+    help_button.on_clicked(partial(_onclick_help, params=params))
+    # store these so they can be fixed on resize
+    params['fig'] = fig
+    params['ax'] = ax
+    params['ax_hscroll'] = ax_hscroll
+    params['ax_vscroll'] = ax_vscroll
+    params['ax_help_button'] = ax_help_button
+    params['help_button'] = help_button
+
+    # populate vertical and horizontal scrollbars
+    info = params['info']
+    for ci in range(len(info['ch_names'])):
+        this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
+                      else color)
+        if isinstance(this_color, dict):
+            this_color = this_color[params['types'][inds[ci]]]
+        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
+                                                   facecolor=this_color,
+                                                   edgecolor=this_color))
+    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
+                                       facecolor='w', edgecolor='w')
+    ax_vscroll.add_patch(vsel_patch)
+    params['vsel_patch'] = vsel_patch
+    hsel_patch = mpl.patches.Rectangle((params['t_start'], 0),
+                                       params['duration'], 1, edgecolor='k',
+                                       facecolor=(0.75, 0.75, 0.75),
+                                       alpha=0.25, linewidth=1, clip_on=False)
+    ax_hscroll.add_patch(hsel_patch)
+    params['hsel_patch'] = hsel_patch
+    ax_hscroll.set_xlim(0, params['n_times'] / float(info['sfreq']))
+    n_ch = len(info['ch_names'])
+    ax_vscroll.set_ylim(n_ch, 0)
+    ax_vscroll.set_title('Ch.')
+
+    # make shells for plotting traces
+    ylim = [n_channels * 2 + 1, 0]
+    offset = ylim[0] / n_channels
+    offsets = np.arange(n_channels) * offset + (offset / 2.)
+    ax.set_yticks(offsets)
+    ax.set_ylim(ylim)
+    ax.set_xlim(params['t_start'], params['t_start'] + params['duration'],
+                False)
+
+    params['offsets'] = offsets
+    params['lines'] = [ax.plot([np.nan], antialiased=False, linewidth=0.5)[0]
+                       for _ in range(n_ch)]
+    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
+    vertline_color = (0., 0.75, 0.)
+    params['ax_vertline'] = ax.plot([0, 0], ylim, color=vertline_color,
+                                    zorder=-1)[0]
+    params['ax_vertline'].ch_name = ''
+    params['vertline_t'] = ax_hscroll.text(0, 1, '', color=vertline_color,
+                                           va='bottom', ha='right')
+    params['ax_hscroll_vertline'] = ax_hscroll.plot([0, 0], [0, 1],
+                                                    color=vertline_color,
+                                                    zorder=1)[0]
+
+
+def _plot_raw_traces(params, inds, color, bad_color, event_lines=None,
+                     event_color=None):
+    """Helper for plotting raw"""
+    lines = params['lines']
+    info = params['info']
+    n_channels = params['n_channels']
+    params['bad_color'] = bad_color
+    labels = params['ax'].yaxis.get_ticklabels()
+    # do the plotting
+    tick_list = list()
+    for ii in range(n_channels):
+        ch_ind = ii + params['ch_start']
+        # let's be generous here and allow users to pass
+        # n_channels per view >= the number of traces available
+        if ii >= len(lines):
+            break
+        elif ch_ind < len(info['ch_names']):
+            # scale to fit
+            ch_name = info['ch_names'][inds[ch_ind]]
+            tick_list += [ch_name]
+            offset = params['offsets'][ii]
+
+            # do NOT operate in-place lest this get screwed up
+            this_data = params['data'][inds[ch_ind]] * params['scale_factor']
+            this_color = bad_color if ch_name in info['bads'] else color
+            this_z = -1 if ch_name in info['bads'] else 0
+            if isinstance(this_color, dict):
+                this_color = this_color[params['types'][inds[ch_ind]]]
+
+            # subtraction here gets correct orientation for flipped ylim
+            lines[ii].set_ydata(offset - this_data)
+            lines[ii].set_xdata(params['times'])
+            lines[ii].set_color(this_color)
+            lines[ii].set_zorder(this_z)
+            vars(lines[ii])['ch_name'] = ch_name
+            vars(lines[ii])['def_color'] = color[params['types'][inds[ch_ind]]]
+
+            # set label color
+            this_color = bad_color if ch_name in info['bads'] else 'black'
+            labels[ii].set_color(this_color)
+        else:
+            # "remove" lines
+            lines[ii].set_xdata([])
+            lines[ii].set_ydata([])
+    # deal with event lines
+    if params['event_times'] is not None:
+        # find events in the time window
+        event_times = params['event_times']
+        mask = np.logical_and(event_times >= params['times'][0],
+                              event_times <= params['times'][-1])
+        event_times = event_times[mask]
+        event_nums = params['event_nums'][mask]
+        # plot them with appropriate colors
+        # go through the list backward so we end with -1, the catchall
+        used = np.zeros(len(event_times), bool)
+        ylim = params['ax'].get_ylim()
+        for ev_num, line in zip(sorted(event_color.keys())[::-1],
+                                event_lines[::-1]):
+            mask = (event_nums == ev_num) if ev_num >= 0 else ~used
+            assert not np.any(used[mask])
+            used[mask] = True
+            t = event_times[mask]
+            if len(t) > 0:
+                xs = list()
+                ys = list()
+                for tt in t:
+                    xs += [tt, tt, np.nan]
+                    ys += [0, ylim[0], np.nan]
+                line.set_xdata(xs)
+                line.set_ydata(ys)
+            else:
+                line.set_xdata([])
+                line.set_ydata([])
+    # finalize plot
+    params['ax'].set_xlim(params['times'][0],
+                          params['times'][0] + params['duration'], False)
+    params['ax'].set_yticklabels(tick_list)
+    params['vsel_patch'].set_y(params['ch_start'])
+    params['fig'].canvas.draw()
+    # XXX This is a hack to make sure this figure gets drawn last
+    # so that when matplotlib goes to calculate bounds we don't get a
+    # CGContextRef error on the MacOSX backend :(
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/__init__.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_3d.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_3d.py
new file mode 100644
index 0000000..7baa32a
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_3d.py
@@ -0,0 +1,194 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#          Mark Wronkiewicz <wronk.mark at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises, assert_equal
+
+from mne import (make_field_map, pick_channels_evoked, read_evokeds,
+                 read_trans, read_dipole, SourceEstimate)
+from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
+                     plot_trans)
+from mne.utils import requires_mayavi, requires_pysurfer, run_tests_if_main
+from mne.datasets import testing
+from mne.source_space import read_source_spaces
+
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+data_dir = testing.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+trans_fname = op.join(data_dir, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
+                    'sample-oct-6-src.fif')
+dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+@testing.requires_testing_data
+@requires_pysurfer
+@requires_mayavi
+def test_plot_sparse_source_estimates():
+    """Test plotting of (sparse) source estimates
+    """
+    sample_src = read_source_spaces(src_fname)
+
+    # dense version
+    vertices = [s['vertno'] for s in sample_src]
+    n_time = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.zeros((n_verts * n_time))
+    stc_size = stc_data.size
+    stc_data[(np.random.rand(stc_size / 20) * stc_size).astype(int)] = \
+        np.random.rand(stc_data.size / 20)
+    stc_data.shape = (n_verts, n_time)
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    colormap = 'mne_analyze'
+    plot_source_estimates(stc, 'sample', colormap=colormap,
+                          config_opts={'background': (1, 1, 0)},
+                          subjects_dir=subjects_dir, colorbar=True,
+                          clim='auto')
+    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
+                  figure='foo', hemi='both', clim='auto')
+
+    # now do sparse version
+    vertices = sample_src[0]['vertno']
+    inds = [111, 333]
+    stc_data = np.zeros((len(inds), n_time))
+    stc_data[0, 1] = 1.
+    stc_data[1, 4] = 2.
+    vertices = [vertices[inds], np.empty(0, dtype=np.int)]
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
+                                 opacity=0.5, high_resolution=False)
+
+
+@testing.requires_testing_data
+@requires_mayavi
+def test_plot_evoked_field():
+    """Test plotting evoked field
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
+    for t in ['meg', None]:
+        maps = make_field_map(evoked, trans_fname, subject='sample',
+                              subjects_dir=subjects_dir, n_jobs=1, ch_type=t)
+
+        evoked.plot_field(maps, time=0.1)
+
+
+@testing.requires_testing_data
+@requires_mayavi
+def test_plot_trans():
+    """Test plotting of -trans.fif files
+    """
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    plot_trans(evoked.info, trans_fname, subject='sample',
+               subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_trans, evoked.info, trans_fname,
+                  subject='sample', subjects_dir=subjects_dir,
+                  ch_type='bad-chtype')
+
+
+@testing.requires_testing_data
+@requires_pysurfer
+@requires_mayavi
+def test_limits_to_control_points():
+    """Test functionality for determing control points
+    """
+    sample_src = read_source_spaces(src_fname)
+
+    vertices = [s['vertno'] for s in sample_src]
+    n_time = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.random.rand((n_verts * n_time))
+    stc_data.shape = (n_verts, n_time)
+    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
+
+    # Test for simple use cases
+    from mayavi import mlab
+    stc.plot(subjects_dir=subjects_dir)
+    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
+    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
+             subjects_dir=subjects_dir)
+    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
+    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
+    figs = [mlab.figure(), mlab.figure()]
+    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
+                  subjects_dir=subjects_dir)
+
+    # Test both types of incorrect limits key (lims/pos_lims)
+    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
+                  clim=dict(kind='value', lims=(5, 10, 15)),
+                  subjects_dir=subjects_dir)
+    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
+                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
+                  subjects_dir=subjects_dir)
+
+    # Test for correct clim values
+    assert_raises(ValueError, stc.plot,
+                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, colormap='mne',
+                  clim=dict(pos_lims=(5, 10, 15, 20)),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot,
+                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
+                  subjects_dir=subjects_dir)
+
+    # Test handling of degenerate data
+    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
+             subjects_dir=subjects_dir)  # ok
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        # thresholded maps
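+        # (only the all-zero map below is expected to produce a warning)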
+        stc._data.fill(1.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 0)
+        stc._data[0].fill(0.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 0)
+        stc._data.fill(0.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 1)
+    mlab.close()
+
+
+@testing.requires_testing_data
+@requires_mayavi
+def test_plot_dipole_locations():
+    """Test plotting dipole locations
+    """
+    dipoles = read_dipole(dip_fname)
+    trans = read_trans(trans_fname)
+    dipoles.plot_locations(trans, 'sample', subjects_dir, fig_name='foo')
+    assert_raises(ValueError, dipoles.plot_locations, trans, 'sample',
+                  subjects_dir, mode='foo')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_circle.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_circle.py
new file mode 100644
index 0000000..1999221
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_circle.py
@@ -0,0 +1,94 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#
+# License: Simplified BSD
+
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne.viz import plot_connectivity_circle, circular_layout
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+
+def test_plot_connectivity_circle():
+    """Test plotting connectivity circle
+    """
+    import matplotlib.pyplot as plt
+    node_order = ['frontalpole-lh', 'parsorbitalis-lh',
+                  'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
+                  'medialorbitofrontal-lh', 'parstriangularis-lh',
+                  'rostralanteriorcingulate-lh', 'temporalpole-lh',
+                  'parsopercularis-lh', 'caudalanteriorcingulate-lh',
+                  'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
+                  'caudalmiddlefrontal-lh', 'superiortemporal-lh',
+                  'parahippocampal-lh', 'middletemporal-lh',
+                  'inferiortemporal-lh', 'precentral-lh',
+                  'transversetemporal-lh', 'posteriorcingulate-lh',
+                  'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
+                  'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
+                  'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
+                  'superiorparietal-lh', 'pericalcarine-lh',
+                  'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
+                  'lateraloccipital-rh', 'pericalcarine-rh',
+                  'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
+                  'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
+                  'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
+                  'fusiform-rh', 'posteriorcingulate-rh',
+                  'transversetemporal-rh', 'precentral-rh',
+                  'inferiortemporal-rh', 'middletemporal-rh',
+                  'parahippocampal-rh', 'superiortemporal-rh',
+                  'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
+                  'entorhinal-rh', 'caudalanteriorcingulate-rh',
+                  'parsopercularis-rh', 'temporalpole-rh',
+                  'rostralanteriorcingulate-rh', 'parstriangularis-rh',
+                  'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
+                  'lateralorbitofrontal-rh', 'parsorbitalis-rh',
+                  'frontalpole-rh']
+    label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
+                   'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
+                   'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
+                   'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
+                   'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
+                   'inferiorparietal-lh', 'inferiorparietal-rh',
+                   'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
+                   'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
+                   'lateraloccipital-lh', 'lateraloccipital-rh',
+                   'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
+                   'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
+                   'medialorbitofrontal-rh', 'middletemporal-lh',
+                   'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
+                   'parahippocampal-lh', 'parahippocampal-rh',
+                   'parsopercularis-lh', 'parsopercularis-rh',
+                   'parsorbitalis-lh', 'parsorbitalis-rh',
+                   'parstriangularis-lh', 'parstriangularis-rh',
+                   'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
+                   'postcentral-rh', 'posteriorcingulate-lh',
+                   'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
+                   'precuneus-lh', 'precuneus-rh',
+                   'rostralanteriorcingulate-lh',
+                   'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
+                   'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
+                   'superiorfrontal-rh', 'superiorparietal-lh',
+                   'superiorparietal-rh', 'superiortemporal-lh',
+                   'superiortemporal-rh', 'supramarginal-lh',
+                   'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
+                   'transversetemporal-lh', 'transversetemporal-rh']
+
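+    # two groups of 34 nodes each, matching the lh/rh split in node_order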
+    group_boundaries = [0, len(label_names) / 2]
+    node_angles = circular_layout(label_names, node_order, start_pos=90,
+                                  group_boundaries=group_boundaries)
+    con = np.random.randn(68, 68)
+    plot_connectivity_circle(con, label_names, n_lines=300,
+                             node_angles=node_angles, title='test',
+                             )
+
+    assert_raises(ValueError, circular_layout, label_names, node_order,
+                  group_boundaries=[-1])
+    assert_raises(ValueError, circular_layout, label_names, node_order,
+                  group_boundaries=[20, 0])
+    plt.close('all')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_decoding.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_decoding.py
new file mode 100644
index 0000000..b81ae21
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_decoding.py
@@ -0,0 +1,124 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Jean-Remi King <jeanremi.king at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from nose.tools import assert_raises, assert_equals
+
+import numpy as np
+
+from mne.epochs import equalize_epoch_counts, concatenate_epochs
+from mne.decoding import GeneralizationAcrossTime
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import requires_sklearn, run_tests_if_main
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
+              event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
+    """Aux function for testing GAT viz"""
+    gat = GeneralizationAcrossTime()
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    decim = 30
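+    # a small channel subset plus heavy decimation keeps the GAT fit fast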
+    # Test on time generalization within one condition
+    with warnings.catch_warnings(record=True):
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), preload=True, decim=decim)
+    epochs_list = [epochs[k] for k in event_id]
+    equalize_epoch_counts(epochs_list)
+    epochs = concatenate_epochs(epochs_list)
+
+    # Test default running
+    gat = GeneralizationAcrossTime(test_times=test_times)
+    gat.fit(epochs)
+    gat.score(epochs)
+    return gat
+
+
+@requires_sklearn
+def test_gat_plot_matrix():
+    """Test GAT matrix plot"""
+    gat = _get_data()
+    gat.plot()
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_diagonal():
+    """Test GAT diagonal plot"""
+    gat = _get_data()
+    gat.plot_diagonal()
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_times():
+    """Test GAT times plot"""
+    gat = _get_data()
+    # test one line
+    gat.plot_times(gat.train_times_['times'][0])
+    # test multiple lines
+    gat.plot_times(gat.train_times_['times'])
+    # test multiple colors
+    n_times = len(gat.train_times_['times'])
+    colors = np.tile(['r', 'g', 'b'],
+                     int(np.ceil(n_times / 3)))[:n_times]
+    gat.plot_times(gat.train_times_['times'], color=colors)
+    # test invalid time point
+    assert_raises(ValueError, gat.plot_times, -1.)
+    # test float type
+    assert_raises(ValueError, gat.plot_times, 1)
+    assert_raises(ValueError, gat.plot_times, 'diagonal')
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
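+# Read the chance-level y-value back off a plot_diagonal axes; this assumes
+# the chance line is the first line of the axes' second child artist.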
+def chance(ax):
+    return ax.get_children()[1].get_lines()[0].get_ydata()[0]
+
+
+@requires_sklearn
+def test_gat_chance_level():
+    """Test GAT plot_times chance level"""
+    gat = _get_data()
+    ax = gat.plot_diagonal(chance=False)
+    ax = gat.plot_diagonal()
+    assert_equals(chance(ax), .5)
+    gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
+    ax = gat.plot_diagonal()
+    assert_equals(chance(ax), .25)
+    ax = gat.plot_diagonal(chance=1.234)
+    assert_equals(chance(ax), 1.234)
+    assert_raises(ValueError, gat.plot_diagonal, chance='foo')
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_nonsquared():
+    """Test GAT diagonal plot"""
+    gat = _get_data(test_times=dict(start=0.))
+    gat.plot()
+    ax = gat.plot_diagonal()
+    scores = ax.get_children()[1].get_lines()[2].get_ydata()
+    assert_equals(len(scores), len(gat.estimators_))
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_epochs.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_epochs.py
new file mode 100644
index 0000000..6f3a3b4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_epochs.py
@@ -0,0 +1,171 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+from nose.tools import assert_raises
+
+import numpy as np
+
+
+from mne import io, read_events, Epochs
+from mne import pick_types
+from mne.utils import run_tests_if_main, requires_version
+from mne.channels import read_layout
+
+from mne.viz import plot_drop_log, plot_epochs_image, plot_image_epochs
+from mne.viz.utils import _fake_click
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 1.0
+n_chan = 15
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return pick_types(raw.info, meg=True, eeg=False, stim=False,
+                      ecg=False, eog=False, exclude='bads')
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    # Use a subset of channels for plotting speed
+    picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+def test_plot_epochs():
+    """Test epoch plotting"""
+    import matplotlib.pyplot as plt
+    epochs = _get_epochs()
+    epochs.plot(scalings=None, title='Epochs')
+    plt.close('all')
+    fig = epochs[0].plot(picks=[0, 2, 3], scalings=None)
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+    fig = epochs.plot()
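+    # run through the browser's keyboard shortcuts (navigation, zoom, help)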
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('right')
+    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
+    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('b')
+    fig.canvas.key_press_event('f11')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('?')
+    fig.canvas.key_press_event('h')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('end')
+    fig.canvas.resize_event()
+    fig.canvas.close_event()  # closing and epoch dropping
+    plt.close('all')
+    assert_raises(RuntimeError, epochs.plot, picks=[])
+    plt.close('all')
+    with warnings.catch_warnings(record=True):
+        fig = epochs.plot()
+        # test mouse clicks
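+        # (_fake_click synthesizes a matplotlib button press; xform='data'
+        # interprets the point in data coordinates instead of axes fractions)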
+        x = fig.get_axes()[0].get_xlim()[1] / 2
+        y = fig.get_axes()[0].get_ylim()[0] / 2
+        data_ax = fig.get_axes()[0]
+        n_epochs = len(epochs)
+        _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad epoch
+        _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad epoch
+        _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in 1st axes
+        _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label
+        _fake_click(fig, data_ax, [-0.1, 0.9], button=3)
+        _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change epochs
+        _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # change channels
+        fig.canvas.close_event()  # closing and epoch dropping
+        assert(n_epochs - 1 == len(epochs))
+        plt.close('all')
+
+
+def test_plot_epochs_image():
+    """Test plotting of epochs image
+    """
+    import matplotlib.pyplot as plt
+    epochs = _get_epochs()
+    plot_epochs_image(epochs, picks=[1, 2])
+    plt.close('all')
+    with warnings.catch_warnings(record=True):
+        plot_image_epochs(epochs, picks=[1, 2])
+        plt.close('all')
+
+
+def test_plot_drop_log():
+    """Test plotting a drop log
+    """
+    import matplotlib.pyplot as plt
+    epochs = _get_epochs()
+    assert_raises(ValueError, epochs.plot_drop_log)
+    epochs.drop_bad_epochs()
+
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        epochs.plot_drop_log()
+
+        plot_drop_log([['One'], [], []])
+        plot_drop_log([['One'], ['Two'], []])
+        plot_drop_log([['One'], ['One', 'Two'], []])
+    plt.close('all')
+
+
+@requires_version('scipy', '0.12')
+def test_plot_psd_epochs():
+    """Test plotting epochs psd (+topomap)
+    """
+    import matplotlib.pyplot as plt
+    epochs = _get_epochs()
+    epochs.plot_psd()
+    assert_raises(RuntimeError, epochs.plot_psd_topomap,
+                  bands=[(0, 0.01, 'foo')])  # no freqs in range
+    epochs.plot_psd_topomap()
+    plt.close('all')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_evoked.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_evoked.py
new file mode 100644
index 0000000..e2c308e
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_evoked.py
@@ -0,0 +1,137 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+
+from mne import io, read_events, Epochs, pick_types, read_cov
+from mne.viz.evoked import _butterfly_onselect
+from mne.viz.utils import _fake_click
+from mne.utils import slow_test, run_tests_if_main
+from mne.channels import read_layout
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 0.1
+n_chan = 6
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return pick_types(raw.info, meg=True, eeg=False, stim=False,
+                      ecg=False, eog=False, exclude='bads')
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    # Use a subset of channels for plotting speed
+    picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
+    picks[0] = 2  # make sure we have a magnetometer
+    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    epochs.info['bads'] = [epochs.ch_names[-1]]
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+@slow_test
+def test_plot_evoked():
+    """Test plotting of evoked
+    """
+    import matplotlib.pyplot as plt
+    evoked = _get_epochs().average()
+    with warnings.catch_warnings(record=True):
+        fig = evoked.plot(proj=True, hline=[1], exclude=[])
+        # Test a click
+        ax = fig.get_axes()[0]
+        line = ax.lines[0]
+        _fake_click(fig, ax,
+                    [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+        _fake_click(fig, ax,
+                    [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
+        # plot with bad channels excluded
+        evoked.plot(exclude='bads')
+        evoked.plot(exclude=evoked.info['bads'])  # does the same thing
+
+        # test selective updating of dict keys is working.
+        evoked.plot(hline=[1], units=dict(mag='femto foo'))
+        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
+        evoked_delayed_ssp.plot(proj='interactive')
+        evoked_delayed_ssp.apply_proj()
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive')
+        evoked_delayed_ssp.info['projs'] = []
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive')
+        assert_raises(RuntimeError, evoked_delayed_ssp.plot,
+                      proj='interactive', axes='foo')
+        plt.close('all')
+
+        # test GFP plot overlay
+        evoked.plot(gfp=True)
+        evoked.plot(gfp='only')
+        assert_raises(ValueError, evoked.plot, gfp='foo')
+
+        evoked.plot_image(proj=True)
+        # plot with bad channels excluded
+        evoked.plot_image(exclude='bads')
+        evoked.plot_image(exclude=evoked.info['bads'])  # does the same thing
+        plt.close('all')
+
+        evoked.plot_topo()  # should auto-find layout
+        _butterfly_onselect(0, 200, ['mag'], evoked)  # test averaged topomap
+        plt.close('all')
+
+        cov = read_cov(cov_fname)
+        cov['method'] = 'empirical'
+        evoked.plot_white(cov)
+        evoked.plot_white([cov, cov])
+
+        # Hack to test plotting of maxfiltered data
+        evoked_sss = evoked.copy()
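+        # a 'max_info' entry in proc_history marks the copy as
+        # Maxwell-filtered (SSS) data for plot_white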
+        evoked_sss.info['proc_history'] = [dict(max_info=None)]
+        evoked_sss.plot_white(cov)
+        evoked_sss.plot_white(cov_fname)
+        plt.close('all')
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_ica.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_ica.py
new file mode 100644
index 0000000..ae0ce93
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_ica.py
@@ -0,0 +1,200 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from numpy.testing import assert_raises
+
+from mne import io, read_events, Epochs, read_cov
+from mne import pick_types
+from mne.utils import run_tests_if_main, requires_sklearn
+from mne.viz.utils import _fake_click
+from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.1, 0.2
+
+
+def _get_raw(preload=False):
+    return io.Raw(raw_fname, preload=preload)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return [0, 1, 2, 6, 7, 8, 12, 13, 14]  # take only a few channels
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+@requires_sklearn
+def test_plot_ica_components():
+    """Test plotting of ICA solutions
+    """
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica_picks = _get_picks(raw)
+    ica.fit(raw, picks=ica_picks)
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        for components in [0, [0], [0, 1], [0, 1] * 2, None]:
+            ica.plot_components(components, image_interp='bilinear', res=16)
+    ica.info = None
+    assert_raises(ValueError, ica.plot_components, 1)
+    assert_raises(RuntimeError, ica.plot_components, 1, ch_type='mag')
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_sources():
+    """Test plotting of ICA panel
+    """
+    import matplotlib.pyplot as plt
+    raw = io.Raw(raw_fname, preload=False)
+    raw.crop(0, 1, copy=False)
+    raw.load_data()
+    picks = _get_picks(raw)
+    epochs = _get_epochs()
+    raw.pick_channels([raw.ch_names[k] for k in picks])
+    ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
+                           ecg=False, eog=False, exclude='bads')
+    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=ica_picks)
+    raw.info['bads'] = ['MEG 0113']
+    assert_raises(RuntimeError, ica.plot_sources, inst=raw)
+    ica.plot_sources(epochs)
+    epochs.info['bads'] = ['MEG 0113']
+    assert_raises(RuntimeError, ica.plot_sources, inst=epochs)
+    epochs.info['bads'] = []
+    with warnings.catch_warnings(record=True):  # no labeled objects mpl
+        ica.plot_sources(epochs.average())
+        evoked = epochs.average()
+        fig = ica.plot_sources(evoked)
+        # Test a click
+        ax = fig.get_axes()[0]
+        line = ax.lines[0]
+        _fake_click(fig, ax,
+                    [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+        _fake_click(fig, ax,
+                    [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
+        # plot with bad channels excluded
+        ica.plot_sources(evoked, exclude=[0])
+        ica.exclude = [0]
+        ica.plot_sources(evoked)  # does the same thing
+    assert_raises(ValueError, ica.plot_sources, 'meeow')
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_overlay():
+    """Test plotting of ICA cleaning
+    """
+    import matplotlib.pyplot as plt
+    raw = _get_raw(preload=True)
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    # don't test raw, needs preload ...
+    ecg_epochs = create_ecg_epochs(raw, picks=picks)
+    ica.plot_overlay(ecg_epochs.average())
+    eog_epochs = create_eog_epochs(raw, picks=picks)
+    ica.plot_overlay(eog_epochs.average())
+    assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
+    ica.plot_overlay(raw)
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_ica_scores():
+    """Test plotting of ICA scores
+    """
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
+    assert_raises(ValueError, ica.plot_scores, [0.2])
+    plt.close('all')
+
+
+@requires_sklearn
+def test_plot_instance_components():
+    """Test plotting of components as instances of raw and epochs."""
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    fig = ica.plot_sources(raw, exclude=[0], title='Components')
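+    # exercise the same keyboard shortcuts the raw/epochs browsers support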
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('right')
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('end')
+    fig.canvas.key_press_event('f11')
+    ax = fig.get_axes()[0]
+    line = ax.lines[0]
+    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+    epochs = _get_epochs()
+    fig = ica.plot_sources(epochs, exclude=[0], title='Components')
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('right')
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('end')
+    fig.canvas.key_press_event('f11')
+    # Test a click
+    ax = fig.get_axes()[0]
+    line = ax.lines[0]
+    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_misc.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_misc.py
new file mode 100644
index 0000000..fd38840
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_misc.py
@@ -0,0 +1,135 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Cathy Nangini <cnangini at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises
+
+from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
+                 read_dipole, SourceEstimate)
+from mne.datasets import testing
+from mne.minimum_norm import read_inverse_operator
+from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
+                     plot_snr_estimate)
+from mne.utils import requires_nibabel, run_tests_if_main, slow_test
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+inv_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+dip_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc_set1.dip')
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=True)
+
+
+def _get_events():
+    return read_events(event_fname)
+
+
+def test_plot_cov():
+    """Test plotting of covariances
+    """
+    raw = _get_raw()
+    cov = read_cov(cov_fname)
+    fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
+
+
+@testing.requires_testing_data
+@requires_nibabel()
+def test_plot_bem():
+    """Test plotting of BEM contours
+    """
+    assert_raises(IOError, plot_bem, subject='bad-subject',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_bem, subject='sample',
+                  subjects_dir=subjects_dir, orientation='bad-ori')
+    plot_bem(subject='sample', subjects_dir=subjects_dir,
+             orientation='sagittal', slices=[25, 50])
+
+
+def test_plot_events():
+    """Test plotting events
+    """
+    event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
+    color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
+    raw = _get_raw()
+    events = _get_events()
+    plot_events(events, raw.info['sfreq'], raw.first_samp)
+    plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
+    # Test plotting events without sfreq
+    plot_events(events, first_samp=raw.first_samp)
+    warnings.simplefilter('always', UserWarning)
+    with warnings.catch_warnings(record=True):
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    event_id=event_labels)
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    color=color)
+        plot_events(events, raw.info['sfreq'], raw.first_samp,
+                    event_id=event_labels, color=color)
+        assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
+                      raw.first_samp, event_id={'aud_l': 1}, color=color)
+        assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
+                      raw.first_samp, event_id={'aud_l': 111}, color=color)
+
+
+@testing.requires_testing_data
+def test_plot_source_spectrogram():
+    """Test plotting of source spectrogram
+    """
+    sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
+                                            'bem', 'sample-oct-6-src.fif'))
+
+    # dense version
+    vertices = [s['vertno'] for s in sample_src]
+    n_times = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.ones((n_verts, n_times))
+    stc = SourceEstimate(stc_data, vertices, 1, 1)
+    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
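+    # empty inputs and out-of-range tmin/tmax must raise ValueError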
+    assert_raises(ValueError, plot_source_spectrogram, [], [])
+    assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
+                  [[1, 2], [3, 4]], tmin=0)
+    assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
+                  [[1, 2], [3, 4]], tmax=7)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_plot_snr():
+    """Test plotting SNR estimate
+    """
+    inv = read_inverse_operator(inv_fname)
+    evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
+    plot_snr_estimate(evoked, inv)
+
+
+@testing.requires_testing_data
+def test_plot_dipole_amplitudes():
+    """Test plotting dipole amplitudes
+    """
+    dipoles = read_dipole(dip_fname)
+    dipoles.plot_amplitudes(show=False)
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_montage.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_montage.py
new file mode 100644
index 0000000..6ea5b44
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_montage.py
@@ -0,0 +1,30 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: Simplified BSD
+
+# Set our plotters to test mode
+import matplotlib
+import os.path as op
+matplotlib.use('Agg')  # for testing don't use X server
+
+from mne.channels import read_montage, read_dig_montage  # noqa
+
+
+p_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', 'tests', 'data')
+elp = op.join(p_dir, 'test_elp.txt')
+hsp = op.join(p_dir, 'test_hsp.txt')
+hpi = op.join(p_dir, 'test_mrk.sqd')
+point_names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+
+
+def test_plot_montage():
+    """Test plotting montages
+    """
+    m = read_montage('easycap-M1')
+    m.plot()
+    m.plot(show_names=True)
+    d = read_dig_montage(hsp, hpi, elp, point_names)
+    d.plot()
+    d.plot(show_names=True)
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_raw.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_raw.py
new file mode 100644
index 0000000..311215c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_raw.py
@@ -0,0 +1,125 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from numpy.testing import assert_raises
+
+from mne import io, read_events, pick_types
+from mne.utils import requires_version, run_tests_if_main
+from mne.viz.utils import _fake_click
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+
+
+def _get_raw():
+    raw = io.Raw(raw_fname, preload=True)
+    raw.pick_channels(raw.ch_names[:9])
+    return raw
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def test_plot_raw():
+    """Test plotting of raw data
+    """
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    events = _get_events()
+    plt.close('all')  # ensure all are closed
+    with warnings.catch_warnings(record=True):
+        fig = raw.plot(events=events, show_options=True)
+        # test mouse clicks
+        x = fig.get_axes()[0].lines[1].get_xdata().mean()
+        y = fig.get_axes()[0].lines[1].get_ydata().mean()
+        data_ax = fig.get_axes()[0]
+        _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad channel
+        _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad channel
+        _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in 1st axes
+        _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label
+        _fake_click(fig, fig.get_axes()[1], [0.5, 0.5])  # change time
+        _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change channels
+        _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # open SSP window
+        fig.canvas.button_press_event(1, 1, 1)  # outside any axes
+        fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
+        fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
+        # sadly these fail when no renderer is used (i.e., when using Agg):
+        # ssp_fig = set(plt.get_fignums()) - set([fig.number])
+        # assert_equal(len(ssp_fig), 1)
+        # ssp_fig = plt.figure(list(ssp_fig)[0])
+        # ax = ssp_fig.get_axes()[0]  # only one axis is used
+        # t = [c for c in ax.get_children() if isinstance(c,
+        #      matplotlib.text.Text)]
+        # pos = np.array(t[0].get_position()) + 0.01
+        # _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # off
+        # _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # on
+        # test keypresses
+        fig.canvas.key_press_event('escape')
+        fig.canvas.key_press_event('down')
+        fig.canvas.key_press_event('up')
+        fig.canvas.key_press_event('right')
+        fig.canvas.key_press_event('left')
+        fig.canvas.key_press_event('o')
+        fig.canvas.key_press_event('-')
+        fig.canvas.key_press_event('+')
+        fig.canvas.key_press_event('=')
+        fig.canvas.key_press_event('pageup')
+        fig.canvas.key_press_event('pagedown')
+        fig.canvas.key_press_event('home')
+        fig.canvas.key_press_event('end')
+        fig.canvas.key_press_event('?')
+        fig.canvas.key_press_event('f11')
+        fig.canvas.key_press_event('escape')
+        # Color setting
+        assert_raises(KeyError, raw.plot, event_color={0: 'r'})
+        assert_raises(TypeError, raw.plot, event_color={'foo': 'r'})
+        fig = raw.plot(events=events, event_color={-1: 'r', 998: 'b'})
+        plt.close('all')
+
+
+@requires_version('scipy', '0.10')
+def test_plot_raw_filtered():
+    """Test filtering of raw plots
+    """
+    raw = _get_raw()
+    assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
+    assert_raises(ValueError, raw.plot, highpass=0)
+    assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
+    assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
+    assert_raises(ValueError, raw.plot, clipping='foo')
+    raw.plot(lowpass=1, clipping='transparent')
+    raw.plot(highpass=1, clipping='clamp')
+    raw.plot(highpass=1, lowpass=2)
+
+
+@requires_version('scipy', '0.12')
+def test_plot_raw_psd():
+    """Test plotting of raw psds
+    """
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    # normal mode
+    raw.plot_psd(tmax=2.0)
+    # specific mode
+    picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
+    raw.plot_psd(picks=picks, area_mode='range')
+    ax = plt.axes()
+    # if ax is supplied, picks must be, too:
+    assert_raises(ValueError, raw.plot_psd, ax=ax)
+    raw.plot_psd(picks=picks, ax=ax)
+    plt.close('all')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topo.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topo.py
new file mode 100644
index 0000000..127c4af
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topo.py
@@ -0,0 +1,137 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+from collections import namedtuple
+
+import numpy as np
+from numpy.testing import assert_raises
+
+
+from mne import io, read_events, Epochs
+from mne import pick_channels_evoked
+from mne.channels import read_layout
+from mne.time_frequency.tfr import AverageTFR
+from mne.utils import run_tests_if_main
+
+from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
+                     mne_analyze_colormap, plot_evoked_topo)
+from mne.viz.topo import _plot_update_evoked_topo
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+event_id, tmin, tmax = 1, -0.2, 0.2
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+def _get_events():
+    return read_events(event_name)
+
+
+def _get_picks(raw):
+    return [0, 1, 2, 6, 7, 8, 340, 341, 342]  # take only a few channels
+
+
+def _get_epochs():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0))
+    return epochs
+
+
+def _get_epochs_delayed_ssp():
+    raw = _get_raw()
+    events = _get_events()
+    picks = _get_picks(raw)
+    reject = dict(mag=4e-12)
+    epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
+                                picks=picks, baseline=(None, 0),
+                                proj='delayed', reject=reject)
+    return epochs_delayed_ssp
+
+
+def test_plot_topo():
+    """Test plotting of ERP topography
+    """
+    import matplotlib.pyplot as plt
+    # Show topography
+    evoked = _get_epochs().average()
+    plot_evoked_topo(evoked)  # should auto-find layout
+    warnings.simplefilter('always', UserWarning)
+    picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
+    picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
+    picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
+
+    # test scaling
+    with warnings.catch_warnings(record=True):
+        for ylim in [dict(mag=[-600, 600]), None]:
+            plot_topo([picked_evoked] * 2, layout, ylim=ylim)
+
+        for evo in [evoked, [evoked, picked_evoked]]:
+            assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
+
+        evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
+        ch_names = evoked_delayed_ssp.ch_names[:3]  # make it faster
+        picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
+                                                         ch_names)
+        fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
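+        # fire the figure's button-press callback (via _get_presser) with a
+        # minimal fake event to simulate a click in the first axes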
+        func = _get_presser(fig)
+        event = namedtuple('Event', 'inaxes')
+        func(event(inaxes=fig.axes[0]))
+        params = dict(evokeds=[picked_evoked_delayed_ssp],
+                      times=picked_evoked_delayed_ssp.times,
+                      fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
+        bools = [True] * len(params['projs'])
+        _plot_update_evoked_topo(params, bools)
+    # should auto-generate layout
+    plot_evoked_topo(picked_evoked_eeg.copy(),
+                     fig_background=np.zeros((4, 3, 3)), proj=True)
+    plt.close('all')
+
+
+def test_plot_topo_image_epochs():
+    """Test plotting of epochs image topography
+    """
+    import matplotlib.pyplot as plt
+    title = 'ERF images - MNE sample data'
+    epochs = _get_epochs()
+    cmap = mne_analyze_colormap(format='matplotlib')
+    plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
+                           colorbar=True, title=title, cmap=cmap)
+    plt.close('all')
+
+
+def test_plot_tfr_topo():
+    """Test plotting of TFR data
+    """
+    epochs = _get_epochs()
+    n_freqs = 3
+    nave = 1
+    data = np.random.RandomState(0).randn(len(epochs.ch_names),
+                                          n_freqs, len(epochs.times))
+    tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
+    tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
+                  vmin=0., vmax=14., show=False)
+    tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topomap.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topomap.py
new file mode 100644
index 0000000..3504bf4
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_topomap.py
@@ -0,0 +1,258 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import assert_raises, assert_array_equal
+
+from nose.tools import assert_true, assert_equal
+
+
+from mne import io, read_evokeds, read_proj
+from mne.io.constants import FIFF
+from mne.channels import read_layout, make_eeg_layout
+from mne.datasets import testing
+from mne.time_frequency.tfr import AverageTFR
+from mne.utils import slow_test
+
+from mne.viz import plot_evoked_topomap, plot_projs_topomap
+from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap,
+                             _find_peaks)
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+data_dir = testing.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+fname = op.join(base_dir, 'test-ave.fif')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+layout = read_layout('Vectorview-all')
+
+
+def _get_raw():
+    return io.Raw(raw_fname, preload=False)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_plot_topomap():
+    """Test topomap plotting
+    """
+    import matplotlib.pyplot as plt
+    from matplotlib.patches import Circle
+    # evoked
+    warnings.simplefilter('always')
+    res = 16
+    evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                          baseline=(None, 0))
+    ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
+    ev_bad.pick_channels(ev_bad.ch_names[:2])
+    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, should plot EEG
+    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
+    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
+    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
+    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
+    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
+    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time
+
+    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
+    plt.close('all')
+    axes = [plt.subplot(221), plt.subplot(222)]
+    evoked.plot_topomap(axes=axes, colorbar=False)
+    plt.close('all')
+    evoked.plot_topomap(times=[-0.1, 0.2])
+    plt.close('all')
+    mask = np.zeros_like(evoked.data, dtype=bool)
+    mask[[1, 5], :] = True
+    evoked.plot_topomap(ch_type='mag', outlines=None)
+    times = [0.1]
+    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
+    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
+    evoked.plot_topomap(times, ch_type='planar1', res=res)
+    evoked.plot_topomap(times, ch_type='planar2', res=res)
+    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
+                        show_names=True, mask_params={'marker': 'x'})
+    plt.close('all')
+    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
+                  res=res, average=-1000)
+    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
+                  res=res, average='hahahahah')
+
+    p = evoked.plot_topomap(times, ch_type='grad', res=res,
+                            show_names=lambda x: x.replace('MEG', ''),
+                            image_interp='bilinear')
+    subplot = [x for x in p.get_children() if
+               isinstance(x, matplotlib.axes.Subplot)][0]
+    assert_true(all('MEG' not in x.get_text()
+                    for x in subplot.get_children()
+                    if isinstance(x, matplotlib.text.Text)))
+
+    # Test title
+    def get_texts(p):
+        return [x.get_text() for x in p.get_children() if
+                isinstance(x, matplotlib.text.Text)]
+
+    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
+    assert_equal(len(get_texts(p)), 0)
+    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
+    texts = get_texts(p)
+    assert_equal(len(texts), 1)
+    assert_equal(texts[0], 'Custom')
+    plt.close('all')
+
+    # Delaunay triangulation warning
+    with warnings.catch_warnings(record=True):  # can't show
+        warnings.simplefilter('always')
+        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
+                  proj='interactive')  # projs have already been applied
+
+    # change to no-proj mode
+    evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                          baseline=(None, 0), proj=False)
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                  np.repeat(.1, 50))
+    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
+
+    with warnings.catch_warnings(record=True):  # file conventions
+        warnings.simplefilter('always')
+        projs = read_proj(ecg_fname)
+    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
+    plot_projs_topomap(projs, res=res)
+    plt.close('all')
+    ax = plt.subplot(111)
+    plot_projs_topomap([projs[0]], res=res, axes=ax)  # test axes param
+    plt.close('all')
+    for ch in evoked.info['chs']:
+        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
+            ch['loc'].fill(0)
+
+    # Remove extra digitization point, so EEG digitization points
+    # correspond with the EEG electrodes
+    del evoked.info['dig'][85]
+
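+    # 'head' outlines clip at radius 0.5 with autoshrink enabled; 'skirt'
+    # widens the clip radius and turns autoshrink off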
+    pos = make_eeg_layout(evoked.info).pos[:, :2]
+    pos, outlines = _check_outlines(pos, 'head')
+    assert_true('head' in outlines.keys())
+    assert_true('nose' in outlines.keys())
+    assert_true('ear_left' in outlines.keys())
+    assert_true('ear_right' in outlines.keys())
+    assert_true('autoshrink' in outlines.keys())
+    assert_true(outlines['autoshrink'])
+    assert_true('clip_radius' in outlines.keys())
+    assert_array_equal(outlines['clip_radius'], 0.5)
+
+    pos, outlines = _check_outlines(pos, 'skirt')
+    assert_true('head' in outlines.keys())
+    assert_true('nose' in outlines.keys())
+    assert_true('ear_left' in outlines.keys())
+    assert_true('ear_right' in outlines.keys())
+    assert_true('autoshrink' in outlines.keys())
+    assert_true(not outlines['autoshrink'])
+    assert_true('clip_radius' in outlines.keys())
+    assert_array_equal(outlines['clip_radius'], 0.625)
+
+    pos, outlines = _check_outlines(pos, 'skirt',
+                                    head_pos={'scale': [1.2, 1.2]})
+    assert_array_equal(outlines['clip_radius'], 0.75)
+
+    # Plot skirt
+    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')
+
+    # Pass custom outlines without patch
+    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
+    plt.close('all')
+
+    # Pass custom outlines with patch callable
+    def patch():
+        return Circle((0.5, 0.4687), radius=.46,
+                      clip_on=True, transform=plt.gca().transAxes)
+    outlines['patch'] = patch
+    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)
+
+    # Remove digitization points. Now topomap should fail
+    evoked.info['dig'] = None
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                  times, ch_type='eeg')
+    plt.close('all')
+
+    # Test error messages for invalid pos parameter
+    n_channels = len(pos)
+    data = np.ones(n_channels)
+    pos_1d = np.zeros(n_channels)
+    pos_3d = np.zeros((n_channels, 2, 2))
+    assert_raises(ValueError, plot_topomap, data, pos_1d)
+    assert_raises(ValueError, plot_topomap, data, pos_3d)
+    assert_raises(ValueError, plot_topomap, data, pos[:3, :])
+
+    pos_x = pos[:, :1]
+    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
+    assert_raises(ValueError, plot_topomap, data, pos_x)
+    assert_raises(ValueError, plot_topomap, data, pos_xyz)
+
+    # An #channels x 4 matrix should work though. In this case (x, y, width,
+    # height) is assumed.
+    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
+    plot_topomap(data, pos_xywh)
+    plt.close('all')
+
+    # Test peak finder
+    axes = [plt.subplot(131), plt.subplot(132)]
+    evoked.plot_topomap(times='peaks', axes=axes)
+    plt.close('all')
+    evoked.data = np.zeros(evoked.data.shape)
+    evoked.data[50][1] = 1
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
+    evoked.data[80][100] = 1
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
+    evoked.data[2][95] = 2
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
+    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
+
+
+def test_plot_tfr_topomap():
+    """Test plotting of TFR data
+    """
+    import matplotlib as mpl
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    times = np.linspace(-0.1, 0.1, 200)
+    n_freqs = 3
+    nave = 1
+    rng = np.random.RandomState(42)
+    data = rng.randn(len(raw.ch_names), n_freqs, len(times))
+    tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
+    tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
+                     res=16)
+
+    eclick = mpl.backend_bases.MouseEvent('button_press_event',
+                                          plt.gcf().canvas, 0, 0, 1)
+    eclick.xdata = 0.1
+    eclick.ydata = 0.1
+    eclick.inaxes = plt.gca()
+    erelease = mpl.backend_bases.MouseEvent('button_release_event',
+                                            plt.gcf().canvas, 0.9, 0.9, 1)
+    erelease.xdata = 0.3
+    erelease.ydata = 0.2
+    pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
+    _onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
+    tfr._onselect(eclick, erelease, None, 'mean', None)
+    plt.close('all')
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_utils.py
new file mode 100644
index 0000000..7a337ac
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/tests/test_utils.py
@@ -0,0 +1,87 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+import numpy as np
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_allclose
+
+from mne.viz.utils import compare_fiff, _fake_click
+from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
+from mne.utils import run_tests_if_main
+
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+cov_fname = op.join(base_dir, 'test-cov.fif')
+
+
+def test_mne_analyze_colormap():
+    """Test mne_analyze_colormap
+    """
+    assert_raises(ValueError, mne_analyze_colormap, [0])
+    assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
+    assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
+
+
+def test_compare_fiff():
+    import matplotlib.pyplot as plt
+    compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
+    plt.close('all')
+
+
+def test_clickable_image():
+    """Test the ClickableImage class."""
+    # Gen data and create clickable image
+    import matplotlib.pyplot as plt
+    im = np.random.randn(100, 100)
+    clk = ClickableImage(im)
+    clicks = [(12, 8), (46, 48), (10, 24)]
+
+    # Generate clicks
+    for click in clicks:
+        _fake_click(clk.fig, clk.ax, click, xform='data')
+    assert_allclose(np.array(clicks), np.array(clk.coords))
+    assert_true(len(clicks) == len(clk.coords))
+
+    # Exporting to layout
+    lt = clk.to_layout()
+    assert_true(lt.pos.shape[0] == len(clicks))
+    assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
+                    clicks[1][0] / float(clicks[2][0]))
+    clk.plot_clicks()
+    plt.close('all')
+
+
+def test_add_background_image():
+    """Test adding background image to a figure."""
+    import matplotlib.pyplot as plt
+    f, axs = plt.subplots(1, 2)
+    x, y = np.random.randn(2, 10)
+    im = np.random.randn(10, 10)
+    axs[0].scatter(x, y)
+    axs[1].scatter(y, x)
+    for ax in axs:
+        ax.set_aspect(1)
+
+    # Background without changing aspect
+    ax_im = add_background_image(f, im)
+    assert_true(ax_im.get_aspect() == 'auto')
+    for ax in axs:
+        assert_true(ax.get_aspect() == 1)
+
+    # Background with changing aspect
+    ax_im_asp = add_background_image(f, im, set_ratios='auto')
+    assert_true(ax_im_asp.get_aspect() == 'auto')
+    for ax in axs:
+        assert_true(ax.get_aspect() == 'auto')
+
+
+run_tests_if_main()
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topo.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topo.py
new file mode 100644
index 0000000..e847b0c
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topo.py
@@ -0,0 +1,622 @@
+"""Functions to plot M/EEG data on topo (one axes per channel)
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import warnings
+from itertools import cycle
+from functools import partial
+
+import numpy as np
+
+from ..io.pick import channel_type, pick_types
+from ..fixes import normalize_colors
+from ..utils import _clean_names, deprecated
+
+from ..defaults import _handle_default
+from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
+                    add_background_image)
+
+
+def iter_topography(info, layout=None, on_pick=None, fig=None,
+                    fig_facecolor='k', axis_facecolor='k',
+                    axis_spinecolor='k', layout_scale=None):
+    """ Create iterator over channel positions
+
+    This function returns a generator that unpacks into
+    a series of matplotlib axis objects and data / channel
+    indices, both corresponding to the sensor positions
+    of the related layout passed or inferred from the channel info.
+    `iter_topography` therefore makes it convenient to build custom
+    topography plots.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    layout : instance of mne.layout.Layout | None
+        The layout to use. If None, the layout will be guessed.
+    on_pick : callable | None
+        The callback function to be invoked when one of the axes is
+        clicked. It must accept the signature
+        `function(axis, channel_index)`.
+    fig : matplotlib.figure.Figure | None
+        The figure object to be considered. If None, a new
+        figure will be created.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    axis_facecolor : str | obj
+        The axis face color. Defaults to black.
+    axis_spinecolor : str | obj
+        The axis spine color. Defaults to black. In other words,
+        the color of the axis' edge lines.
+    layout_scale: float | None
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas. If None, nothing will be scaled.
+
+    Returns
+    -------
+    A generator that can be unpacked into
+
+    ax : matplotlib.axis.Axis
+        The current axis of the topo plot.
+    ch_idx : int
+        The related channel index.
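+
+    Examples
+    --------
+    A minimal sketch, assuming ``raw`` is an existing Raw instance (the
+    variable name is illustrative, not part of this function)::
+
+        import matplotlib.pyplot as plt
+        for ax, ch_idx in iter_topography(raw.info):
+            data, times = raw[ch_idx, :]  # 'raw' assumed defined elsewhere
+            ax.plot(times, data.ravel(), color='w')
+        plt.show()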
+    """
+    import matplotlib.pyplot as plt
+
+    if fig is None:
+        fig = plt.figure()
+
+    fig.set_facecolor(fig_facecolor)
+    if layout is None:
+        from ..channels import find_layout
+        layout = find_layout(info)
+
+    if on_pick is not None:
+        callback = partial(_plot_topo_onpick, show_func=on_pick)
+        fig.canvas.mpl_connect('button_press_event', callback)
+
+    pos = layout.pos.copy()
+    if layout_scale:
+        pos[:, :2] *= layout_scale
+
+    ch_names = _clean_names(info['ch_names'])
+    iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
+    for idx, name in iter_ch:
+        ax = plt.axes(pos[idx])
+        ax.patch.set_facecolor(axis_facecolor)
+        plt.setp(list(ax.spines.values()), color=axis_spinecolor)
+        ax.set_xticklabels([])
+        ax.set_yticklabels([])
+        plt.setp(ax.get_xticklines(), visible=False)
+        plt.setp(ax.get_yticklines(), visible=False)
+        ch_idx = ch_names.index(name)
+        vars(ax)['_mne_ch_name'] = name
+        vars(ax)['_mne_ch_idx'] = ch_idx
+        vars(ax)['_mne_ax_face_color'] = axis_facecolor
+        yield ax, ch_idx
+
+
+def _plot_topo(info=None, times=None, show_func=None, layout=None,
+               decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
+               border='none', axis_facecolor='k', fig_facecolor='k',
+               cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
+               y_label=None, vline=None, font_color='w'):
+    """Helper function to plot on sensor layout"""
+    import matplotlib.pyplot as plt
+
+    # prepare callbacks
+    tmin, tmax = times[[0, -1]]
+    on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
+                      vmax=vmax, ylim=ylim, x_label=x_label,
+                      y_label=y_label, colorbar=colorbar)
+
+    fig = plt.figure()
+    if colorbar:
+        norm = normalize_colors(vmin=vmin, vmax=vmax)
+        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
+        sm.set_array(np.linspace(vmin, vmax))
+        ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
+        cb = fig.colorbar(sm, ax=ax)
+        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
+        plt.setp(cb_yticks, color=font_color)
+        ax.axis('off')
+
+    my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
+                                   fig=fig, layout_scale=layout_scale,
+                                   axis_spinecolor=border,
+                                   axis_facecolor=axis_facecolor,
+                                   fig_facecolor=fig_facecolor)
+
+    for ax, ch_idx in my_topo_plot:
+        if layout.kind == 'Vectorview-all' and ylim is not None:
+            this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
+            ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
+        else:
+            ylim_ = ylim
+
+        show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
+                  vmax=vmax, ylim=ylim_)
+
+        if ylim_ and not any(v is None for v in ylim_):
+            plt.ylim(*ylim_)
+
+    if title is not None:
+        plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
+
+    return fig
+
+
+def _plot_topo_onpick(event, show_func=None, colorbar=False):
+    """Onpick callback that shows a single channel in a new figure"""
+
+    # make sure that the swipe gesture in OS-X doesn't open many figures
+    orig_ax = event.inaxes
+    if event.inaxes is None:
+        return
+
+    import matplotlib.pyplot as plt
+    try:
+        ch_idx = orig_ax._mne_ch_idx
+        face_color = orig_ax._mne_ax_face_color
+        fig, ax = plt.subplots(1)
+
+        plt.title(orig_ax._mne_ch_name)
+        ax.set_axis_bgcolor(face_color)
+
+        # allow custom function to override parameters
+        show_func(plt, ch_idx)
+
+    except Exception as err:
+        # matplotlib silently ignores exceptions in event handlers, so we
+        # print the error here to know what went wrong
+        print(err)
+        raise err
+
+
+def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
+                tfr=None, freq=None, vline=None, x_label=None, y_label=None,
+                colorbar=False, picker=True, cmap='RdBu_r', title=None):
+    """ Aux function to show time-freq map on topo """
+    import matplotlib.pyplot as plt
+    from matplotlib.widgets import RectangleSelector
+    extent = (tmin, tmax, freq[0], freq[-1])
+    img = ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
+                    vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
+    if isinstance(ax, plt.Axes):
+        if x_label is not None:
+            ax.set_xlabel(x_label)
+        if y_label is not None:
+            ax.set_ylabel(y_label)
+    else:
+        if x_label is not None:
+            plt.xlabel(x_label)
+        if y_label is not None:
+            plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar(mappable=img)
+    if title:
+        plt.title(title)
+    if not isinstance(ax, plt.Axes):
+        ax = plt.gca()
+    ax.RS = RectangleSelector(ax, onselect=onselect)  # reference must be kept
+
+
+def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
+                     times, vline=None, x_label=None, y_label=None,
+                     colorbar=False):
+    """ Aux function to show time series on topo """
+    import matplotlib.pyplot as plt
+    picker_flag = False
+    for data_, color_ in zip(data, color):
+        if not picker_flag:
+            # use large tol for picker so we can click anywhere in the axes
+            ax.plot(times, data_[ch_idx], color_, picker=1e9)
+            picker_flag = True
+        else:
+            ax.plot(times, data_[ch_idx], color_)
+    if vline:
+        for x in vline:
+            plt.axvline(x, color='w', linewidth=0.5)
+    if x_label is not None:
+        plt.xlabel(x_label)
+    if y_label is not None:
+        plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar()
+
+
+def _check_vlim(vlim):
+    """AUX function"""
+    return not np.isscalar(vlim) and vlim is not None
+
+
+@deprecated("It will be removed in version 0.11. "
+            "Please use evoked.plot_topo or viz.evoked.plot_evoked_topo "
+            "for a list of evoked instead.")
+def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
+              border='none', ylim=None, scalings=None, title=None, proj=False,
+              vline=[0.0], fig_facecolor='k', fig_background=None,
+              axis_facecolor='k', font_color='w', show=True):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale: float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
+    """
+    return _plot_evoked_topo(evoked=evoked, layout=layout,
+                             layout_scale=layout_scale, color=color,
+                             border=border, ylim=ylim, scalings=scalings,
+                             title=title, proj=proj, vline=vline,
+                             fig_facecolor=fig_facecolor,
+                             fig_background=fig_background,
+                             axis_facecolor=axis_facecolor,
+                             font_color=font_color, show=show)
+
+
+def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
+                      border='none', ylim=None, scalings=None, title=None,
+                      proj=False, vline=[0.0], fig_facecolor='k',
+                      fig_background=None, axis_facecolor='k', font_color='w',
+                      show=True):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale: float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
+    """
+    import matplotlib.pyplot as plt
+
+    if not isinstance(evoked, (tuple, list)):
+        evoked = [evoked]
+
+    if isinstance(color, (tuple, list)):
+        if len(color) != len(evoked):
+            raise ValueError('Lists of evoked objects and colors'
+                             ' must have the same length')
+    elif color is None:
+        colors = ['w'] + COLORS
+        stop = (slice(len(evoked)) if len(evoked) < len(colors)
+                else slice(len(colors)))
+        color = cycle(colors[stop])
+        if len(evoked) > len(colors):
+            warnings.warn('More evoked objects than colors available. '
+                          'You should pass a list of unique colors.')
+    else:
+        color = cycle([color])
+
+    times = evoked[0].times
+    if not all((e.times == times).all() for e in evoked):
+        raise ValueError('All evoked.times must be the same')
+
+    info = evoked[0].info
+    ch_names = evoked[0].ch_names
+    if not all(e.ch_names == ch_names for e in evoked):
+        raise ValueError('All evoked.picks must be the same')
+    ch_names = _clean_names(ch_names)
+
+    if layout is None:
+        from ..channels.layout import find_layout
+        layout = find_layout(info)
+
+    # XXX. at the moment we are committed to 1- / 2-sensor-types layouts
+    chs_in_layout = set(layout.names) & set(ch_names)
+    types_used = set(channel_type(info, ch_names.index(ch))
+                     for ch in chs_in_layout)
+    # remove possible reference meg channels
+    types_used = set.difference(types_used, set(['ref_meg']))
+    # one check for all vendors
+    meg_types = set(('mag', 'grad'))
+    is_meg = len(set.intersection(types_used, meg_types)) > 0
+    if is_meg:
+        types_used = list(types_used)[::-1]  # -> restore kwarg order
+        picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
+                 for kk in types_used]
+    else:
+        types_used_kwargs = dict((t, True) for t in types_used)
+        picks = [pick_types(info, meg=False, exclude=[], **types_used_kwargs)]
+    assert isinstance(picks, list) and len(types_used) == len(picks)
+
+    scalings = _handle_default('scalings', scalings)
+    evoked = [e.copy() for e in evoked]
+    for e in evoked:
+        for pick, t in zip(picks, types_used):
+            e.data[pick] = e.data[pick] * scalings[t]
+
+    if proj is True and all(e.proj is not True for e in evoked):
+        evoked = [e.apply_proj() for e in evoked]
+    elif proj == 'interactive':  # let it fail early.
+        for e in evoked:
+            _check_delayed_ssp(e)
+
+    if ylim is None:
+        def set_ylim(x):
+            return np.abs(x).max()
+        ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
+        ymax = np.array(ylim_)
+        ylim_ = (-ymax, ymax)
+    elif isinstance(ylim, dict):
+        ylim_ = _handle_default('ylim', ylim)
+        ylim_ = [ylim_[kk] for kk in types_used]
+        # extra unpack to avoid bug #1700
+        if len(ylim_) == 1:
+            ylim_ = ylim_[0]
+        else:
+            ylim_ = zip(*[np.array(yl) for yl in ylim_])
+    else:
+        raise ValueError('ylim must be None or a dict')
+
+    plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
+                       color=color, times=times, vline=vline)
+
+    fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
+                     decim=1, colorbar=False, ylim=ylim_, cmap=None,
+                     layout_scale=layout_scale, border=border,
+                     fig_facecolor=fig_facecolor, font_color=font_color,
+                     axis_facecolor=axis_facecolor,
+                     title=title, x_label='Time (s)', vline=vline)
+
+    if fig_background is not None:
+        add_background_image(fig, fig_background)
+
+    if proj == 'interactive':
+        for e in evoked:
+            _check_delayed_ssp(e)
+        params = dict(evokeds=evoked, times=times,
+                      plot_update_proj_callback=_plot_update_evoked_topo,
+                      projs=evoked[0].info['projs'], fig=fig)
+        _draw_proj_checkbox(None, params)
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_update_evoked_topo(params, bools):
+    """Helper function to update topo sensor plots"""
+    evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
+
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    evokeds = [e.copy() for e in evokeds]
+    for e in evokeds:
+        e.info['projs'] = []
+        e.add_proj(projs)
+        e.apply_proj()
+
+    # make sure to only modify the time courses, not the ticks
+    axes = fig.get_axes()
+    n_lines = len(axes[0].lines)
+    n_diff = len(evokeds) - n_lines
+    ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
+    for ax in axes:
+        lines = ax.lines[ax_slice]
+        for line, evoked in zip(lines, evokeds):
+            line.set_data(times, evoked.data[ax._mne_ch_idx])
+
+    fig.canvas.draw()
+
+
+def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
+                     data=None, epochs=None, sigma=None,
+                     order=None, scalings=None, vline=None,
+                     x_label=None, y_label=None, colorbar=False,
+                     cmap='RdBu_r'):
+    """Aux function to plot erfimage on sensor topography"""
+    from scipy import ndimage
+    import matplotlib.pyplot as plt
+    this_data = data[:, ch_idx, :].copy()
+    ch_type = channel_type(epochs.info, ch_idx)
+    if ch_type not in scalings:
+        raise KeyError('%s channel type not in scalings' % ch_type)
+    this_data *= scalings[ch_type]
+
+    if callable(order):
+        order = order(epochs.times, this_data)
+
+    if order is not None:
+        this_data = this_data[order]
+
+    if sigma > 0.:
+        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+
+    ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
+              origin='lower', vmin=vmin, vmax=vmax, picker=True,
+              cmap=cmap, interpolation='nearest')
+
+    if x_label is not None:
+        plt.xlabel(x_label)
+    if y_label is not None:
+        plt.ylabel(y_label)
+    if colorbar:
+        plt.colorbar()
+
+
+def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
+                           vmax=None, colorbar=True, order=None, cmap='RdBu_r',
+                           layout_scale=.95, title=None, scalings=None,
+                           border='none', fig_facecolor='k', font_color='w',
+                           show=True):
+    """Plot Event Related Potential / Fields image on topographies
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs.
+    layout: instance of Layout
+        System specific sensor positions.
+    sigma : float
+        The standard deviation of the Gaussian smoothing to apply along
+        the epoch axis to apply in the image. If 0., no smoothing is applied.
+    vmin : float
+        The min value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    vmax : float
+        The max value in the image. The unit is uV for EEG channels,
+        fT for magnetometers and fT/cm for gradiometers.
+    colorbar : bool
+        Whether to display a colorbar.
+    order : None | array of int | callable
+        If not None, order is used to reorder the epochs along the y-axis
+        of the image. If it is an array of int, its length should equal
+        the number of good epochs. If it is a callable, it is passed the
+        times vector and the data as a 2D array
+        (data.shape[1] == len(times)).
+    cmap : instance of matplotlib.pyplot.colormap
+        Colors to be mapped to the values.
+    layout_scale: float
+        scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    title : str
+        Title of the figure.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If
+        None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    font_color : str | obj
+        The color of tick labels in the colorbar. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
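+
+    Examples
+    --------
+    A minimal sketch, assuming ``epochs`` is an existing Epochs instance
+    (the variable name is an assumption for illustration)::
+
+        fig = plot_topo_image_epochs(epochs, sigma=0.5, colorbar=True,
+                                     show=False)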
+    """
+    import matplotlib.pyplot as plt
+    scalings = _handle_default('scalings', scalings)
+    data = epochs.get_data()
+    if vmin is None:
+        vmin = data.min()
+    if vmax is None:
+        vmax = data.max()
+    if layout is None:
+        from ..channels.layout import find_layout
+        layout = find_layout(epochs.info)
+
+    erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
+                         data=data, epochs=epochs, sigma=sigma,
+                         cmap=cmap)
+
+    fig = _plot_topo(info=epochs.info, times=epochs.times,
+                     show_func=erf_imshow, layout=layout, decim=1,
+                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+                     layout_scale=layout_scale, title=title,
+                     fig_facecolor=fig_facecolor,
+                     font_color=font_color, border=border,
+                     x_label='Time (s)', y_label='Epoch')
+    if show:
+        plt.show()
+    return fig
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topomap.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topomap.py
new file mode 100644
index 0000000..1be92dc
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/topomap.py
@@ -0,0 +1,1622 @@
+"""Functions to plot M/EEG data e.g. topographies
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import math
+import copy
+from functools import partial
+
+import numpy as np
+from scipy import linalg
+
+from ..baseline import rescale
+from ..io.constants import FIFF
+from ..io.pick import pick_types
+from ..utils import _clean_names, _time_mask, verbose, logger
+from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
+                    _check_delayed_ssp, _draw_proj_checkbox, figure_nobar)
+from ..time_frequency import compute_epochs_psd
+from ..defaults import _handle_default
+from ..channels.layout import _find_topomap_coords
+from ..fixes import _get_argrelmax
+
+
+def _prepare_topo_plot(inst, ch_type, layout):
+    """"Aux Function"""
+    info = copy.deepcopy(inst.info)
+
+    if layout is None and ch_type != 'eeg':
+        from ..channels import find_layout
+        layout = find_layout(info)
+    elif layout == 'auto':
+        layout = None
+
+    info['ch_names'] = _clean_names(info['ch_names'])
+    for ii, this_ch in enumerate(info['chs']):
+        this_ch['ch_name'] = info['ch_names'][ii]
+
+    # special case for merging grad channels
+    if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
+            np.unique([ch['coil_type'] for ch in info['chs']])):
+        from ..channels.layout import _pair_grad_sensors
+        picks, pos = _pair_grad_sensors(info, layout)
+        merge_grads = True
+    else:
+        merge_grads = False
+        if ch_type == 'eeg':
+            picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                               exclude='bads')
+        else:
+            picks = pick_types(info, meg=ch_type, ref_meg=False,
+                               exclude='bads')
+
+        if len(picks) == 0:
+            raise ValueError("No channels of type %r" % ch_type)
+
+        if layout is None:
+            pos = _find_topomap_coords(info, picks)
+        else:
+            names = [n.upper() for n in layout.names]
+            pos = list()
+            for pick in picks:
+                this_name = info['ch_names'][pick].upper()
+                if this_name in names:
+                    pos.append(layout.pos[names.index(this_name)])
+                else:
+                    logger.warning('Failed to locate %s channel positions from'
+                                   ' layout. Inferring channel positions from '
+                                   'data.' % ch_type)
+                    pos = _find_topomap_coords(info, picks)
+                    break
+
+    ch_names = [info['ch_names'][k] for k in picks]
+    if merge_grads:
+        # change names so that vectorview combined grads appear as MEG014x
+        # instead of MEG0142 or MEG0143 which are the 2 planar grads.
+        ch_names = [ch_names[k][:-1] + 'x' for k in range(0, len(ch_names), 2)]
+    pos = np.array(pos)[:, :2]  # 2D plot, otherwise interpolation bugs
+    return picks, pos, merge_grads, ch_names, ch_type
+
+
+def _plot_update_evoked_topomap(params, bools):
+    """ Helper to update topomaps """
+    projs = [proj for ii, proj in enumerate(params['projs'])
+             if ii in np.where(bools)[0]]
+
+    params['proj_bools'] = bools
+    new_evoked = params['evoked'].copy()
+    new_evoked.info['projs'] = []
+    new_evoked.add_proj(projs)
+    new_evoked.apply_proj()
+
+    data = new_evoked.data[np.ix_(params['picks'],
+                                  params['time_idx'])] * params['scale']
+    if params['merge_grads']:
+        from ..channels.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+    image_mask = params['image_mask']
+
+    pos_x, pos_y = np.asarray(params['pos'])[:, :2].T
+
+    xi = np.linspace(pos_x.min(), pos_x.max(), params['res'])
+    yi = np.linspace(pos_y.min(), pos_y.max(), params['res'])
+    Xi, Yi = np.meshgrid(xi, yi)
+    for ii, im in enumerate(params['images']):
+        Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
+        Zi[~image_mask] = np.nan
+        im.set_data(Zi)
+    for cont in params['contours']:
+        cont.set_array(np.c_[Xi, Yi, Zi])
+
+    params['fig'].canvas.draw()
+
+
+def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors=True,
+                       colorbar=False, res=64, size=1, show=True,
+                       outlines='head', contours=6, image_interp='bilinear',
+                       axes=None):
+    """Plot topographic maps of SSP projections
+
+    Parameters
+    ----------
+    projs : list of Projection
+        The projections
+    layout : None | Layout | list of Layout
+        Layout instance specifying sensor positions (does not need to be
+        specified for Neuromag data). Or a list of Layout if projections
+        are from different sensor types.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
+    colorbar : bool
+        Plot a colorbar.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : scalar
+        Side length of the topomaps in inches (only applies when plotting
+        multiple topomaps at a time).
+    show : bool
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of projectors. If instance of Axes,
+        there must be only one projector. Defaults to None.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
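+
+    Examples
+    --------
+    A sketch, assuming ``proj_fname`` names an existing ``-proj.fif`` file
+    (the path variable is an assumption for illustration)::
+
+        from mne import read_proj
+        projs = read_proj(proj_fname)  # proj_fname defined elsewhere
+        fig = plot_projs_topomap(projs, res=32)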
+    """
+    import matplotlib.pyplot as plt
+
+    if layout is None:
+        from ..channels import read_layout
+        layout = read_layout('Vectorview-all')
+
+    if not isinstance(layout, list):
+        layout = [layout]
+
+    n_projs = len(projs)
+    nrows = int(math.floor(math.sqrt(n_projs)))
+    ncols = int(math.ceil(n_projs / float(nrows)))
+
+    if axes is None:
+        plt.figure()
+        axes = list()
+        for idx in range(len(projs)):
+            ax = plt.subplot(nrows, ncols, idx + 1)
+            axes.append(ax)
+    elif isinstance(axes, plt.Axes):
+        axes = [axes]
+    if len(axes) != len(projs):
+        raise RuntimeError('There must be an axes for each picked projector.')
+    for proj_idx, proj in enumerate(projs):
+        axes[proj_idx].set_title(proj['desc'][:10] + '...')
+        ch_names = _clean_names(proj['data']['col_names'])
+        data = proj['data']['data'].ravel()
+
+        idx = []
+        for l in layout:
+            is_vv = l.kind.startswith('Vectorview')
+            if is_vv:
+                from ..channels.layout import _pair_grad_sensors_from_ch_names
+                grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
+                if grad_pairs:
+                    ch_names = [ch_names[i] for i in grad_pairs]
+
+            idx = [l.names.index(c) for c in ch_names if c in l.names]
+            if len(idx) == 0:
+                continue
+
+            pos = l.pos[idx]
+            if is_vv and grad_pairs:
+                from ..channels.layout import _merge_grad_data
+                shape = (len(idx) / 2, 2, -1)
+                pos = pos.reshape(shape).mean(axis=1)
+                data = _merge_grad_data(data[grad_pairs]).ravel()
+
+            break
+
+        if len(idx):
+            plot_topomap(data, pos[:, :2], vmax=None, cmap=cmap,
+                         sensors=sensors, res=res, axis=axes[proj_idx],
+                         outlines=outlines, contours=contours,
+                         image_interp=image_interp, show=False)
+            if colorbar:
+                plt.colorbar()
+        else:
+            raise RuntimeError('Cannot find a proper layout for projection %s'
+                               % proj['desc'])
+    tight_layout(fig=axes[0].get_figure())
+    if show and plt.get_backend() != 'agg':
+        plt.show()
+
+    return axes[0].get_figure()
+
+
+def _check_outlines(pos, outlines, head_pos=None):
+    """Check or create outlines for topoplot
+    """
+    pos = np.array(pos, float)[:, :2]  # ensure we have a copy
+    head_pos = dict() if head_pos is None else head_pos
+    if not isinstance(head_pos, dict):
+        raise TypeError('head_pos must be dict or None')
+    head_pos = copy.deepcopy(head_pos)
+    for key in head_pos.keys():
+        if key not in ('center', 'scale'):
+            raise KeyError('head_pos must only contain "center" and '
+                           '"scale"')
+        head_pos[key] = np.array(head_pos[key], float)
+        if head_pos[key].shape != (2,):
+            raise ValueError('head_pos["%s"] must have shape (2,), not '
+                             '%s' % (key, head_pos[key].shape))
+
+    if outlines in ('head', 'skirt', None):
+        radius = 0.5
+        l = np.linspace(0, 2 * np.pi, 101)
+        head_x = np.cos(l) * radius
+        head_y = np.sin(l) * radius
+        nose_x = np.array([0.18, 0, -0.18]) * radius
+        nose_y = np.array([radius - .004, radius * 1.15, radius - .004])
+        ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
+                         .532, .510, .489])
+        ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
+                          -.1313, -.1384, -.1199])
+
+        # shift and scale the electrode positions
+        if 'center' not in head_pos:
+            head_pos['center'] = 0.5 * (pos.max(axis=0) + pos.min(axis=0))
+        pos -= head_pos['center']
+
+        if outlines is not None:
+            # Define the outline of the head, ears and nose
+            outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
+                                 ear_left=(ear_x, ear_y),
+                                 ear_right=(-ear_x, ear_y))
+        else:
+            outlines_dict = dict()
+
+        if outlines == 'skirt':
+            if 'scale' not in head_pos:
+                # By default, fit electrodes inside the head circle
+                head_pos['scale'] = 1.0 / (pos.max(axis=0) - pos.min(axis=0))
+            pos *= head_pos['scale']
+
+            # Make the figure encompass slightly more than all points
+            mask_scale = 1.25 * (pos.max(axis=0) - pos.min(axis=0))
+
+            outlines_dict['autoshrink'] = False
+            outlines_dict['mask_pos'] = (mask_scale[0] * head_x,
+                                         mask_scale[1] * head_y)
+            outlines_dict['clip_radius'] = (mask_scale / 2.)
+        else:
+            if 'scale' not in head_pos:
+                # The default is to make the points occupy a slightly smaller
+                # proportion (0.85) of the total width and height
+                # this number was empirically determined (seems to work well)
+                head_pos['scale'] = 0.85 / (pos.max(axis=0) - pos.min(axis=0))
+            pos *= head_pos['scale']
+            outlines_dict['autoshrink'] = True
+            outlines_dict['mask_pos'] = head_x, head_y
+            outlines_dict['clip_radius'] = (0.5, 0.5)
+
+        outlines = outlines_dict
+
+    elif isinstance(outlines, dict):
+        if 'mask_pos' not in outlines:
+            raise ValueError('You must specify the coordinates of the image '
+                             'mask')
+    else:
+        raise ValueError('Invalid value for `outlines`')
+
+    return pos, outlines
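+
+# A sketch of a custom ``outlines`` dict (the names below are illustrative,
+# not part of the API): 'mask_pos' is required; keys other than 'patch',
+# 'autoshrink' and '*mask*' are drawn as black lines by plot_topomap:
+#
+#     circ = np.linspace(0, 2 * np.pi, 101)
+#     custom = dict(mask_pos=(np.cos(circ) / 2., np.sin(circ) / 2.),
+#                   autoshrink=False, clip_radius=(0.5, 0.5))
+#     pos, outlines = _check_outlines(pos, custom)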
+
+
+def _griddata(x, y, v, xi, yi):
+    """Aux function"""
+    xy = x.ravel() + y.ravel() * -1j
+    d = xy[None, :] * np.ones((len(xy), 1))
+    d = np.abs(d - d.T)
+    n = d.shape[0]
+    d.flat[::n + 1] = 1.
+
+    g = (d * d) * (np.log(d) - 1.)
+    g.flat[::n + 1] = 0.
+    weights = linalg.solve(g, v.ravel())
+
+    m, n = xi.shape
+    zi = np.zeros_like(xi)
+    xy = xy.T
+
+    g = np.empty(xy.shape)
+    for i in range(m):
+        for j in range(n):
+            d = np.abs(xi[i, j] + -1j * yi[i, j] - xy)
+            mask = np.where(d == 0)[0]
+            if len(mask):
+                d[mask] = 1.
+            np.log(d, out=g)
+            g -= 1.
+            g *= d * d
+            if len(mask):
+                g[mask] = 0.
+            zi[i, j] = g.dot(weights)
+    return zi
+
+
+def _plot_sensors(pos_x, pos_y, sensors, ax):
+    """Aux function"""
+    from matplotlib.patches import Circle
+    if sensors is True:
+        for x, y in zip(pos_x, pos_y):
+            ax.add_artist(Circle(xy=(x, y), radius=0.003, color='k'))
+    else:
+        ax.plot(pos_x, pos_y, sensors)
+
+
+def plot_topomap(data, pos, vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                 res=64, axis=None, names=None, show_names=False, mask=None,
+                 mask_params=None, outlines='head', image_mask=None,
+                 contours=6, image_interp='bilinear', show=True,
+                 head_pos=None, onselect=None):
+    """Plot a topographic map as image
+
+    Parameters
+    ----------
+    data : array, length = n_points
+        The data values to plot.
+    pos : array, shape = (n_points, 2)
+        For each data point, the x and y coordinates.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    axis : instance of Axis | None
+        The axis to plot to. If None, the current axis will be used.
+    names : list | None
+        List of channel names. If None, channel names are not plotted.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    mask : ndarray of bool, shape (n_channels, n_times) | None
+        The channels to be marked as significant at a given time point.
+        Indices set to `True` will be considered. Defaults to None.
+    mask_params : dict | None
+        Additional plotting parameters for plotting significant sensors.
+        Default (None) equals::
+
+           dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                linewidth=0, markersize=4)
+
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    image_mask : ndarray of bool, shape (res, res) | None
+        The image mask to cover the interpolated surface. If None, it will be
+        computed from the outline.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+    show : bool
+        Show figure if True.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+    onselect : callable | None
+        Handle for a function that is called when the user selects a set of
+        channels by rectangle selection (matplotlib ``RectangleSelector``). If
+        None interactive selection is disabled. Defaults to None.
+
+    Returns
+    -------
+    im : matplotlib.image.AxesImage
+        The interpolated data.
+    cn : matplotlib.contour.ContourSet
+        The fieldlines.
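+
+    Examples
+    --------
+    A minimal sketch with synthetic positions and data (not a real
+    montage)::
+
+        import numpy as np
+        rng = np.random.RandomState(42)
+        pos = rng.uniform(-0.4, 0.4, (32, 2))  # fake sensor positions
+        data = rng.randn(32)
+        im, cn = plot_topomap(data, pos, res=32, show=False)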
+    """
+    import matplotlib.pyplot as plt
+    from matplotlib.widgets import RectangleSelector
+
+    data = np.asarray(data)
+    if data.ndim > 1:
+        raise ValueError("Data needs to be array of shape (n_sensors,); got "
+                         "shape %s." % str(data.shape))
+
+    # Give a helpful error message for common mistakes regarding the position
+    # matrix.
+    pos_help = ("Electrode positions should be specified as a 2D array with "
+                "shape (n_channels, 2). Each row in this matrix contains the "
+                "(x, y) position of an electrode.")
+    if pos.ndim != 2:
+        error = ("{ndim}D array supplied as electrode positions, where a 2D "
+                 "array was expected").format(ndim=pos.ndim)
+        raise ValueError(error + " " + pos_help)
+    elif pos.shape[1] == 3:
+        error = ("The supplied electrode positions matrix contains 3 columns. "
+                 "Are you trying to specify XYZ coordinates? Perhaps the "
+                 "mne.channels.create_eeg_layout function is useful for you.")
+        raise ValueError(error + " " + pos_help)
+    # No error is raised in case of pos.shape[1] == 4. In this case, it is
+    # assumed the position matrix contains both (x, y) and (width, height)
+    # values, such as Layout.pos.
+    elif pos.shape[1] == 1 or pos.shape[1] > 4:
+        raise ValueError(pos_help)
+
+    if len(data) != len(pos):
+        raise ValueError("Data and pos need to be of same length. Got data of "
+                         "length %s, pos of length %s" % (len(data), len(pos)))
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
+    pos_x = pos[:, 0]
+    pos_y = pos[:, 1]
+
+    ax = axis if axis else plt.gca()
+    ax.set_xticks([])
+    ax.set_yticks([])
+    ax.set_frame_on(False)
+    if any([not pos_y.any(), not pos_x.any()]):
+        raise RuntimeError('No position information found, cannot compute '
+                           'geometries for topomap.')
+    if outlines is None:
+        xmin, xmax = pos_x.min(), pos_x.max()
+        ymin, ymax = pos_y.min(), pos_y.max()
+    else:
+        xlim = np.inf, -np.inf,
+        ylim = np.inf, -np.inf,
+        mask_ = np.c_[outlines['mask_pos']]
+        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
+                      np.max(np.r_[xlim[1], mask_[:, 0]]))
+        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
+                      np.max(np.r_[ylim[1], mask_[:, 1]]))
+
+    # interpolate data
+    xi = np.linspace(xmin, xmax, res)
+    yi = np.linspace(ymin, ymax, res)
+    Xi, Yi = np.meshgrid(xi, yi)
+    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)
+
+    if outlines is None:
+        _is_default_outlines = False
+    elif isinstance(outlines, dict):
+        _is_default_outlines = any(k.startswith('head') for k in outlines)
+
+    if _is_default_outlines and image_mask is None:
+        # prepare masking
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+
+    mask_params = _handle_default('mask_params', mask_params)
+
+    # plot outline
+    linewidth = mask_params['markeredgewidth']
+    patch = None
+    if 'patch' in outlines:
+        patch = outlines['patch']
+        patch_ = patch() if callable(patch) else patch
+        patch_.set_clip_on(False)
+        ax.add_patch(patch_)
+        ax.set_transform(ax.transAxes)
+        ax.set_clip_path(patch_)
+
+    # plot map and contour
+    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
+                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
+                   interpolation=image_interp)
+
+    # This works around a matplotlib bug that occurs when no contours are
+    # drawn: to avoid rescalings, we always draw contours.
+    # But if no contours are desired, we only draw one and make it invisible.
+    no_contours = False
+    if contours in (False, None):
+        contours, no_contours = 1, True
+    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
+                      linewidths=linewidth)
+    if no_contours is True:
+        for col in cont.collections:
+            col.set_visible(False)
+
+    if _is_default_outlines:
+        from matplotlib import patches
+        patch_ = patches.Ellipse((0, 0),
+                                 2 * outlines['clip_radius'][0],
+                                 2 * outlines['clip_radius'][1],
+                                 clip_on=True,
+                                 transform=ax.transData)
+    if _is_default_outlines or patch is not None:
+        im.set_clip_path(patch_)
+        # ax.set_clip_path(patch_)
+        if cont is not None:
+            for col in cont.collections:
+                col.set_clip_path(patch_)
+
+    if sensors is not False and mask is None:
+        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
+    elif sensors and mask is not None:
+        idx = np.where(mask)[0]
+        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
+        idx = np.where(~mask)[0]
+        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
+    elif not sensors and mask is not None:
+        idx = np.where(mask)[0]
+        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
+
+    if isinstance(outlines, dict):
+        outlines_ = dict([(k, v) for k, v in outlines.items() if k not in
+                          ['patch', 'autoshrink']])
+        for k, (x, y) in outlines_.items():
+            if 'mask' in k:
+                continue
+            ax.plot(x, y, color='k', linewidth=linewidth, clip_on=False)
+
+    if show_names:
+        if show_names is True:
+            def _show_names(x):
+                return x
+        else:
+            _show_names = show_names
+        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
+        for ii, (p, ch_id) in enumerate(zip(pos, names)):
+            if ii not in show_idx:
+                continue
+            ch_id = _show_names(ch_id)
+            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
+                    verticalalignment='center', size='x-small')
+
+    plt.subplots_adjust(top=.95)
+
+    if onselect is not None:
+        ax.RS = RectangleSelector(ax, onselect=onselect)
+    if show:
+        plt.show()
+    return im, cont
+
+
+def _make_image_mask(outlines, pos, res):
+    """Aux function
+    """
+
+    mask_ = np.c_[outlines['mask_pos']]
+    xmin, xmax = (np.min(np.r_[np.inf, mask_[:, 0]]),
+                  np.max(np.r_[-np.inf, mask_[:, 0]]))
+    ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
+                  np.max(np.r_[-np.inf, mask_[:, 1]]))
+
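+    # Iteratively shrink all positions toward the origin until every sensor
+    # falls inside the mask contour (positions are assumed to be centered
+    # around the head origin).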
+    if outlines.get('autoshrink', False) is not False:
+        inside = _inside_contour(pos, mask_)
+        outside = np.invert(inside)
+        outlier_points = pos[outside]
+        while np.any(outlier_points):  # auto shrink
+            pos *= 0.99
+            inside = _inside_contour(pos, mask_)
+            outside = np.invert(inside)
+            outlier_points = pos[outside]
+
+    image_mask = np.zeros((res, res), dtype=bool)
+    xi_mask = np.linspace(xmin, xmax, res)
+    yi_mask = np.linspace(ymin, ymax, res)
+    Xi_mask, Yi_mask = np.meshgrid(xi_mask, yi_mask)
+
+    pos_ = np.c_[Xi_mask.flatten(), Yi_mask.flatten()]
+    inds = _inside_contour(pos_, mask_)
+    image_mask[inds.reshape(image_mask.shape)] = True
+
+    return image_mask, pos
+
+
+def _inside_contour(pos, contour):
+    """Aux function"""
+    npos = len(pos)
+    x, y = pos[:, :2].T
+
+    check_mask = np.ones((npos), dtype=bool)
+    check_mask[((x < np.min(x)) | (y < np.min(y)) |
+                (x > np.max(x)) | (y > np.max(y)))] = False
+
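+    # Winding-number test: for each candidate point, unwrap the angle of the
+    # contour vertices as seen from that point. A net angular change of
+    # about 2*pi (> critval) means the contour winds around the point, i.e.
+    # the point lies inside; about 0 means outside.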
+    critval = 0.1
+    sel = np.where(check_mask)[0]
+    for this_sel in sel:
+        contourx = contour[:, 0] - pos[this_sel, 0]
+        contoury = contour[:, 1] - pos[this_sel, 1]
+        angle = np.arctan2(contoury, contourx)
+        angle = np.unwrap(angle)
+        total = np.sum(np.diff(angle))
+        check_mask[this_sel] = np.abs(total) > critval
+
+    return check_mask
+
+
+def plot_ica_components(ica, picks=None, ch_type=None, res=64,
+                        layout=None, vmin=None, vmax=None, cmap='RdBu_r',
+                        sensors=True, colorbar=False, title=None,
+                        show=True, outlines='head', contours=6,
+                        image_interp='bilinear', head_pos=None):
+    """Project unmixing matrix on interpolated sensor topogrpahy.
+
+    Parameters
+    ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA solution.
+    picks : int | array-like | None
+        The indices of the sources to be plotted.
+        If None, all are plotted in batches of 20.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+        The channel type to plot. For 'grad', the gradiometers are
+        collected in pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib
+        plot format string (e.g., 'r+' for red plusses). If True, a circle
+        will be used (via .add_artist). Defaults to True.
+    colorbar : bool
+        Plot a colorbar.
+    title : str | None
+        Title to use.
+    show : bool
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt', the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure | list
+        The figure object(s).
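+
+    Examples
+    --------
+    A minimal sketch, assuming a fitted ICA instance named ``ica`` (the
+    variable name and parameter values are illustrative only):
+
+        fig = plot_ica_components(ica, picks=range(5), ch_type='mag',
+                                  colorbar=True, show=True)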
+    """
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+    from ..channels import _get_ch_type
+
+    if picks is None:  # plot components by sets of 20
+        ch_type = _get_ch_type(ica, ch_type)
+        n_components = ica.mixing_matrix_.shape[1]
+        p = 20
+        figs = []
+        for k in range(0, n_components, p):
+            picks = range(k, min(k + p, n_components))
+            fig = plot_ica_components(ica, picks=picks,
+                                      ch_type=ch_type, res=res, layout=layout,
+                                      vmax=vmax, cmap=cmap, sensors=sensors,
+                                      colorbar=colorbar, title=title,
+                                      show=show, outlines=outlines,
+                                      contours=contours,
+                                      image_interp=image_interp)
+            figs.append(fig)
+        return figs
+    elif np.isscalar(picks):
+        picks = [picks]
+    ch_type = _get_ch_type(ica, ch_type)
+
+    data = np.dot(ica.mixing_matrix_[:, picks].T,
+                  ica.pca_components_[:ica.n_components_])
+
+    if ica.info is None:
+        raise RuntimeError('The ICA\'s measurement info is missing. Please '
+                           'fit the ICA or add the corresponding info object.')
+
+    data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type,
+                                                                layout)
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
+    if outlines not in (None, 'head'):
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+    else:
+        image_mask = None
+
+    data = np.atleast_2d(data)
+    data = data[:, data_picks]
+
+    # prepare data for iteration
+    fig, axes = _prepare_trellis(len(data), max_col=5)
+    if title is None:
+        title = 'ICA components'
+    fig.suptitle(title)
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+    for ii, data_, ax in zip(picks, data, axes):
+        ax.set_title('IC #%03d' % ii, fontsize=12)
+        data_ = _merge_grad_data(data_) if merge_grads else data_
+        vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
+        im = plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
+                          res=res, axis=ax, cmap=cmap, outlines=outlines,
+                          image_mask=image_mask, contours=contours,
+                          image_interp=image_interp, show=False)[0]
+        if colorbar:
+            divider = make_axes_locatable(ax)
+            cax = divider.append_axes("right", size="5%", pad=0.05)
+            cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
+            cbar.ax.tick_params(labelsize=12)
+            cbar.set_ticks((vmin_, vmax_))
+            cbar.ax.set_title('AU', fontsize=10)
+        ax.set_yticks([])
+        ax.set_xticks([])
+        ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.95)
+    fig.canvas.draw()
+
+    if show is True:
+        plt.show()
+    return fig
+
+
+def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
+                     ch_type=None, baseline=None, mode='mean', layout=None,
+                     vmin=None, vmax=None, cmap=None, sensors=True,
+                     colorbar=True, unit=None, res=64, size=2,
+                     cbar_fmt='%1.1e', show_names=False, title=None,
+                     axes=None, show=True, outlines='head', head_pos=None):
+    """Plot topographic maps of specific time-frequency intervals of TFR data
+
+    Parameters
+    ----------
+    tfr : AverageTFR
+        The AverageTFR object.
+    tmin : None | float
+        The first time instant to display. If None the first time point
+        available is used.
+    tmax : None | float
+        The last time instant to display. If None the last time point
+        available is used.
+    fmin : None | float
+        The first frequency to display. If None the first frequency
+        available is used.
+    fmax : None | float
+        The last frequency to display. If None the last frequency
+        available is used.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+        The channel type to plot. For 'grad', the gradiometers are
+        collected in pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    baseline : tuple or list of length 2
+        The time interval to apply rescaling / baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+        Perform baseline correction by ratio (power is divided by mean
+        power during baseline) or z-score (power is divided by standard
+        deviation of power during baseline after subtracting the mean,
+        power = [power - mean(power_baseline)] / std(power_baseline)).
+        If None, no baseline correction will be performed.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout
+        file is inferred from the data; if no appropriate layout file
+        was found, the layout is automatically generated from the sensor
+        locations.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data) or in case
+        data contains only positive values 0. If callable, the output equals
+        vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range. If None, the
+        maximum value is used. If callable, the output equals vmax(data).
+        Defaults to None.
+    cmap : matplotlib colormap | None
+        Colormap. If None and the plotted data is all positive, defaults to
+        'Reds'. If None and data contains also negative values, defaults to
+        'RdBu_r'. Defaults to None.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib
+        plot format string (e.g., 'r+' for red plusses). If True, a circle will
+        be used (via .add_artist). Defaults to True.
+    colorbar : bool
+        Plot a colorbar.
+    unit : str | None
+        The unit of the channel type used for colorbar labels.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : float
+        Side length per topomap in inches.
+    cbar_fmt : str
+        String format for colorbar values.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    title : str | None
+        Title. If None (default), no title is displayed.
+    axes : instance of Axis | None
+        The axes to plot to. If None the axes is defined automatically.
+    show : bool
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt', the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure containing the topography.
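+
+    Examples
+    --------
+    A minimal sketch, assuming an AverageTFR instance named ``tfr`` (the
+    time and frequency limits are illustrative only):
+
+        fig = plot_tfr_topomap(tfr, tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
+                               mode='logratio', baseline=(None, 0))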
+    """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(tfr, ch_type)
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+    picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type,
+                                                           layout)
+    if not show_names:
+        names = None
+
+    data = tfr.data
+
+    if mode is not None and baseline is not None:
+        data = rescale(data, tfr.times, baseline, mode, copy=True)
+
+    # crop time
+    itmin, itmax = None, None
+    idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
+    if tmin is not None:
+        itmin = idx[0]
+    if tmax is not None:
+        itmax = idx[-1] + 1
+
+    # crop freqs
+    ifmin, ifmax = None, None
+    idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
+    if fmin is not None:
+        ifmin = idx[0]
+    if fmax is not None:
+        ifmax = idx[-1] + 1
+
+    data = data[picks, ifmin:ifmax, itmin:itmax]
+    data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+
+    norm = False if np.min(data) < 0 else True
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
+    if cmap is None:
+        cmap = 'Reds' if norm else 'RdBu_r'
+
+    if axes is None:
+        fig = plt.figure()
+        ax = fig.gca()
+    else:
+        fig = axes.figure
+        ax = axes
+
+    ax.set_yticks([])
+    ax.set_xticks([])
+    ax.set_frame_on(False)
+
+    if title is not None:
+        ax.set_title(title)
+    fig_wrapper = list()
+    selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
+                                 itmin=itmin, itmax=itmax, ifmin=ifmin,
+                                 ifmax=ifmax, cmap=cmap, fig=fig_wrapper,
+                                 layout=layout)
+
+    im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
+                         axis=ax, cmap=cmap, image_interp='bilinear',
+                         contours=False, names=names, show_names=show_names,
+                         show=False, onselect=selection_callback)
+
+    if colorbar:
+        divider = make_axes_locatable(ax)
+        cax = divider.append_axes("right", size="5%", pad=0.05)
+        cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap)
+        cbar.set_ticks((vmin, vmax))
+        cbar.ax.tick_params(labelsize=12)
+        cbar.ax.set_title('AU')
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def plot_evoked_topomap(evoked, times="auto", ch_type=None, layout=None,
+                        vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                        colorbar=True, scale=None, scale_time=1e3, unit=None,
+                        res=64, size=1, cbar_fmt='%3.1f',
+                        time_format='%01d ms', proj=False, show=True,
+                        show_names=False, title=None, mask=None,
+                        mask_params=None, outlines='head', contours=6,
+                        image_interp='bilinear', average=None, head_pos=None,
+                        axes=None):
+    """Plot topographic maps of specific time points of evoked data
+
+    Parameters
+    ----------
+    evoked : Evoked
+        The Evoked object.
+    times : float | array of floats | "auto" | "peaks".
+        The time point(s) to plot. If "auto", the number of ``axes`` determines
+        the amount of time point(s). If ``axes`` is also None, 10 topographies
+        will be shown with a regular time spacing between the first and last
+        time instant. If "peaks", finds time points automatically by checking
+        for local maxima in global field power.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+        The channel type to plot. For 'grad', the gradiometers are collected in
+        pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout file
+        is inferred from the data; if no appropriate layout file was found, the
+        layout is automatically generated from the sensor locations.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
+    colorbar : bool
+        Plot a colorbar.
+    scale : dict | float | None
+        Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+        for grad and 1e15 for mag.
+    scale_time : float | None
+        Scale the time labels. Defaults to 1e3 (ms).
+    unit : dict | str | None
+        The unit of the channel type used for colorbar label. If
+        scale is None the unit is automatically determined.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
+    size : float
+        Side length per topomap in inches.
+    cbar_fmt : str
+        String format for colorbar values.
+    time_format : str
+        String format for topomap values. Defaults to "%01d ms"
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If
+        'interactive', a check box for reversible selection of SSP
+        projection vectors will be shown.
+    show : bool
+        Show figure if True.
+    show_names : bool | callable
+        If True, show channel names on top of the map. If a callable is
+        passed, channel names will be formatted using the callable; e.g., to
+        delete the prefix 'MEG ' from all channel names, pass the function
+        lambda x: x.replace('MEG ', ''). If `mask` is not None, only
+        significant sensors will be shown.
+    title : str | None
+        Title. If None (default), no title is displayed.
+    mask : ndarray of bool, shape (n_channels, n_times) | None
+        The channels to be marked as significant at a given time point.
+        Indices set to `True` will be considered. Defaults to None.
+    mask_params : dict | None
+        Additional plotting parameters for plotting significant sensors.
+        Default (None) equals::
+
+            dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                 linewidth=0, markersize=4)
+
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt', the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    image_interp : str
+        The image interpolation to be used. All matplotlib options are
+        accepted.
+    average : float | None
+        The time window around a given time to be used for averaging (seconds).
+        For example, 0.01 would translate into a window that starts 5 ms
+        before
+        and ends 5 ms after a given time point. Defaults to None, which means
+        no averaging.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of the
+        same length as ``times`` (unless ``times`` is None). If instance of
+        Axes, ``times`` must be a float or a list of one float.
+        Defaults to None.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+       The figure.
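+
+    Examples
+    --------
+    A minimal sketch, assuming an Evoked instance named ``evoked`` (the
+    time points and averaging window are illustrative only):
+
+        fig = plot_evoked_topomap(evoked, times=[0.1, 0.2], ch_type='grad',
+                                  average=0.01)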
+    """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(evoked, ch_type)
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable  # noqa
+
+    mask_params = _handle_default('mask_params', mask_params)
+    mask_params['markersize'] *= size / 2.
+    mask_params['markeredgewidth'] *= size / 2.
+
+    if isinstance(axes, plt.Axes):
+        axes = [axes]
+
+    if times == "peaks":
+        npeaks = 10 if axes is None else len(axes)
+        times = _find_peaks(evoked, npeaks)
+    elif times == "auto":
+        if axes is None:
+            times = np.linspace(evoked.times[0], evoked.times[-1], 10)
+        else:
+            times = np.linspace(evoked.times[0], evoked.times[-1], len(axes))
+    elif np.isscalar(times):
+        times = [times]
+
+    times = np.array(times)
+
+    if times.ndim != 1:
+        raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
+    if len(times) > 20:
+        raise RuntimeError('Too many plots requested. Please pass no more '
+                           'than 20 time instants.')
+
+    n_times = len(times)
+    nax = n_times + bool(colorbar)
+    width = size * nax
+    height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
+    if axes is None:
+        plt.figure(figsize=(width, height))
+        axes = list()
+        for ax_idx in range(len(times)):
+            if colorbar:  # Make room for the colorbar
+                axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
+            else:
+                axes.append(plt.subplot(1, n_times, ax_idx + 1))
+    elif colorbar:
+        logger.warning('Colorbar is drawn to the rightmost column of the '
+                       'figure.\nBe sure to provide enough space for it '
+                       'or turn it off with colorbar=False.')
+    if len(axes) != n_times:
+        raise RuntimeError('Axes and times must be equal in size.')
+    tmin, tmax = evoked.times[[0, -1]]
+    _time_comp = _time_mask(times=times, tmin=tmin, tmax=tmax)
+    if not np.all(_time_comp):
+        raise ValueError('Times should be between {0:0.3f} and {1:0.3f} (got '
+                         '{2}).'.format(tmin, tmax,
+                                        ['%0.3f' % t
+                                         for t in times[~_time_comp]]))
+
+    picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
+        evoked, ch_type, layout)
+
+    if ch_type.startswith('planar'):
+        key = 'grad'
+    else:
+        key = ch_type
+
+    scale = _handle_default('scalings', scale)[key]
+    unit = _handle_default('units', unit)[key]
+
+    if not show_names:
+        names = None
+
+    w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
+    top_frame = max((0.05 if title is None else 0.25), .2 / size)
+    fig = axes[0].get_figure()
+    fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
+                        top=1 - top_frame)
+    time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
+
+    if proj is True and evoked.proj is not True:
+        data = evoked.copy().apply_proj().data
+    else:
+        data = evoked.data
+    if average is None:
+        data = data[np.ix_(picks, time_idx)]
+    elif isinstance(average, float):
+        if not average > 0:
+            raise ValueError('The average parameter must be positive. You '
+                             'passed a negative value')
+        data_ = np.zeros((len(picks), len(time_idx)))
+        ave_time = float(average) / 2.
+        iter_times = evoked.times[time_idx]
+        for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
+                                                     iter_times - ave_time,
+                                                     iter_times + ave_time)):
+            my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
+            data_[:, ii] = data[picks][:, my_range].mean(-1)
+        data = data_
+    else:
+        raise ValueError('The average parameter must be None or a float. '
+                         'Check your input.')
+
+    data *= scale
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+        data = _merge_grad_data(data)
+
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+
+    images, contours_ = [], []
+
+    if mask is not None:
+        _picks = picks[::2 if ch_type not in ['mag', 'eeg'] else 1]
+        mask_ = mask[np.ix_(_picks, time_idx)]
+
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
+    if outlines is not None:
+        image_mask, pos = _make_image_mask(outlines, pos, res)
+    else:
+        image_mask = None
+
+    for idx, time in enumerate(times):
+        tp, cn = plot_topomap(data[:, idx], pos, vmin=vmin, vmax=vmax,
+                              sensors=sensors, res=res, names=names,
+                              show_names=show_names, cmap=cmap,
+                              mask=mask_[:, idx] if mask is not None else None,
+                              mask_params=mask_params, axis=axes[idx],
+                              outlines=outlines, image_mask=image_mask,
+                              contours=contours, image_interp=image_interp,
+                              show=False)
+
+        images.append(tp)
+        if cn is not None:
+            contours_.append(cn)
+        if time_format is not None:
+            axes[idx].set_title(time_format % (time * scale_time))
+
+    if title is not None:
+        plt.suptitle(title, verticalalignment='top', size='x-large')
+        tight_layout(pad=size, fig=fig)
+
+    if colorbar:
+        cax = plt.subplot(1, n_times + 1, n_times + 1)
+        # resize the colorbar (by default the color fills the whole axes)
+        cpos = cax.get_position()
+        if size <= 1:
+            cpos.x0 = 1 - (.7 + .1 / size) / nax
+        cpos.x1 = cpos.x0 + .1 / nax
+        cpos.y0 = .2
+        cpos.y1 = .7
+        cax.set_position(cpos)
+        if unit is not None:
+            cax.set_title(unit)
+        cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
+        cbar.set_ticks([vmin, 0, vmax])
+
+    if proj == 'interactive':
+        _check_delayed_ssp(evoked)
+        params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
+                      picks=picks, images=images, contours=contours_,
+                      time_idx=time_idx, scale=scale, merge_grads=merge_grads,
+                      res=res, pos=pos, image_mask=image_mask,
+                      plot_update_proj_callback=_plot_update_evoked_topomap)
+        _draw_proj_checkbox(None, params)
+
+    if show:
+        plt.show()
+
+    return fig
+
+
+def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None,
+                             vmin=None, vmax=None, cmap='RdBu_r',
+                             colorbar=False, cbar_fmt='%3.3f'):
+    """Aux Function"""
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+    ax.set_yticks([])
+    ax.set_xticks([])
+    ax.set_frame_on(False)
+    vmin = np.min(data) if vmin is None else vmin
+    vmax = np.max(data) if vmax is None else vmax
+
+    if title is not None:
+        ax.set_title(title, fontsize=10)
+    im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axis=ax,
+                         cmap=cmap, image_interp='bilinear', contours=False,
+                         show=False)
+
+    if colorbar is True:
+        divider = make_axes_locatable(ax)
+        cax = divider.append_axes("right", size="10%", pad=0.25)
+        cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
+        cbar.set_ticks((vmin, vmax))
+        if unit is not None:
+            cbar.ax.set_title(unit, fontsize=8)
+        cbar.ax.tick_params(labelsize=8)
+
+
+@verbose
+def plot_epochs_psd_topomap(epochs, bands=None, vmin=None, vmax=None,
+                            tmin=None, tmax=None,
+                            proj=False, n_fft=256, ch_type=None,
+                            n_overlap=0, layout=None,
+                            cmap='RdBu_r', agg_fun=None, dB=False, n_jobs=1,
+                            normalize=False, cbar_fmt='%0.3f',
+                            outlines='head', show=True, verbose=None):
+    """Plot the topomap of the power spectral density across epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object
+    bands : list of tuple | None
+        The lower and upper frequency and the name for that band. If None
+        (default), expands to:
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, np.min(data) is used. If callable, the output equals
+        vmin(data).
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    tmin : float | None
+        Start time to consider.
+    tmax : float | None
+        End time to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+        The channel type to plot. For 'grad', the gradiometers are collected in
+        pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    n_overlap : int
+        The number of points of overlap between blocks.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout
+        file is inferred from the data; if no appropriate layout file was
+        found, the layout is automatically generated from the sensor
+        locations.
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    agg_fun : callable
+        The function used to aggregate over frequencies.
+        Defaults to np.sum if normalize is True, else np.mean.
+    dB : bool
+        If True, transform data to decibels (with ``10 * np.log10(data)``)
+        following the application of `agg_fun`. Only valid if normalize is
+        False.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    normalize : bool
+        If True, each band will be divided by the total power. Defaults to
+        False.
+    cbar_fmt : str
+        The colorbar format. Defaults to '%0.3f'.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt', the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    show : bool
+        Show figure if True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
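+
+    Examples
+    --------
+    A minimal sketch, assuming an Epochs instance named ``epochs`` (the
+    band limits are illustrative only):
+
+        bands = [(8, 12, 'Alpha'), (12, 30, 'Beta')]
+        fig = plot_epochs_psd_topomap(epochs, bands=bands, normalize=True)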
+    """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(epochs, ch_type)
+
+    picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
+        epochs, ch_type, layout)
+
+    psds, freqs = compute_epochs_psd(epochs, picks=picks, n_fft=n_fft,
+                                     tmin=tmin, tmax=tmax,
+                                     n_overlap=n_overlap, proj=proj,
+                                     n_jobs=n_jobs)
+    psds = np.mean(psds, axis=0)
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+        psds = _merge_grad_data(psds)
+
+    return plot_psds_topomap(
+        psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun, vmin=vmin,
+        vmax=vmax, bands=bands, cmap=cmap, dB=dB, normalize=normalize,
+        cbar_fmt=cbar_fmt, outlines=outlines, show=show)
+
+
+def plot_psds_topomap(
+        psds, freqs, pos, agg_fun=None, vmin=None, vmax=None, bands=None,
+        cmap='RdBu_r', dB=True, normalize=False, cbar_fmt='%0.3f',
+        outlines='head', show=True):
+    """Plot spatial maps of PSDs
+
+    Parameters
+    ----------
+    psds : np.ndarray of float, shape (n_channels, n_freqs)
+        Power spectral densities
+    freqs : np.ndarray of float, shape (n_freqs)
+        Frequencies used to compute psds.
+    pos : numpy.ndarray of float, shape (n_sensors, 2)
+        The positions of the sensors.
+    agg_fun : callable
+        The function used to aggregate over frequencies.
+        Defaults to np.sum if normalize is True, else np.mean.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, np.min(data) is used. If callable, the output equals
+        vmin(data).
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    bands : list of tuple | None
+        The lower and upper frequency and the name for that band. If None
+        (default), expands to:
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    dB : bool
+        If True, transform data to decibels (with ``10 * np.log10(data)``)
+        following the application of `agg_fun`. Only valid if normalize is
+        False.
+    normalize : bool
+        If True, each band will be divided by the total power. Defaults to
+        False.
+    cbar_fmt : str
+        The colorbar format. Defaults to '%0.3f'.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt', the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure distributing one image per channel across sensor topography.
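+
+    Examples
+    --------
+    A minimal sketch, assuming ``psds``, ``freqs`` and 2D sensor positions
+    ``pos`` are available (e.g. from compute_epochs_psd and a layout):
+
+        fig = plot_psds_topomap(psds, freqs, pos, dB=True,
+                                bands=[(8, 12, 'Alpha')])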
+    """
+
+    import matplotlib.pyplot as plt
+
+    if bands is None:
+        bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                 (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    if agg_fun is None:
+        agg_fun = np.sum if normalize is True else np.mean
+
+    if normalize is True:
+        psds /= psds.sum(axis=-1)[..., None]
+        assert np.allclose(psds.sum(axis=-1), 1.)
+
+    n_axes = len(bands)
+    fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
+    if n_axes == 1:
+        axes = [axes]
+
+    for ax, (fmin, fmax, title) in zip(axes, bands):
+        freq_mask = (fmin < freqs) & (freqs < fmax)
+        if freq_mask.sum() == 0:
+            raise RuntimeError('No frequencies in band "%s" (%s, %s)'
+                               % (title, fmin, fmax))
+        data = agg_fun(psds[:, freq_mask], axis=1)
+        if dB is True and normalize is False:
+            data = 10 * np.log10(data)
+            unit = 'dB'
+        else:
+            unit = 'power'
+
+        _plot_topomap_multi_cbar(data, pos, ax, title=title,
+                                 vmin=vmin, vmax=vmax, cmap=cmap,
+                                 colorbar=True, unit=unit, cbar_fmt=cbar_fmt)
+    tight_layout(fig=fig)
+    fig.canvas.draw()
+    if show:
+        plt.show()
+    return fig
+
+
+def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
+              cmap, fig, layout=None):
+    """Callback called from topomap for drawing average tfr over channels."""
+    import matplotlib.pyplot as plt
+    pos, _ = _check_outlines(pos, outlines='head', head_pos=None)
+    ax = eclick.inaxes
+    xmin = min(eclick.xdata, erelease.xdata)
+    xmax = max(eclick.xdata, erelease.xdata)
+    ymin = min(eclick.ydata, erelease.ydata)
+    ymax = max(eclick.ydata, erelease.ydata)
+    indices = [i for i in range(len(pos)) if pos[i][0] < xmax and
+               pos[i][0] > xmin and pos[i][1] < ymax and pos[i][1] > ymin]
+    for idx, circle in enumerate(ax.artists):
+        if idx in indices:
+            circle.set_color('r')
+        else:
+            circle.set_color('black')
+    plt.gcf().canvas.draw()
+    if not indices:
+        return
+    data = tfr.data
+    if ch_type == 'mag':
+        picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
+        data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[picks[x]] for x in indices]
+    elif ch_type == 'grad':
+        picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
+        from ..channels.layout import _pair_grad_sensors
+        grads = _pair_grad_sensors(tfr.info, layout=layout,
+                                   topomap_coords=False)
+        idxs = list()
+        for idx in indices:
+            idxs.append(grads[idx * 2])
+            idxs.append(grads[idx * 2 + 1])  # pair of grads
+        data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[x] for x in idxs]
+    elif ch_type == 'eeg':
+        picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
+        data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[picks[x]] for x in indices]
+    logger.info('Averaging TFR over channels ' + str(chs))
+    if len(fig) == 0:
+        fig.append(figure_nobar())
+    if not plt.fignum_exists(fig[0].number):
+        fig[0] = figure_nobar()
+    ax = fig[0].add_subplot(111)
+    itmax = min(itmax, len(tfr.times) - 1)
+    ifmax = min(ifmax, len(tfr.freqs) - 1)
+    extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
+              tfr.freqs[ifmax])
+
+    title = 'Average over %d %s channels.' % (len(chs), ch_type)
+    ax.set_title(title)
+    ax.set_xlabel('Time (ms)')
+    ax.set_ylabel('Frequency (Hz)')
+    img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
+                    cmap=cmap)
+    if len(fig[0].get_axes()) < 2:
+        fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
+    else:
+        fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
+    fig[0].canvas.draw()
+    plt.figure(fig[0].number)
+    plt.show()
+
+
+def _find_peaks(evoked, npeaks):
+    """Helper function for finding peaks from evoked data
+    Returns ``npeaks`` biggest peaks as a list of time points.
+    """
+    argrelmax = _get_argrelmax()
+    gfp = evoked.data.std(axis=0)
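+    # Global field power (std across channels) peaks indicate time points of
+    # strong, spatially structured activity; `order` sets the minimum spacing
+    # (in samples) between detected local maxima.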
+    order = len(evoked.times) // 30
+    if order < 1:
+        order = 1
+    peaks = argrelmax(gfp, order=order, axis=0)[0]
+    if len(peaks) > npeaks:
+        max_indices = np.argsort(gfp[peaks])[-npeaks:]
+        peaks = np.sort(peaks[max_indices])
+    times = evoked.times[peaks]
+    if len(times) == 0:
+        times = [evoked.times[gfp.argmax()]]
+    return times
diff --git a/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/utils.py b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/utils.py
new file mode 100644
index 0000000..89796a3
--- /dev/null
+++ b/debian/python-mne/usr/lib/python2.7/dist-packages/mne/viz/utils.py
@@ -0,0 +1,844 @@
+"""Utility functions for plotting M/EEG data
+"""
+from __future__ import print_function
+
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: Simplified BSD
+
+import math
+from functools import partial
+import difflib
+import webbrowser
+from warnings import warn
+import tempfile
+import numpy as np
+
+from ..io import show_fiff
+from ..utils import verbose, set_config
+
+
+COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
+          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
+
+
+def _setup_vmin_vmax(data, vmin, vmax, norm=False):
+    """Aux function to handle vmin and vmax parameters"""
+    if vmax is None and vmin is None:
+        vmax = np.abs(data).max()
+        if norm:
+            vmin = 0.
+        else:
+            vmin = -vmax
+    else:
+        if callable(vmin):
+            vmin = vmin(data)
+        elif vmin is None:
+            if norm:
+                vmin = 0.
+            else:
+                vmin = np.min(data)
+        if callable(vmax):
+            vmax = vmax(data)
+        elif vmax is None:
+            vmax = np.max(data)
+    return vmin, vmax
+
+
+def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
+    """ Adjust subplot parameters to give specified padding.
+
+    Note. For plotting please use this function instead of plt.tight_layout
+
+    Parameters
+    ----------
+    pad : float
+        padding between the figure edge and the edges of subplots, as a
+        fraction of the font-size.
+    h_pad : float
+        Padding height between edges of adjacent subplots.
+        Defaults to `pad_inches`.
+    w_pad : float
+        Padding width between edges of adjacent subplots.
+        Defaults to `pad_inches`.
+    fig : instance of Figure
+        Figure to apply changes to.
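+
+    Examples
+    --------
+    A minimal sketch:
+
+        import matplotlib.pyplot as plt
+        fig, axes = plt.subplots(2, 2)
+        tight_layout(pad=1.5, fig=fig)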
+    """
+    import matplotlib.pyplot as plt
+    fig = plt.gcf() if fig is None else fig
+
+    fig.canvas.draw()
+    try:  # see https://github.com/matplotlib/matplotlib/issues/2654
+        fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
+    except Exception:
+        warn('Matplotlib function \'tight_layout\' is not supported.'
+             ' Skipping subplot adjustment.')
+    else:
+        try:
+            fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
+        except Exception:
+            pass
+
+
+def _check_delayed_ssp(container):
+    """ Aux function to be used for interactive SSP selection
+    """
+    if container.proj is True or\
+       all(p['active'] for p in container.info['projs']):
+        raise RuntimeError('Projs are already applied. Please initialize'
+                           ' the data with proj set to False.')
+    elif len(container.info['projs']) < 1:
+        raise RuntimeError('No projs found in evoked.')
+
+
+def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
+    """Return a colormap similar to that used by mne_analyze
+
+    Parameters
+    ----------
+    limits : list (or array) of length 3 or 6
+        Bounds for the colormap, which will be mirrored across zero if length
+        3, or completely specified (and potentially asymmetric) if length 6.
+    format : str
+        Type of colormap to return. If 'matplotlib', will return a
+        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
+        return an RGBA array of shape (256, 4).
+
+    Returns
+    -------
+    cmap : instance of matplotlib.colors.LinearSegmentedColormap | array
+        A teal->blue->gray->red->yellow colormap.
+
+    Notes
+    -----
+    This will return a colormap that displays correctly for data that are
+    scaled by the plotting function to span [-fmax, fmax].
+
+    Examples
+    --------
+    The following code will plot a STC using standard MNE limits:
+
+        colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
+        brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
+        brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
+
+    """
+    # Ensure limits is an array
+    limits = np.asarray(limits, dtype='float')
+
+    if len(limits) != 3 and len(limits) != 6:
+        raise ValueError('limits must have 3 or 6 elements')
+    if len(limits) == 3 and any(limits < 0.):
+        raise ValueError('if 3 elements, limits must all be non-negative')
+    if any(np.diff(limits) <= 0):
+        raise ValueError('limits must be monotonically increasing')
+    if format == 'matplotlib':
+        from matplotlib import colors
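+        # Map the (mirrored) limit values onto [0, 1] positions for the
+        # segmented-colormap control points.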
+        if len(limits) == 3:
+            limits = (np.concatenate((-np.flipud(limits), limits)) +
+                      limits[-1]) / (2 * limits[-1])
+        else:
+            limits = (limits - np.min(limits)) / np.max(limits -
+                                                        np.min(limits))
+
+        cdict = {'red': ((limits[0], 0.0, 0.0),
+                         (limits[1], 0.0, 0.0),
+                         (limits[2], 0.5, 0.5),
+                         (limits[3], 0.5, 0.5),
+                         (limits[4], 1.0, 1.0),
+                         (limits[5], 1.0, 1.0)),
+                 'green': ((limits[0], 1.0, 1.0),
+                           (limits[1], 0.0, 0.0),
+                           (limits[2], 0.5, 0.5),
+                           (limits[3], 0.5, 0.5),
+                           (limits[4], 0.0, 0.0),
+                           (limits[5], 1.0, 1.0)),
+                 'blue': ((limits[0], 1.0, 1.0),
+                          (limits[1], 1.0, 1.0),
+                          (limits[2], 0.5, 0.5),
+                          (limits[3], 0.5, 0.5),
+                          (limits[4], 0.0, 0.0),
+                          (limits[5], 0.0, 0.0))}
+        return colors.LinearSegmentedColormap('mne_analyze', cdict)
+    elif format == 'mayavi':
+        if len(limits) == 3:
+            limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
+                limits[-1]
+        else:
+            limits = np.concatenate((limits[:3], [0], limits[3:]))
+            limits /= np.max(np.abs(limits))
+        r = np.array([0, 0, 0, 0, 1, 1, 1])
+        g = np.array([1, 0, 0, 0, 0, 0, 1])
+        b = np.array([1, 1, 1, 0, 0, 0, 0])
+        a = np.array([1, 1, 0, 0, 0, 1, 1])
+        xp = (np.arange(256) - 128) / 128.0
+        colormap = np.r_[[np.interp(xp, limits, 255 * c)
+                          for c in [r, g, b, a]]].T
+        return colormap
+    else:
+        raise ValueError('format must be either matplotlib or mayavi')
+
+
+def _toggle_options(event, params):
+    """Toggle options (projectors) dialog"""
+    import matplotlib.pyplot as plt
+    if len(params['projs']) > 0:
+        if params['fig_proj'] is None:
+            _draw_proj_checkbox(event, params, draw_current_state=False)
+        else:
+            # turn off options dialog
+            plt.close(params['fig_proj'])
+            del params['proj_checks']
+            params['fig_proj'] = None
+
+
+def _toggle_proj(event, params):
+    """Operation to perform when proj boxes clicked"""
+    # read options if possible
+    if 'proj_checks' in params:
+        bools = [x[0].get_visible() for x in params['proj_checks'].lines]
+        for bi, (b, p) in enumerate(zip(bools, params['projs'])):
+            # see if they tried to deactivate an active one
+            if not b and p['active']:
+                bools[bi] = True
+    else:
+        bools = [True] * len(params['projs'])
+
+    compute_proj = False
+    if 'proj_bools' not in params:
+        compute_proj = True
+    elif not np.array_equal(bools, params['proj_bools']):
+        compute_proj = True
+
+    # if projectors changed, update plots
+    if compute_proj is True:
+        params['plot_update_proj_callback'](params, bools)
+
+
+def _get_help_text(params):
+    """Aux function for customizing help dialogs text."""
+    text, text2 = list(), list()
+
+    text.append(u'\u2190 : \n')
+    text.append(u'\u2192 : \n')
+    text.append(u'\u2193 : \n')
+    text.append(u'\u2191 : \n')
+    text.append(u'- : \n')
+    text.append(u'+ or = : \n')
+    text.append(u'Home : \n')
+    text.append(u'End : \n')
+    text.append(u'Page down : \n')
+    text.append(u'Page up : \n')
+
+    text.append(u'F11 : \n')
+    text.append(u'? : \n')
+    text.append(u'Esc : \n\n')
+    text.append(u'Mouse controls\n')
+    text.append(u'click on data :\n')
+
+    text2.append('Navigate left\n')
+    text2.append('Navigate right\n')
+
+    text2.append('Scale down\n')
+    text2.append('Scale up\n')
+
+    text2.append('Toggle full screen mode\n')
+    text2.append('Open help box\n')
+    text2.append('Quit\n\n\n')
+    if 'raw' in params:
+        text2.insert(4, 'Reduce the time shown per view\n')
+        text2.insert(5, 'Increase the time shown per view\n')
+        text.append(u'click elsewhere in the plot :\n')
+        if 'ica' in params:
+            text.append(u'click component name :\n')
+            text2.insert(2, 'Navigate components down\n')
+            text2.insert(3, 'Navigate components up\n')
+            text2.insert(8, 'Reduce the number of components per view\n')
+            text2.insert(9, 'Increase the number of components per view\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Show topography for the component\n')
+        else:
+            text.append(u'click channel name :\n')
+            text2.insert(2, 'Navigate channels down\n')
+            text2.insert(3, 'Navigate channels up\n')
+            text2.insert(8, 'Reduce the number of channels per view\n')
+            text2.insert(9, 'Increase the number of channels per view\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Mark bad channel\n')
+
+    elif 'epochs' in params:
+        text.append(u'right click :\n')
+        text2.insert(4, 'Reduce the number of epochs per view\n')
+        text2.insert(5, 'Increase the number of epochs per view\n')
+        if 'ica' in params:
+            text.append(u'click component name :\n')
+            text2.insert(2, 'Navigate components down\n')
+            text2.insert(3, 'Navigate components up\n')
+            text2.insert(8, 'Reduce the number of components per view\n')
+            text2.insert(9, 'Increase the number of components per view\n')
+            text2.append('Mark component for exclusion\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Show topography for the component\n')
+        else:
+            text.append(u'click channel name :\n')
+            text.append(u'right click channel name :\n')
+            text2.insert(2, 'Navigate channels down\n')
+            text2.insert(3, 'Navigate channels up\n')
+            text2.insert(8, 'Reduce the number of channels per view\n')
+            text2.insert(9, 'Increase the number of channels per view\n')
+            text.insert(10, u'b : \n')
+            text2.insert(10, 'Toggle butterfly plot on/off\n')
+            text.insert(11, u'h : \n')
+            text2.insert(11, 'Show histogram of peak-to-peak values\n')
+            text2.append('Mark bad epoch\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Plot ERP/ERF image\n')
+            text.append(u'middle click :\n')
+            text2.append('Show channel name (butterfly plot)\n')
+        text.insert(11, u'o : \n')
+        text2.insert(11, 'View settings (orig. view only)\n')
+
+    return ''.join(text), ''.join(text2)
+
+
+def _prepare_trellis(n_cells, max_col):
+    """Aux function
+    """
+    import matplotlib.pyplot as plt
+    if n_cells == 1:
+        nrow = ncol = 1
+    elif n_cells <= max_col:
+        nrow, ncol = 1, n_cells
+    else:
+        nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
+
+    fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
+    axes = [axes] if ncol == nrow == 1 else axes.flatten()
+    for ax in axes[n_cells:]:  # hide unused axes
+        ax.set_visible(False)
+    return fig, axes
+
+
+def _draw_proj_checkbox(event, params, draw_current_state=True):
+    """Toggle options (projectors) dialog"""
+    from matplotlib import widgets
+    projs = params['projs']
+    # turn on options dialog
+
+    labels = [p['desc'] for p in projs]
+    actives = ([p['active'] for p in projs] if draw_current_state else
+               [True] * len(params['projs']))
+
+    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
+    height = len(projs) / 6.0 + 0.5
+    fig_proj = figure_nobar(figsize=(width, height))
+    fig_proj.canvas.set_window_title('SSP projection vectors')
+    params['fig_proj'] = fig_proj  # necessary for proper toggling
+    ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
+
+    proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
+    # change already-applied projectors to red
+    for ii, p in enumerate(projs):
+        if p['active'] is True:
+            for x in proj_checks.lines[ii]:
+                x.set_color('r')
+
+    proj_checks.on_clicked(partial(_toggle_proj, params=params))
+    params['proj_checks'] = proj_checks
+
+    # this should work for non-test cases
+    try:
+        fig_proj.canvas.draw()
+        fig_proj.show()
+    except Exception:
+        pass
+
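
[The CheckButtons wiring above follows the stock matplotlib widget pattern;
a self-contained sketch, with a made-up projector label:

    import matplotlib.pyplot as plt
    from matplotlib import widgets

    fig = plt.figure(figsize=(2, 1))
    ax = fig.add_axes((0, 0, 1, 1), frameon=False)
    checks = widgets.CheckButtons(ax, labels=['PCA-v1'], actives=[True])

    def on_clicked(label):
        print(label)  # called with the label of the toggled box

    checks.on_clicked(on_clicked)
    plt.show()
]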
+
+def _layout_figure(params):
+    """Function for setting figure layout. Shared with raw and epoch plots"""
+    size = params['fig'].get_size_inches() * params['fig'].dpi
+    scroll_width = 25
+    hscroll_dist = 25
+    vscroll_dist = 10
+    l_border = 100
+    r_border = 10
+    t_border = 35
+    b_border = 40
+
+    # only bother trying to reset layout if it's reasonable to do so
+    if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
+        return
+
+    # convert to relative units
+    scroll_width_x = scroll_width / size[0]
+    scroll_width_y = scroll_width / size[1]
+    vscroll_dist /= size[0]
+    hscroll_dist /= size[1]
+    l_border /= size[0]
+    r_border /= size[0]
+    t_border /= size[1]
+    b_border /= size[1]
+    # main axis (traces)
+    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
+    ax_y = hscroll_dist + scroll_width_y + b_border
+    ax_height = 1.0 - ax_y - t_border
+
+    pos = [l_border, ax_y, ax_width, ax_height]
+
+    params['ax'].set_position(pos)
+    if 'ax2' in params:
+        params['ax2'].set_position(pos)
+    # vscroll (channels)
+    pos = [ax_width + l_border + vscroll_dist, ax_y,
+           scroll_width_x, ax_height]
+    params['ax_vscroll'].set_position(pos)
+    # hscroll (time)
+    pos = [l_border, b_border, ax_width, scroll_width_y]
+    params['ax_hscroll'].set_position(pos)
+    if 'ax_button' in params:
+        # options button
+        pos = [l_border + ax_width + vscroll_dist, b_border,
+               scroll_width_x, scroll_width_y]
+        params['ax_button'].set_position(pos)
+    if 'ax_help_button' in params:
+        pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
+               scroll_width_x * 2, scroll_width_y]
+        params['ax_help_button'].set_position(pos)
+    params['fig'].canvas.draw()
+
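
[The divisions by figure size above convert pixel margins into the
fractional coordinates that set_position expects; for example, with a
hypothetical figure size:

    # an 800 x 600 px figure: a 25 px scrollbar occupies
    size = (800., 600.)
    scroll_width_x = 25 / size[0]  # 0.03125 of the width
    scroll_width_y = 25 / size[1]  # ~0.0417 of the height
]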
+
+@verbose
+def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
+                 read_limit=np.inf, max_str=30, verbose=None):
+    """Compare the contents of two fiff files using diff and show_fiff
+
+    Parameters
+    ----------
+    fname_1 : str
+        First file to compare.
+    fname_2 : str
+        Second file to compare.
+    fname_out : str | None
+        Filename to store the resulting diff. If None, a temporary
+        file will be created.
+    show : bool
+        If True, show the resulting diff in a new tab in a web browser.
+    indent : str
+        How to indent the lines.
+    read_limit : int
+        Max number of bytes of data to read from a tag. Can be np.inf
+        to always read all data (helps test read completion).
+    max_str : int
+        Max number of characters of string representation to print for
+        each tag's data.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fname_out : str
+        The filename used for storing the diff. Could be useful for
+        when a temporary file is used.
+    """
+    file_1 = show_fiff(fname_1, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    file_2 = show_fiff(fname_2, output=list, indent=indent,
+                       read_limit=read_limit, max_str=max_str)
+    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
+    if fname_out is not None:
+        f = open(fname_out, 'w')
+    else:
+        f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
+        fname_out = f.name
+    with f as fid:
+        fid.write(diff)
+    if show is True:
+        webbrowser.open_new_tab(fname_out)
+    return fname_out
+
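
[Typical use, assuming compare_fiff is exported from mne.viz as in this
tree; the file names are placeholders:

    from mne.viz import compare_fiff
    # writes an HTML diff of the two files and opens it in a browser tab
    out = compare_fiff('run_01_raw.fif', 'run_02_raw.fif',
                       fname_out='raw_diff.html')
    print(out)  # 'raw_diff.html'
]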
+
+def figure_nobar(*args, **kwargs):
+    """Make matplotlib figure with no toolbar"""
+    from matplotlib import rcParams, pyplot as plt
+    old_val = rcParams['toolbar']
+    try:
+        rcParams['toolbar'] = 'none'
+        fig = plt.figure(*args, **kwargs)
+        # remove button press catchers (for toolbar)
+        cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
+        for key in cbs:
+            fig.canvas.callbacks.disconnect(key)
+    finally:
+        # always restore the user's toolbar setting
+        rcParams['toolbar'] = old_val
+    return fig
+
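
[figure_nobar forwards its arguments to plt.figure, so it is called exactly
like plt.figure, e.g. as _onclick_help does below:

    fig = figure_nobar(figsize=(6, 5), dpi=80)  # toolbar-free figure
]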
+
+def _helper_raw_resize(event, params):
+    """Helper for resizing"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_figure(params)
+
+
+def _plot_raw_onscroll(event, params, len_channels=None):
+    """Interpret scroll events"""
+    if len_channels is None:
+        len_channels = len(params['info']['ch_names'])
+    orig_start = params['ch_start']
+    if event.step < 0:
+        params['ch_start'] = min(params['ch_start'] + params['n_channels'],
+                                 len_channels - params['n_channels'])
+    else:  # scrolled up
+        params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
+    if orig_start != params['ch_start']:
+        _channels_changed(params, len_channels)
+
+
+def _channels_changed(params, len_channels):
+    """Helper function for dealing with the vertical shift of the viewport."""
+    if params['ch_start'] + params['n_channels'] > len_channels:
+        params['ch_start'] = len_channels - params['n_channels']
+    if params['ch_start'] < 0:
+        params['ch_start'] = 0
+    params['plot_fun']()
+
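
[The two clamps in _channels_changed are equivalent to a single clip of the
viewport start into [0, len_channels - n_channels]:

    ch_start = max(0, min(ch_start, len_channels - n_channels))
]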
+
+def _plot_raw_time(value, params):
+    """Deal with changed time value"""
+    info = params['info']
+    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
+    if value > max_times:
+        value = max_times
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+
+
+def _plot_raw_onkey(event, params):
+    """Interpret key presses"""
+    import matplotlib.pyplot as plt
+    if event.key == 'escape':
+        plt.close(params['fig'])
+    elif event.key == 'down':
+        params['ch_start'] += params['n_channels']
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'up':
+        params['ch_start'] -= params['n_channels']
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'right':
+        value = params['t_start'] + params['duration']
+        _plot_raw_time(value, params)
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == 'left':
+        value = params['t_start'] - params['duration']
+        _plot_raw_time(value, params)
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key in ['+', '=']:
+        params['scale_factor'] *= 1.1
+        params['plot_fun']()
+    elif event.key == '-':
+        params['scale_factor'] /= 1.1
+        params['plot_fun']()
+    elif event.key == 'pageup':
+        n_channels = params['n_channels'] + 1
+        offset = params['ax'].get_ylim()[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].set_yticks(params['offsets'])
+        params['vsel_patch'].set_height(n_channels)
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'pagedown':
+        n_channels = params['n_channels'] - 1
+        if n_channels == 0:
+            return
+        offset = params['ax'].get_ylim()[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].set_yticks(params['offsets'])
+        params['vsel_patch'].set_height(n_channels)
+        if len(params['lines']) > n_channels:  # remove line from view
+            params['lines'][n_channels].set_xdata([])
+            params['lines'][n_channels].set_ydata([])
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'home':
+        duration = params['duration'] - 1.0
+        if duration <= 0:
+            return
+        params['duration'] = duration
+        params['hsel_patch'].set_width(params['duration'])
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == 'end':
+        duration = params['duration'] + 1.0
+        if duration > params['raw'].times[-1]:
+            duration = params['raw'].times[-1]
+        params['duration'] = duration
+        params['hsel_patch'].set_width(params['duration'])
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == '?':
+        _onclick_help(event, params)
+    elif event.key == 'f11':
+        mng = plt.get_current_fig_manager()
+        mng.full_screen_toggle()
+
+
+def _mouse_click(event, params):
+    """Vertical select callback"""
+    if event.button != 1:
+        return
+    if event.inaxes is None:
+        if params['n_channels'] > 100:
+            return
+        ax = params['ax']
+        ylim = ax.get_ylim()
+        pos = ax.transData.inverted().transform((event.x, event.y))
+        if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
+            return
+        params['label_click_fun'](pos)
+    # vertical scrollbar changed
+    if event.inaxes == params['ax_vscroll']:
+        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+        if params['ch_start'] != ch_start:
+            params['ch_start'] = ch_start
+            params['plot_fun']()
+    # horizontal scrollbar changed
+    elif event.inaxes == params['ax_hscroll']:
+        _plot_raw_time(event.xdata - params['duration'] / 2, params)
+        params['update_fun']()
+        params['plot_fun']()
+
+    elif event.inaxes == params['ax']:
+        params['pick_bads_fun'](event)
+
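
[The transData.inverted() call above is the standard matplotlib way to map
display (pixel) coordinates back to data coordinates; generically:

    inv = ax.transData.inverted()
    x_data, y_data = inv.transform((400, 300))  # pixel -> data coordinates
]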
+
+def _select_bads(event, params, bads):
+    """Helper for selecting bad channels onpick. Returns updated bads list."""
+    # Trade-off: a wide pick band would select more than one channel when
+    # drifts are present, but for clean data clicks then land on flat
+    # segments rather than on peaks, so use mean +/- 2 * std of each trace.
+    def _band(x, op):
+        """Return one edge of the pick band: mean(x) op (2 * std(x))."""
+        return op(np.mean(x), x.std() * 2)
+    lines = event.inaxes.lines
+    for line in lines:
+        ydata = line.get_ydata()
+        if not isinstance(ydata, list) and not np.isnan(ydata).any():
+            ymin, ymax = _band(ydata, np.subtract), _band(ydata, np.add)
+            if ymin <= event.ydata <= ymax:
+                this_chan = vars(line)['ch_name']
+                if this_chan in params['info']['ch_names']:
+                    ch_idx = params['ch_start'] + lines.index(line)
+                    if this_chan not in bads:
+                        bads.append(this_chan)
+                        color = params['bad_color']
+                        line.set_zorder(-1)
+                    else:
+                        while this_chan in bads:
+                            bads.remove(this_chan)
+                        color = vars(line)['def_color']
+                        line.set_zorder(0)
+                    line.set_color(color)
+                    params['ax_vscroll'].patches[ch_idx].set_color(color)
+                    break
+    else:  # for-else: no trace was picked; draw a vertical line at the click
+        x = np.array([event.xdata] * 2)
+        params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
+        params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
+        params['vertline_t'].set_text('%0.3f' % x[0])
+    return bads
+
+
+def _onclick_help(event, params):
+    """Function for drawing help window"""
+    import matplotlib.pyplot as plt
+    text, text2 = _get_help_text(params)
+
+    width = 6
+    height = 5
+
+    fig_help = figure_nobar(figsize=(width, height), dpi=80)
+    fig_help.canvas.set_window_title('Help')
+    ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
+    ax.set_title('Keyboard shortcuts')
+    plt.axis('off')
+    ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
+    ax1.set_yticklabels(list())
+    plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
+             ha='right')
+    plt.axis('off')
+
+    ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
+    ax2.set_yticklabels(list())
+    plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
+    plt.axis('off')
+
+    tight_layout(fig=fig_help)
+    # this should work for non-test cases
+    try:
+        fig_help.canvas.draw()
+        fig_help.show()
+    except Exception:
+        pass
+
+
+class ClickableImage(object):
+
+    """
+    Display an image so you can click on it and store x/y positions.
+
+    Takes as input an image array (any array that works with imshow,
+    though it works best with actual images). Displays the image and
+    lets you click on it. Stores the x/y coordinates of each click, so
+    you can superimpose something on top of the image afterwards.
+
+    Upon clicking, the x/y coordinate of the cursor will be stored in
+    self.coords, which is a list of (x, y) tuples.
+
+    Parameters
+    ----------
+    imdata : ndarray
+        The image that you wish to click on for 2-d points.
+    **kwargs : dict
+        Keyword arguments. Passed to ax.imshow.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    """
+
+    def __init__(self, imdata, **kwargs):
+        """Display the image for clicking."""
+        from matplotlib.pyplot import figure, show
+        self.coords = []
+        self.imdata = imdata
+        self.fig = figure()
+        self.ax = self.fig.add_subplot(111)
+        self.ymax = self.imdata.shape[0]
+        self.xmax = self.imdata.shape[1]
+        self.im = self.ax.imshow(imdata, aspect='auto',
+                                 extent=(0, self.xmax, 0, self.ymax),
+                                 picker=True, **kwargs)
+        self.ax.axis('off')
+        self.fig.canvas.mpl_connect('pick_event', self.onclick)
+        show()
+
+    def onclick(self, event):
+        """Mouse click handler.
+
+        Parameters
+        ----------
+        event : matplotlib event object
+            The matplotlib object that we use to get x/y position.
+        """
+        mouseevent = event.mouseevent
+        self.coords.append((mouseevent.xdata, mouseevent.ydata))
+
+    def plot_clicks(self, **kwargs):
+        """Plot the x/y positions stored in self.coords.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Arguments are passed to imshow in displaying the bg image.
+        """
+        from matplotlib.pyplot import subplots, show
+        f, ax = subplots()
+        ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
+        xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
+        xcoords, ycoords = zip(*self.coords)
+        ax.scatter(xcoords, ycoords, c='r')
+        ann_text = np.arange(len(self.coords)).astype(str)
+        for txt, coord in zip(ann_text, self.coords):
+            ax.annotate(txt, coord, fontsize=20, color='r')
+        ax.set_xlim(xlim)
+        ax.set_ylim(ylim)
+        show()
+
+    def to_layout(self, **kwargs):
+        """Turn coordinates into an MNE Layout object.
+
+        Coordinates are normalized by the size of the image used to
+        generate the clicks.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Arguments are passed to generate_2d_layout
+        """
+        from mne.channels.layout import generate_2d_layout
+        coords = np.array(self.coords)
+        lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
+        return lt
+
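
[A hedged usage sketch; the image path is a placeholder, and the ctor
blocks on show() until the window is closed:

    from matplotlib.image import imread
    from mne.viz import ClickableImage  # exported from mne.viz in this tree

    im = imread('my_layout_photo.png')  # placeholder file
    click = ClickableImage(im)          # click the positions, then close
    print(click.coords)                 # list of (x, y) tuples
    layout = click.to_layout()          # MNE Layout built from the clicks
]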
+
+def _fake_click(fig, ax, point, xform='ax', button=1):
+    """Helper to fake a click at a relative point within axes."""
+    if xform == 'ax':
+        x, y = ax.transAxes.transform_point(point)
+    elif xform == 'data':
+        x, y = ax.transData.transform_point(point)
+    else:
+        raise ValueError('unknown transform')
+    try:
+        fig.canvas.button_press_event(x, y, button, False, None)
+    except Exception:  # for old MPL
+        fig.canvas.button_press_event(x, y, button, False)
+
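
[In the test suite this helper is called with relative axes coordinates,
e.g. a left click in the middle of the data axes:

    _fake_click(fig, fig.axes[0], (0.5, 0.5))                 # axes fraction
    _fake_click(fig, fig.axes[0], (1.0, 2.0), xform='data')   # data coords
]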
+
+def add_background_image(fig, im, set_ratios=None):
+    """Add a background image to a plot.
+
+    Adds the image specified in `im` to the
+    figure `fig`. This is generally meant to
+    be done with topo plots, though it could work
+    for any plot.
+
+    Note: This modifies the figure and/or axes
+    in place.
+
+    Parameters
+    ----------
+    fig : plt.figure
+        The figure you wish to add a bg image to.
+    im : ndarray
+        A numpy array that works with a call to
+        plt.imshow(im). This will be plotted
+        as the background of the figure.
+    set_ratios : None | str
+        Set the aspect ratio of any axes in fig
+        to the value in set_ratios. Defaults to None,
+        which does nothing to axes.
+
+    Returns
+    -------
+    ax_im : instance of matplotlib Axes
+        The axes created for the image you added.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    """
+    if set_ratios is not None:
+        for ax in fig.axes:
+            ax.set_aspect(set_ratios)
+
+    ax_im = fig.add_axes([0, 0, 1, 1])
+    ax_im.imshow(im, aspect='auto')
+    ax_im.set_zorder(-1)
+    return ax_im
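
[Sketch of intended use, assuming add_background_image is importable from
mne.viz as in this tree; the image path is hypothetical:

    import matplotlib.pyplot as plt
    from matplotlib.image import imread
    from mne.viz import add_background_image

    fig = plt.figure()
    fig.add_subplot(111).plot([0, 1], [0, 1])
    ax_im = add_background_image(fig, imread('background.png'))
    plt.show()
]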
diff --git a/debian/python-mne/usr/share/doc/python-mne/AUTHORS.rst b/debian/python-mne/usr/share/doc/python-mne/AUTHORS.rst
new file mode 100644
index 0000000..90a4e7e
--- /dev/null
+++ b/debian/python-mne/usr/share/doc/python-mne/AUTHORS.rst
@@ -0,0 +1,19 @@
+.. -*- mode: rst -*-
+
+Authors
+=======
+
+  * Alexandre Gramfort 2011-2013
+  * Matti Hamalainen 2011-2013
+  * Emily Ruzich 2011
+  * Martin Luessi 2011-2013
+  * Christian Brodbeck 2012-2013
+  * Louis Thibault 2012
+  * Eric Larson 2012-2013
+  * Denis A. Engemann 2012-2013
+  * Daniel Strohmeier 2012
+  * Brad Buran 2013
+  * Simon Kornblith 2013
+  * Mainak Jas 2013
+  * Roman Goj 2013
+  * Teon Brooks 2013
diff --git a/debian/python-mne/usr/share/doc/python-mne/README.rst.gz b/debian/python-mne/usr/share/doc/python-mne/README.rst.gz
new file mode 100644
index 0000000..ee7d199
Binary files /dev/null and b/debian/python-mne/usr/share/doc/python-mne/README.rst.gz differ
diff --git a/debian/python-mne/usr/share/doc/python-mne/changelog.Debian.gz b/debian/python-mne/usr/share/doc/python-mne/changelog.Debian.gz
new file mode 100644
index 0000000..4188a13
Binary files /dev/null and b/debian/python-mne/usr/share/doc/python-mne/changelog.Debian.gz differ
diff --git a/debian/python-mne/usr/share/doc/python-mne/copyright b/debian/python-mne/usr/share/doc/python-mne/copyright
new file mode 100644
index 0000000..c608176
--- /dev/null
+++ b/debian/python-mne/usr/share/doc/python-mne/copyright
@@ -0,0 +1,150 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: MNE-Python
+Upstream-Contact: Alexandre Gramfort <alexandre.gramfort at m4x.org>
+Source: http://github.com/mne-tools/mne-python
+Files-Excluded: mne/html/*.min.js
+Comment: File AppEULA.rst refers to code that is documented in this package but not shipped with it.
+
+Files: *
+Copyright: 2010-2014, MNE-Python Developers
+License: BSD-3
+
+Files: doc/sphinxext/*
+Copyright: 2008, Stefan van der Walt <stefan at mentat.za.net>, Pauli Virtanen <pav at iki.fi>
+License: BSD-2
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+
+Files: debian/*
+Copyright: 2010-2013, Yaroslav Halchenko <debian at onerussian.com>, Alexandre Gramfort <alexandre.gramfort at m4x.org>
+License: BSD-3
+
+Files: mne/externals/decorator.py
+Copyright: 2005-2012, Michele Simionato
+License: BSD
+
+Files: mne/externals/Fieldtrip.py
+Copyright: 2008, Robert Oostenveld, S. Klanke
+  F.C. Donders Centre for Cognitive Neuroimaging,
+  Radboud University Nijmegen, The Netherlands.
+  http://www.ru.nl/fcdonders/fieldtrip
+License: BSD-3
+
+Files: mne/externals/jdcal.py
+Copyright: 2010, Prasanth Nair, <prasanthhn at gmail.com>
+License: BSD
+
+Files: mne/externals/six.py
+Copyright: 2010-2013 Benjamin Peterson
+License: MIT
+
+Files: mne/externals/tempita/*
+Copyright: 2008 Ian Bicking and Contributors
+License: MIT
+
+Files: debian/JS/bootstrap
+Copyright: 2011-2014 Twitter, Inc.
+License: MIT
+
+Files: mne/html/bootstrap*
+Copyright: 2013 Twitter, Inc.
+License: Apache
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+   http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems you can find the full text of the Apache 2 license
+ at /usr/share/common-licenses/Apache-2.0
+
+Files: mne/html/jquery*
+Copyright: 2005, 2013 jQuery Foundation, Inc.
+License: MIT
+
+Files: mne/html/d3*
+Copyright: 2013 Mike Bostock.
+License: BSD-3
+
+Files: mne/html/mpld3*
+Copyright: 2013, Jake Vanderplas
+License: BSD-3
+
+License: BSD
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+  .
+  Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+  Redistributions in bytecode form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in
+  the documentation and/or other materials provided with the
+  distribution.
+  .
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+  DAMAGE.
+
+License: BSD-3
+  Redistribution and use in source and binary forms, with or without modification,
+  are permitted provided that the following conditions are met:
+  .
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of the MNE Developers nor the names of
+     its contributors may be used to endorse or promote products
+     derived from this software without specific prior written
+     permission.
+  .
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+  ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License: MIT
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to deal
+  in the Software without restriction, including without limitation the rights
+  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+  .
+  The above copyright notice and this permission notice shall be included in all
+  copies or substantial portions of the Software.
+  .
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  SOFTWARE.
diff --git a/debian/python-mne/usr/share/man/man1/mne.1.gz b/debian/python-mne/usr/share/man/man1/mne.1.gz
new file mode 100644
index 0000000..8a9560b
Binary files /dev/null and b/debian/python-mne/usr/share/man/man1/mne.1.gz differ
diff --git a/debian/rules b/debian/rules
index cc3e3f3..920b4cd 100755
--- a/debian/rules
+++ b/debian/rules
@@ -13,6 +13,7 @@ override_dh_clean:
 override_dh_auto_build:
 	dh_auto_build
 	yui-compressor debian/JS/bootstrap/bootstrap.js > $(CURDIR)/mne/html/bootstrap.min.js
+	echo -e "\n" >> $(CURDIR)/mne/html/bootstrap.min.js
 
 override_dh_auto_test:
 	MNE_SKIP_SAMPLE_DATASET_TESTS=true MNE_FORCE_SERIAL=true MNE_SKIP_NETWORK_TESTS=1 \

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git


