[med-svn] [python-mne] 208/376: ENH : source_induced_power and SourceEstimate class + viz source WIP

Yaroslav Halchenko debian at onerussian.com
Fri Nov 27 17:22:39 UTC 2015


This is an automated email from the git hooks/post-receive script.

yoh pushed a commit to annotated tag v0.1
in repository python-mne.

commit 35c7ba14cb0fba1b0933217b9bd07940ad75896d
Author: Alexandre Gramfort <alexandre.gramfort at inria.fr>
Date:   Tue Apr 19 13:17:06 2011 -0400

    ENH : source_induced_power and SourceEstimate class + viz source WIP
---
 examples/plot_compute_mne_inverse.py               |  18 +--
 examples/plot_minimum_norm_estimate.py             |  17 +--
 examples/plot_read_stc.py                          |  13 +-
 .../plot_source_space_time_frequency.py            |  53 +++++++
 mne/__init__.py                                    |   3 +-
 mne/minimum_norm/__init__.py                       |   2 +
 mne/{ => minimum_norm}/inverse.py                  |  56 ++++----
 mne/minimum_norm/tests/__init__.py                 |   0
 mne/{ => minimum_norm}/tests/test_inverse.py       |  29 ++--
 mne/minimum_norm/time_frequency.py                 | 153 +++++++++++++++++++++
 mne/stc.py                                         |  53 +++++++
 mne/viz.py                                         |  49 +++++++
 12 files changed, 370 insertions(+), 76 deletions(-)
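
In short: the inverse routines move into a new mne.minimum_norm subpackage, mne/stc.py gains a
SourceEstimate container returned by apply_inverse()/minimum_norm(), source_induced_power()
computes per-band induced power in source space, and mne/viz.py gets a work-in-progress
plot_sources() helper. A minimal sketch of the reworked example flow (the file names and setno
value are illustrative, taken from the sample-data examples below):

    import mne
    from mne.fiff import Evoked
    from mne.minimum_norm import read_inverse_operator, apply_inverse

    evoked = Evoked('sample_audvis-ave.fif', setno=0, baseline=(None, 0))
    inverse_operator = read_inverse_operator('sample_audvis-meg-oct-6-meg-inv.fif')

    # apply_inverse now returns a SourceEstimate instead of a dict
    stc = apply_inverse(evoked, inverse_operator, lambda2=1.0 / 9.0, dSPM=True)
    stc.save('mne_dSPM_inverse')  # writes mne_dSPM_inverse-lh.stc and -rh.stc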

diff --git a/examples/plot_compute_mne_inverse.py b/examples/plot_compute_mne_inverse.py
index 7c1fbbf..48f9be3 100755
--- a/examples/plot_compute_mne_inverse.py
+++ b/examples/plot_compute_mne_inverse.py
@@ -19,6 +19,7 @@ import pylab as pl
 import mne
 from mne.datasets import sample
 from mne.fiff import Evoked
+from mne.minimum_norm import apply_inverse, read_inverse_operator
 
 data_path = sample.data_path('.')
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -31,26 +32,17 @@ dSPM = True
 
 # Load data
 evoked = Evoked(fname_evoked, setno=setno, baseline=(None, 0))
-inverse_operator = mne.read_inverse_operator(fname_inv)
+inverse_operator = read_inverse_operator(fname_inv)
 
 # Compute inverse solution
-res = mne.apply_inverse(evoked, inverse_operator, lambda2, dSPM)
+stc = apply_inverse(evoked, inverse_operator, lambda2, dSPM)
 
 # Save result in stc files
-lh_vertices = res['inv']['src'][0]['vertno']
-rh_vertices = res['inv']['src'][1]['vertno']
-lh_data = res['sol'][:len(lh_vertices)]
-rh_data = res['sol'][-len(rh_vertices):]
-
-mne.write_stc('mne_dSPM_inverse-lh.stc', tmin=res['tmin'], tstep=res['tstep'],
-               vertices=lh_vertices, data=lh_data)
-mne.write_stc('mne_dSPM_inverse-rh.stc', tmin=res['tmin'], tstep=res['tstep'],
-               vertices=rh_vertices, data=rh_data)
+stc.save('mne_dSPM_inverse')
 
 ###############################################################################
 # View activation time-series
-times = res['tmin'] + res['tstep'] * np.arange(lh_data.shape[1])
-pl.plot(1e3 * times, res['sol'][::100, :].T)
+pl.plot(1e3 * stc.times, stc.data[::100, :].T)
 pl.xlabel('time (ms)')
 pl.ylabel('dSPM value')
 pl.show()
diff --git a/examples/plot_minimum_norm_estimate.py b/examples/plot_minimum_norm_estimate.py
index 6316017..73a8756 100644
--- a/examples/plot_minimum_norm_estimate.py
+++ b/examples/plot_minimum_norm_estimate.py
@@ -14,11 +14,11 @@ and stores the solution in stc files for visualisation.
 
 print __doc__
 
-import numpy as np
 import pylab as pl
 import mne
 from mne.datasets import sample
 from mne.fiff import Evoked
+from mne.minimum_norm import minimum_norm
 
 data_path = sample.data_path('.')
 fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
@@ -39,25 +39,16 @@ noise_cov = mne.Covariance(fname_cov)
 whitener = noise_cov.get_whitener(evoked.info, mag_reg=0.1,
                                   grad_reg=0.1, eeg_reg=0.1, pca=True)
 # Compute inverse solution
-stc, K, W = mne.minimum_norm(evoked, forward, whitener, orientation='loose',
+stc = minimum_norm(evoked, forward, whitener, orientation='loose',
                              method='dspm', snr=3, loose=0.2)
 
 # Save result in stc files
-lh_vertices = stc['inv']['src'][0]['vertno']
-rh_vertices = stc['inv']['src'][1]['vertno']
-lh_data = stc['sol'][:len(lh_vertices)]
-rh_data = stc['sol'][-len(rh_vertices):]
-
-mne.write_stc('mne_dSPM_inverse-lh.stc', tmin=stc['tmin'], tstep=stc['tstep'],
-            vertices=lh_vertices, data=lh_data)
-mne.write_stc('mne_dSPM_inverse-rh.stc', tmin=stc['tmin'], tstep=stc['tstep'],
-            vertices=rh_vertices, data=rh_data)
+stc.save('mne_dSPM_inverse')
 
 ###############################################################################
 # View activation time-series
-times = stc['tmin'] + stc['tstep'] * np.arange(stc['sol'].shape[1])
 pl.close('all')
-pl.plot(1e3 * times, stc['sol'][::100, :].T)
+pl.plot(1e3 * stc.times, stc.data[::100, :].T)
 pl.xlabel('time (ms)')
 pl.ylabel('dSPM value')
 pl.show()
diff --git a/examples/plot_read_stc.py b/examples/plot_read_stc.py
index 4c58577..a10a7ff 100755
--- a/examples/plot_read_stc.py
+++ b/examples/plot_read_stc.py
@@ -12,26 +12,21 @@ reconstructions
 
 print __doc__
 
-import numpy as np
 import mne
 from mne.datasets import sample
 
 data_path = sample.data_path('.')
-fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
+fname = data_path + '/MEG/sample/sample_audvis-meg'
 
-stc = mne.read_stc(fname)
+stc = mne.SourceEstimate(fname)
 
-n_vertices, n_samples = stc['data'].shape
-print "tmin : %s (s)" % stc['tmin']
-print "tstep : %s" % stc['tstep']
-print "tmax : %s (s)" % (stc['tmin'] + stc['tstep'] * n_samples)
+n_vertices, n_samples = stc.data.shape
 print "stc data size: %s (nb of vertices) x %s (nb of samples)" % (
                                                     n_vertices, n_samples)
 
 # View source activations
-times = stc['tmin'] + stc['tstep'] * np.arange(n_samples)
 import pylab as pl
-pl.plot(times, stc['data'][::100, :].T)
+pl.plot(1e3 * stc.times, stc.data[::100, :].T)
 pl.xlabel('time (ms)')
 pl.ylabel('Source amplitude')
 pl.show()
diff --git a/examples/time_frequency/plot_source_space_time_frequency.py b/examples/time_frequency/plot_source_space_time_frequency.py
new file mode 100644
index 0000000..3e04be1
--- /dev/null
+++ b/examples/time_frequency/plot_source_space_time_frequency.py
@@ -0,0 +1,53 @@
+"""
+===================================================
+Compute induced power in the source space with dSPM
+===================================================
+
+Compute induced power in a frequency band in source space with a dSPM inverse operator and store the result in stc files.
+
+"""
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import mne
+from mne import fiff
+from mne.datasets import sample
+from mne.minimum_norm import read_inverse_operator, source_induced_power
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path('..')
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+tmin, tmax, event_id = -0.2, 0.5, 1
+
+# Setup for reading the raw data
+raw = fiff.Raw(raw_fname)
+events = mne.find_events(raw)
+inverse_operator = read_inverse_operator(fname_inv)
+
+include = []
+exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+# picks MEG gradiometers
+picks = fiff.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                                stim=False, include=include, exclude=exclude)
+
+# Load condition 1
+event_id = 1
+# events = events[:5]
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
+
+# Compute a source estimate per frequency band
+# bands = dict(alpha=[9, 12], beta=[13, 25])
+bands = dict(alpha=[8, 9])
+stcs = source_induced_power(epochs, inverse_operator, bands, n_cycles=2,
+                            use_fft=False)
+
+for b, stc in stcs.iteritems():
+    stc.save('induced_power_%s' % b)
diff --git a/mne/__init__.py b/mne/__init__.py
index 47aba8d..289ef1d 100755
--- a/mne/__init__.py
+++ b/mne/__init__.py
@@ -4,10 +4,9 @@ from .cov import read_cov, write_cov, write_cov_file, Covariance, \
                  compute_raw_data_covariance, compute_covariance
 from .event import read_events, write_events, find_events
 from .forward import read_forward_solution
-from .stc import read_stc, write_stc
+from .stc import read_stc, write_stc, SourceEstimate
 from .bem_surfaces import read_bem_surfaces
 from .source_space import read_source_spaces
-from .inverse import read_inverse_operator, apply_inverse, minimum_norm
 from .epochs import Epochs
 from .label import label_time_courses, read_label
 from .misc import parse_config, read_reject_parameters
diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py
new file mode 100644
index 0000000..1f8924b
--- /dev/null
+++ b/mne/minimum_norm/__init__.py
@@ -0,0 +1,2 @@
+from .inverse import read_inverse_operator, apply_inverse, minimum_norm
+from .time_frequency import source_induced_power
diff --git a/mne/inverse.py b/mne/minimum_norm/inverse.py
similarity index 95%
rename from mne/inverse.py
rename to mne/minimum_norm/inverse.py
index a7a8a2a..5e10929 100755
--- a/mne/inverse.py
+++ b/mne/minimum_norm/inverse.py
@@ -8,18 +8,19 @@ from math import sqrt
 import numpy as np
 from scipy import linalg
 
-from .fiff.constants import FIFF
-from .fiff.open import fiff_open
-from .fiff.tag import find_tag
-from .fiff.matrix import _read_named_matrix, _transpose_named_matrix
-from .fiff.proj import read_proj, make_projector
-from .fiff.tree import dir_tree_find
-from .fiff.pick import pick_channels_evoked
+from ..fiff.constants import FIFF
+from ..fiff.open import fiff_open
+from ..fiff.tag import find_tag
+from ..fiff.matrix import _read_named_matrix, _transpose_named_matrix
+from ..fiff.proj import read_proj, make_projector
+from ..fiff.tree import dir_tree_find
+from ..fiff.pick import pick_channels_evoked
 
-from .cov import read_cov
-from .source_space import read_source_spaces_from_tree, find_source_space_hemi
-from .forward import _block_diag
-from .transforms import invert_transform, transform_source_space_to
+from ..cov import read_cov
+from ..source_space import read_source_spaces_from_tree, find_source_space_hemi
+from ..forward import _block_diag
+from ..transforms import invert_transform, transform_source_space_to
+from ..stc import SourceEstimate
 
 
 def read_inverse_operator(fname):
@@ -480,14 +481,17 @@ def apply_inverse(evoked, inverse_operator, lambda2, dSPM=True):
         print '(dSPM)...',
         sol *= inv['noisenorm'][:, None]
 
-    res = dict()
-    res['inv'] = inv
-    res['sol'] = sol
-    res['tmin'] = float(evoked.first) / evoked.info['sfreq']
-    res['tstep'] = 1.0 / evoked.info['sfreq']
+    src = inv['src']
+    stc = SourceEstimate(None)
+    stc.data = sol
+    stc.tmin = float(evoked.first) / evoked.info['sfreq']
+    stc.tstep = 1.0 / evoked.info['sfreq']
+    stc.lh_vertno = src[0]['vertno']
+    stc.rh_vertno = src[1]['vertno']
+    stc._init_times()
     print '[done]'
 
-    return res
+    return stc
 
 
 def _xyz2lf(Lf_xyz, normals):
@@ -740,14 +744,14 @@ def minimum_norm(evoked, forward, whitener, method='dspm',
         print 'combining the current components...',
         sol = combine_xyz(sol)
 
-    stc = dict()
-    stc['inv'] = dict()
-    stc['inv']['src'] = forward['src']
-    # stc['vertices'] = np.concatenate(forward['src'][0]['vertno'],
-    #                                  forward['src'][1]['vertno'])
-    stc['sol'] = sol
-    stc['tmin'] = float(evoked.first) / evoked.info['sfreq']
-    stc['tstep'] = 1.0 / evoked.info['sfreq']
+    src = forward['src']
+    stc = SourceEstimate(None)
+    stc.data = sol
+    stc.tmin = float(evoked.first) / evoked.info['sfreq']
+    stc.tstep = 1.0 / evoked.info['sfreq']
+    stc.lh_vertno = src[0]['vertno']
+    stc.rh_vertno = src[1]['vertno']
+    stc._init_times()
     print '[done]'
 
-    return stc, Kernel, W
+    return stc
diff --git a/mne/minimum_norm/tests/__init__.py b/mne/minimum_norm/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mne/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
similarity index 56%
rename from mne/tests/test_inverse.py
rename to mne/minimum_norm/tests/test_inverse.py
index d6753b0..ce3ce2a 100755
--- a/mne/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -3,10 +3,12 @@ import os.path as op
 import numpy as np
 # from numpy.testing import assert_array_almost_equal, assert_equal
 
-import mne
-from mne.datasets import sample
+from ...datasets import sample
+from ... import fiff, Covariance, read_forward_solution
+from ..inverse import minimum_norm, apply_inverse, read_inverse_operator
 
-examples_folder = op.join(op.dirname(__file__), '..', '..', 'examples')
+
+examples_folder = op.join(op.dirname(__file__), '..', '..', '..', 'examples')
 data_path = sample.data_path(examples_folder)
 fname_inv = op.join(data_path, 'MEG', 'sample',
                                         'sample_audvis-meg-oct-6-meg-inv.fif')
@@ -27,13 +29,13 @@ def test_apply_mne_inverse_operator():
     lambda2 = 1.0 / snr ** 2
     dSPM = True
 
-    evoked = mne.fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
-    inverse_operator = mne.read_inverse_operator(fname_inv)
+    evoked = fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
+    inverse_operator = read_inverse_operator(fname_inv)
 
-    res = mne.apply_inverse(evoked, inverse_operator, lambda2, dSPM)
+    stc = apply_inverse(evoked, inverse_operator, lambda2, dSPM)
 
-    assert np.all(res['sol'] > 0)
-    assert np.all(res['sol'] < 35)
+    assert np.all(stc.data > 0)
+    assert np.all(stc.data < 35)
 
 
 def test_compute_minimum_norm():
@@ -41,12 +43,13 @@ def test_compute_minimum_norm():
     """
 
     setno = 0
-    noise_cov = mne.Covariance(fname_cov)
-    forward = mne.read_forward_solution(fname_fwd)
-    evoked = mne.fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
+    noise_cov = Covariance(fname_cov)
+    forward = read_forward_solution(fname_fwd)
+    evoked = fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
     whitener = noise_cov.get_whitener(evoked.info, mag_reg=0.1,
                                       grad_reg=0.1, eeg_reg=0.1, pca=True)
-    stc, K, W = mne.minimum_norm(evoked, forward, whitener,
-                        orientation='loose', method='dspm', snr=3, loose=0.2)
+    stc = minimum_norm(evoked, forward, whitener,
+                       orientation='loose', method='dspm', snr=3, loose=0.2)
 
+    assert np.all(stc.data > 0)
     # XXX : test something
diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py
new file mode 100644
index 0000000..9998bda
--- /dev/null
+++ b/mne/minimum_norm/time_frequency.py
@@ -0,0 +1,153 @@
+# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from ..fiff.constants import FIFF
+from ..stc import SourceEstimate
+from ..time_frequency.tfr import cwt, morlet
+from .inverse import combine_xyz, prepare_inverse_operator
+
+
+def source_induced_power(epochs, inverse_operator, bands, lambda2=1.0 / 9.0,
+                         dSPM=True, n_cycles=5, df=1, use_fft=False,
+                         baseline=None, baseline_mode='logratio'):
+    """XXX for source_induced_power
+
+    Parameters
+    ----------
+    baseline: None (default) or tuple of length 2
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None), the entire time
+        interval is used.
+    baseline_mode : None | 'logratio' | 'zscore'
+        Do baseline correction with log ratio (power is divided by the mean
+        power during baseline, then log-transformed) or zscore (power is
+        divided by the standard deviation of power during baseline after
+        subtracting the mean: [power - mean(power_bl)] / std(power_bl))
+
+
+    """
+
+    #
+    #   Set up the inverse according to the parameters
+    #
+    epochs_data = epochs.get_data()
+    nave = len(epochs_data)  # XXX : can do better when no preload
+
+    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, dSPM)
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = [epochs.ch_names.index(name) for name in inv['noise_cov']['names']]
+    print 'Picked %d channels from the data' % len(sel)
+    print 'Computing inverse...',
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K = inv['reginv'][:, None] * reduce(np.dot,
+                                           [inv['eigen_fields']['data'],
+                                           inv['whitener'],
+                                           inv['proj']])
+    #
+    #   Transformation into current distributions by weighting the
+    #   eigenleads with the weights computed above
+    #
+    if inv['eigen_leads_weighted']:
+        #
+        #     R^0.5 has been already factored in
+        #
+        # print '(eigenleads already weighted)...',
+        K = np.dot(inv['eigen_leads']['data'], K)
+    else:
+        #
+        #     R^0.5 has to be factored in
+        #
+        # print '(eigenleads need to be weighted)...',
+        K = np.sqrt(inv['source_cov']['data'])[:, None] * \
+                             np.dot(inv['eigen_leads']['data'], K)
+
+    Fs = epochs.info['sfreq']  # sampling in Hz
+
+    stcs = dict()
+    src = inv['src']
+    n_times = len(epochs.times)
+
+    for name, band in bands.iteritems():
+        print 'Computing power in band %s [%s, %s] Hz...' % (name, band[0],
+                                                             band[1])
+
+        freqs = np.arange(band[0], band[1] + df / 2.0, df)  # frequencies
+        n_freqs = len(freqs)
+        Ws = morlet(Fs, freqs, n_cycles=n_cycles)
+
+        power = 0
+
+        for e in epochs_data:
+            e = e[sel]  # keep only selected channels
+            tfr = cwt(e, Ws, use_fft=use_fft)
+            tfr = tfr.reshape(len(e), -1)
+
+            sol = np.dot(K, tfr)
+
+            if inv['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:
+                # print 'combining the current components...',
+                sol = combine_xyz(sol)
+
+            if dSPM:
+                # print '(dSPM)...',
+                sol *= inv['noisenorm'][:, None]
+
+            # average power in band
+            sol = np.mean(np.reshape(sol ** 2, (-1, n_freqs, n_times)), axis=1)
+            power += sol
+            del sol
+
+        power /= len(epochs_data)
+
+        # Run baseline correction
+        if baseline is not None:
+            print "Applying baseline correction ..."
+            times = epochs.times
+            bmin = baseline[0]
+            bmax = baseline[1]
+            if bmin is None:
+                imin = 0
+            else:
+                imin = int(np.where(times >= bmin)[0][0])
+            if bmax is None:
+                imax = len(times)
+            else:
+                imax = int(np.where(times <= bmax)[0][-1]) + 1
+            mean_baseline_power = np.mean(power[:, imin:imax], axis=1)
+            if baseline_mode == 'logratio':
+                power /= mean_baseline_power[:, None]
+                power = np.log(power)
+            elif baseline_mode == 'zscore':
+                power -= mean_baseline_power[:, None]
+                power /= np.std(power[:, imin:imax], axis=1)[:, None]
+        else:
+            print "No baseline correction applied..."
+
+        stc = SourceEstimate(None)
+        stc.data = power
+        stc.tmin = epochs.times[0]
+        stc.tstep = 1.0 / Fs
+        stc.lh_vertno = src[0]['vertno']
+        stc.rh_vertno = src[1]['vertno']
+        stc._init_times()
+
+        stcs[name] = stc
+
+        print '[done]'
+
+    return stcs
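
Note: the two baseline_mode corrections above amount to the following on a plain NumPy array
(a standalone sketch with made-up values, not part of the commit):

    import numpy as np

    power = 1.0 + np.random.rand(10, 100)   # n_sources x n_times, made-up values
    imin, imax = 0, 20                       # baseline samples (e.g. the pre-stimulus period)
    mean_bl = np.mean(power[:, imin:imax], axis=1)

    # 'logratio': divide by the mean baseline power, then take the log
    logratio = np.log(power / mean_bl[:, None])

    # 'zscore': subtract the baseline mean, divide by the std over the baseline window
    centered = power - mean_bl[:, None]
    zscore = centered / np.std(centered[:, imin:imax], axis=1)[:, None]
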
diff --git a/mne/stc.py b/mne/stc.py
index 129a1ae..e698418 100755
--- a/mne/stc.py
+++ b/mne/stc.py
@@ -98,3 +98,56 @@ def write_stc(filename, tmin, tstep, vertices, data):
 
     # close the file
     fid.close()
+
+
+class SourceEstimate(object):
+    """SourceEstimate container
+
+    Can be saved and loaded from .stc files
+
+    Attributes
+    ----------
+    data : array of shape [n_dipoles x n_times]
+        The data in source space
+    times : array of shape [n_times]
+        The time vector
+    lh_vertno : array of shape [n_dipoles in left hemisphere]
+        The indices of the dipoles in the left hemisphere
+    rh_vertno : array of shape [n_dipoles in right hemisphere]
+        The indices of the dipoles in the right hemisphere
+    """
+    def __init__(self, fname):
+        if fname is not None:
+            lh = read_stc(fname + '-lh.stc')
+            rh = read_stc(fname + '-rh.stc')
+            self.data = np.r_[lh['data'], rh['data']]
+            assert lh['tmin'] == rh['tmin']
+            assert lh['tstep'] == rh['tstep']
+            self.tmin = lh['tmin']
+            self.tstep = lh['tstep']
+            self.times = self.tmin + self.tstep * np.arange(self.data.shape[1])
+            self.lh_vertno = lh['vertices']
+            self.rh_vertno = rh['vertices']
+
+    def _init_times(self):
+        """create self.times"""
+        self.times = self.tmin + self.tstep * np.arange(self.data.shape[1])
+
+    def save(self, fname):
+        """save to source estimates to file"""
+        lh_data = self.data[:len(self.lh_vertno)]
+        rh_data = self.data[-len(self.rh_vertno):]
+
+        print 'Writing STC to disk...',
+        write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
+                       vertices=self.lh_vertno, data=lh_data)
+        write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
+                       vertices=self.rh_vertno, data=rh_data)
+        print '[done]'
+
+    # def view(self, src, t, n_smooth=200, colorbar=True):
+    #     """View in source space
+    #     """
+    #     idx = np.where(evoked.times > 1e-3*t)[0][0]
+    #     plot_sources(src, self.data[:,idx], text='%d ms' % t,
+    #                  colorbar=colorbar, n_smooth=n_smooth)
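
For reference, a small usage sketch of the new container (the file stem is illustrative;
SourceEstimate appends -lh.stc / -rh.stc itself):

    import mne

    stc = mne.SourceEstimate('sample_audvis-meg')  # reads sample_audvis-meg-lh.stc / -rh.stc
    print stc.data.shape                           # (n_dipoles, n_times)
    print stc.times[0], stc.times[-1]              # in seconds, built from tmin and tstep
    stc.save('stc_copy')                           # writes stc_copy-lh.stc and stc_copy-rh.stc
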
diff --git a/mne/viz.py b/mne/viz.py
index 943cefe..01a2325 100755
--- a/mne/viz.py
+++ b/mne/viz.py
@@ -5,6 +5,7 @@
 #
 # License: Simplified BSD
 
+import numpy as np
 import pylab as pl
 from .fiff.pick import channel_type
 
@@ -75,3 +76,51 @@ def plot_evoked(evoked, picks=None, unit=True, show=True):
     pl.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
     if show:
         pl.show()
+
+
+def plot_sources(src, data, text=None, n_smooth=200, colorbar=True,
+                 cmap="jet"):
+    """Source space data
+    """
+    from enthought.mayavi import mlab
+    from enthought.tvtk.api import tvtk
+    lh_points = src[0]['rr']
+    rh_points = src[1]['rr']
+    # lh_faces = src[0]['tris']
+    # rh_faces = src[1]['tris']
+    lh_faces = src[0]['use_tris']
+    rh_faces = src[1]['use_tris']
+    points = np.r_[lh_points, rh_points]
+    points *= 200
+    faces = np.r_[lh_faces, lh_points.shape[0] + rh_faces]
+
+    lh_idx = np.where(src[0]['inuse'])[0]
+    rh_idx = np.where(src[1]['inuse'])[0]
+    use_idx = np.r_[lh_idx, lh_points.shape[0] + rh_idx]
+
+    points = points[use_idx]
+    faces = np.searchsorted(use_idx, faces)
+
+    mlab.test_quiver3d()
+    mlab.clf()
+    mlab.options.offscreen = True
+    f = mlab.figure(512, bgcolor=(.05, 0, .1), size=(800, 800))
+    mlab.clf()
+    f.scene.disable_render = True
+    surface_mesh = mlab.pipeline.triangular_mesh_source(points[:, 0],
+                                    points[:, 1], points[:, 2], faces,
+                                    scalars=data)
+    smooth_ = tvtk.SmoothPolyDataFilter(number_of_iterations=n_smooth,
+                                        relaxation_factor=0.18,
+                                        feature_angle=70,
+                                        feature_edge_smoothing=False,
+                                        boundary_smoothing=False,
+                                        convergence=0.)
+    surface_mesh_smooth = mlab.pipeline.user_defined(surface_mesh,
+                                                     filter=smooth_)
+    mlab.pipeline.surface(surface_mesh_smooth, colormap=cmap)
+    bar = mlab.scalarbar()
+    if text is not None:
+        mlab.text(0.7, 0.9, text, width=0.2)
+    if not colorbar:
+        bar.visible = False
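
plot_sources() needs mayavi (enthought.mayavi) installed. A hedged sketch of a call, assuming an
inverse operator and a SourceEstimate obtained as in the examples above (the time index is
arbitrary):

    from mne.viz import plot_sources

    src = inverse_operator['src']   # [left, right] hemisphere source spaces
    data = stc.data[:, 100]         # one scalar per source at an arbitrary time sample
    plot_sources(src, data, text='sample 100', n_smooth=200, colorbar=True)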

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git


