[med-svn] [python-mne] 347/376: ENH : adding support to assemble inverse operator in Python and removing old version

Yaroslav Halchenko debian at onerussian.com
Fri Nov 27 17:23:19 UTC 2015


This is an automated email from the git hooks/post-receive script.

yoh pushed a commit to annotated tag v0.1
in repository python-mne.

commit 45600a4562ce0681f1e108d2e0601afeb4c2e3d9
Author: Alexandre Gramfort <alexandre.gramfort at inria.fr>
Date:   Tue Sep 13 16:40:03 2011 -0400

    ENH : adding support to assemble inverse operator in Python and removing old version
---
 examples/inverse/plot_make_inverse_operator.py |  53 ++++++
 mne/cov.py                                     | 237 +++++++----------------
 mne/forward.py                                 | 101 +++++-----
 mne/minimum_norm/__init__.py                   |   4 +-
 mne/minimum_norm/inverse.py                    | 253 +++++++++----------------
 mne/minimum_norm/tests/test_inverse.py         |  42 ++--
 6 files changed, 299 insertions(+), 391 deletions(-)
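
In short, the one-shot minimum_norm() entry point is replaced by a two-step
assemble/apply workflow. A minimal sketch of the new usage, mirroring the
example and test added below (file names and parameter values here are
illustrative; lambda2 = 1 / SNR**2 takes over the role of the old snr
argument):

    import mne
    from mne.fiff import Evoked
    from mne.minimum_norm import make_inverse_operator, apply_inverse

    evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
    # surf_ori=True is needed when a loose orientation prior is used
    fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
    noise_cov = mne.Covariance(fname_cov)

    inv = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
    lambda2 = 1.0 / 3.0 ** 2   # regularization for an assumed SNR of 3
    stc = apply_inverse(evoked, inv, lambda2, True)  # True -> dSPM scaling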

diff --git a/examples/inverse/plot_make_inverse_operator.py b/examples/inverse/plot_make_inverse_operator.py
new file mode 100644
index 0000000..c5bab4d
--- /dev/null
+++ b/examples/inverse/plot_make_inverse_operator.py
@@ -0,0 +1,53 @@
+"""
+===============================================================
+Assemble inverse operator and compute MNE-dSPM inverse solution
+===============================================================
+
+Assemble an EEG inverse operator and compute a dSPM inverse solution
+on the MNE sample evoked dataset, storing the solution in stc files
+for visualisation.
+
+"""
+
+# Author: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+print __doc__
+
+import pylab as pl
+import mne
+from mne.datasets import sample
+from mne.fiff import Evoked
+from mne.minimum_norm import make_inverse_operator, apply_inverse
+
+data_path = sample.data_path('..')
+fname_fwd = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
+
+setno = 0
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+dSPM = True
+
+# Load data
+evoked = Evoked(fname_evoked, setno=setno, baseline=(None, 0))
+forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
+noise_cov = mne.Covariance(fname_cov)
+inverse_operator = make_inverse_operator(evoked.info, forward, noise_cov,
+                                         loose=0.2, depth=0.8)
+
+# Compute inverse solution
+stc = apply_inverse(evoked, inverse_operator, lambda2, dSPM, pick_normal=False)
+
+# Save result in stc files
+stc.save('mne_dSPM_inverse')
+
+###############################################################################
+# View activation time-series
+pl.close('all')
+pl.plot(1e3 * stc.times, stc.data[::150, :].T)
+pl.xlabel('time (ms)')
+pl.ylabel('dSPM value')
+pl.show()
diff --git a/mne/cov.py b/mne/cov.py
index 9fd1215..e5f7a48 100644
--- a/mne/cov.py
+++ b/mne/cov.py
@@ -23,31 +23,6 @@ from .fiff.pick import pick_types, channel_indices_by_type
 from .epochs import _is_good
 
 
-def rank(A, tol=1e-8):
-    s = linalg.svd(A, compute_uv=0)
-    return np.sum(np.where(s > s[0] * tol, 1, 0))
-
-
-def _get_whitener(A, rnk, pca, ch_type):
-    # whitening operator
-    D, V = linalg.eigh(A, overwrite_a=True)
-    I = np.argsort(D)[::-1]
-    D = D[I]
-    V = V[:, I]
-    D = 1.0 / D
-    if not pca:  # No PCA case.
-        print 'Not doing PCA for %s.' % ch_type
-        W = np.sqrt(D)[:, None] * V.T
-    else:  # Rey's approach. MNE has been changed to implement this.
-        print 'Setting small %s eigenvalues to zero.' % ch_type
-        D[rnk:] = 0.0
-        W = np.sqrt(D)[:, None] * V.T
-        # This line will reduce the actual number of variables in data
-        # and leadfield to the true rank.
-        W = W[:rnk]
-    return W
-
-
 class Covariance(object):
     """Noise covariance matrix"""
 
@@ -79,129 +54,6 @@ class Covariance(object):
         """save covariance matrix in a FIF file"""
         write_cov_file(fname, self._cov)
 
-    def get_whitener(self, info, mag_reg=0.1, grad_reg=0.1, eeg_reg=0.1,
-                     pca=True):
-        """Compute whitener based on a list of channels
-
-        Parameters
-        ----------
-        info : dict
-            Measurement info of data to apply the whitener.
-            Defines data channels and which are the bad channels
-            to be ignored.
-        mag_reg : float
-            Regularization of the magnetometers.
-            Recommended between 0.05 and 0.2
-        grad_reg : float
-            Regularization of the gradiometers.
-            Recommended between 0.05 and 0.2
-        eeg_reg : float
-            Regularization of the EGG channels.
-            Recommended between 0.05 and 0.2
-        pca : bool
-            If True, whitening is restricted to the space of
-            the data. It makes sense when data have a low rank
-            due to SSP or maxfilter.
-
-        Returns
-        -------
-        W : instance of Whitener
-        """
-
-        if not 0 <= grad_reg <= 1:
-            raise ValueError('grad_reg should be a scalar between 0 and 1')
-        if not 0 <= mag_reg <= 1:
-            raise ValueError('mag_reg should be a scalar between 0 and 1')
-        if not 0 <= eeg_reg <= 1:
-            raise ValueError('eeg_reg should be a scalar between 0 and 1')
-
-        if pca and self.kind == 'diagonal':
-            print "Setting pca to False with a diagonal covariance matrix."
-            pca = False
-
-        bads = info['bads']
-        C_idx = [k for k, name in enumerate(self.ch_names)
-                 if name in info['ch_names'] and name not in bads]
-        ch_names = [self.ch_names[k] for k in C_idx]
-        C_noise = self.data[np.ix_(C_idx, C_idx)]  # take covariance submatrix
-
-        # Create the projection operator
-        proj, ncomp, _ = make_projector(info['projs'], ch_names)
-        if ncomp > 0:
-            print '\tCreated an SSP operator (subspace dimension = %d)' % ncomp
-            C_noise = np.dot(proj, np.dot(C_noise, proj.T))
-
-        # Regularize Noise Covariance Matrix.
-        variances = np.diag(C_noise)
-        ind_meg = pick_types(info, meg=True, eeg=False, exclude=bads)
-        names_meg = [info['ch_names'][k] for k in ind_meg]
-        C_ind_meg = [ch_names.index(name) for name in names_meg]
-
-        ind_grad = pick_types(info, meg='grad', eeg=False, exclude=bads)
-        names_grad = [info['ch_names'][k] for k in ind_grad]
-        C_ind_grad = [ch_names.index(name) for name in names_grad]
-
-        ind_mag = pick_types(info, meg='mag', eeg=False, exclude=bads)
-        names_mag = [info['ch_names'][k] for k in ind_mag]
-        C_ind_mag = [ch_names.index(name) for name in names_mag]
-
-        ind_eeg = pick_types(info, meg=False, eeg=True, exclude=bads)
-        names_eeg = [info['ch_names'][k] for k in ind_eeg]
-        C_ind_eeg = [ch_names.index(name) for name in names_eeg]
-
-        has_meg = len(ind_meg) > 0
-        has_eeg = len(ind_eeg) > 0
-
-        if self.kind == 'diagonal':
-            C_noise = np.diag(variances)
-            rnkC_noise = len(variances)
-            print 'Rank of noise covariance is %d' % rnkC_noise
-        else:
-            # estimate noise covariance matrix rank
-            # Loop on all the required data types (MEG MAG, MEG GRAD, EEG)
-
-            if has_meg:  # Separate rank of MEG
-                rank_meg = rank(C_noise[C_ind_meg][:, C_ind_meg])
-                print 'Rank of MEG part of noise covariance is %d' % rank_meg
-            if has_eeg:  # Separate rank of EEG
-                rank_eeg = rank(C_noise[C_ind_eeg][:, C_ind_eeg])
-                print 'Rank of EEG part of noise covariance is %d' % rank_eeg
-
-            for ind, reg in zip([C_ind_grad, C_ind_mag, C_ind_eeg],
-                                [grad_reg, mag_reg, eeg_reg]):
-                if len(ind) > 0:
-                    # add constant on diagonal
-                    C_noise[ind, ind] += reg * np.mean(variances[ind])
-
-            if has_meg and has_eeg:  # Sets cross terms to zero
-                C_noise[np.ix_(C_ind_meg, C_ind_eeg)] = 0.0
-                C_noise[np.ix_(C_ind_eeg, C_ind_meg)] = 0.0
-
-        # whitening operator
-        if has_meg:
-            W_meg = _get_whitener(C_noise[C_ind_meg][:, C_ind_meg], rank_meg,
-                                  pca, 'MEG')
-
-        if has_eeg:
-            W_eeg = _get_whitener(C_noise[C_ind_eeg][:, C_ind_eeg], rank_eeg,
-                                  pca, 'EEG')
-
-        if has_meg and not has_eeg:  # Only MEG case.
-            W = W_meg
-        elif has_eeg and not has_meg:  # Only EEG case.
-            W = W_eeg
-        elif has_eeg and has_meg:  # Bimodal MEG and EEG case.
-            # Whitening of MEG and EEG separately, which assumes zero
-            # covariance between MEG and EEG (i.e., a block diagonal noise
-            # covariance). This was recommended by Matti as EEG does not
-            # measure all the signals from the same environmental noise sources
-            # as MEG.
-            W = np.r_[np.c_[W_meg, np.zeros((W_meg.shape[0], W_eeg.shape[1]))],
-                      np.c_[np.zeros((W_eeg.shape[0], W_meg.shape[1])), W_eeg]]
-
-        whitener = Whitener(W, names_meg + names_eeg)
-        return whitener
-
     def __repr__(self):
         s = "kind : %s" % self.kind
         s += ", size : %s x %s" % self.data.shape
@@ -209,25 +61,9 @@ class Covariance(object):
         return "Covariance (%s)" % s
 
 
-class Whitener(object):
-    """Whitener
-
-    Attributes
-    ----------
-    W : array
-        Whiten matrix
-    ch_names : list of strings
-        Channel names (columns of W)
-    """
-
-    def __init__(self, W, ch_names):
-        self.W = W
-        self.ch_names = ch_names
-
 ###############################################################################
 # IO
 
-
 def read_cov(fid, node, cov_kind):
     """Read a noise covariance matrix
 
@@ -336,6 +172,7 @@ def read_cov(fid, node, cov_kind):
 
     return None
 
+
 ###############################################################################
 # Estimate from data
 
@@ -554,3 +391,75 @@ def write_cov_file(fname, cov):
         raise '%s', inst
 
     end_file(fid)
+
+
+###############################################################################
+# Prepare for inverse modeling
+
+def rank(A, tol=1e-8):
+    s = linalg.svd(A, compute_uv=0)
+    return np.sum(np.where(s > s[0] * tol, 1, 0))
+
+
+def _get_whitener(A, pca, ch_type):
+    # whitening operator
+    rnk = rank(A)
+    eig, eigvec = linalg.eigh(A, overwrite_a=True)
+    eigvec = eigvec.T
+    eig[:-rnk] = 0.0
+    print 'Setting small %s eigenvalues to zero.' % ch_type
+    if not pca:  # No PCA case.
+        print 'Not doing PCA for %s.' % ch_type
+    else:
+        print 'Doing PCA for %s.' % ch_type
+        # This line will reduce the actual number of variables in data
+        # and leadfield to the true rank.
+        eigvec = eigvec[:-rnk].copy()
+    return eig, eigvec
+
+
+def prepare_noise_cov(noise_cov, info, ch_names):
+    C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
+    C = noise_cov.data[C_ch_idx][:, C_ch_idx]
+
+    # Create the projection operator
+    proj, ncomp, _ = make_projector(info['projs'], ch_names)
+    if ncomp > 0:
+        print '\tCreated an SSP operator (subspace dimension = %d)' % ncomp
+        C = np.dot(proj, np.dot(C, proj.T))
+
+    pick_meg = pick_types(info, meg=True, eeg=False, exclude=info['bads'])
+    pick_eeg = pick_types(info, meg=False, eeg=True, exclude=info['bads'])
+    meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
+    C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
+    eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
+    C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
+
+    has_meg = len(C_meg_idx) > 0
+    has_eeg = len(C_eeg_idx) > 0
+
+    if has_meg:
+        C_meg = C[C_meg_idx][:, C_meg_idx]
+        C_meg_eig, C_meg_eigvec = _get_whitener(C_meg, False, 'MEG')
+
+    if has_eeg:
+        C_eeg = C[C_eeg_idx][:, C_eeg_idx]
+        C_eeg_eig, C_eeg_eigvec = _get_whitener(C_eeg, False, 'EEG')
+
+    n_chan = len(ch_names)
+    eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
+    eig = np.zeros(n_chan, dtype=np.float)
+
+    if has_meg:
+        eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
+        eig[C_meg_idx] = C_meg_eig
+    if has_eeg:
+        eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
+        eig[C_eeg_idx] = C_eeg_eig
+
+    assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)
+
+    noise_cov = dict(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
+                     diag=False, names=ch_names)
+
+    return noise_cov
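
Note on the new helper: prepare_noise_cov() returns the eigendecomposition of
the noise covariance, computed separately for MEG and EEG and with the
projected-out / rank-deficient eigenvalues set to zero. A minimal sketch of
how a whitener is built from those fields -- this mirrors the
make_inverse_operator code further down; variable names are illustrative:

    import numpy as np

    # noise_cov is the dict returned by prepare_noise_cov(...)
    eig, eigvec = noise_cov['eig'], noise_cov['eigvec']
    nzero = eig > 0                     # components kept after rank truncation
    n_chan = len(eig)
    W = np.zeros((n_chan, n_chan))
    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])   # scale the kept components
    W = np.dot(W, eigvec)               # rows of eigvec are the eigenvectors
    # np.dot(np.dot(W, noise_cov['data']), W.T) is then close to identity on
    # the retained subspace (up to the MEG/EEG cross-terms this per-type
    # decomposition deliberately ignores)
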
diff --git a/mne/forward.py b/mne/forward.py
index 6b51ec5..61d017d 100644
--- a/mne/forward.py
+++ b/mne/forward.py
@@ -341,7 +341,7 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     fwd['src'] = src
 
     #   Handle the source locations and orientations
-    if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI or force_fixed == True:
+    if (fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI) or force_fixed:
         nuse = 0
         fwd['source_rr'] = np.zeros((fwd['nsource'], 3))
         fwd['source_nn'] = np.zeros((fwd['nsource'], 3))
@@ -366,50 +366,49 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
                 fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
 
             print '[done]'
+    elif surf_ori:
+        #   Rotate the local source coordinate systems
+        print '\tConverting to surface-based source orientations...'
+        nuse = 0
+        pp = 0
+        nuse_total = sum([s['nuse'] for s in src])
+        fwd['source_rr'] = np.zeros((fwd['nsource'], 3))
+        fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
+        for s in src:
+            rr = s['rr'][s['vertno'], :]
+            fwd['source_rr'][nuse:nuse + s['nuse'], :] = rr
+            for p in range(s['nuse']):
+                #  Project out the surface normal and compute SVD
+                nn = s['nn'][s['vertno'][p], :][:, None]
+                U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
+                #  Make sure that ez is in the direction of nn
+                if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:
+                    U *= -1.0
+                fwd['source_nn'][pp:pp + 3, :] = U.T
+                pp += 3
+
+            nuse += s['nuse']
+
+        surf_rot = _block_diag(fwd['source_nn'].T, 3)
+        fwd['sol']['data'] = fwd['sol']['data'] * surf_rot
+        if fwd['sol_grad'] is not None:
+            fwd['sol_grad']['data'] = np.dot(fwd['sol_grad']['data'],
+                                             np.kron(surf_rot, np.eye(3)))
+
+        print '[done]'
     else:
-        if surf_ori:
-            #   Rotate the local source coordinate systems
-            print '\tConverting to surface-based source orientations...'
-            nuse = 0
-            pp = 0
-            nuse_total = sum([s['nuse'] for s in src])
-            fwd['source_rr'] = np.zeros((fwd['nsource'], 3))
-            fwd['source_nn'] = np.empty((3 * nuse_total, 3), dtype=np.float)
-            for s in src:
-                fwd['source_rr'][nuse:nuse + s['nuse'], :] = \
-                                                    s['rr'][s['vertno'], :]
-                for p in range(s['nuse']):
-                    #  Project out the surface normal and compute SVD
-                    nn = s['nn'][s['vertno'][p], :].T
-                    nn = nn[:, None]
-                    U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)
-                    #  Make sure that ez is in the direction of nn
-                    if np.sum(nn * U[:, 2]) < 0:
-                        U *= -1
-
-                    fwd['source_nn'][pp:pp + 3, :] = U.T
-                    pp += 3
-
-                nuse += s['nuse']
-
-            surf_rot = _block_diag(fwd['source_nn'].T, 3)
-            fwd['sol']['data'] = fwd['sol']['data'] * surf_rot
-            if fwd['sol_grad'] is not None:
-                fwd['sol_grad']['data'] = np.dot(fwd['sol_grad']['data'] * \
-                                                 np.kron(surf_rot, np.eye(3)))
+        print '\tCartesian source orientations...'
+        nuse = 0
+        fwd['source_rr'] = np.zeros((fwd['nsource'], 3))
+        for s in src:
+            rr = s['rr'][s['vertno'], :]
+            fwd['source_rr'][nuse:nuse + s['nuse'], :] = rr
+            nuse += s['nuse']
 
-            print '[done]'
-        else:
-            print '\tCartesian source orientations...'
-            nuse = 0
-            fwd['source_rr'] = np.zeros((fwd['nsource'], 3))
-            for s in src:
-                fwd['source_rr'][nuse:nuse + s['nuse'], :] = \
-                                                s['rr'][s['vertno'], :]
-                nuse += s['nuse']
-
-            fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
-            print '[done]'
+        fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))
+        print '[done]'
+
+    fwd['surf_ori'] = surf_ori
 
     # Add channel information
     fwd['chs'] = chs
@@ -417,3 +416,19 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     fwd = pick_channels_forward(fwd, include=include, exclude=exclude)
 
     return fwd
+
+
+def compute_depth_prior(G, exp=0.8, limit=10.0):
+    """Compute weighting for depth prior
+    """
+    n_pos = G.shape[1] // 3
+    d = np.zeros(n_pos)
+    for k in xrange(n_pos):
+        Gk = G[:, 3 * k:3 * (k + 1)]
+        d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
+    w = 1.0 / d
+    wmax = np.min(w) * (limit ** 2)
+    wp = np.minimum(w, wmax)
+    wpp = (wp / wmax) ** exp
+    depth_prior = np.ravel(wpp[:, None] * np.ones((1, 3)))
+    return depth_prior
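
The depth prior added above weights each source location by the inverse power
of its gain columns (largest singular value of Gk' * Gk), clipped at limit**2
times the smallest weight and raised to the exp exponent, then replicated over
the three dipole components. A small self-contained sketch of the expected
behaviour on a random gain matrix (not real data):

    import numpy as np
    from mne.forward import compute_depth_prior

    rng = np.random.RandomState(42)
    G = rng.randn(60, 3 * 100)          # 60 sensors, 100 free-orientation sources
    w = compute_depth_prior(G, exp=0.8, limit=10.0)
    # one weight per dipole component (3 per location), in (0, 1]; weak
    # (deep) sources are pushed towards 1, strong (superficial) ones below it
    assert w.shape == (3 * 100,)
    assert np.all((w > 0) & (w <= 1))
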
diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py
index 273ffa8..fe3d453 100644
--- a/mne/minimum_norm/__init__.py
+++ b/mne/minimum_norm/__init__.py
@@ -1,3 +1,3 @@
-from .inverse import read_inverse_operator, apply_inverse, minimum_norm, \
-                     apply_inverse_raw
+from .inverse import read_inverse_operator, apply_inverse, \
+                     apply_inverse_raw, make_inverse_operator
 from .time_frequency import source_induced_power
diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py
index a53a39d..6183dcd 100644
--- a/mne/minimum_norm/inverse.py
+++ b/mne/minimum_norm/inverse.py
@@ -1,9 +1,10 @@
 # Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Rey Rene Ramirez, Ph.D. <rrramirez at mcw.edu>
 #
 # License: BSD (3-clause)
 
+import warnings
+from copy import deepcopy
 from math import sqrt
 import numpy as np
 from scipy import linalg
@@ -16,9 +17,9 @@ from ..fiff.proj import read_proj, make_projector
 from ..fiff.tree import dir_tree_find
 from ..fiff.pick import pick_channels_evoked, pick_channels
 
-from ..cov import read_cov
+from ..cov import read_cov, prepare_noise_cov
+from ..forward import compute_depth_prior
 from ..source_space import read_source_spaces_from_tree, find_source_space_hemi
-from ..forward import _block_diag
 from ..transforms import invert_transform, transform_source_space_to
 from ..source_estimate import SourceEstimate
 
@@ -319,7 +320,7 @@ def prepare_inverse_operator(orig, nave, lambda2, dSPM):
     #   Create the projection operator
     #
     inv['proj'], ncomp, _ = make_projector(inv['projs'],
-                                        inv['noise_cov']['names'])
+                                           inv['noise_cov']['names'])
     if ncomp > 0:
         print '\tCreated an SSP operator (subspace dimension = %d)' % ncomp
 
@@ -340,13 +341,14 @@ def prepare_inverse_operator(orig, nave, lambda2, dSPM):
         #
         inv['whitener'] = np.dot(inv['whitener'], inv['noise_cov']['eigvec'])
         print ('\tCreated the whitener using a full noise covariance matrix '
-              '(%d small eigenvalues omitted)' % (inv['noise_cov']['dim']
+               '(%d small eigenvalues omitted)' % (inv['noise_cov']['dim']
                                                   - np.sum(nzero)))
     else:
         #
         #   No need to omit the zeroes due to projection
         #
-        inv['whitener'] = np.diag(1.0 / np.sqrt(inv['noise_cov']['data'].ravel()))
+        inv['whitener'] = np.diag(1.0 /
+                                  np.sqrt(inv['noise_cov']['data'].ravel()))
         print ('\tCreated the whitener using a diagonal noise covariance '
                'matrix (%d small eigenvalues discarded)' % ncomp)
 
@@ -677,203 +679,134 @@ def _xyz2lf(Lf_xyz, normals):
     return Lf_cortex
 
 
-def minimum_norm(evoked, forward, whitener, method='dspm',
-                 orientation='fixed', snr=3, loose=0.2, depth=0.8,
-                 depth_weight_limit=10, pick_normal=False):
-    """Minimum norm estimate (MNE)
+###############################################################################
+# Assemble the inverse operator
 
-    Compute MNE, dSPM and sLORETA on evoked data starting from
-    a forward operator.
+def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8):
+    """Assemble inverse operator
 
     Parameters
     ----------
-    evoked: Evoked
-        Evoked data to invert
+    info: dict
+        The measurement info to specify the channels to include.
+        Bad channels in info['bads'] are ignored.
     forward: dict
         Forward operator
-    whitener: Whitener
-        Whitening matrix derived from noise covariance matrix
-    method: 'wmne' | 'dspm' | 'sloreta'
-        The method to use
-    orientation: 'fixed' | 'free' | 'loose'
-        Type of orientation constraints 'fixed'.
-    snr: float
-        Signal-to noise ratio defined as in MNE (default: 3).
+    noise_cov: Covariance
+        The noise covariance matrix
     loose: float in [0, 1]
         Value that weights the source variances of the dipole components
         defining the tangent space of the cortical surfaces.
     depth: None | float in [0, 1]
         Depth weighting coefficients. If None, no depth weighting is performed.
-    weight_exp: float
-        Order of the depth weighting. {0=no, 1=full normalization, default=0.8}
-    depth_weight_limit: float
-        Maximal amount depth weighting (default: 10).
-    pick_normal: bool
-        If True, rather than pooling the orientations by taking the norm,
-        only the radial component is kept. This is only implemented
-        when working with loose orientations.
+
+    # XXX : add support for megreg=0.0, eegreg=0.0
 
     Returns
     -------
     stc: dict
         Source time courses
     """
-    assert method in ['wmne', 'dspm', 'sloreta']
-    assert orientation in ['fixed', 'free', 'loose']
-
-    if not 0 <= loose <= 1:
+    is_fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)
+    if is_fixed_ori and loose is not None:
+        warnings.warn('Ignoring loose parameter with forward operator with '
+                      'fixed orientation.')
+    if not forward['surf_ori'] and loose is not None:
+        raise ValueError('Forward operator is not oriented in surface '
+                         'coordinates. loose parameter should be None '
+                         'not %s.' % loose)
+
+    if loose is not None and not (0 <= loose <= 1):
         raise ValueError('loose value should be smaller than 1 and bigger than'
-                         ' 0, or empty for no loose orientations.')
-    if depth is not None and not 0 < depth <= 1:
+                         ' 0, or None for no loose orientations.')
+    if depth is not None and not (0 < depth <= 1):
         raise ValueError('depth should be a scalar between 0 and 1')
 
-    # Set regularization parameter based on SNR
-    lambda2 = 1.0 / snr ** 2
+    fwd_ch_names = [c['ch_name'] for c in forward['chs']]
+    ch_names = [c['ch_name'] for c in info['chs']
+                                    if (c['ch_name'] not in info['bads'])
+                                        and (c['ch_name'] in fwd_ch_names)]
+    n_chan = len(ch_names)
+
+    print "Computing inverse operator with %d channels." % n_chan
 
-    normals = []
-    for s in forward['src']:
-        normals.append(s['nn'][s['inuse'] != 0])
-    normals = np.concatenate(normals)
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
 
-    W, ch_names = whitener.W, whitener.ch_names
+    W = np.zeros((n_chan, n_chan), dtype=np.float)
+    #
+    #   Omit the zeroes due to projection
+    #
+    eig = noise_cov['eig']
+    nzero = (eig > 0)
+    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
+    n_nzero = sum(nzero)
+    #
+    #   Rows of eigvec are the eigenvectors
+    #
+    W = np.dot(W, noise_cov['eigvec'])
 
     gain = forward['sol']['data']
-    fwd_ch_names = [forward['chs'][k]['ch_name'] for k in range(gain.shape[0])]
+
+    n_positions = gain.shape[1] / 3
+
     fwd_idx = [fwd_ch_names.index(name) for name in ch_names]
     gain = gain[fwd_idx]
 
-    print "Computing inverse solution with %d channels." % len(ch_names)
-
-    rank_noise = len(W)
-    print 'Total rank is %d' % rank_noise
+    # Handle depth prior scaling
+    depth_prior = np.ones(gain.shape[1])
+    if depth is not None:
+        depth_prior = compute_depth_prior(gain, exp=depth)
 
-    # processing lead field matrices, weights, areas & orientations
-    # Initializing.
-    n_positions = gain.shape[1] / 3
+    print "Computing inverse operator with %d channels." % len(ch_names)
 
-    if orientation == 'fixed':
+    if is_fixed_ori:
         n_dip_per_pos = 1
-    elif orientation in ['free', 'loose']:
+    else:
         n_dip_per_pos = 3
 
     n_dipoles = n_positions * n_dip_per_pos
 
-    w = np.ones(n_dipoles)
-
-    # compute power
-    if depth is not None:
-        w = np.sum(gain ** 2, axis=0)
-        w = w[0::3] + w[1::3] + w[2::3]
-        if n_dip_per_pos != 1:
-            w = w[:, None] * np.ones((1, n_dip_per_pos))
-        w = w.ravel()
-
-    if orientation == 'fixed':
-        print 'Appying fixed dipole orientations.'
-        gain = gain * _block_diag(normals.ravel()[None, :], 3).T
-    elif orientation == 'free':
-        print 'Using free dipole orientations. No constraints.'
-    elif orientation == 'loose':
-        print 'Transforming lead field matrix to cortical coordinate system.'
-        gain = _xyz2lf(gain, normals)
-        # Getting indices for tangential dipoles: [1, 2, 4, 5]
-        itangential = [k for k in xrange(n_dipoles) if (n_dipoles % 3) != 0]
-
     # Whiten lead field.
     print 'Whitening lead field matrix.'
     gain = np.dot(W, gain)
 
-    # Computing reciprocal of power.
-    w = 1.0 / w
-
-    # apply depth weighthing
-    if depth is not None:
-        # Apply weight limit.
-        print 'Applying weight limit.'
-        depth_weight_limit2 = depth_weight_limit ** 2
-        # limit = min(w(w>min(w) * weight_limit2));  % This is the Matti way.
-        # we do the Rey way (robust to possible weight discontinuity).
-        limit = np.min(w) * depth_weight_limit2
-        w[w > limit] = limit
-
-        # Applying weight exponent.
-        print 'Applying weight exponent (%f).' % depth
-        w = w ** depth
-
     # apply loose orientations
-    if orientation == 'loose':
+    orient_prior = np.ones(n_dipoles, dtype=np.float)
+    if loose is not None:
         print 'Applying loose dipole orientations. Loose value of %s.' % loose
-        w[itangential] *= loose
+        orient_prior[np.mod(np.arange(n_dipoles), 3) != 2] *= loose
+
+    source_cov = orient_prior * depth_prior
 
-    # Adjusting Source Covariance matrix to make trace of L*C_J*L' equal
+    # Adjusting Source Covariance matrix to make trace of G*R*G' equal
     # to number of sensors.
     print 'Adjusting source covariance matrix.'
-    source_std = np.sqrt(w)  # sqrt(C_J)
-    trclcl = linalg.norm(gain * source_std[None, :], ord='fro')
-    source_std *= sqrt(rank_noise) / trclcl  # correct C_J
+    source_std = np.sqrt(source_cov)
     gain *= source_std[None, :]
+    trace_GRGT = linalg.norm(gain, ord='fro') ** 2
+    scaling_source_cov = n_nzero / trace_GRGT
+    source_cov *= scaling_source_cov
+    gain *= sqrt(scaling_source_cov)
 
-    # Compute SVD.
-    print 'Computing SVD of whitened and weighted lead field matrix.'
-    U, s, Vh = linalg.svd(gain, full_matrices=False)
-    ss = s / (s ** 2 + lambda2)
-
-    # Compute whitened MNE operator.
-    Kernel = source_std[:, None] * np.dot(Vh.T, ss[:, None] * U.T)
-
-    # Compute dSPM operator.
-    if method == 'dspm':
-        print 'Computing dSPM inverse operator.'
-        dspm_diag = np.sum(Kernel ** 2, axis=1)
-        if n_dip_per_pos == 1:
-            dspm_diag = np.sqrt(dspm_diag)
-        elif n_dip_per_pos in [2, 3]:
-            dspm_diag = dspm_diag.reshape(-1, n_dip_per_pos)
-            dspm_diag = np.sqrt(np.sum(dspm_diag, axis=1))
-            dspm_diag = (np.ones((1, n_dip_per_pos)) *
-                         dspm_diag[:, None]).ravel()
-
-        Kernel /= dspm_diag[:, None]
-
-    # whitened sLORETA imaging kernel
-    elif method == 'sloreta':
-        print 'Computing sLORETA inverse operator.'
-        if n_dip_per_pos == 1:
-            sloreta_diag = np.sqrt(np.sum(Kernel * gain.T, axis=1))
-            Kernel /= sloreta_diag[:, None]
-        elif n_dip_per_pos in [2, 3]:
-            for k in n_positions:
-                start = k * n_dip_per_pos
-                stop = start + n_dip_per_pos
-                R = np.dot(Kernel[start:stop, :], gain[:, start:stop])
-                SIR = linalg.matfuncs.sqrtm(R, linalg.pinv(R))
-                Kernel[start:stop] = np.dot(SIR, Kernel[start:stop])
-
-    # Multiply inverse operator by whitening matrix, so no need to whiten data
-    Kernel = np.dot(Kernel, W)
-    sel = [evoked.ch_names.index(name) for name in ch_names]
-    sol = np.dot(Kernel, evoked.data[sel])
-
-    if n_dip_per_pos > 1:
-        if pick_normal:
-            print 'Picking only the normal components...',
-            if orientation != 'loose':
-                raise ValueError('The pick_normal parameter is only valid '
-                                 'when working with loose orientations.')
-            sol = sol[2::3]  # take one every 3 sources ie. only the normal
-        else:
-            print 'Combining the current components...',
-            sol = combine_xyz(sol)
-
+    # now np.trace(np.dot(gain, gain.T)) == n_nzero
+    # print np.trace(np.dot(gain, gain.T)), n_nzero
 
-    src = forward['src']
-    stc = SourceEstimate(None)
-    stc.data = sol
-    stc.tmin = float(evoked.first) / evoked.info['sfreq']
-    stc.tstep = 1.0 / evoked.info['sfreq']
-    stc.lh_vertno = src[0]['vertno']
-    stc.rh_vertno = src[1]['vertno']
-    stc._init_times()
-    print '[done]'
-
-    return stc
+    print 'Computing SVD of whitened and weighted lead field matrix.'
+    eigen_fields, sing, eigen_leads = linalg.svd(gain, full_matrices=False)
+
+    eigen_fields = dict(data=eigen_fields.T)
+    eigen_leads = dict(data=eigen_leads.T, nrow=eigen_leads.shape[1])
+    depth_prior = dict(data=depth_prior)
+    orient_prior = dict(data=orient_prior)
+    source_cov = dict(data=source_cov)
+    nave = 1.0
+
+    inv_op = dict(eigen_fields=eigen_fields, eigen_leads=eigen_leads,
+                  sing=sing, nave=nave, depth_prior=depth_prior,
+                  source_cov=source_cov, noise_cov=noise_cov,
+                  orient_prior=orient_prior, projs=deepcopy(info['projs']),
+                  eigen_leads_weighted=False, source_ori=forward['source_ori'],
+                  mri_head_t=deepcopy(forward['mri_head_t']),
+                  src=deepcopy(forward['src']))
+
+    return inv_op
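
For reference, the operator assembled above stores the SVD of the whitened,
source-weighted gain (eigen_fields, sing, eigen_leads); applying it for a
given lambda2 reduces to the same Tikhonov-regularized kernel the removed
minimum_norm() built directly. A sketch of that algebra in terms of the
returned dict -- illustrative only, prepare_inverse_operator/apply_inverse
remain the supported path:

    import numpy as np

    # inv: dict returned by make_inverse_operator; W: the whitener it used
    Ut = inv['eigen_fields']['data']            # stored as U.T (n_comp x n_chan)
    V = inv['eigen_leads']['data']              # n_dipoles x n_comp
    s = inv['sing']
    lambda2 = 1.0 / 3.0 ** 2                    # 1 / SNR**2
    reg = s / (s ** 2 + lambda2)
    K = np.sqrt(inv['source_cov']['data'])[:, None] * np.dot(V, reg[:, None] * Ut)
    # np.dot(K, W) then maps the measured (unwhitened) data to source amplitudes
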
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
index 7ad3ef2..bec08b9 100644
--- a/mne/minimum_norm/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -1,28 +1,30 @@
 import os.path as op
 
 import numpy as np
-# from numpy.testing import assert_array_almost_equal, assert_equal
+from numpy.testing import assert_array_almost_equal, assert_equal
 
 from ...datasets import sample
 from ... import fiff, Covariance, read_forward_solution
-from ..inverse import minimum_norm, apply_inverse, read_inverse_operator
+from ..inverse import apply_inverse, read_inverse_operator, \
+                      make_inverse_operator
 
 
 examples_folder = op.join(op.dirname(__file__), '..', '..', '..', 'examples')
 data_path = sample.data_path(examples_folder)
 fname_inv = op.join(data_path, 'MEG', 'sample',
-                                        'sample_audvis-meg-oct-6-meg-inv.fif')
+                            # 'sample_audvis-meg-eeg-oct-6-meg-eeg-inv.fif')
+                            'sample_audvis-meg-oct-6-meg-inv.fif')
 fname_data = op.join(data_path, 'MEG', 'sample',
-                                        'sample_audvis-ave.fif')
+                            'sample_audvis-ave.fif')
 fname_cov = op.join(data_path, 'MEG', 'sample',
-                                        'sample_audvis-cov.fif')
+                            'sample_audvis-cov.fif')
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                                        'sample_audvis-meg-eeg-oct-6-fwd.fif')
+                            'sample_audvis-meg-oct-6-fwd.fif')
+                            # 'sample_audvis-meg-eeg-oct-6-fwd.fif')
 
 
-def test_apply_mne_inverse_operator():
-    """Test MNE inverse computation with precomputed inverse operator
-    """
+def test_inverse_operator():
+    """Test MNE inverse computation with precomputed inverse operator."""
     setno = 0
     snr = 3.0
     lambda2 = 1.0 / snr ** 2
@@ -36,18 +38,14 @@ def test_apply_mne_inverse_operator():
     assert np.all(stc.data > 0)
     assert np.all(stc.data < 35)
 
-
-def test_compute_minimum_norm():
-    """Test MNE inverse computation starting from forward operator
-    """
-    setno = 0
+    # Test MNE inverse computation starting from forward operator
     noise_cov = Covariance(fname_cov)
-    forward = read_forward_solution(fname_fwd)
-    evoked = fiff.Evoked(fname_data, setno=setno, baseline=(None, 0))
-    whitener = noise_cov.get_whitener(evoked.info, mag_reg=0.1,
-                                      grad_reg=0.1, eeg_reg=0.1, pca=True)
-    stc = minimum_norm(evoked, forward, whitener,
-                       orientation='loose', method='dspm', snr=3, loose=0.2)
+    evoked = fiff.Evoked(fname_data, setno=0, baseline=(None, 0))
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                      loose=0.2, depth=0.8)
 
-    assert np.all(stc.data > 0)
-    # XXX : test something
+    my_stc = apply_inverse(evoked, my_inv_op, lambda2, dSPM)
+
+    assert_equal(stc.times, my_stc.times)
+    assert_array_almost_equal(stc.data, my_stc.data, 2)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git


