diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 3a30c27f4be..b86676f677f 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -17,6 +17,8 @@ Changelog - Improved :func:`mne.viz.plot_epochs` to label epoch counts starting from 0 `Sophie Herbst`_ +- Add :func:`mne.minimum_norm.apply_inverse_cov` to compute static power by applying inverse solutions to a data covariance matrix by `Denis Engemann`_, `Luke Bloy`_, and `Eric Larson`_ + +- Add :func:`mne.minimum_norm.resolution_metrics` to compute various resolution metrics for inverse solutions, by `Olaf Hauk`_ - Add current source density :func:`mne.preprocessing.compute_current_source_density` to compute the surface Laplacian in order to reduce volume conduction in data by `Alex Rockhill`_ diff --git a/doc/glossary.rst b/doc/glossary.rst index 94439c7b267..ce8c2944d0f 100644 --- a/doc/glossary.rst +++ b/doc/glossary.rst @@ -45,6 +45,11 @@ general neuroimaging concepts. If you think a term is missing, please consider a type, such as gradiometer, and a unit, such as Tesla/Meter that is used in the code base, e.g. for plotting. + DICS + Dynamic Imaging of Coherent Sources, a method for computing source + power in different frequency bands. See :ref:`ex-inverse-source-power` + and :func:`mne.beamformer.make_dics`. + digitization Digitization is a procedure of recording the headshape of a subject and the fiducial coils (or :term:`HPI`) and/or eeg electrodes locations on @@ -171,6 +176,11 @@ general neuroimaging concepts. If you think a term is missing, please consider diagrams of approximate sensor positions in top-down diagrams of the head, so-called topographies or topomaps). + LCMV beamformer + Linearly constrained minimum variance beamformer, which attempts to + estimate activity for a given source while suppressing cross-talk from + other regions, see :func:`mne.beamformer.make_lcmv`. + minimum-norm estimation Minimum-norm estimation (abbr. ``MNE``) can be used to generate a distributed map of activation on a :term:`source space`, usually on a cortical surface. diff --git a/doc/overview/roadmap.rst b/doc/overview/roadmap.rst index 0de6ee20cce..1c9aa2bc804 100644 --- a/doc/overview/roadmap.rst +++ b/doc/overview/roadmap.rst @@ -137,6 +137,8 @@ several (mostly) separable steps: 1. Refactor code to use traitlets 2. GUI elements to use PyQt5 (rather than TraitsUI/pyface) 3. 3D plotting to use our abstracted 3D viz functions rather than Mayavi +4. Refactor distance/fitting classes to public ones to enable the example + from :gh:`6693`. Once this is done, we can effectively switch to a PyVista backend. @@ -160,6 +162,7 @@ as well as: - `OpenNEURO `__ "A free and open platform for sharing MRI, MEG, EEG, iEEG, and ECoG data." + See for example :gh:`6687`. - `Human Connectome Project Datasets `__ Over a 3-year span (2012-2015), the Human Connectome Project (HCP) scanned 1,200 healthy adult subjects.
The available data includes MR structural diff --git a/doc/python_reference.rst b/doc/python_reference.rst index f74b6f22cdd..a1458a2263f 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -651,6 +651,7 @@ Inverse Solutions InverseOperator apply_inverse + apply_inverse_cov apply_inverse_epochs apply_inverse_raw compute_source_psd diff --git a/examples/inverse/plot_evoked_ers_source_power.py b/examples/inverse/plot_evoked_ers_source_power.py new file mode 100644 index 00000000000..e8b5c6409e2 --- /dev/null +++ b/examples/inverse/plot_evoked_ers_source_power.py @@ -0,0 +1,130 @@ +""" +====================================================================== +Compute evoked ERS source power using DICS, LCMV beamformer, and dSPM +====================================================================== + +Here we examine 3 ways of localizing event-related synchronization (ERS) of +beta band activity in this dataset: :ref:`somato-dataset` using +:term:`DICS`, :term:`LCMV beamformer`, and :term:`dSPM` applied to active and +baseline covariance matrices. +""" +# Authors: Luke Bloy +# Eric Larson +# +# License: BSD (3-clause) +import os.path as op + +import numpy as np +import mne +from mne.cov import compute_covariance +from mne.datasets import somato +from mne.time_frequency import csd_morlet +from mne.beamformer import (make_dics, apply_dics_csd, make_lcmv, + apply_lcmv_cov) +from mne.minimum_norm import (make_inverse_operator, apply_inverse_cov) + +print(__doc__) + +############################################################################### +# Reading the raw data and creating epochs: +data_path = somato.data_path() +subject = '01' +task = 'somato' +raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg', + 'sub-{}_task-{}_meg.fif'.format(subject, task)) + +raw = mne.io.read_raw_fif(raw_fname) + +# We are interested in the beta band (12-30 Hz) +raw.load_data().filter(12, 30) + +# The DICS beamformer currently only supports a single sensor type. +# We'll use the gradiometers in this example. +picks = mne.pick_types(raw.info, meg='grad', exclude='bads') + +# Read epochs +events = mne.find_events(raw) +epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks, + preload=True) + +# Read forward operator and point to freesurfer subject directory +fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject), + 'sub-{}_task-{}-fwd.fif'.format(subject, task)) +subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects') + +fwd = mne.read_forward_solution(fname_fwd) + +############################################################################### +# Compute covariances +# ------------------- +# ERS activity starts at 0.5 seconds after stimulus onset. + +active_win = (0.5, 1.5) +baseline_win = (-1, 0) + +baseline_cov = compute_covariance(epochs, tmin=baseline_win[0], + tmax=baseline_win[1], method='shrunk', + rank=None) +active_cov = compute_covariance(epochs, tmin=active_win[0], tmax=active_win[1], + method='shrunk', rank=None) + +# Weighted averaging is already performed when covariance objects are added. +common_cov = baseline_cov + active_cov + + +############################################################################### +# Compute some source estimates +# ----------------------------- +# Here we will use DICS, LCMV beamformer, and dSPM. +# +# See :ref:`ex-inverse-source-power` for more information about DICS.
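A quick illustration of the weighted averaging mentioned in the covariance section above (this sketch is not part of the patch): adding two ``Covariance`` objects appears to take a weighted average, with each estimate's degrees of freedom (``nfree``) as the weight. Assuming the ``baseline_cov``, ``active_cov`` and ``common_cov`` objects computed above, the check would look like:

# Hedged check of the nfree-weighted average (assumes the variables above).
w_base, w_act = baseline_cov['nfree'], active_cov['nfree']
expected = (w_base * baseline_cov['data'] +
            w_act * active_cov['data']) / (w_base + w_act)
np.testing.assert_allclose(common_cov['data'], expected)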
+ +def _gen_dics(active_win, baseline_win, epochs): + freqs = np.logspace(np.log10(12), np.log10(30), 9) + csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20) + csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0], + tmax=baseline_win[1], decim=20) + csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1], + decim=20) + filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power') + stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters) + stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters) + stc_act /= stc_base + return stc_act + + +# generate lcmv source estimate +def _gen_lcmv(active_cov, baseline_cov, common_cov): + filters = make_lcmv(epochs.info, fwd, common_cov, reg=0.05, + noise_cov=None, pick_ori='max-power') + stc_base = apply_lcmv_cov(baseline_cov, filters) + stc_act = apply_lcmv_cov(active_cov, filters) + stc_act /= stc_base + return stc_act + + +# generate mne/dSPM source estimate +def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'): + inverse_operator = make_inverse_operator(info, fwd, common_cov) + stc_act = apply_inverse_cov(active_cov, info, inverse_operator, + method=method, verbose=True) + stc_base = apply_inverse_cov(baseline_cov, info, inverse_operator, + method=method, verbose=True) + stc_act /= stc_base + return stc_act + + +# Compute source estimates +stc_dics = _gen_dics(active_win, baseline_win, epochs) +stc_lcmv = _gen_lcmv(active_cov, baseline_cov, common_cov) +stc_dspm = _gen_mne(active_cov, baseline_cov, common_cov, fwd, epochs.info) + +############################################################################### +# Plot source estimates +# --------------------- + +for method, stc in zip(['DICS', 'LCMV', 'dSPM'], + [stc_dics, stc_lcmv, stc_dspm]): + title = '%s source power in the 12-30 Hz frequency band' % method + brain = stc.plot(hemi='rh', subjects_dir=subjects_dir, + subject=subject, time_label=title) diff --git a/examples/inverse/plot_mne_cov_power.py b/examples/inverse/plot_mne_cov_power.py new file mode 100644 index 00000000000..9b220396452 --- /dev/null +++ b/examples/inverse/plot_mne_cov_power.py @@ -0,0 +1,139 @@ +""" +=================================================================== +Compute source power estimate by projecting the covariance with MNE +=================================================================== + +We can apply the MNE inverse operator to a covariance matrix to obtain +an estimate of source power. This is computationally more efficient than first +estimating the source timecourses and then computing their power. +""" +# Author: Denis A. Engemann +# Luke Bloy +# +# License: BSD (3-clause) + +import os.path as op +import numpy as np + +import mne +from mne.datasets import sample +from mne.minimum_norm import make_inverse_operator, apply_inverse_cov + +data_path = sample.data_path() +subjects_dir = data_path + '/subjects' +raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' +raw = mne.io.read_raw_fif(raw_fname) + +############################################################################### +# Compute empty-room covariance +# ----------------------------- +# First we compute an empty-room covariance, which captures noise from the +# sensors and environment. 
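Brief aside (not part of the patch): when no empty-room recording is available, a rough noise covariance can stand in for it. The sketch below is hypothetical and only shows two common fallbacks using this example's variables (``raw`` here, and the ``epochs`` created in a later section):

# Hypothetical fallbacks, only needed if no empty-room recording exists:
noise_cov_adhoc = mne.make_ad_hoc_cov(raw.info)  # diagonal cov with nominal noise levels
# ...or, once `epochs` exist, estimate noise from the pre-stimulus baseline:
# noise_cov_baseline = mne.compute_covariance(epochs, tmax=0., method='shrunk', rank=None)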
+ +raw_empty_room_fname = op.join( + data_path, 'MEG', 'sample', 'ernoise_raw.fif') +raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname) +raw_empty_room.crop(0, 60) +raw_empty_room.info['bads'] = ['MEG 2443'] +raw_empty_room.info['projs'] = raw.info['projs'] +noise_cov = mne.compute_raw_covariance( + raw_empty_room, method=['empirical', 'shrunk']) +del raw_empty_room + +############################################################################### +# Epoch the data +# -------------- + +raw.info['bads'] = ['MEG 2443', 'EEG 053'] +raw.load_data().filter(4, 12) +events = mne.find_events(raw, stim_channel='STI 014') +event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4) +tmin, tmax = -0.2, 0.5 +baseline = (None, 0)  # means from the first instant to t = 0 +reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) +epochs = mne.Epochs(raw, events, event_id, tmin, tmax, + proj=True, picks=('meg', 'eog'), baseline=None, + reject=reject, preload=True) +del raw + +############################################################################### +# Compute and plot covariances +# ---------------------------- +# In addition to the empty-room covariance above, we compute two additional +# covariances: +# +# 1. Baseline covariance, which captures signals not of interest in our +# analysis (e.g., sensor noise, environmental noise, physiological +# artifacts, and also resting-state-like brain activity / "noise"). +# 2. Data covariance, which captures our activation of interest (in addition +# to noise sources). + +base_cov = mne.compute_covariance( + epochs, tmin=-0.2, tmax=0, method=['shrunk', 'empirical'], rank=None, + verbose=True) +data_cov = mne.compute_covariance( + epochs, tmin=0., tmax=0.2, method=['shrunk', 'empirical'], rank=None, + verbose=True) + +fig_noise_cov = mne.viz.plot_cov(noise_cov, epochs.info, show_svd=False) +fig_base_cov = mne.viz.plot_cov(base_cov, epochs.info, show_svd=False) +fig_data_cov = mne.viz.plot_cov(data_cov, epochs.info, show_svd=False) + +############################################################################### +# We can also look at the covariances using topomaps; here we just show the +# (empty-room) noise and data covariances, followed by the data covariance +# whitened by the noise covariance: + +evoked = epochs.average().pick('meg') +evoked.drop_channels(evoked.info['bads']) +evoked.plot(time_unit='s') +evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag', + time_unit='s') + +evoked_noise_cov = mne.EvokedArray(data=np.diag(noise_cov['data'])[:, None], + info=evoked.info) +evoked_data_cov = mne.EvokedArray(data=np.diag(data_cov['data'])[:, None], + info=evoked.info) +evoked_data_cov_white = mne.whiten_evoked(evoked_data_cov, noise_cov) + + +def plot_cov_diag_topomap(evoked, ch_type='grad'): + evoked.plot_topomap( + ch_type=ch_type, times=[0], + vmin=np.min, vmax=np.max, cmap='viridis', + units=dict(mag='None', grad='None'), + scalings=dict(mag=1, grad=1), + cbar_fmt=None) + + +plot_cov_diag_topomap(evoked_noise_cov, 'grad') +plot_cov_diag_topomap(evoked_data_cov, 'grad') +plot_cov_diag_topomap(evoked_data_cov_white, 'grad') + +############################################################################### +# Apply inverse operator to covariance +# ------------------------------------ +# Finally, we can construct an inverse using the empty-room noise covariance: + +# Read the forward solution and compute the inverse operator +fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif' +fwd = 
mne.read_forward_solution(fname_fwd) + +# make an MEG inverse operator +info = evoked.info +inverse_operator = make_inverse_operator(info, fwd, noise_cov, + loose=0.2, depth=0.8) + +############################################################################### +# Project our data and baseline covariance to source space: + +stc_data = apply_inverse_cov(data_cov, evoked.info, inverse_operator, + nave=len(epochs), method='dSPM', verbose=True) +stc_base = apply_inverse_cov(base_cov, evoked.info, inverse_operator, + nave=len(epochs), method='dSPM', verbose=True) + +############################################################################### +# And visualize the power relative to the baseline: +stc_data /= stc_base +brain = stc_data.plot(subject='sample', subjects_dir=subjects_dir, + clim=dict(kind='percent', lims=(50, 90, 98))) diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 24c395a01fe..2457166b8c6 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -217,7 +217,7 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose="auto", depth=0.8, %(rank_None)s .. versionadded:: 0.18 - %(pick_ori-vec)s + %(pick_ori)s %(verbose)s Returns diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index c3caa92d4ad..52c989a36a0 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -319,7 +319,7 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, %(rank_None)s .. versionadded:: 0.18 - %(pick_ori-vec)s + %(pick_ori)s %(verbose)s Returns @@ -557,7 +557,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, %(rank_None)s .. versionadded:: 0.18 - %(pick_ori-vec)s + %(pick_ori)s n_tfmxne_iter : int Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
%(verbose)s diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py index 4e5efde7c1e..0fb0e33390f 100644 --- a/mne/minimum_norm/__init__.py +++ b/mne/minimum_norm/__init__.py @@ -4,7 +4,7 @@ apply_inverse_raw, make_inverse_operator, apply_inverse_epochs, write_inverse_operator, compute_rank_inverse, prepare_inverse_operator, - estimate_snr) + estimate_snr, apply_inverse_cov) from .psf_ctf import point_spread_function, cross_talk_function from .time_frequency import (source_band_induced_power, source_induced_power, compute_source_psd, compute_source_psd_epochs) diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index 734da8daacb..46882a2ff5a 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -24,9 +24,10 @@ start_block, end_block, end_file, write_float, write_coord_trans, write_string) -from ..io.pick import channel_type, pick_info, pick_types +from ..io.pick import channel_type, pick_info, pick_types, pick_channels from ..cov import (compute_whitener, _read_cov, _write_cov, Covariance, prepare_noise_cov) +from ..evoked import EvokedArray from ..forward import (compute_depth_prior, _read_forward_meas_info, is_fixed_orient, compute_orient_prior, convert_forward_solution, _select_orient_forward) @@ -748,7 +749,7 @@ def _assemble_kernel(inv, label, method, pick_ori, use_cps=True, verbose=None): return K, noise_norm, vertno, source_nn -def _check_ori(pick_ori, source_ori, src): +def _check_ori(pick_ori, source_ori, src, allow_vector=True): """Check pick_ori.""" _check_option('pick_ori', pick_ori, [None, 'normal', 'vector']) if pick_ori == 'vector' and source_ori != FIFF.FIFFV_MNE_FREE_ORI: @@ -797,16 +798,7 @@ def apply_inverse(evoked, inverse_operator, lambda2=1. / 9., method="dSPM", dSPM (default) :footcite:`DaleEtAl2000`, sLORETA :footcite:`Pascual-Marqui2002`, or eLORETA :footcite:`Pascual-Marqui2011`. - pick_ori : None | "normal" | "vector" - By default (None) pooling is performed by taking the norm of loose/free - orientations. In case of a fixed source space no norm is computed - leading to signed source activity. - If "normal" only the radial component is kept. This is only implemented - when working with loose orientations. - If "vector", no pooling of the orientations is done and the vector - result will be returned in the form of a - :class:`mne.VectorSourceEstimate` object. This is only implemented when - working with loose orientations. + %(pick_ori)s prepared : bool If True, do not call :func:`prepare_inverse_operator`. label : Label | None @@ -881,6 +873,15 @@ def apply_inverse(evoked, inverse_operator, lambda2=1. / 9., method="dSPM", ---------- .. footbibliography:: """ + out = _apply_inverse( + evoked, inverse_operator, lambda2, method, pick_ori, prepared, label, + method_params, return_residual, use_cps) + logger.info('[done]') + return out + + +def _apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori, + prepared, label, method_params, return_residual, use_cps): _check_reference(evoked, inverse_operator['info']['ch_names']) _check_option('method', method, INVERSE_METHODS) _check_ori(pick_ori, inverse_operator['source_ori'], @@ -942,7 +943,6 @@ def apply_inverse(evoked, inverse_operator, lambda2=1. 
/ 9., method="dSPM", stc = _make_stc(sol, vertno, tmin=tmin, tstep=tstep, subject=subject, vector=(pick_ori == 'vector'), source_nn=source_nn, src_type=src_type) - logger.info('[done]') return (stc, residual) if return_residual else stc @@ -977,14 +977,7 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM", Set to 1 on raw data. time_func : callable Linear function applied to sensor space time series. - pick_ori : None | "normal" | "vector" - If "normal", rather than pooling the orientations by taking the norm, - only the radial component is kept. This is only implemented - when working with loose orientations. - If "vector", no pooling of the orientations is done and the vector - result will be returned in the form of a - :class:`mne.VectorSourceEstimate` object. This does not work when using - an inverse operator with fixed orientations. + %(pick_ori)s buffer_size : int (or None) If not None, the computation of the inverse and the combination of the current components is performed in segments of length buffer_size @@ -1070,7 +1063,6 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM", if is_free_ori and pick_ori != 'vector': logger.info(' combining the current components...') sol = combine_xyz(sol) - if noise_norm is not None: if pick_ori == 'vector' and is_free_ori: noise_norm = noise_norm.repeat(3, axis=0) @@ -1183,14 +1175,7 @@ def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM", nave : int Number of averages used to regularize the solution. Set to 1 on single Epoch by default. - pick_ori : None | "normal" | "vector" - If "normal", rather than pooling the orientations by taking the norm, - only the radial component is kept. This is only implemented - when working with loose orientations. - If "vector", no pooling of the orientations is done and the vector - result will be returned in the form of a - :class:`mne.VectorSourceEstimate` object. This does not work when using - an inverse operator with fixed orientations. + %(pick_ori)s return_generator : bool Return a generator object instead of a list. This allows iterating over the stcs without having to keep them all in memory. @@ -1227,51 +1212,90 @@ def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM", return stcs -# XXX what is this??? -''' -def _xyz2lf(Lf_xyz, normals): - """Reorient leadfield to one component matching the normal to the cortex - - This program takes a leadfield matrix computed for dipole components - pointing in the x, y, and z directions, and outputs a new lead field - matrix for dipole components pointing in the normal direction of the - cortical surfaces and in the two tangential directions to the cortex - (that is on the tangent cortical space). These two tangential dipole - components are uniquely determined by the SVD (reduction of variance). +@verbose +def apply_inverse_cov(cov, info, inverse_operator, nave=1, lambda2=1 / 9, + method="dSPM", pick_ori=None, prepared=False, + label=None, method_params=None, use_cps=True, + verbose=None): + """Apply inverse operator to covariance data. Parameters ---------- - Lf_xyz: array of shape [n_sensors, n_positions x 3] - Leadfield - normals : array of shape [n_positions, 3] - Normals to the cortex + cov : instance of Covariance + Covariance data, computed on the time segment for which to compute + source power. + info : dict + The measurement info to specify the channels to include. + inverse_operator : instance of InverseOperator + Inverse operator. 
+ nave : int + Number of averages used to regularize the solution. + lambda2 : float + The regularization parameter. + method : "MNE" | "dSPM" | "sLORETA" | "eLORETA" + Use minimum norm, dSPM (default), sLORETA, or eLORETA. + %(pick_ori-novec)s + prepared : bool + If True, do not call :func:`prepare_inverse_operator`. + label : Label | None + Restricts the source estimates to a given label. If None, + source estimates will be computed for the entire source space. + method_params : dict | None + Additional options for eLORETA. See Notes for details. + %(use_cps)s + %(verbose)s Returns ------- - Lf_cortex : array of shape [n_sensors, n_positions x 3] - Lf_cortex is a leadfield matrix for dipoles in rotated orientations, so - that the first column is the gain vector for the cortical normal dipole - and the following two column vectors are the gain vectors for the - tangential orientations (tangent space of cortical surface). + stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate + The source estimates. + + See Also + -------- + apply_inverse : Apply inverse operator to evoked object. + apply_inverse_raw : Apply inverse operator to raw object. + apply_inverse_epochs : Apply inverse operator to epochs object. + + Notes + ----- + .. versionadded:: 0.20 """ - n_sensors, n_dipoles = Lf_xyz.shape - n_positions = n_dipoles // 3 - Lf_xyz = Lf_xyz.reshape(n_sensors, n_positions, 3) - n_sensors, n_positions, _ = Lf_xyz.shape - Lf_cortex = np.zeros_like(Lf_xyz) - - for k in range(n_positions): - lf_normal = np.dot(Lf_xyz[:, k, :], normals[k]) - lf_normal_n = lf_normal[:, None] / linalg.norm(lf_normal) - P = np.eye(n_sensors, n_sensors) - np.dot(lf_normal_n, lf_normal_n.T) - lf_p = np.dot(P, Lf_xyz[:, k, :]) - U, s, Vh = linalg.svd(lf_p) - Lf_cortex[:, k, 0] = lf_normal - Lf_cortex[:, k, 1:] = np.c_[U[:, 0] * s[0], U[:, 1] * s[1]] - - Lf_cortex = Lf_cortex.reshape(n_sensors, n_dipoles) - return Lf_cortex -''' + _validate_type(cov, Covariance, cov) + _validate_type(inverse_operator, InverseOperator, 'inverse_operator') + sel = _pick_channels_inverse_operator(cov['names'], inverse_operator) + use_names = [cov['names'][idx] for idx in sel] + info = pick_info( + info, pick_channels(info['ch_names'], use_names, ordered=True)) + evoked = EvokedArray( + np.eye(len(info['ch_names'])), info, nave=nave, comment='cov') + is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI) + _check_option('pick_ori', pick_ori, (None, 'normal')) + if is_free_ori and pick_ori is None: + use_ori = 'vector' + combine = True + else: + use_ori = pick_ori + combine = False + stc = _apply_inverse( + evoked, inverse_operator, lambda2, method, use_ori, prepared, label, + method_params, return_residual=False, use_cps=use_cps) + # apply (potentially rotated in the vector case) operator twice + K = np.reshape(stc.data, (-1, stc.data.shape[-1])) + # diagonal entries of A @ B are given by (A * B.T).sum(axis=1), so this is + # equivalent to np.diag(K @ cov.data[sel][:, sel] @ K.T)[:, np.newaxis]: + sol = cov.data[sel][:, sel] @ K.T + sol = np.sum(K * sol.T, axis=1, keepdims=True) + # Reshape back to (n_src, ..., 1) + sol.shape = stc.data.shape[:-1] + (1,) + stc = stc.__class__( + sol, stc.vertices, stc.tmin, stc.tstep, stc.subject, stc.verbose) + if combine: # combine the three directions + logger.info(' Combining the current components...') + np.sqrt(stc.data, out=stc.data) + stc = stc.magnitude() + stc.data *= stc.data + logger.info('[done]') + return stc 
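The in-code comment about diagonal entries above relies on the identity ``diag(A @ B) == (A * B.T).sum(axis=1)``, which is what lets ``apply_inverse_cov`` avoid forming the full source-by-source product. A small self-contained check with random matrices (illustrative only, not part of the module):

import numpy as np

rng = np.random.default_rng(0)
K = rng.standard_normal((5, 4))           # stands in for the (n_src, n_ch) kernel
C = rng.standard_normal((4, 4))
C = C @ C.T                               # symmetric, like a covariance matrix
direct = np.diag(K @ C @ K.T)             # full product, then keep the diagonal
fast = np.sum(K * (C @ K.T).T, axis=1)    # the cheaper form used above
np.testing.assert_allclose(direct, fast)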
############################################################################### diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 261f17f7ef4..458b5464f7d 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -24,14 +24,14 @@ pick_types_forward, make_forward_solution, EvokedArray, convert_forward_solution, Covariance, combine_evoked, SourceEstimate, make_sphere_model, make_ad_hoc_cov, - pick_channels_forward) + pick_channels_forward, compute_raw_covariance) from mne.io import read_raw_fif from mne.io.proj import make_projector from mne.minimum_norm.inverse import (apply_inverse, read_inverse_operator, apply_inverse_raw, apply_inverse_epochs, make_inverse_operator, write_inverse_operator, - compute_rank_inverse, + compute_rank_inverse, apply_inverse_cov, prepare_inverse_operator, INVERSE_METHODS) from mne.utils import _TempDir, run_tests_if_main, catch_logging @@ -797,6 +797,57 @@ def test_io_inverse_operator(): _compare(inv_prep, inv_prep_prep) +# eLORETA is slow and we can trust that it will work because we just route +# through apply_inverse +_fast_methods = list(INVERSE_METHODS) +_fast_methods.pop(_fast_methods.index('eLORETA')) + + +@testing.requires_testing_data +@pytest.mark.parametrize('method', _fast_methods) +@pytest.mark.parametrize('pick_ori', ['normal', None]) +def test_apply_inverse_cov(method, pick_ori): + """Test MNE with precomputed inverse operator on cov.""" + raw = read_raw_fif(fname_raw, preload=True) + # use 10 sec of data + raw.crop(0, 10) + + raw.filter(1, None) + label_lh = read_label(fname_label % 'Aud-lh') + + # test with a free ori inverse + inverse_operator = read_inverse_operator(fname_inv) + + data_cov = compute_raw_covariance(raw, tstep=None) + + with pytest.raises(ValueError, match='has not been prepared'): + apply_inverse_cov(data_cov, raw.info, inverse_operator, + lambda2=lambda2, prepared=True) + + this_inv_op = prepare_inverse_operator(inverse_operator, nave=1, + lambda2=lambda2, method=method) + + raw_ori = 'normal' if pick_ori == 'normal' else 'vector' + stc_raw = apply_inverse_raw( + raw, this_inv_op, lambda2, method, label=label_lh, nave=1, + pick_ori=raw_ori, prepared=True) + stc_cov = apply_inverse_cov( + data_cov, raw.info, this_inv_op, method=method, pick_ori=pick_ori, + label=label_lh, prepared=True, lambda2=lambda2) + n_sources = np.prod(stc_cov.data.shape[:-1]) + raw_data = stc_raw.data.reshape(n_sources, -1) + exp_res = np.diag(np.cov(raw_data, ddof=1)).copy() + exp_res *= 1 if raw_ori == pick_ori else 3. + # There seems to be some precision penalty when combining orientations, + # but it's probably acceptable + rtol = 5e-4 if pick_ori is None else 1e-12 + assert_allclose(exp_res, stc_cov.data.ravel(), rtol=rtol) + + with pytest.raises(ValueError, match='Invalid value'): + apply_inverse_cov( + data_cov, raw.info, this_inv_op, method=method, pick_ori='vector') + + @testing.requires_testing_data def test_apply_mne_inverse_raw(): """Test MNE with precomputed inverse operator on Raw.""" diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 3a33977fe42..d36e6a21c26 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -318,13 +318,27 @@ .. versionchanged:: 0.20 Depth bias ignored for ``method='eLORETA'``. """ -docdict['pick_ori-vec'] = """ - pick_ori : None | "vector" - Only applies to loose/free orientation. By default (None) pooling is - performed by taking the norm of the current vectors. 
Use - pick_ori="vector" to return vector source estimate. +_pick_ori_novec = """ + Options: - .. versionadded:: 0.20 + - ``None`` + Pooling is performed by taking the norm of loose/free + orientations. In case of a fixed source space no norm is computed + leading to signed source activity. + - ``"normal"`` + Only the normal to the cortical surface is kept. This is only + implemented when working with loose orientations. +""" +docdict['pick_ori-novec'] = """ +pick_ori : None | "normal" +""" + _pick_ori_novec +docdict['pick_ori'] = """ +pick_ori : None | "normal" | "vector" +""" + _pick_ori_novec + """ + - ``"vector"`` + No pooling of the orientations is done, and the vector result + will be returned in the form of a :class:`mne.VectorSourceEstimate` + object. This is only implemented when working with loose orientations. """ docdict['reduce_rank'] = """ reduce_rank : bool