diff --git a/docs/build-docs.sh b/docs/build-docs.sh index ccb159aae..4a94ab206 100755 --- a/docs/build-docs.sh +++ b/docs/build-docs.sh @@ -10,6 +10,9 @@ python $STEP_DIR/source/examples/gen_examples.py echo "Generating pipeline table …" python $STEP_DIR/source/features/gen_steps.py +echo "Generating config docs …" +python $STEP_DIR/source/settings/gen_settings.py + echo "Building the documentation …" cd $STEP_DIR mkdocs build diff --git a/docs/source/settings/execution.md b/docs/source/settings/execution.md new file mode 100644 index 000000000..189433a92 --- /dev/null +++ b/docs/source/settings/execution.md @@ -0,0 +1,18 @@ +::: mne_bids_pipeline._config + options: + members: + - n_jobs + - parallel_backend + - dask_open_dashboard + - dask_temp_dir + - dask_worker_memory_limit + - random_state + - shortest_event + - log_level + - mne_log_level + - on_error + - memory_location + - memory_subdir + - memory_file_method + - memory_verbose + - config_validation diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py new file mode 100755 index 000000000..b93335a50 --- /dev/null +++ b/docs/source/settings/gen_settings.py @@ -0,0 +1,142 @@ +"""Generate settings .md files.""" + +import re +from pathlib import Path + +from tqdm import tqdm + +import mne_bids_pipeline._config + +root_file = Path(mne_bids_pipeline._config.__file__) +settings_dir = Path(__file__).parent + +# Mapping between first two lower-case words in the section name and the desired +# file or folder name +section_to_file = { # .md will be added to the files + # root file + "general settings": "general", + # folder + "preprocessing": "preprocessing", + "break detection": "breaks", + "bad channel": "autobads", + "maxwell filter": "maxfilter", + "filtering": "filter", + "resampling": "resample", + "epoching": "epochs", + "filtering &": None, # just a header + "artifact removal": None, + "stimulation artifact": "stim_artifact", + "ssp, ica,": "ssp_ica", + "amplitude-based 
artifact": "artifacts", + # folder + "sensor-level analysis": "sensor", + "condition contrasts": "contrasts", + "decoding /": "mvpa", + "time-frequency analysis": "time_freq", + "group-level analysis": "group_level", + # folder + "source-level analysis": "source", + "bem surface": "bem", + "source space": "forward", + "inverse solution": "inverse", + # folder + "reports": "reports", + "report generation": "report_generation", + # root file + "execution": "execution", +} + +option_header = """\ +::: mne_bids_pipeline._config + options: + members:""" +prefix = """\ + - """ + +# We cannot use ast for this because it doesn't preserve comments. We could use +# something like redbaron, but our code is hopefully simple enough! +assign_re = re.compile( + # Line starts with annotation syntax (name captured by the first group). + r"^(\w+): " + # Then the annotation can be ... + "(" + # ... a standard assignment ... + ".+ = .+" + # ... or ... + "|" + # ... the start of a multiline type annotation like "a: Union[" + r"(Union|Optional|Literal)\[" + # To the end of the line. 
+ ")$", + re.MULTILINE, +) + + +def main(): + print(f"Parsing {root_file} to generate settings .md files.") + # max file-level depth is 2 even though we have 3 subsection levels + levels = [None, None] + current_path, current_lines = None, list() + text = root_file.read_text("utf-8") + lines = text.splitlines() + lines += ["# #"] # add a dummy line to trigger the last write + in_header = False + for li, line in enumerate(tqdm(lines)): + line = line.rstrip() + if line.startswith("# #"): # a new (sub)section / file + this_def = line[2:] + this_level = this_def.split()[0] + assert this_level.count("#") == len(this_level), this_level + this_level = this_level.count("#") - 1 + if this_level == 2: + # flatten preprocessing/filtering/filter to preprocessing/filter + # for example + this_level = 1 + assert this_level in (0, 1), (this_level, this_def) + this_def = this_def[this_level + 2 :] + levels[this_level] = this_def + # Write current lines and reset + if len(current_lines) > 1: # more than just the header + assert current_path is not None, levels + if current_lines[0] == "": # this happens with tags + current_lines = current_lines[1:] + current_path.write_text("\n".join(current_lines + [""]), "utf-8") + if this_level == 0: + this_root = settings_dir + else: + this_root = settings_dir / f"{section_to_file[levels[0].lower()]}" + this_root.mkdir(exist_ok=True) + key = " ".join(this_def.split()[:2]).lower() + if key == "": + assert li == len(lines) - 1, (li, line) + continue # our dummy line + fname = section_to_file[key] + if fname is None: + current_path = None + else: + current_path = this_root / f"{fname}.md" + current_lines = [] + in_header = True + continue + + if in_header: + if line == "": + in_header = False + if current_lines: + current_lines.append("") + current_lines.append(option_header) + else: + assert line == "#" or line.startswith("# "), (li, line) # a comment + current_lines.append(line[2:]) + continue + + # Could be an option + match = 
assign_re.match(line) + if match is not None: + name, typ, desc = match.groups() + current_lines.append(f"{prefix}{name}") + continue + + +if __name__ == "__main__": + main() diff --git a/docs/source/settings/general.md b/docs/source/settings/general.md index 2640f5f2b..60d371833 100644 --- a/docs/source/settings/general.md +++ b/docs/source/settings/general.md @@ -27,22 +27,7 @@ - eeg_reference - eeg_template_montage - drop_channels + - analyze_channels - reader_extra_params - read_raw_bids_verbose - - analyze_channels - plot_psd_for_runs - - n_jobs - - parallel_backend - - dask_open_dashboard - - dask_temp_dir - - dask_worker_memory_limit - - random_state - - shortest_event - - memory_location - - memory_subdir - - memory_file_method - - memory_verbose - - config_validation - - log_level - - mne_log_level - - on_error diff --git a/docs/source/settings/preprocessing/artifacts.md b/docs/source/settings/preprocessing/artifacts.md index 88407cd2c..21aac9a9a 100644 --- a/docs/source/settings/preprocessing/artifacts.md +++ b/docs/source/settings/preprocessing/artifacts.md @@ -11,6 +11,7 @@ tags: You can do a quick average of blink data and check what the amplitude looks like. 
+ ::: mne_bids_pipeline._config options: members: diff --git a/docs/source/settings/preprocessing/epochs.md b/docs/source/settings/preprocessing/epochs.md index 02dd1f71d..2fdcc3208 100644 --- a/docs/source/settings/preprocessing/epochs.md +++ b/docs/source/settings/preprocessing/epochs.md @@ -13,14 +13,14 @@ tags: - rename_events - on_rename_missing_events - event_repeated - - conditions - - epochs_tmin - - epochs_tmax - - baseline - epochs_metadata_tmin - epochs_metadata_tmax - epochs_metadata_keep_first - epochs_metadata_keep_last - epochs_metadata_query + - conditions + - epochs_tmin + - epochs_tmax - rest_epochs_duration - rest_epochs_overlap + - baseline diff --git a/docs/source/settings/preprocessing/filter.md b/docs/source/settings/preprocessing/filter.md index 9d1301412..a34ef3cd8 100644 --- a/docs/source/settings/preprocessing/filter.md +++ b/docs/source/settings/preprocessing/filter.md @@ -30,8 +30,8 @@ of tips! 😇 members: - l_freq - h_freq + - notch_freq - l_trans_bandwidth - h_trans_bandwidth - - notch_freq - notch_trans_bandwidth - notch_widths diff --git a/docs/source/settings/preprocessing/ssp_ica.md b/docs/source/settings/preprocessing/ssp_ica.md index f25110729..ab5985a07 100644 --- a/docs/source/settings/preprocessing/ssp_ica.md +++ b/docs/source/settings/preprocessing/ssp_ica.md @@ -17,11 +17,11 @@ tags: - min_eog_epochs - n_proj_eog - n_proj_ecg - - ssp_meg - ecg_proj_from_average - eog_proj_from_average - - ssp_reject_eog + - ssp_meg - ssp_reject_ecg + - ssp_reject_eog - ssp_ecg_channel - ica_reject - ica_algorithm diff --git a/docs/source/settings/sensor/mvpa.md b/docs/source/settings/sensor/mvpa.md index 3a56d22d7..febf1de29 100644 --- a/docs/source/settings/sensor/mvpa.md +++ b/docs/source/settings/sensor/mvpa.md @@ -18,10 +18,10 @@ tags: - decoding_n_splits - decoding_time_generalization - decoding_time_generalization_decim - - decoding_csp - - decoding_csp_times - - decoding_csp_freqs - n_boot - cluster_forming_t_threshold - 
cluster_n_permutations - cluster_permutation_p_threshold + - decoding_csp + - decoding_csp_times + - decoding_csp_freqs diff --git a/docs/source/settings/source/forward.md b/docs/source/settings/source/forward.md index 8ce5c87ad..4871bb731 100644 --- a/docs/source/settings/source/forward.md +++ b/docs/source/settings/source/forward.md @@ -11,4 +11,3 @@ tags: - mri_landmarks_kind - spacing - mindist - - source_info_path_update diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc index aba57baea..0c0e204c0 100644 --- a/docs/source/v1.6.md.inc +++ b/docs/source/v1.6.md.inc @@ -37,3 +37,4 @@ - Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner) - Code caching is now tested using GitHub Actions (#836 by @larsoner) - Steps in the documentation are now automatically parsed into flowcharts (#859 by @larsoner) +- New configuration options are now automatically added to the docs (#863 by @larsoner) diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py index b45753966..2d67330f4 100644 --- a/mne_bids_pipeline/_config.py +++ b/mne_bids_pipeline/_config.py @@ -1,4 +1,4 @@ -# Default settings for data processing and analysis. +"""Default settings for data processing and analysis.""" from typing import Annotated, Any, Callable, Literal, Optional, Sequence, Union @@ -13,9 +13,8 @@ PathLike, ) -############################################################################### -# Config parameters -# ----------------- +# %% +# # General settings study_name: str = "" """ @@ -95,6 +94,11 @@ The task to process. """ +task_is_rest: bool = False +""" +Whether the task should be treated as resting-state data. +""" + runs: Union[Sequence, Literal["all"]] = "all" """ The runs to process. If `'all'`, will process all runs found in the @@ -144,14 +148,6 @@ The BIDS `space` entity. 
""" -plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all" -""" -For which runs to add a power spectral density (PSD) plot to the generated -report. This can take a considerable amount of time if you have many long -runs. In this case, specify the runs, or pass an empty list to disable raw PSD -plotting. -""" - subjects: Union[Sequence[str], Literal["all"]] = "all" """ Subjects to analyze. If `'all'`, include all subjects. To only @@ -426,9 +422,26 @@ `'error'` to suppress warnings emitted by read_raw_bids. """ -############################################################################### -# BREAK DETECTION -# --------------- +plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all" +""" +For which runs to add a power spectral density (PSD) plot to the generated +report. This can take a considerable amount of time if you have many long +runs. In this case, specify the runs, or pass an empty list to disable raw PSD +plotting. +""" + +# %% +# # Preprocessing + +# ## Break detection +# +# --- +# tags: +# - preprocessing +# - artifact-removal +# - raw +# - events +# --- find_breaks: bool = False """ @@ -527,10 +540,30 @@ ``` """ -############################################################################### -# MAXWELL FILTER PARAMETERS -# ------------------------- -# done in 01-import_and_maxfilter.py +# %% +# ## Bad channel detection +# +# --- +# tags: +# - preprocessing +# - raw +# - bad-channels +# --- +# +# !!! warning +# This functionality will soon be removed from the pipeline, and +# will be integrated into MNE-BIDS. +# +# "Bad", i.e. flat and overly noisy channels, can be automatically detected +# using a procedure inspired by the commercial MaxFilter by Elekta. First, +# a copy of the data is low-pass filtered at 40 Hz. Then, channels with +# unusually low variability are flagged as "flat", while channels with +# excessively high variability are flagged as "noisy". 
Flat and noisy channels +are marked as "bad" and excluded from subsequent analysis. See +# :func:`mne.preprocessing.find_bad_channels_maxwell` for more information +# on this procedure. The list of bad channels detected through this procedure +# will be merged with the list of bad channels already present in the dataset, +# if any. find_flat_channels_meg: bool = False """ @@ -543,6 +576,16 @@ Auto-detect "noisy" channels and mark them as bad. """ +# %% +# ## Maxwell filter +# +# --- +# tags: +# - preprocessing +# - maxwell-filter +# - raw +# --- + use_maxwell_filter: bool = False """ Whether or not to use Maxwell filtering to preprocess the data. @@ -738,45 +781,36 @@ Only used when [`use_maxwell_filter=True`][mne_bids_pipeline._config.use_maxwell_filter] """ # noqa: E501 -############################################################################### -# STIMULATION ARTIFACT -# -------------------- -# used in 01-import_and_maxfilter.py +# ## Filtering & resampling -fix_stim_artifact: bool = False -""" -Apply interpolation to fix stimulation artifact. - -???+ example "Example" - ```python - fix_stim_artifact = False - ``` -""" - -stim_artifact_tmin: float = 0.0 -""" -Start time of the interpolation window in seconds. - -???+ example "Example" - ```python - stim_artifact_tmin = 0. # on stim onset - ``` -""" - -stim_artifact_tmax: float = 0.01 -""" -End time of the interpolation window in seconds. - -???+ example "Example" - ```python - stim_artifact_tmax = 0.01 # up to 10ms post-stimulation - ``` -""" - -############################################################################### -# FREQUENCY FILTERING & RESAMPLING -# -------------------------------- -# done in 02-frequency_filter.py +# ### Filtering +# +# --- +# tags: +# - preprocessing +# - frequency-filter +# - raw +# --- +# +# It is typically better to set your filtering properties on the raw data so +# as to avoid what we call border (or edge) effects. 
+# +# If you use this pipeline for evoked responses, you could consider +# a low-pass filter cut-off of h_freq = 40 Hz +# and possibly a high-pass filter cut-off of l_freq = 1 Hz +# so you would preserve only the power in the 1Hz to 40 Hz band. +# Note that highpass filtering is not necessarily recommended as it can +# distort waveforms of evoked components, or simply wash out any low +# frequency that may contain brain signal. It can also act as +# a replacement for baseline correction in Epochs. See below. +# +# If you use this pipeline for time-frequency analysis, a default filtering +# could be a high-pass filter cut-off of l_freq = 1 Hz +# a low-pass filter cut-off of h_freq = 120 Hz +# so you would preserve only the power in the 1Hz to 120 Hz band. +# +# If you need more fancy analysis, you are already likely past this kind +# of tips! 😇 l_freq: Optional[float] = None """ @@ -833,6 +867,24 @@ Specifies the width of each stop band. `None` uses the MNE default. """ +# ### Resampling +# +# --- +# tags: +# - preprocessing +# - resampling +# - decimation +# - raw +# - epochs +# --- +# +# If you have acquired data with a very high sampling frequency (e.g. 2 kHz) +# you will likely want to downsample to lighten up the size of the files you +# are working with (pragmatics). +# If you are interested in typical analysis (up to 120 Hz) you can typically +# resample your data down to 500 Hz without preventing reliable time-frequency +# exploration of your data. + raw_resample_sfreq: Optional[float] = None """ Specifies at which sampling frequency the data should be resampled. @@ -845,10 +897,6 @@ ``` """ -############################################################################### -# DECIMATION -# ---------- - epochs_decim: int = 1 """ Says how much to decimate data at the epochs level. 
@@ -867,9 +915,17 @@ """ -############################################################################### -# RENAME EXPERIMENTAL EVENTS -# -------------------------- +# ## Epoching +# +# --- +# tags: +# - preprocessing +# - epochs +# - events +# - metadata +# - resting-state +# --- + rename_events: dict = dict() """ @@ -895,10 +951,6 @@ to only get a warning instead, or `'ignore'` to ignore it completely. """ -############################################################################### -# HANDLING OF REPEATED EVENTS -# --------------------------- - event_repeated: Literal["error", "drop", "merge"] = "error" """ How to handle repeated events. We call events "repeated" if more than one event @@ -914,10 +966,6 @@ April 1st, 2021. """ -############################################################################### -# EPOCHING -# -------- - epochs_metadata_tmin: Optional[float] = None """ The beginning of the time window for metadata generation, in seconds, @@ -1032,11 +1080,6 @@ ``` """ -task_is_rest: bool = False -""" -Whether the task should be treated as resting-state data. -""" - rest_epochs_duration: Optional[float] = None """ Duration of epochs in seconds. @@ -1059,72 +1102,64 @@ ``` """ -contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = [] -""" -The conditions to contrast via a subtraction of ERPs / ERFs. The list elements -can either be tuples or dictionaries (or a mix of both). Each element in the -list corresponds to a single contrast. - -A tuple specifies a one-vs-one contrast, where the second condition is -subtracted from the first. - -If a dictionary, must contain the following keys: - -- `name`: a custom name of the contrast -- `conditions`: the conditions to contrast -- `weights`: the weights associated with each condition. +# ## Artifact removal -Pass an empty list to avoid calculation of any contrasts. 
+# ### Stimulation artifact +# +# --- +# tags: +# - preprocessing +# - artifact-removal +# - raw +# - epochs +# --- +# +# When using electric stimulation systems, e.g. for median nerve or index +# stimulation, it is frequent to have a stimulation artifact. This option +# allows to fix it by linear interpolation early in the pipeline on the raw +# data. -For the contrasts to be computed, the appropriate conditions must have been -epoched, and therefore the conditions should either match or be subsets of -`conditions` above. +fix_stim_artifact: bool = False +""" +Apply interpolation to fix stimulation artifact. ???+ example "Example" - Contrast the "left" and the "right" conditions by calculating - `left - right` at every time point of the evoked responses: ```python - contrasts = [('left', 'right')] # Note we pass a tuple inside the list! + fix_stim_artifact = False ``` +""" - Contrast the "left" and the "right" conditions within the "auditory" and - the "visual" modality, and "auditory" vs "visual" regardless of side: +stim_artifact_tmin: float = 0.0 +""" +Start time of the interpolation window in seconds. + +???+ example "Example" ```python - contrasts = [('auditory/left', 'auditory/right'), - ('visual/left', 'visual/right'), - ('auditory', 'visual')] + stim_artifact_tmin = 0. # on stim onset ``` +""" - Contrast the "left" and the "right" regardless of side, and compute an - arbitrary contrast with a gradient of weights: +stim_artifact_tmax: float = 0.01 +""" +End time of the interpolation window in seconds. 
+ +???+ example "Example" ```python - contrasts = [ - ('auditory/left', 'auditory/right'), - { - 'name': 'gradedContrast', - 'conditions': [ - 'auditory/left', - 'auditory/right', - 'visual/left', - 'visual/right' - ], - 'weights': [-1.5, -.5, .5, 1.5] - } - ] + stim_artifact_tmax = 0.01 # up to 10ms post-stimulation ``` """ -############################################################################### -# ARTIFACT REMOVAL -# ---------------- +# ### SSP, ICA, and artifact regression # -# You can choose between ICA and SSP to remove eye and heart artifacts. -# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa -# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa -# if you choose ICA, run steps 5a and 6a -# if you choose SSP, run steps 5b and 6b -# -# Currently you cannot use both. +# --- +# tags: +# - preprocessing +# - artifact-removal +# - raw +# - epochs +# - ssp +# - ica +# --- regress_artifact: Optional[dict[str, Any]] = None """ @@ -1171,9 +1206,6 @@ Minimal number of EOG epochs needed to compute SSP projectors. """ - -# Rejection based on SSP -# ~~~~~~~~~~~~~~~~~~~~~~ n_proj_eog: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1) """ Number of SSP vectors to create for EOG artifacts for each channel type. @@ -1249,8 +1281,6 @@ is not reliable. """ -# Rejection based on ICA -# ~~~~~~~~~~~~~~~~~~~~~~ ica_reject: Optional[Union[dict[str, float], Literal["autoreject_local"]]] = None """ Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows you to @@ -1388,8 +1418,21 @@ false-alarm rate increases dramatically. 
""" -# Rejection based on peak-to-peak amplitude -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# ### Amplitude-based artifact rejection +# +# --- +# tags: +# - preprocessing +# - artifact-removal +# - epochs +# --- +# +# ???+ info "Good Practice / Advice" +# Have a look at your raw data and train yourself to detect a blink, a heart +# beat and an eye movement. +# You can do a quick average of blink data and check what the amplitude looks +# like. +# reject: Optional[ Union[dict[str, float], Literal["autoreject_global", "autoreject_local"]] @@ -1471,9 +1514,83 @@ be considered (i.e., will remain marked as bad and not analyzed by autoreject). """ -############################################################################### -# DECODING -# -------- +# %% +# # Sensor-level analysis + +# ## Condition contrasts +# +# --- +# tags: +# - epochs +# - evoked +# - contrast +# --- + +contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = [] +""" +The conditions to contrast via a subtraction of ERPs / ERFs. The list elements +can either be tuples or dictionaries (or a mix of both). Each element in the +list corresponds to a single contrast. + +A tuple specifies a one-vs-one contrast, where the second condition is +subtracted from the first. + +If a dictionary, must contain the following keys: + +- `name`: a custom name of the contrast +- `conditions`: the conditions to contrast +- `weights`: the weights associated with each condition. + +Pass an empty list to avoid calculation of any contrasts. + +For the contrasts to be computed, the appropriate conditions must have been +epoched, and therefore the conditions should either match or be subsets of +`conditions` above. + +???+ example "Example" + Contrast the "left" and the "right" conditions by calculating + `left - right` at every time point of the evoked responses: + ```python + contrasts = [('left', 'right')] # Note we pass a tuple inside the list! 
+ ``` + + Contrast the "left" and the "right" conditions within the "auditory" and + the "visual" modality, and "auditory" vs "visual" regardless of side: + ```python + contrasts = [('auditory/left', 'auditory/right'), + ('visual/left', 'visual/right'), + ('auditory', 'visual')] + ``` + + Contrast the "left" and the "right" regardless of side, and compute an + arbitrary contrast with a gradient of weights: + ```python + contrasts = [ + ('auditory/left', 'auditory/right'), + { + 'name': 'gradedContrast', + 'conditions': [ + 'auditory/left', + 'auditory/right', + 'visual/left', + 'visual/right' + ], + 'weights': [-1.5, -.5, .5, 1.5] + } + ] + ``` +""" + +# ## Decoding / MVPA +# +# --- +# tags: +# - epochs +# - evoked +# - contrast +# - decoding +# - mvpa +# --- decode: bool = True """ @@ -1608,78 +1725,6 @@ [`cluster_forming_t_threshold`][mne_bids_pipeline._config.cluster_forming_t_threshold]. """ -############################################################################### -# GROUP AVERAGE SENSORS -# --------------------- - -interpolate_bads_grand_average: bool = True -""" -Interpolate bad sensors in each dataset before calculating the grand -average. This parameter is passed to the `mne.grand_average` function via -the keyword argument `interpolate_bads`. It requires to have channel -locations set. - -???+ example "Example" - ```python - interpolate_bads_grand_average = True - ``` -""" - -############################################################################### -# TIME-FREQUENCY -# -------------- - -time_frequency_conditions: Sequence[str] = [] -""" -The conditions to compute time-frequency decomposition on. - -???+ example "Example" - ```python - time_frequency_conditions = ['left', 'right'] - ``` -""" - -time_frequency_freq_min: Optional[float] = 8 -""" -Minimum frequency for the time frequency analysis, in Hz. 
-???+ example "Example" - ```python - time_frequency_freq_min = 0.3 # 0.3 Hz - ``` -""" - -time_frequency_freq_max: Optional[float] = 40 -""" -Maximum frequency for the time frequency analysis, in Hz. -???+ example "Example" - ```python - time_frequency_freq_max = 22.3 # 22.3 Hz - ``` -""" - -time_frequency_cycles: Optional[Union[float, FloatArrayLike]] = None -""" -The number of cycles to use in the Morlet wavelet. This can be a single number -or one per frequency, where frequencies are calculated via -`np.arange(time_frequency_freq_min, time_frequency_freq_max)`. -If `None`, uses -`np.arange(time_frequency_freq_min, time_frequency_freq_max) / 3`. -""" - -time_frequency_subtract_evoked: bool = False -""" -Whether to subtract the evoked response (averaged across all epochs) from the -epochs before passing them to time-frequency analysis. Set this to `True` to -highlight induced activity. - -!!! info - This also applies to CSP analysis. -""" - -############################################################################### -# TIME-FREQUENCY CSP -# ------------------ - decoding_csp: bool = False """ Whether to run decoding via Common Spatial Patterns (CSP) analysis on the @@ -1752,6 +1797,62 @@ } """ +# ## Time-frequency analysis +# +# --- +# tags: +# - epochs +# - evoked +# - time-frequency +# --- + +time_frequency_conditions: Sequence[str] = [] +""" +The conditions to compute time-frequency decomposition on. + +???+ example "Example" + ```python + time_frequency_conditions = ['left', 'right'] + ``` +""" + +time_frequency_freq_min: Optional[float] = 8 +""" +Minimum frequency for the time frequency analysis, in Hz. +???+ example "Example" + ```python + time_frequency_freq_min = 0.3 # 0.3 Hz + ``` +""" + +time_frequency_freq_max: Optional[float] = 40 +""" +Maximum frequency for the time frequency analysis, in Hz. 
+???+ example "Example" + ```python + time_frequency_freq_max = 22.3 # 22.3 Hz + ``` +""" + +time_frequency_cycles: Optional[Union[float, FloatArrayLike]] = None +""" +The number of cycles to use in the Morlet wavelet. This can be a single number +or one per frequency, where frequencies are calculated via +`np.arange(time_frequency_freq_min, time_frequency_freq_max)`. +If `None`, uses +`np.arange(time_frequency_freq_min, time_frequency_freq_max) / 3`. +""" + +time_frequency_subtract_evoked: bool = False +""" +Whether to subtract the evoked response (averaged across all epochs) from the +epochs before passing them to time-frequency analysis. Set this to `True` to +highlight induced activity. + +!!! info + This also applies to CSP analysis. +""" + time_frequency_baseline: Optional[tuple[float, float]] = None """ Baseline period to use for the time-frequency analysis. If `None`, no baseline. @@ -1782,16 +1883,52 @@ ``` """ -############################################################################### -# SOURCE ESTIMATION PARAMETERS -# ---------------------------- +# ## Group-level analysis +# +# --- +# tags: +# - evoked +# - group-level +# --- + + +interpolate_bads_grand_average: bool = True +""" +Interpolate bad sensors in each dataset before calculating the grand +average. This parameter is passed to the `mne.grand_average` function via +the keyword argument `interpolate_bads`. It requires to have channel +locations set. + +???+ example "Example" + ```python + interpolate_bads_grand_average = True + ``` +""" + +# %% +# # Source-level analysis + +# ## General settings # +# --- +# tags: +# - inverse-solution +# --- run_source_estimation: bool = True """ Whether to run source estimation processing steps if not explicitly requested. """ +# ## BEM surface +# +# --- +# tags: +# - inverse-solution +# - bem +# - freesurfer +# --- + use_template_mri: Optional[str] = None """ Whether to use a template MRI subject such as FreeSurfer's `fsaverage` subject. 
@@ -1864,6 +2001,14 @@ Whether to print the complete output of FreeSurfer commands. Note that if `False`, no FreeSurfer output might be displayed at all!""" +# ## Source space & forward solution +# +# --- +# tags: +# - inverse-solution +# - forward-model +# --- + mri_t1_path_generator: Optional[Callable[[BIDSPath], BIDSPath]] = None """ To perform source-level analyses, the Pipeline needs to generate a @@ -1956,6 +2101,14 @@ def mri_landmarks_kind(bids_path): Exclude points closer than this distance (mm) to the bounding surface. """ +# ## Inverse solution +# +# --- +# tags: +# - inverse-solution +# --- + + loose: Union[float, Literal["auto"]] = 0.2 """ Value that weights the source variances of the dipole components @@ -2103,9 +2256,16 @@ def noise_cov(bids_path): ``` """ -############################################################################### -# Report generation -# ----------------- +# %% +# # Reports + +# ## Report generation +# +# --- +# tags: +# - report +# --- + report_evoked_n_time_points: Optional[int] = None """ @@ -2131,9 +2291,8 @@ def noise_cov(bids_path): ``` """ -############################################################################### -# Execution -# --------- +# %% +# # Execution n_jobs: int = 1 """ diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py index dd90f7ad5..dcea2d4c8 100644 --- a/mne_bids_pipeline/tests/test_documented.py +++ b/mne_bids_pipeline/tests/test_documented.py @@ -2,6 +2,7 @@ import ast import os import re +import sys from pathlib import Path import yaml @@ -29,14 +30,15 @@ def test_options_documented(): config_names = set(d for d in dir(config) if not d.startswith("_")) assert in_config == config_names settings_path = root_path.parent / "docs" / "source" / "settings" + sys.path.append(str(settings_path)) + try: + from gen_settings import main + finally: + sys.path.pop() + main() assert settings_path.is_dir() in_doc = set() key = " - " - allowed_duplicates = set( - [ 
- "source_info_path_update", - ] - ) for dirpath, _, fnames in os.walk(settings_path): for fname in fnames: if not fname.endswith(".md"): @@ -48,8 +50,7 @@ def test_options_documented(): continue # The line starts with our magic key val = line[len(key) :].strip() - if val not in allowed_duplicates: - assert val not in in_doc, "Duplicate documentation" + assert val not in in_doc, "Duplicate documentation" in_doc.add(val) what = "docs/source/settings doc" assert in_doc.difference(in_config) == set(), f"Extra values in {what}"