-
Notifications
You must be signed in to change notification settings - Fork 67
/
_config.py
2355 lines (1959 loc) · 79.1 KB
/
_config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Default settings for data processing and analysis.
from collections.abc import Callable, Sequence
from typing import Annotated, Any, Literal
from annotated_types import Ge, Interval, Len, MinLen
from mne import Covariance
from mne_bids import BIDSPath
from mne_bids_pipeline.typing import (
ArbitraryContrast,
DigMontageType,
FloatArrayLike,
PathLike,
)
# %%
# # General settings
bids_root: PathLike | None = None
"""
Specify the BIDS root directory. Pass an empty string or `None` to use
the value specified in the `BIDS_ROOT` environment variable instead.
Raises an exception if the BIDS root has not been specified.
???+ example "Example"
``` python
bids_root = '/path/to/your/bids_root' # Use this to specify a path here.
bids_root = None # Make use of the `BIDS_ROOT` environment variable.
```
"""
deriv_root: PathLike | None = None
"""
The root of the derivatives directory in which the pipeline will store
the processing results. If `None`, this will be
`derivatives/mne-bids-pipeline` inside the BIDS root.
!!! info
If specified and you wish to run the source analysis steps, you must
set [`subjects_dir`][mne_bids_pipeline._config.subjects_dir] as well.
"""
subjects_dir: PathLike | None = None
"""
Path to the directory that contains the FreeSurfer reconstructions of all
subjects. Specifically, this defines the `SUBJECTS_DIR` that is used by
FreeSurfer.
- When running the `freesurfer` processing step to create the
reconstructions from anatomical scans in the BIDS dataset, the
output will be stored in this directory.
- When running the source analysis steps, we will look for the surfaces in this
directory and also store the BEM surfaces there.
If `None`, this will default to
[`bids_root`][mne_bids_pipeline._config.bids_root]`/derivatives/freesurfer/subjects`.
!!! info
This setting is required if you specify
[`deriv_root`][mne_bids_pipeline._config.deriv_root]
and want to run the source analysis steps.
"""
interactive: bool = False
"""
If True, the steps will provide some interactive elements, such as
figures. If running the steps from a notebook or Spyder,
run `%matplotlib qt` in the command line to open the figures in a separate
window.
!!! info
Enabling interactive mode deactivates parallel processing.
"""
sessions: list[str] | Literal["all"] = "all"
"""
The sessions to process. If `'all'`, will process all sessions found in the
BIDS dataset.
"""
allow_missing_sessions: bool = False
"""
Whether to continue processing the dataset if some combinations of `subjects` and
`sessions` are missing.
"""
task: str = ""
"""
The task to process.
"""
task_is_rest: bool = False
"""
Whether the task should be treated as resting-state data.
"""
runs: Sequence[str] | Literal["all"] = "all"
"""
The runs to process. If `'all'`, will process all runs found in the
BIDS dataset.
"""
exclude_runs: dict[str, list[str]] | None = None
"""
Specify runs to exclude from analysis, for each participant individually.
???+ example "Example"
```python
exclude_runs = None # Include all runs.
exclude_runs = {'01': ['02']} # Exclude run 02 of subject 01.
```
???+ info "Good Practice / Advice"
Keep track of the criteria leading you to exclude
a run (e.g. too many movements, missing blocks, aborted experiment,
did not understand the instructions, etc.).
"""
crop_runs: tuple[float, float] | None = None
"""
Crop the raw data of each run to the specified time interval `[tmin, tmax]`,
in seconds. The runs will be cropped before Maxwell or frequency filtering is
applied. If `None`, do not crop the data.
"""
acq: str | None = None
"""
The BIDS `acquisition` entity.
"""
proc: str | None = None
"""
The BIDS `processing` entity.
"""
rec: str | None = None
"""
The BIDS `recording` entity.
"""
space: str | None = None
"""
The BIDS `space` entity.
"""
subjects: Sequence[str] | Literal["all"] = "all"
"""
Subjects to analyze. If `'all'`, include all subjects. To only
include a subset of subjects, pass a list of their identifiers. Even
if you plan on analyzing only a single subject, pass their identifier
as a list.
Please note that if you intend to EXCLUDE only a few subjects, you
should consider setting `subjects = 'all'` and adding the
identifiers of the excluded subjects to `exclude_subjects` (see next
section).
???+ example "Example"
```python
subjects = 'all' # Include all subjects.
subjects = ['05'] # Only include subject 05.
subjects = ['01', '02'] # Only include subjects 01 and 02.
```
"""
exclude_subjects: Sequence[str] = []
"""
Specify subjects to exclude from analysis. The MEG empty-room mock-subject
is automatically excluded from regular analysis.
???+ info "Good Practice / Advice"
Keep track of the criteria leading you to exclude
a participant (e.g. too many movements, missing blocks, aborted experiment,
did not understand the instructions, etc.).
The `emptyroom` subject will be excluded automatically.
"""
process_empty_room: bool = True
"""
Whether to apply the same pre-processing steps to the empty-room data as
to the experimental data (up until including frequency filtering). This
is required if you wish to use the empty-room recording to estimate noise
covariance (via `noise_cov='emptyroom'`). The empty-room recording
corresponding to the processed experimental data will be retrieved
automatically.
"""
process_rest: bool = True
"""
Whether to apply the same pre-processing steps to the resting-state data as
to the experimental data (up until including frequency filtering). This
is required if you wish to use the resting-state recording to estimate noise
covariance (via `noise_cov='rest'`).
"""
ch_types: Annotated[Sequence[Literal["meg", "mag", "grad", "eeg"]], Len(1, 4)] = []
"""
The channel types to consider.
???+ example "Example"
```python
# Use EEG channels:
ch_types = ['eeg']
# Use magnetometer and gradiometer MEG channels:
ch_types = ['mag', 'grad']
# Use MEG and EEG channels:
ch_types = ['meg', 'eeg']
```
"""
data_type: Literal["meg", "eeg"] | None = None
"""
The BIDS data type.
For MEG recordings, this will usually be 'meg'; and for EEG, 'eeg'.
However, if your dataset contains simultaneous recordings of MEG and EEG,
stored in a single file, you will typically need to set this to 'meg'.
If `None`, we will assume that the data type matches the channel type.
???+ example "Example"
The dataset contains simultaneous recordings of MEG and EEG, and we only
wish to process the EEG data, which is stored inside the MEG files:
```python
ch_types = ['eeg']
data_type = 'meg'
```
The dataset contains simultaneous recordings of MEG and EEG, and we only
wish to process the gradiometer data:
```python
ch_types = ['grad']
data_type = 'meg' # or data_type = None
```
The dataset contains only EEG data:
```python
ch_types = ['eeg']
data_type = 'eeg' # or data_type = None
```
"""
eog_channels: Sequence[str] | None = None
"""
Specify EOG channels to use, or create virtual EOG channels.
Allows the specification of custom channel names that shall be used as
(virtual) EOG channels. For example, say you recorded EEG **without** dedicated
EOG electrodes, but with some EEG electrodes placed close to the eyes, e.g.
Fp1 and Fp2. These channels can be expected to have captured large quantities
of ocular activity, and you might want to use them as "virtual" EOG channels,
while also including them in the EEG analysis. By default, MNE won't know that
these channels are suitable for recovering EOG, and hence won't be able to
perform tasks like automated blink removal, unless a "true" EOG sensor is
present in the data as well. Specifying channel names here allows MNE to find
the respective EOG signals based on these channels.
You can specify one or multiple channel names. Each will be treated as if it
were a dedicated EOG channel, without excluding it from any other analyses.
If `None`, only actual EOG channels will be used for EOG recovery.
If there are multiple actual EOG channels in your data, and you only specify
a subset of them here, only this subset will be used during processing.
???+ example "Example"
Treat `Fp1` as virtual EOG channel:
```python
eog_channels = ['Fp1']
```
Treat `Fp1` and `Fp2` as virtual EOG channels:
```python
eog_channels = ['Fp1', 'Fp2']
```
"""
eeg_bipolar_channels: dict[str, tuple[str, str]] | None = None
"""
Combine two channels into a bipolar channel, whose signal is the **difference**
between the two combined channels, and add it to the data.
A typical use case is the combination of two EOG channels – for example, a
left and a right horizontal EOG – into a single, bipolar EOG channel. You need
to pass a dictionary whose **keys** are the name of the new bipolar channel you
wish to create, and whose **values** are tuples consisting of two strings: the
name of the channel acting as anode and the name of the channel acting as
cathode, i.e. `{'ch_name': ('anode', 'cathode')}`. You can request
to construct more than one bipolar channel by specifying multiple key/value
pairs. See the examples below.
Can also be `None` if you do not want to create bipolar channels.
!!! info
The channels used to create the bipolar channels are **not** automatically
dropped from the data. To drop channels, set `drop_channels`.
???+ example "Example"
Combine the existing channels `HEOG_left` and `HEOG_right` into a new,
bipolar channel, `HEOG`:
```python
eeg_bipolar_channels = {'HEOG': ('HEOG_left', 'HEOG_right')}
```
Create two bipolar channels, `HEOG` and `VEOG`:
```python
eeg_bipolar_channels = {'HEOG': ('HEOG_left', 'HEOG_right'),
'VEOG': ('VEOG_lower', 'VEOG_upper')}
```
"""
eeg_reference: Literal["average"] | str | Sequence["str"] = "average"
"""
The EEG reference to use. If `average`, will use the average reference,
i.e. the average across all channels. If a string, must be the name of a single
channel. To use multiple channels as reference, set to a list of channel names.
???+ example "Example"
Use the average reference:
```python
eeg_reference = 'average'
```
Use the `P9` channel as reference:
```python
eeg_reference = 'P9'
```
Use the average of the `P9` and `P10` channels as reference:
```python
eeg_reference = ['P9', 'P10']
```
"""
eeg_template_montage: str | DigMontageType | None = None
"""
In situations where you wish to process EEG data and no individual
digitization points (measured channel locations) are available, you can apply
a "template" montage. This means we will assume the EEG cap was placed
either according to an international system like 10/20, or as suggested by
the cap manufacturers in their respective manual.
Please be aware that the actual cap placement most likely deviated somewhat
from the template, and, therefore, source reconstruction may be impaired.
If `None`, do not apply a template montage. If a string, must be the
name of a built-in template montage in MNE-Python.
You can find an overview of supported template montages at
https://mne.tools/stable/generated/mne.channels.make_standard_montage.html
!!! warning
If the data contains channel names that are not part of the template montage, the
pipeline run will fail with an error message. You must either pick a different
montage or remove those channels via
[`drop_channels`][mne_bids_pipeline._config.drop_channels] to continue.
???+ example "Example"
Do not apply template montage:
```python
eeg_template_montage = None
```
Apply 64-channel Biosemi 10/20 template montage:
```python
eeg_template_montage = 'biosemi64'
```
"""
drop_channels: Sequence[str] = []
"""
Names of channels to remove from the data. This can be useful, for example,
if you have added a new bipolar channel via `eeg_bipolar_channels` and now wish
to remove the anode, cathode, or both; or if your selected EEG template montage
doesn't contain coordinates for some channels.
???+ example "Example"
Exclude channels `Fp1` and `Cz` from processing:
```python
drop_channels = ['Fp1', 'Cz']
```
"""
analyze_channels: Literal["all", "ch_types"] | Annotated[Sequence["str"], MinLen(1)] = (
"ch_types"
)
"""
The names of the channels to analyze during ERP/ERF and time-frequency analysis
steps. For certain paradigms, e.g. EEG ERP research, it is common to constrain
sensor-space analysis to only a few specific sensors. If `'all'`, do not
exclude any channels (except for those selected for removal via the
`drop_channels` setting; use with caution as this can include things like STIM
channels during the decoding step). If 'ch_types' (default), restrict to the
channels listed in the `ch_types` parameter. The constraint will be applied to
all sensor-level analyses after the preprocessing stage, but not to the
preprocessing stage itself, nor to the source analysis stage.
???+ example "Example"
Only use channel `Pz` for ERP, evoked contrasts, time-by-time
decoding, and time-frequency analysis:
```python
analyze_channels = ['Pz']
```
"""
reader_extra_params: dict[str, Any] = {}
"""
Parameters to be passed to `read_raw_bids()` calls when importing raw data.
???+ example "Example"
Enforce units for EDF files:
```python
reader_extra_params = {"units": "uV"}
```
"""
read_raw_bids_verbose: Literal["error"] | None = None
"""
Verbosity level to pass to `read_raw_bids(..., verbose=read_raw_bids_verbose)`.
If you know your dataset will contain files that are not perfectly BIDS
compliant (e.g., "Did not find any meg.json..."), you can set this to
`'error'` to suppress warnings emitted by read_raw_bids.
"""
plot_psd_for_runs: Literal["all"] | Sequence[str] = "all"
"""
For which runs to add a power spectral density (PSD) plot to the generated
report. This can take a considerable amount of time if you have many long
runs. In this case, specify the runs, or pass an empty list to disable raw PSD
plotting.
"""
random_state: int | None = 42
"""
You can specify the seed of the random number generator (RNG).
This setting is passed to the ICA algorithm and to the decoding function,
ensuring reproducible results. Set to `None` to avoid setting the RNG
to a defined state.
"""
# %%
# # Preprocessing
# ## Break detection
find_breaks: bool = False
"""
During an experimental run, the recording might be interrupted by breaks of
various durations, e.g. to allow the participant to stretch, blink, and swallow
freely. During these periods, large-scale artifacts are often picked up by the
recording system. These artifacts can impair certain stages of processing, e.g.
the peak-detection algorithms we use to find EOG and ECG activity. In some
cases, even the bad channel detection algorithms might not function optimally.
It is therefore advisable to mark such break periods for exclusion at early
processing stages.
If `True`, try to mark breaks by finding segments of the data where no
experimental events have occurred. This will then add annotations with the
description `BAD_break` to the continuous data, causing these segments to be
ignored in all following processing steps.
???+ example "Example"
Automatically find break periods, and annotate them as `BAD_break`.
```python
find_breaks = True
```
Disable break detection.
```python
find_breaks = False
```
"""
min_break_duration: float = 15.0
"""
The minimal duration (in seconds) of a data segment without any experimental
events for it to be considered a "break". Note that the minimal duration of the
generated `BAD_break` annotation will typically be smaller than this, as by
default, the annotation will not extend across the entire break.
See [`t_break_annot_start_after_previous_event`][mne_bids_pipeline._config.t_break_annot_start_after_previous_event]
and [`t_break_annot_stop_before_next_event`][mne_bids_pipeline._config.t_break_annot_stop_before_next_event]
to control this behavior.
???+ example "Example"
Periods between two consecutive experimental events must span at least
`15` seconds for this period to be considered a "break".
```python
min_break_duration = 15.
```
""" # noqa : E501
t_break_annot_start_after_previous_event: float = 5.0
"""
Once a break of at least
[`min_break_duration`][mne_bids_pipeline._config.min_break_duration]
seconds has been discovered, we generate a `BAD_break` annotation that does not
necessarily span the entire break period. Instead, you will typically want to
start it some time after the last event before the break period, as to not
unnecessarily discard brain activity immediately following that event.
This parameter controls how much time (in seconds) should pass after the last
pre-break event before we start annotating the following segment of the break
period as bad.
???+ example "Example"
Once a break period has been detected, add a `BAD_break` annotation to it,
starting `5` seconds after the latest pre-break event.
```python
t_break_annot_start_after_previous_event = 5.
```
Start the `BAD_break` annotation immediately after the last pre-break
event.
```python
t_break_annot_start_after_previous_event = 0.
```
"""
t_break_annot_stop_before_next_event: float = 5.0
"""
Similarly to how
[`t_break_annot_start_after_previous_event`][mne_bids_pipeline._config.t_break_annot_start_after_previous_event]
controls the "gap" between beginning of the break period and `BAD_break`
annotation onset, this parameter controls how far the annotation should extend
toward the first experimental event immediately following the break period
(in seconds). This can help not to waste a post-break trial by marking its
pre-stimulus period as bad.
???+ example "Example"
Once a break period has been detected, add a `BAD_break` annotation to it,
ending `5` seconds before the earliest post-break event.
```python
t_break_annot_stop_before_next_event = 5.
```
Extend the `BAD_break` annotation up until immediately before the first
post-break event.
```python
t_break_annot_stop_before_next_event = 0.
```
"""
# %%
# ## Bad channel detection
#
# !!! warning
# This functionality will soon be removed from the pipeline, and
# will be integrated into MNE-BIDS.
#
# "Bad", i.e. flat and overly noisy channels, can be automatically detected
# using a procedure inspired by the commercial MaxFilter by Elekta. First,
# a copy of the data is low-pass filtered at 40 Hz. Then, channels with
# unusually low variability are flagged as "flat", while channels with
# excessively high variability are flagged as "noisy". Flat and noisy channels
# are marked as "bad" and excluded from subsequent analysis. See
# :func:`mne.preprocessing.find_bad_channels_maxwell` for more information
# on this procedure. The list of bad channels detected through this procedure
# will be merged with the list of bad channels already present in the dataset,
# if any.
find_flat_channels_meg: bool = False
"""
Auto-detect "flat" channels (i.e. those with unusually low variability) and
mark them as bad.
"""
find_noisy_channels_meg: bool = False
"""
Auto-detect "noisy" channels and mark them as bad.
"""
# %%
# ## Maxwell filter
use_maxwell_filter: bool = False
"""
Whether or not to use Maxwell filtering to preprocess the data.
!!! warning
If the data were recorded with internal active compensation (MaxShield),
they need to be run through Maxwell filter to avoid distortions.
Bad channels need to be set through BIDS channels.tsv and / or via the
`find_flat_channels_meg` and `find_noisy_channels_meg` options above
before applying Maxwell filter.
"""
mf_st_duration: float | None = None
"""
There are two kinds of Maxwell filtering: SSS (signal space separation) and
tSSS (temporal signal space separation)
(see [Taulu et al., 2004](http://cds.cern.ch/record/709081/files/0401166.pdf)).
If not None, apply spatiotemporal SSS (tSSS) with specified buffer
duration (in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
Spatiotemporal SSS acts implicitly as a high-pass filter where the
cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
buffers are generally better as long as your system can handle the
higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
???+ info "Good Practice / Advice"
If you are interested in low frequency activity (<0.1Hz), avoid using
tSSS and set `mf_st_duration` to `None`.
If you are interested in low frequency activity above 0.1 Hz, you can use the
default `mf_st_duration` of 10 s, meaning it acts like a 0.1 Hz
high-pass filter.
???+ example "Example"
```python
mf_st_duration = None
mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.
```
"""
mf_st_correlation: float = 0.98
"""
The correlation limit for spatio-temporal SSS (tSSS).
???+ example "Example"
```python
mf_st_correlation = 0.98
```
"""
mf_head_origin: Literal["auto"] | FloatArrayLike = "auto"
"""
`mf_head_origin` : array-like, shape (3,) | 'auto'
Origin of internal and external multipolar moment space in meters.
If 'auto', it will be estimated from headshape points.
If automatic fitting fails (e.g., due to having too few digitization
points), consider separately calling the fitting function with different
options or specifying the origin manually.
???+ example "Example"
```python
mf_head_origin = 'auto'
```
"""
mf_destination: Literal["reference_run"] | FloatArrayLike = "reference_run"
"""
Despite all possible care to avoid movements in the MEG, the participant
will likely slowly drift down from the Dewar or slightly shift the head
around in the course of the recording session. Hence, to take this into
account, we are realigning all data to a single position. For this, you can:
1. Choose a reference run. Often one from the middle of the recording session
is a good choice. Set `mf_destination = "reference_run"` and then set
[`config.mf_reference_run`][mne_bids_pipeline._config.mf_reference_run].
This will result in a device-to-head transformation that differs between
subjects.
2. Choose a standard position in the MEG coordinate frame. For this, pass
a 4x4 transformation matrix for the device-to-head
transform. This will result in a device-to-head transformation that is
the same across all subjects.
???+ example "A Standardized Position"
```python
from mne.transforms import translation
mf_destination = translation(z=0.04)
```
"""
mf_int_order: int = 8
"""
Internal order for the Maxwell basis. Can be set to something lower (e.g., 6)
or higher for datasets where lower or higher spatial complexity, respectively,
is expected.
"""
mf_reference_run: str | None = None
"""
Which run to take as the reference for adjusting the head position of all
runs when [`mf_destination="reference_run"`][mne_bids_pipeline._config.mf_destination].
If `None`, pick the first run.
???+ example "Example"
```python
mf_reference_run = '01' # Use run "01"
```
"""
mf_cal_fname: str | None = None
"""
!!! warning
This parameter should only be used for BIDS datasets that don't store
the fine-calibration file
[according to BIDS](https://bids-specification.readthedocs.io/en/stable/99-appendices/06-meg-file-formats.html#cross-talk-and-fine-calibration-files).
Path to the Maxwell Filter calibration file. If `None`, the recommended
location is used.
???+ example "Example"
```python
mf_cal_fname = '/path/to/your/file/calibration_cal.dat'
```
""" # noqa : E501
mf_ctc_fname: str | None = None
"""
Path to the Maxwell Filter cross-talk file. If `None`, the recommended
location is used.
!!! warning
This parameter should only be used for BIDS datasets that don't store
the cross-talk file
[according to BIDS](https://bids-specification.readthedocs.io/en/stable/99-appendices/06-meg-file-formats.html#cross-talk-and-fine-calibration-files).
???+ example "Example"
```python
mf_ctc_fname = '/path/to/your/file/crosstalk_ct.fif'
```
""" # noqa : E501
mf_esss: int = 0
"""
Number of extended SSS (eSSS) basis projectors to use from empty-room data.
"""
mf_esss_reject: dict[str, float] | None = None
"""
Rejection parameters to use when computing the extended SSS (eSSS) basis.
"""
mf_mc: bool = False
"""
If True, perform movement compensation on the data.
"""
mf_mc_t_step_min: float = 0.01
"""
Minimum time step to use during cHPI coil amplitude estimation.
"""
mf_mc_t_window: float | Literal["auto"] = "auto"
"""
The window to use during cHPI coil amplitude estimation and in cHPI filtering.
Can be "auto" to autodetect a reasonable value or a float (in seconds).
"""
mf_mc_gof_limit: float = 0.98
"""
Minimum goodness of fit to accept for each cHPI coil.
"""
mf_mc_dist_limit: float = 0.005
"""
Minimum distance (m) to accept for cHPI position fitting.
"""
mf_mc_rotation_velocity_limit: float | None = None
"""
The rotation velocity limit (degrees/second) to use when annotating
movement-compensated data. If `None`, no annotations will be added.
"""
mf_mc_translation_velocity_limit: float | None = None
"""
The translation velocity limit (meters/second) to use when annotating
movement-compensated data. If `None`, no annotations will be added.
"""
mf_filter_chpi: bool | None = None
"""
Use mne.chpi.filter_chpi after Maxwell filtering. Can be None to use
the same value as [`mf_mc`][mne_bids_pipeline._config.mf_mc].
Only used when [`use_maxwell_filter=True`][mne_bids_pipeline._config.use_maxwell_filter]
""" # noqa: E501
# ## Filtering & resampling
# ### Filtering
#
# It is typically better to set your filtering properties on the raw data so
# as to avoid what we call border (or edge) effects.
#
# If you use this pipeline for evoked responses, you could consider
# a low-pass filter cut-off of h_freq = 40 Hz
# and possibly a high-pass filter cut-off of l_freq = 1 Hz
# so you would preserve only the power in the 1Hz to 40 Hz band.
# Note that highpass filtering is not necessarily recommended as it can
# distort waveforms of evoked components, or simply wash out any low
# frequency that may contain brain signal. It can also act as
# a replacement for baseline correction in Epochs. See below.
#
# If you use this pipeline for time-frequency analysis, a default filtering
# could be a high-pass filter cut-off of l_freq = 1 Hz
# a low-pass filter cut-off of h_freq = 120 Hz
# so you would preserve only the power in the 1Hz to 120 Hz band.
#
# If you need more fancy analysis, you are already likely past this kind
# of tips! 😇
l_freq: float | None = None
"""
The low-frequency cut-off in the highpass filtering step.
Keep it `None` if no highpass filtering should be applied.
"""
h_freq: float | None = 40.0
"""
The high-frequency cut-off in the lowpass filtering step.
Keep it `None` if no lowpass filtering should be applied.
"""
l_trans_bandwidth: float | Literal["auto"] = "auto"
"""
Specifies the transition bandwidth of the
highpass filter. By default it's `'auto'` and uses default MNE
parameters.
"""
h_trans_bandwidth: float | Literal["auto"] = "auto"
"""
Specifies the transition bandwidth of the
lowpass filter. By default it's `'auto'` and uses default MNE
parameters.
"""
notch_freq: float | Sequence[float] | None = None
"""
Notch filter frequency. More than one frequency can be supplied, e.g. to remove
harmonics. Keep it `None` if no notch filter should be applied.
!!! info
The notch filter will be applied before high- and lowpass filtering.
???+ example "Example"
Remove line noise at 50 Hz:
```python
notch_freq = 50
```
Remove line noise at 50 Hz and its (sub-)harmonics
```python
notch_freq = [25, 50, 100, 150]
```
"""
notch_trans_bandwidth: float = 1.0
"""
Specifies the transition bandwidth of the notch filter. The default is `1.`.
"""
notch_widths: float | Sequence[float] | None = None
"""
Specifies the width of each stop band. `None` uses the MNE default.
"""
# ### Resampling
#
# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)
# you will likely want to downsample to lighten up the size of the files you
# are working with (pragmatics)
# If you are interested in typical analysis (up to 120 Hz) you can typically
# resample your data down to 500 Hz without preventing reliable time-frequency
# exploration of your data.
raw_resample_sfreq: float | None = None
"""
Specifies at which sampling frequency the data should be resampled.
If `None`, then no resampling will be done.
???+ example "Example"
```python
raw_resample_sfreq = None # no resampling
raw_resample_sfreq = 500 # resample to 500Hz
```
"""
epochs_decim: int = 1
"""
Says how much to decimate data at the epochs level.
It is typically an alternative to the `raw_resample_sfreq` parameter that
can be used for resampling raw data. `1` means no decimation.
???+ info "Good Practice / Advice"
Decimation requires the data to be lowpass filtered to avoid aliasing.
Note that using decimation is much faster than resampling.
???+ example "Example"
```python
epochs_decim = 1 # no decimation
epochs_decim = 4 # decimate by 4, i.e., divide sampling frequency by 4
```
"""
# ## Epoching
rename_events: dict[str, str] = dict()
"""
A dictionary specifying which events in the BIDS dataset to rename upon
loading, and before processing begins.
Pass an empty dictionary to not perform any renaming.
???+ example "Example"
Rename `audio_left` in the BIDS dataset to `audio/left` in the
pipeline:
```python
rename_events = {'audio_left': 'audio/left'}
```
"""
on_rename_missing_events: Literal["ignore", "warn", "raise"] = "raise"
"""
How to handle the situation where you specified an event to be renamed via
`rename_events`, but this particular event is not present in the data. By
default, we will raise an exception to avoid accidental mistakes due to typos;
however, if you're sure what you're doing, you may change this to `'warn'`
to only get a warning instead, or `'ignore'` to ignore it completely.
"""
event_repeated: Literal["error", "drop", "merge"] = "error"
"""
How to handle repeated events. We call events "repeated" if more than one event
occurred at the exact same time point. Currently, MNE-Python cannot handle
this situation gracefully when trying to create epochs, and will throw an
error. To only keep the first event of that time point ("first" here referring
to the order that events appear in `*_events.tsv`), pass `'drop'`. You can also
request to create a new type of event by merging repeated events by setting
this to `'merge'`.
!!! warning
The `'merge'` option is entirely untested in the MNE BIDS Pipeline as of
April 1st, 2021.
"""
epochs_metadata_tmin: float | str | list[str] | None = None
"""
The beginning of the time window used for epochs metadata generation. This setting
controls the `tmin` value passed to
[`mne.epochs.make_metadata`](https://mne.tools/stable/generated/mne.epochs.make_metadata.html).
If a float, the time in seconds relative to the time-locked event of the respective
epoch. Negative values indicate times before, positive values indicate times after
the time-locked event.
If a string or a list of strings, the name(s) of events marking the start of the
time window.
If `None`, use the first time point of the epoch.
???+ info
Note that `None` here behaves differently than `tmin=None` in
`mne.epochs.make_metadata`. To achieve the same behavior, pass the name(s) of the
time-locked events instead.
"""
epochs_metadata_tmax: float | str | list[str] | None = None
"""
Same as `epochs_metadata_tmin`, but specifying the **end** of the time
window for metadata generation.
"""
epochs_metadata_keep_first: Sequence[str] | None = None
"""
Event groupings using hierarchical event descriptors (HEDs) for which to store
the time of the **first** occurrence of any event of this group in a new column
with the group name, and the **type** of that event in a column named after the
group, but with a `first_` prefix. If `None` (default), no event
aggregation will take place and no new columns will be created.
???+ example "Example"
Assume you have two response events types, `response/left` and
`response/right`; in some trials, both responses occur, because the
participant pressed both buttons. Now, you want to keep the first response
only. To achieve this, set
```python
epochs_metadata_keep_first = ['response']
```
This will add two new columns to the metadata: `response`, indicating
the **time** relative to the time-locked event; and `first_response`,
depicting the **type** of event (`'left'` or `'right'`).
You may also specify a grouping for multiple event types:
```python
epochs_metadata_keep_first = ['response', 'stimulus']
```
This will add the columns `response`, `first_response`, `stimulus`,
and `first_stimulus`.
"""
epochs_metadata_keep_last: Sequence[str] | None = None
"""
Same as `epochs_metadata_keep_first`, but for keeping the **last**
occurrence of matching event types. The columns indicating the event types
will be named with a `last_` instead of a `first_` prefix.
"""
epochs_metadata_query: str | None = None
"""
A [metadata query](https://mne.tools/stable/auto_tutorials/epochs/30_epochs_metadata.html)
specifying which epochs to keep. If the query fails because it refers to an
unknown metadata column, a warning will be emitted and all epochs will be kept.