changed core feature to use change in voltage, maintenance cleanup
alexrockhill committed May 2, 2021
1 parent 0ae1889 commit 525b9b9
Showing 255 changed files with 9,728 additions and 6,419 deletions.
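The commit title says the core feature now uses the change in voltage. As a rough illustration of that idea only — a minimal first-difference threshold sketch, not pd-parser's actual implementation, with the `z_thresh` and `min_gap_s` parameters made up for illustration — candidate photodiode onsets could be found like this:

import numpy as np

# Conceptual sketch only (not pd-parser's implementation): flag candidate
# photodiode onsets where the change in voltage between samples is large.
def candidate_onsets(pd_signal, sfreq, z_thresh=10, min_gap_s=0.1):
    dv = np.diff(pd_signal)                  # change in voltage per sample
    dv_z = (dv - dv.mean()) / dv.std()       # z-score the first difference
    above = np.where(dv_z > z_thresh)[0]     # samples with a sharp upward jump
    if above.size == 0:
        return above
    # keep only the first sample of each run of consecutive crossings
    keep = np.concatenate(([True], np.diff(above) > int(min_gap_s * sfreq)))
    return above[keep]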
2 changes: 1 addition & 1 deletion docs/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: a34f014baffc3a18f2e4451097838d56
config: 1986480a93c54f2f17e09825d323461e
tags: 645f666f9bcd5a90fca523b33c5a78b7
@@ -23,15 +23,15 @@
from mne.utils import _TempDir

import pd_parser
from pd_parser.parse_pd import _to_tsv
from pd_parser.parse_pd import _read_raw, _to_tsv

out_dir = _TempDir()
print(f'After running this example, you can find the data here: {out_dir}')

# simulate photodiode data
n_events = 300
prop_corrupted = 0.01
raw, beh_df, events, corrupted_indices = \
raw, beh, events, corrupted_indices = \
    pd_parser.simulate_pd_data(n_events=n_events,
                               prop_corrupted=prop_corrupted)

@@ -48,6 +48,9 @@
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)

# roundtrip so that raw is properly loaded from disk and has a filename
raw = _read_raw(fname)
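`_read_raw` is a private helper; as a sketch, a roughly equivalent roundtrip for this FIF file with the public MNE API would be:

# roughly equivalent public-API roundtrip (sketch); the example itself uses the
# private helper, presumably so other supported raw formats are handled the same way
raw = mne.io.read_raw_fif(fname, preload=True)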

###############################################################################
# Make behavior data:
#
@@ -70,12 +73,12 @@
for i in [10, 129, 232, 288]:
    response_time[i] = 'n/a' # make some no responses
# put in dictionary to be converted to tsv file
beh_df['fix_onset_time'] = beh_df['time'] + offsets
beh_df['go_time'] = go_time
beh_df['response_time'] = response_time
beh['fix_onset_time'] = beh['time'] + offsets
beh['go_time'] = go_time
beh['response_time'] = response_time
behf = op.join(out_dir, 'sub-1_task-mytask_beh.tsv')
# save behavior file out
_to_tsv(behf, beh_df)
_to_tsv(behf, beh)
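As a quick sanity check (an addition for illustration, not in the original example), the saved behavior file can be read back with the private `_read_tsv` helper that the audio example on this page imports; it appears to return a dict of column lists:

from pd_parser.parse_pd import _read_tsv  # private helper, used in the audio example

beh_check = _read_tsv(behf)
print(list(beh_check.keys()))        # expect 'time', 'fix_onset_time', 'go_time', 'response_time', ...
print(beh_check['response_time'][:5])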

###############################################################################
# Use the interactive graphical user interface (GUI) to find parameters:
@@ -86,21 +89,21 @@
# interact with the photodiode data to pick reasonable parameters by
# following the instructions.

pd_parser.find_pd_params(fname, pd_ch_names=['pd'])
pd_parser.find_pd_params(raw, pd_ch_names=['pd'])

###############################################################################
# Find the photodiode events relative to the behavioral timing of interest:
#
# This function will use the default parameters or the parameters you
# found from :func:`pd_parser.find_pd_parameters` to find and align the
# photodiode events, excluding events that were off because the commuter
# photodiode events, excluding events that were off because the computer
# hung up on computation, for instance. That data is saved in the same folder
# as the raw file (in this case, a temporary directory generated by
# :func:`_TempDir`). The data can be used directly, or it can be accessed via
# :func:`pd_parser.pd_parser_save_to_bids` to store it in the brain imaging
# data structure (BIDS) standardized format before using it.

pd_parser.parse_pd(fname, behf=behf, pd_ch_names=['pd'], max_len=1.5)
pd_parser.parse_pd(raw, beh=beh, pd_ch_names=['pd'], max_len=1.5)
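A minimal check of what `parse_pd` just saved (an illustrative addition, mirroring the `_load_data` usage in the event-recovery example further down this page):

from pd_parser.parse_pd import _load_data  # private helper, as used in the recovery example

annot = _load_data(fname)[0]               # the saved annotations
print(f'{len(annot.onset)} photodiode events found, first at {annot.onset[0]:.3f} s')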

###############################################################################
# Add events relative to the photodiode events:
@@ -116,9 +119,9 @@
# Note: if more than one photodiode event is used, the parser can be
# used for each event separately using the keyword `add_event=True`.

pd_parser.add_pd_relative_events(
    fname, behf,
    relative_event_cols=['go_time', 'response_time'],
pd_parser.add_relative_events(
    raw, beh,
    relative_event_keys=['go_time', 'response_time'],
    relative_event_names=['Go Cue', 'Response'])


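And, as a sketch, the added relative events can be confirmed by listing the unique annotation descriptions, which should now include the names passed above:

from pd_parser.parse_pd import _load_data  # same private helper as above

annot = _load_data(fname)[0]
print(sorted(set(annot.description)))      # should include 'Go Cue' and 'Response'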
@@ -27,7 +27,7 @@
from mne.utils import _TempDir

import pd_parser
from pd_parser.parse_pd import _to_tsv
from pd_parser.parse_pd import _load_data

import matplotlib.pyplot as plt

@@ -38,7 +38,7 @@
n_events = 300
# let's make the photodiode on-durations uniform random from 0.25 to 0.75 seconds
n_secs_on = np.random.random(n_events) * 0.5 + 0.25
raw, beh_df, events, _ = \
raw, beh, events, _ = \
    pd_parser.simulate_pd_data(n_events=n_events, n_secs_on=n_secs_on,
                               prop_corrupted=0.0)
sfreq = np.round(raw.info['sfreq']).astype(int)
@@ -51,16 +51,19 @@
axes[0].set_ylabel('voltage')
for j, i in enumerate(events[corrupted_indices, 0]):
    if j == 0:
        raw._data[0, i - sfreq // 3: i - sfreq // 4] = -amount
        raw._data[0, i - sfreq // 5: i - sfreq // 10] = -amount
    elif j == 1:
        raw._data[0, i + sfreq // 4: i + sfreq // 3] = -amount
    else:
        raw._data[0, i + 2 * sfreq // 3: i + 4 * sfreq // 4] = amount
        raw._data[0, i + 3 * sfreq // 4: i + 5 * sfreq // 6] = amount
    axes[j].plot(np.linspace(-1, 2, 3 * sfreq),
                 raw._data[0, i - sfreq: i + sfreq * 2])
    axes[j].set_xlabel('time (s)')


# make figure nicer
fig.tight_layout()

# make fake electrophysiology data
info = mne.create_info(['ch1', 'ch2', 'ch3'], raw.info['sfreq'],
                       ['seeg'] * 3)
@@ -71,15 +74,13 @@
raw.info['dig'] = None
raw.info['line_freq'] = 60

# save to disk as required by ``pd-parser``
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)
# add some offsets to the behavior so it's a bit more realistic
offsets = np.random.randn(n_events) * 0.001
beh_df['time'] = np.array(beh_df['time']) + offsets
behf = op.join(out_dir, 'sub-1_task-mytask_beh.tsv')
_to_tsv(behf, beh_df)
beh['time'] = np.array(beh['time']) + offsets

# save to disk as required by ``pd-parser``, raw needs to have a filename
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)

###############################################################################
# Find the photodiode events relative to the behavioral timing of interest:
@@ -92,8 +93,8 @@
# your own input depending on whether you want to keep the events or not.

with mock.patch('builtins.input', return_value='y'):
    pd_parser.parse_pd(fname, pd_event_name='Stim On', behf=behf,
                       pd_ch_names=['pd'], beh_col='time', recover=True)
    pd_parser.parse_pd(fname, pd_event_name='Stim On', beh=beh,
                       pd_ch_names=['pd'], beh_key='time', recover=True)

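Since the confirmation is read from `input`, the recovered events could presumably be rejected instead by mocking a negative answer; a hedged sketch only (the accepted answer strings are an assumption):

# sketch: presumably answering 'n' would discard the recovered events instead,
# if run in place of the call above
with mock.patch('builtins.input', return_value='n'):
    pd_parser.parse_pd(fname, pd_event_name='Stim On', beh=beh,
                       pd_ch_names=['pd'], beh_key='time', recover=True)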
###############################################################################
# Find cessations of the photodiode deflections
@@ -108,3 +109,25 @@
# following the instructions.

pd_parser.add_pd_off_events(fname, off_event_name='Stim Off')

###############################################################################
# Check the results:
#
# Finally, we'll check that the recovered events and the original events match.

annot = _load_data(fname)[0]
raw.set_annotations(annot)
events2, event_id = mne.events_from_annotations(raw)
on_events = events2[events2[:, 2] == event_id['Stim On']]
print(f'Original: {events[corrupted_indices, 0]}\n'
      f'Recovered: {on_events[corrupted_indices, 0]}')

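A slightly stricter check than eyeballing the printout (an added sketch): the recovered onsets should sit within a few samples of the originals.

# sketch: maximum deviation between recovered and original onsets, in samples
deviation = np.abs(on_events[corrupted_indices, 0] - events[corrupted_indices, 0])
print(f'max deviation: {deviation.max()} samples')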
'''
# uncomment when using interactively, this section doesn't work
# for the non-interactive documentation
off_events = events2[events2[:, 2] == event_id['Stim Off']]
original_off = events[corrupted_indices, 0] + \
    np.round(n_secs_on[corrupted_indices] * raw.info['sfreq']).astype(int)
print(f'Original off: {original_off}\n'
      f'Recovered off: {off_events[corrupted_indices, 0]}')
'''
@@ -0,0 +1,108 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Use Audio to Align Video Data\nIn this example, we use ``pd-parser`` to find audio events using the same\nalgorithm for matching with time-stamps and rejecting misaligned\naudio, but applied using the onset of an audio deflection instead of detecting\nphotodiode events based on their square wave shape.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Authors: Alex Rockhill <[email protected]>\n#\n# License: BSD (3-clause)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Load in a video with audio:\n\nIn this example, we'll use audio and instead of aligning electrophysiology\ndata, we'll align a video. This example data is from a task where movements\nare played on a monitor for the participant to mirror and the video recording\nis synchronized by playing a pre-recorded clap. This clap sound, or a similar\nsound, is recommended for synchronizing audio because the onset is clear and\nallows good precision in synchronizing events.\n\nNote that the commands that require ffmpeg are pre-computed and commented\nout because ffmpeg must be installed to use them and it is not required by\n``pd-parser``.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import os\nimport os.path as op\nimport numpy as np\nfrom scipy.io import wavfile\nfrom subprocess import call\n# from subprocess import run, PIPE, STDOUT\n# import datetime\n\nimport mne\nfrom mne.utils import _TempDir\n\nimport pd_parser\nfrom pd_parser.parse_pd import _load_data # , _read_tsv\n\n# get the data\nout_dir = _TempDir()\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video.mp4 '\n '-o ' + op.join(out_dir, 'test_video.mp4')], shell=True, env=os.environ)\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video.wav '\n '-o ' + op.join(out_dir, 'test_video.wav')], shell=True, env=os.environ)\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video_beh.tsv '\n '-o ' + op.join(out_dir, 'test_video_beh.tsv')],\n shell=True, env=os.environ)\n\n# navigate to the example video\nvideo_fname = op.join(out_dir, 'test_video.mp4')\n\naudio_fname = video_fname.replace('mp4', 'wav') # pre-computed\n# extract audio (requires ffmpeg)\n# run(['ffmpeg', '-i', video_fname, audio_fname])\n\nfs, data = wavfile.read(audio_fname)\ndata = data.mean(axis=1) # stereo audio but only need one source\ninfo = mne.create_info(['audio'], fs, ['stim'])\nraw = mne.io.RawArray(data[np.newaxis], info)\n\n# find audio-visual time offset\noffset = 0 # pre-computed value for this video\n'''\nresult = run(['ffprobe', '-show_entries', 'stream=codec_type,start_time',\n '-v', '0', '-of', 'compact=p=1:nk=0', video_fname],\n stdout=PIPE, stderr=STDOUT)\noutput = result.stdout.decode('utf-8').split('\\n')\noffset = float(output[0].strip('stream|codec_type=video|start_time')) - \\\n float(output[1].strip('stream|codec_type=audio|start_time'))\n'''\n\n# save to disk as required by ``pd-parser``, raw needs a filename\nfname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')\nraw.save(fname)\n\n# navigate to corresponding behavior\nbehf = op.join(out_dir, 'test_video_beh.tsv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the parser:\n\nNow we'll call the main function to automatically parse the audio events.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"pd_parser.parse_audio(fname, beh=behf, beh_key='tone_onset_time',\n audio_ch_names=['audio'], zscore=10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Load the results:\n\nFinally, we'll load the events and use them to crop the video although it\nrequires ffmpeg so it is commented out.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"annot = _load_data(fname)[0]\nprint('Here are the event times: ', annot.onset)\n\n# Crop the videos with ffmpeg\n'''\nbeh = _read_tsv(behf)\nfor i in range(annot.onset.size): # skip the first video\n action_time = (beh['tone_onset'][i] - beh['action_onset'][i]) / 1000\n run(['ffmpeg', '-i', f'{video_fname}', '-ss',\n str(datetime.timedelta(\n seconds=annot.onset[i] - action_time - offset)),\n '-to', str(datetime.timedelta(seconds=annot.onset[i] - offset)),\n op.join(out_dir, 'movement-{}+action_type-{}.mp4'.format(\n beh['movement'][i], beh['action_type'][i]))])\n'''"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
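The audio example above passes `zscore=10` to `parse_audio`; as a conceptual sketch only (not pd-parser's actual implementation), an audio deflection onset can be thought of as the first sample whose rectified amplitude exceeds that many standard deviations of the recording:

import numpy as np
from scipy.io import wavfile

# conceptual sketch only: find the first loud deflection by z-score; the path
# is assumed to point at the test_video.wav downloaded in the example above
fs, data = wavfile.read('test_video.wav')
audio = np.abs(data.mean(axis=1))      # stereo -> mono, rectified amplitude
z = (audio - audio.mean()) / audio.std()
candidates = np.where(z > 10)[0]       # zscore=10, as passed to parse_audio
if candidates.size:
    print(f'first deflection at {candidates[0] / fs:.3f} s')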
@@ -15,7 +15,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Find Photodiode Events\n\nIn this example, we use ``pd-parser`` to find photodiode events and\nalign them to behavior. Then, we save the data to BIDS format.\n"
"\n# Find Photodiode Events\nIn this example, we use ``pd-parser`` to find photodiode events and\nalign them to behavior. Then, we save the data to BIDS format.\n"
]
},
{
@@ -44,7 +44,7 @@
},
"outputs": [],
"source": [
"import os.path as op\nimport numpy as np\n\nimport mne\nfrom mne.utils import _TempDir\n\nimport pd_parser\nfrom pd_parser.parse_pd import _to_tsv\n\nout_dir = _TempDir()\nprint(f'After running this example, you can find the data here: {out_dir}')\n\n# simulate photodiode data\nn_events = 300\nprop_corrupted = 0.01\nraw, beh_df, events, corrupted_indices = \\\n pd_parser.simulate_pd_data(n_events=n_events,\n prop_corrupted=prop_corrupted)\n\n# make fake electrophysiology data\ninfo = mne.create_info(['ch1', 'ch2', 'ch3'], raw.info['sfreq'],\n ['seeg'] * 3)\nraw2 = mne.io.RawArray(np.random.random((3, raw.times.size)) * 1e-6, info)\nraw2.info['lowpass'] = raw.info['lowpass'] # these must match to combine\nraw.add_channels([raw2])\n# bids needs these data fields\nraw.info['dig'] = None\nraw.info['line_freq'] = 60\n\nfname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')\nraw.save(fname)"
"import os.path as op\nimport numpy as np\n\nimport mne\nfrom mne.utils import _TempDir\n\nimport pd_parser\nfrom pd_parser.parse_pd import _read_raw, _to_tsv\n\nout_dir = _TempDir()\nprint(f'After running this example, you can find the data here: {out_dir}')\n\n# simulate photodiode data\nn_events = 300\nprop_corrupted = 0.01\nraw, beh, events, corrupted_indices = \\\n pd_parser.simulate_pd_data(n_events=n_events,\n prop_corrupted=prop_corrupted)\n\n# make fake electrophysiology data\ninfo = mne.create_info(['ch1', 'ch2', 'ch3'], raw.info['sfreq'],\n ['seeg'] * 3)\nraw2 = mne.io.RawArray(np.random.random((3, raw.times.size)) * 1e-6, info)\nraw2.info['lowpass'] = raw.info['lowpass'] # these must match to combine\nraw.add_channels([raw2])\n# bids needs these data fields\nraw.info['dig'] = None\nraw.info['line_freq'] = 60\n\nfname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')\nraw.save(fname)\n\n# roundtrip so that raw is properly loaded from disk and has a filename\nraw = _read_raw(fname)"
]
},
{
@@ -62,7 +62,7 @@
},
"outputs": [],
"source": [
"np.random.seed(12)\n# add some noise to make it harder to align, use just over\n# the exclusion of 0.03 to make some events excluded\noffsets = np.random.random(n_events) * 0.035 - 0.0125\n# in this example, the fixation would always be 700 ms\n# after which point a cue would appear which is the \"go time\"\ngo_time = np.repeat(0.7, n_events)\n# let's make the response time between 0.5 and 1.5 seconds uniform random\nresponse_time = list(go_time + np.random.random(n_events) + 1.5)\nfor i in [10, 129, 232, 288]:\n response_time[i] = 'n/a' # make some no responses\n# put in dictionary to be converted to tsv file\nbeh_df['fix_onset_time'] = beh_df['time'] + offsets\nbeh_df['go_time'] = go_time\nbeh_df['response_time'] = response_time\nbehf = op.join(out_dir, 'sub-1_task-mytask_beh.tsv')\n# save behavior file out\n_to_tsv(behf, beh_df)"
"np.random.seed(12)\n# add some noise to make it harder to align, use just over\n# the exclusion of 0.03 to make some events excluded\noffsets = np.random.random(n_events) * 0.035 - 0.0125\n# in this example, the fixation would always be 700 ms\n# after which point a cue would appear which is the \"go time\"\ngo_time = np.repeat(0.7, n_events)\n# let's make the response time between 0.5 and 1.5 seconds uniform random\nresponse_time = list(go_time + np.random.random(n_events) + 1.5)\nfor i in [10, 129, 232, 288]:\n response_time[i] = 'n/a' # make some no responses\n# put in dictionary to be converted to tsv file\nbeh['fix_onset_time'] = beh['time'] + offsets\nbeh['go_time'] = go_time\nbeh['response_time'] = response_time\nbehf = op.join(out_dir, 'sub-1_task-mytask_beh.tsv')\n# save behavior file out\n_to_tsv(behf, beh)"
]
},
{
@@ -80,14 +80,14 @@
},
"outputs": [],
"source": [
"pd_parser.find_pd_params(fname, pd_ch_names=['pd'])"
"pd_parser.find_pd_params(raw, pd_ch_names=['pd'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Find the photodiode events relative to the behavioral timing of interest:\n\nThis function will use the default parameters or the parameters you\nfound from :func:`pd_parser.find_pd_parameters` to find and align the\nphotodiode events, excluding events that were off because the commuter\nhung up on computation, for instance. That data is saved in the same folder\nas the raw file (in this case, a temperary directory generated by\n:func:`_TempDir`). The data can be used directly, or it can be accessed via\n:func:`pd_parser.pd_parser_save_to_bids` to store it in the brain imagine\ndata structure (BIDS) standardized format before using it.\n\n"
"Find the photodiode events relative to the behavioral timing of interest:\n\nThis function will use the default parameters or the parameters you\nfound from :func:`pd_parser.find_pd_parameters` to find and align the\nphotodiode events, excluding events that were off because the computer\nhung up on computation, for instance. That data is saved in the same folder\nas the raw file (in this case, a temperary directory generated by\n:func:`_TempDir`). The data can be used directly, or it can be accessed via\n:func:`pd_parser.pd_parser_save_to_bids` to store it in the brain imagine\ndata structure (BIDS) standardized format before using it.\n\n"
]
},
{
@@ -98,7 +98,7 @@
},
"outputs": [],
"source": [
"pd_parser.parse_pd(fname, behf=behf, pd_ch_names=['pd'], max_len=1.5)"
"pd_parser.parse_pd(raw, beh=beh, pd_ch_names=['pd'], max_len=1.5)"
]
},
{
@@ -116,7 +116,7 @@
},
"outputs": [],
"source": [
"pd_parser.add_pd_relative_events(\n fname, behf,\n relative_event_cols=['go_time', 'response_time'],\n relative_event_names=['Go Cue', 'Response'])"
"pd_parser.add_relative_events(\n raw, beh,\n relative_event_keys=['go_time', 'response_time'],\n relative_event_names=['Go Cue', 'Response'])"
]
},
{
@@ -154,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
"version": "3.9.1"
}
},
"nbformat": 4,