Commit 525b9b9 (1 parent: 0ae1889)
changed core feature to use change in voltage, maintenance cleanup

Showing 255 changed files with 9,728 additions and 6,419 deletions.
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: a34f014baffc3a18f2e4451097838d56
+config: 1986480a93c54f2f17e09825d323461e
 tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file renamed (BIN +18.9 KB, not shown):
...7294485992514de3/auto_examples_python.zip → ...3d83d4e40ec44385/auto_examples_python.zip
docs/_downloads/45db2a75d199d40435fefaab0063e78d/plot_find_audio_events.ipynb (108 additions & 0 deletions)
@@ -0,0 +1,108 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Use Audio to Align Video Data\nIn this example, we use ``pd-parser`` to find audio events using the same\nalgorithm for matching with time-stamps and rejecting misaligned\naudio, but applied using the onset of an audio deflection instead of detecting\nphotodiode events based on their square wave shape.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Authors: Alex Rockhill <[email protected]>\n#\n# License: BSD (3-clause)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Load in a video with audio:\n\nIn this example, we'll use audio and instead of aligning electrophysiology\ndata, we'll align a video. This example data is from a task where movements\nare played on a monitor for the participant to mirror and the video recording\nis synchronized by playing a pre-recorded clap. This clap sound, or a similar\nsound, is recommended for synchronizing audio because the onset is clear and\nallows good precision in synchronizing events.\n\nNote that the commands that require ffmpeg are pre-computed and commented\nout because ffmpeg must be installed to use them and it is not required by\n``pd-parser``.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import os\nimport os.path as op\nimport numpy as np\nfrom scipy.io import wavfile\nfrom subprocess import call\n# from subprocess import run, PIPE, STDOUT\n# import datetime\n\nimport mne\nfrom mne.utils import _TempDir\n\nimport pd_parser\nfrom pd_parser.parse_pd import _load_data # , _read_tsv\n\n# get the data\nout_dir = _TempDir()\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video.mp4 '\n '-o ' + op.join(out_dir, 'test_video.mp4')], shell=True, env=os.environ)\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video.wav '\n '-o ' + op.join(out_dir, 'test_video.wav')], shell=True, env=os.environ)\ncall(['curl -L https://raw.githubusercontent.com/alexrockhill/pd-parser/'\n 'master/pd_parser/tests/data/test_video_beh.tsv '\n '-o ' + op.join(out_dir, 'test_video_beh.tsv')],\n shell=True, env=os.environ)\n\n# navigate to the example video\nvideo_fname = op.join(out_dir, 'test_video.mp4')\n\naudio_fname = video_fname.replace('mp4', 'wav') # pre-computed\n# extract audio (requires ffmpeg)\n# run(['ffmpeg', '-i', video_fname, audio_fname])\n\nfs, data = wavfile.read(audio_fname)\ndata = data.mean(axis=1) # stereo audio but only need one source\ninfo = mne.create_info(['audio'], fs, ['stim'])\nraw = mne.io.RawArray(data[np.newaxis], info)\n\n# find audio-visual time offset\noffset = 0 # pre-computed value for this video\n'''\nresult = run(['ffprobe', '-show_entries', 'stream=codec_type,start_time',\n '-v', '0', '-of', 'compact=p=1:nk=0', video_fname],\n stdout=PIPE, stderr=STDOUT)\noutput = result.stdout.decode('utf-8').split('\\n')\noffset = float(output[0].strip('stream|codec_type=video|start_time')) - \\\n float(output[1].strip('stream|codec_type=audio|start_time'))\n'''\n\n# save to disk as required by ``pd-parser``, raw needs a filename\nfname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')\nraw.save(fname)\n\n# navigate to corresponding behavior\nbehf = op.join(out_dir, 'test_video_beh.tsv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Run the parser:\n\nNow we'll call the main function to automatically parse the audio events.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"pd_parser.parse_audio(fname, beh=behf, beh_key='tone_onset_time',\n audio_ch_names=['audio'], zscore=10)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Load the results:\n\nFinally, we'll load the events and use them to crop the video although it\nrequires ffmpeg so it is commented out.\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"annot = _load_data(fname)[0]\nprint('Here are the event times: ', annot.onset)\n\n# Crop the videos with ffmpeg\n'''\nbeh = _read_tsv(behf)\nfor i in range(annot.onset.size): # skip the first video\n action_time = (beh['tone_onset'][i] - beh['action_onset'][i]) / 1000\n run(['ffmpeg', '-i', f'{video_fname}', '-ss',\n str(datetime.timedelta(\n seconds=annot.onset[i] - action_time - offset)),\n '-to', str(datetime.timedelta(seconds=annot.onset[i] - offset)),\n op.join(out_dir, 'movement-{}+action_type-{}.mp4'.format(\n beh['movement'][i], beh['action_type'][i]))])\n'''"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
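For readers who want to try the new example outside of Jupyter, here is a condensed sketch of the same workflow as a standalone script. It is a sketch under stated assumptions, not part of the commit: it assumes `test_video.wav` and `test_video_beh.tsv` have already been downloaded into a local directory (the notebook fetches them with curl into a temporary directory), the local paths are illustrative, and the ffmpeg extraction and cropping steps are omitted just as they are commented out in the notebook. Only calls that appear in the notebook above are used.

```python
# Condensed, standalone sketch of the workflow added in
# plot_find_audio_events.ipynb. Paths are illustrative; it assumes
# test_video.wav and test_video_beh.tsv already sit in `out_dir`.
import os.path as op

import numpy as np
from scipy.io import wavfile

import mne
import pd_parser
from pd_parser.parse_pd import _load_data

out_dir = '.'  # wherever the example files were downloaded

# read the audio track and collapse stereo to a single channel
fs, data = wavfile.read(op.join(out_dir, 'test_video.wav'))
data = data.mean(axis=1)

# wrap the audio in an MNE Raw object; pd-parser works on a saved file
info = mne.create_info(['audio'], fs, ['stim'])
raw = mne.io.RawArray(data[np.newaxis], info)
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname, overwrite=True)

# detect audio onsets and match them to the behavioral tone onset times
pd_parser.parse_audio(fname, beh=op.join(out_dir, 'test_video_beh.tsv'),
                      beh_key='tone_onset_time',
                      audio_ch_names=['audio'], zscore=10)

# the aligned event times (in seconds) come back as annotation onsets
annot = _load_data(fname)[0]
print('Here are the event times:', annot.onset)
```

As in the notebook, the aligned onsets are recovered afterwards with `_load_data`, and any video cropping with ffmpeg is left to the user since ffmpeg is not a `pd-parser` dependency.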