diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 00000000..780867f6 Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e14204c7..722a2517 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -34,10 +34,4 @@ jobs: if: github.ref == 'refs/heads/master' # TODO: Deploy seperate develop-version of docs? with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: doc/_build/html - - name: Deploy Dev Docs - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: doc/_build/html - destination_dir: develop + publish_dir: doc/_build/html \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 678c8b4f..1aaaa2ca 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -52,11 +52,26 @@ jobs: if: "startsWith(runner.os, 'Linux')" run: | make install-deps-wxpython + - name: Install conda + uses: conda-incubator/setup-miniconda@v3 + with: + environment-file: environment.yml + auto-activate-base: true + python-version: 3.8 + activate-environment: EEG-ExPy + channels: conda-forge + miniconda-version: "latest" + - name: Install dependencies via conda + shell: bash -el {0} + run: | + conda info + conda activate EEG-ExPy - name: Install dependencies + shell: bash -el {0} run: | make build - name: Run eegnb install test - shell: bash + shell: bash -el {0} run: | if [ "$RUNNER_OS" == "Linux" ]; then Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & @@ -65,7 +80,7 @@ jobs: eegnb --help eegnb runexp --help - name: Run examples with coverage - shell: bash + shell: bash -el {0} run: | if [ "$RUNNER_OS" == "Linux" ]; then Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & diff --git a/.gitignore b/.gitignore index d6bf9a8f..f2b04e5f 100644 --- a/.gitignore +++ b/.gitignore @@ -16,4 +16,6 @@ coverage.xml htmlcov # 
PyCharm -.idea/ \ No newline at end of file +.idea/ + +*.DS_Store* \ No newline at end of file diff --git a/doc/getting_started/running_experiments.md b/doc/getting_started/running_experiments.md index 951e6506..cc13e42c 100644 --- a/doc/getting_started/running_experiments.md +++ b/doc/getting_started/running_experiments.md @@ -88,20 +88,20 @@ The first step is to import all of the necessary library dependencies. These are ```python from eegnb import generate_save_fn from eegnb.devices.eeg import EEG -from eegnb.experiments.visual_n170 import n170 +from eegnb.experiments import VisualN170 ``` Next we need to define session parameters which are otherwise handled via input prompts in the run `run_notebooks.py` script. After we define the session parameters we will pass them to the file name generator. ```python -board_name = 'cyton' -experiment = 'visual_n170' -session = 1 -subject = 1 -record_duration = 120 +board_name = "muse2" # board name +experiment_name = "visual_n170" # experiment name +subject_id = 0 # test subject id +session_nb = 0 # session number +record_duration = 120 # recording duration # Create output filename -save_fn = generate_save_fn(board_name, experiment, subject, session) +save_fn = generate_save_fn(board_name, experiment_name, subject_id, session_nb) ``` Next it is necessary to call the `eegnb.devices.eeg.EEG` class which handles all of the backend processes related to each device. @@ -113,31 +113,46 @@ eeg_device = EEG(device=board_name) Finally, we call the `present` method of the class corresponding to our desired experiment, in this case the visual N170. We pass both the EEG device and generated save file name in order to collect and save data. The presentation can also be run without an EEG device/save file for testing and debugging. 
```python -n170.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) +experiment = VisualN170(duration=record_duration, eeg=eeg_device, save_fn=save_fn, use_vr=False) + +experiment.run() ``` All together the example script looks like ```python +################################################################################################### +# Setup +# --------------------- +# # Imports from eegnb import generate_save_fn from eegnb.devices.eeg import EEG -from eegnb.experiments.visual_n170 import n170 +from eegnb.experiments import VisualN170 # Define some variables -board_name = 'cyton' -experiment = 'visual_n170' -session = 1 -subject = 1 -record_duration = 120 +board_name = "muse2" # board name +experiment_name = "visual_n170" # experiment name +subject_id = 0 # test subject id +session_nb = 0 # session number +record_duration = 120 # recording duration -# Create output filename -save_fn = generate_save_fn(board_name, experiment, subject, session) +# generate save path +save_fn = generate_save_fn(board_name, experiment_name, subject_id, session_nb) -# Setup EEG device +# create device object eeg_device = EEG(device=board_name) -# Run stimulus presentation -n170.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) +# Experiment type +experiment = VisualN170(duration=record_duration, eeg=eeg_device, save_fn=save_fn, use_vr=False) + +################################################################################################### +# Run experiment +# --------------------- +# +experiment.run() + +# Saved csv location +print("Recording saved in", experiment.save_fn) ``` @@ -162,7 +177,7 @@ The N170 experiment for example, can have its stimulus displayed on the VR heads ```python # Run stimulus presentation with VR enabled. 
-n170.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn, use_vr=True) +experiment = VisualN170(duration=record_duration, eeg=eeg_device, save_fn=save_fn, use_vr=True) ``` ### diff --git a/doc/getting_started/streaming.md b/doc/getting_started/streaming.md index d9f956b2..f28ebde8 100644 --- a/doc/getting_started/streaming.md +++ b/doc/getting_started/streaming.md @@ -76,6 +76,12 @@ be run to begin the notebooks interfacing with the bluemuse backend. **Backend:** Brainflow **Needed Parameters:** No additional parameters are needed to connect to the Notion. It is necessary however to make sure the Notion is on the same network and readable by Neurosity's developer console. +### Neurosity Crown +![fig](../img/notion.png) +**Device Name:** *'crown'* +**Backend:** Brainflow +**Needed Parameters:** No additional parameters are needed to connect to the Crown. It is necessary however to make sure the Crown is on the same network and readable by Neurosity's developer console. + #### Connecting on Windows In order to connect to the Notion on Windows you must first turn off your network firewall for the Open Sound Control (OSC) protocol to function for the notion.
diff --git a/eegnb/analysis/analysis_utils.py b/eegnb/analysis/analysis_utils.py index 72fd2eea..6bc65bf2 100644 --- a/eegnb/analysis/analysis_utils.py +++ b/eegnb/analysis/analysis_utils.py @@ -7,7 +7,6 @@ from glob import glob from typing import Union, List from time import sleep, time -import keyboard import os import pandas as pd diff --git a/eegnb/analysis/streaming_utils.py b/eegnb/analysis/streaming_utils.py index c7f66702..e80ff01e 100644 --- a/eegnb/analysis/streaming_utils.py +++ b/eegnb/analysis/streaming_utils.py @@ -7,7 +7,7 @@ from glob import glob from typing import Union, List from time import sleep, time -import keyboard +from pynput import keyboard import os import pandas as pd @@ -192,12 +192,20 @@ def check_report(eeg: EEG, n_times: int=60, pause_time=5, thres_std_low=None, th print(f"\n\nLooks like you still have {len(bad_channels)} bad channels after {loop_index+1} tries\n") prompt_time = time() - print(f"Starting next cycle in 5 seconds, press C and enter to cancel") - while time() < prompt_time + 5: - if keyboard.is_pressed('c'): + print(f"Starting next cycle in 5 seconds, press C and enter to cancel") + c_key_pressed = [False] + + def update_key_press(key): + if getattr(key, 'char', None) == 'c': + c_key_pressed[0] = True + listener = keyboard.Listener(on_press=update_key_press) + listener.start() + while time() < prompt_time + 5: + if c_key_pressed[0]: print("\nStopping signal quality checks!") flag = True - break + break + listener.stop() if flag: break diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py index 21269a83..0236a571 100644 --- a/eegnb/analysis/utils.py +++ b/eegnb/analysis/utils.py @@ -7,7 +7,6 @@ from glob import glob from typing import Union, List from time import sleep, time -import keyboard import os import pandas as pd diff --git a/eegnb/cli/__main__.py b/eegnb/cli/__main__.py index d43702e4..9163ca4a 100644 --- a/eegnb/cli/__main__.py +++ b/eegnb/cli/__main__.py @@ -128,7 +128,6 @@ def create_analysis_report(
experiment, eegdevice, subject, session, site, filepath = analysis_intro_prompt() analysis_report(experiment, eegdevice, subject, session, site, filepath) - @main.command() @click.option("-ed", "--eegdevice", help="EEG device to use", required=True) def checksigqual(eegdevice: str): diff --git a/eegnb/cli/introprompt.py b/eegnb/cli/introprompt.py index 6f8aa488..ea96b368 100644 --- a/eegnb/cli/introprompt.py +++ b/eegnb/cli/introprompt.py @@ -199,7 +199,7 @@ def analysis_device_prompt(): def analysis_intro_prompt(): # check if user has filepath - print("Welcome to NeurotechX EEG Notebooks\n") + print("Welcome to NeurotechX EEG-ExPy\n") print("Do you have a filepath to a .csv file you would like to analyze? \n") print("[1] Yes \n") print("[0] No \n") @@ -220,7 +220,6 @@ def analysis_intro_prompt(): return experiment, eegdevice, subject, session, site, filepath - def intro_prompt_zip() -> Tuple[str,str]: """This function handles the user prompts for inputting information for zipping their function.""" diff --git a/eegnb/experiments/auditory_oddball/aob.py b/eegnb/experiments/auditory_oddball/aob.py index 02da516f..607b2df8 100644 --- a/eegnb/experiments/auditory_oddball/aob.py +++ b/eegnb/experiments/auditory_oddball/aob.py @@ -1,5 +1,8 @@ import numpy as np from pandas import DataFrame +from psychopy import prefs +# PTB does not yet support macOS Apple Silicon, need to fall back to sounddevice. 
+prefs.hardware['audioLib'] = ['sounddevice'] from psychopy import visual, core, event, sound from time import time @@ -8,7 +11,6 @@ from typing import Optional - class AuditoryOddball(Experiment.BaseExperiment): def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, n_trials = 2010, iti = 0.3, soa = 0.2, jitter = 0.2, secs=0.2, volume=0.8, random_state=42, s1_freq="C", s2_freq="D", s1_octave=5, s2_octave=6): diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..d63c2b50 --- /dev/null +++ b/environment.yml @@ -0,0 +1,6 @@ +channels: + - conda-forge +dependencies: + - python=3.8 + - pytables # install pytables for macOS arm64, so do not need to build from source. + - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found." \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 71d155dd..f70bea07 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,9 @@ pyserial>=3.5 h5py>=3.1.0 pytest-shutil pyo>=1.0.3; platform_system == "Linux" -keyboard==0.13.5 +#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to +# a specific version prevents an endless dependency resolution loop. 
+pyobjc==7.3; sys_platform == 'darwin' airium>=0.1.0 attrdict>=2.0.1 attrdict3 @@ -24,14 +26,20 @@ attrdict3 ## ~~ Streaming Requirements ~~ muselsl>=2.0.2 -pylsl==1.10.5 # due to https://github.com/NeuroTechX/eeg-notebooks/issues/187 +# Upgrade from 1.10.5 to 1.16.2 so the arm64 lib is available to macOS Apple Silicon for preventing error: +# pylsl/liblsl64.dylib' (mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64e' or 'arm64')) +pylsl==1.16.2 brainflow>=4.8.2 pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil pyo>=1.0.3; platform_system == "Linux" -keyboard==0.13.5 +#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to +# a specific version prevents an endless dependency resolution loop. +pyobjc==7.3; sys_platform == 'darwin' +#Removed keyboard dependency due segmentation fault on Apple Silicon: https://github.com/boppreh/keyboard/issues/507 +pynput airium>=0.1.0 attrdict>=2.0.1 attrdict3 @@ -40,7 +48,13 @@ click ## ~~ Stimpres Requirements ~~ -psychopy==2023.1.0 +#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to +# a specific version prevents an endless dependency resolution loop. +pyobjc==7.3; sys_platform == 'darwin' +#upgrade psychopy to use newer wxpython dependency which is prebuilt for m1 support. +psychopy==2023.2.2 +# PTB does not yet support macOS Apple Silicon, need to fallback to sounddevice. +psychopy-sounddevice psychtoolbox scikit-learn>=0.23.2 pandas>=1.1.4 @@ -52,7 +66,6 @@ pyserial>=3.5 h5py>=3.1.0 pytest-shutil pyo>=1.0.3; platform_system == "Linux" -keyboard==0.13.5 airium>=0.1.0 attrdict>=2.0.1 attrdict3