Skip to content

Commit

Permalink
Merge branch 'main' into add_first_dockerfile
Browse files Browse the repository at this point in the history
  • Loading branch information
CodyCBakerPhD authored Apr 8, 2024
2 parents 87e5373 + 468157b commit c29264e
Show file tree
Hide file tree
Showing 9 changed files with 68 additions and 47 deletions.
2 changes: 0 additions & 2 deletions .github/workflows/testing.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
name: Minimal and Full Tests
on:
schedule:
- cron: "0 16 * * *" # Daily at noon EST
workflow_call:
secrets:
AWS_ACCESS_KEY_ID:
Expand Down
10 changes: 8 additions & 2 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
# Upcoming

### Features
### Deprecations
* Removed `stream_id` as an argument from `IntanRecordingInterface` [PR #794](https://github.com/catalystneuro/neuroconv/pull/794)
* Replaced `waveform_extractor.is_extension` with `waveform_extractor.has_extension` [PR #799](https://github.com/catalystneuro/neuroconv/pull/799)

### Features
* Released the first official Docker images for the package on the GitHub Container Repository (GHCR). [PR #383](https://github.com/catalystneuro/neuroconv/pull/383)

### Bug fixes
* Fixed writing waveforms directly to file [PR #799](https://github.com/catalystneuro/neuroconv/pull/799)



# v0.4.8 (March 20, 2024)
Expand All @@ -17,7 +23,7 @@
* Keyword argument `field_name` of the `DatasetIOConfiguration.from_neurodata_object` method has been renamed to `dataset_name` to be more consistent with its usage. This only affects direct initialization of the model; usage via the `BackendConfiguration` constructor and its associated helper functions in `neuroconv.tools.nwb_helpers` is unaffected. [PR #767](https://github.com/catalystneuro/neuroconv/pull/767)
* Manual construction of a `DatasetIOConfiguration` now requires the field `dataset_name`, and will be validated to match the final path of `location_in_file`. Usage via the automated constructors is unchanged. [PR #767](https://github.com/catalystneuro/neuroconv/pull/767)
* Enhance `get_schema_from_method_signature` to extract descriptions from the method docval. [PR #771](https://github.com/catalystneuro/neuroconv/pull/771)

* Avoid writing `channel_to_uV` and `offset_to_uV` in `add_electrodes` [PR #803](https://github.com/catalystneuro/neuroconv/pull/803)


# v0.4.7 (February 21, 2024)
Expand Down
2 changes: 1 addition & 1 deletion docs/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ Features:

.. toctree::
:maxdepth: 2
:caption: Contents
:hidden:

user_guide/user_guide
conversion_examples_gallery/conversion_example_gallery
Expand Down
43 changes: 11 additions & 32 deletions src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from pathlib import Path
import warnings
from typing import Optional

from packaging.version import Version
from pynwb.ecephys import ElectricalSeries
Expand All @@ -8,35 +9,6 @@
from ....utils import FilePathType, get_schema_from_hdmf_class


def extract_electrode_metadata_with_pyintan(file_path) -> dict:
    """Extract per-channel electrode metadata from an Intan file via the ``pyintan`` package.

    Parameters
    ----------
    file_path : path-like
        Path to an Intan ``.rhd`` or ``.rhs`` recording file.

    Returns
    -------
    dict
        Dictionary with keys ``group_names``, ``unique_group_names``,
        ``group_electrode_numbers``, and ``custom_names`` describing the
        neural channels of the recording.
    """
    pyintan = get_package(package_name="pyintan")

    # .rhd and .rhs files require different readers; index [1] of the returned
    # tuple holds the per-channel metadata records.
    if ".rhd" in Path(file_path).suffixes:
        intan_file_metadata = pyintan.intan.read_rhd(file_path)[1]
    else:
        intan_file_metadata = pyintan.intan.read_rhs(file_path)[1]

    # Channels whose native name contains any of these substrings are
    # auxiliary (non-neural) streams and are excluded from the electrode table.
    exclude_chan_types = ("AUX", "ADC", "VDD", "_STIM", "ANALOG")

    # Generator inside any() avoids building an intermediate list per channel.
    valid_channels = [
        channel
        for channel in intan_file_metadata
        if not any(chan_type in channel["native_channel_name"] for chan_type in exclude_chan_types)
    ]

    # Native channel names look like "<group>-<number>"; the prefix identifies the port/group.
    group_names = [channel["native_channel_name"].split("-")[0] for channel in valid_channels]
    unique_group_names = set(group_names)
    group_electrode_numbers = [channel["native_order"] for channel in valid_channels]
    custom_names = [channel["custom_channel_name"] for channel in valid_channels]

    electrodes_metadata = dict(
        group_names=group_names,
        unique_group_names=unique_group_names,
        group_electrode_numbers=group_electrode_numbers,
        custom_names=custom_names,
    )

    return electrodes_metadata


def extract_electrode_metadata(recording_extractor) -> dict:

neo_version = get_package_version(name="neo")
Expand Down Expand Up @@ -73,11 +45,12 @@ class IntanRecordingInterface(BaseRecordingExtractorInterface):
display_name = "Intan Recording"
associated_suffixes = (".rhd", ".rhs")
info = "Interface for Intan recording data."
stream_id = "0" # This is the only stream_id of Intan that might have neural data

def __init__(
self,
file_path: FilePathType,
stream_id: str = "0",
stream_id: Optional[str] = None,
verbose: bool = True,
es_key: str = "ElectricalSeries",
):
Expand All @@ -95,7 +68,13 @@ def __init__(
es_key : str, default: "ElectricalSeries"
"""

self.stream_id = stream_id
if stream_id is not None:
warnings.warn(
"Use of the 'stream_id' parameter is deprecated and it will be removed after September 2024.",
DeprecationWarning,
)
self.stream_id = stream_id

super().__init__(file_path=file_path, stream_id=self.stream_id, verbose=verbose, es_key=es_key)
electrodes_metadata = extract_electrode_metadata(recording_extractor=self.recording_extractor)

Expand Down
Original file line number Diff line number Diff line change
@@ -1 +0,0 @@
pyintan>=0.3.0
15 changes: 10 additions & 5 deletions src/neuroconv/tools/spikeinterface/spikeinterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,12 @@ def add_electrode_groups(recording: BaseRecording, nwbfile: pynwb.NWBFile, metad
nwbfile.create_electrode_group(**electrode_group_kwargs)


def add_electrodes(recording: BaseRecording, nwbfile: pynwb.NWBFile, metadata: dict = None, exclude: tuple = ()):
def add_electrodes(
recording: BaseRecording,
nwbfile: pynwb.NWBFile,
metadata: dict = None,
exclude: tuple = (),
):
"""
Add channels from recording object as electrodes to nwbfile object.
Expand Down Expand Up @@ -237,7 +242,7 @@ def add_electrodes(recording: BaseRecording, nwbfile: pynwb.NWBFile, metadata: d
data_to_add = defaultdict(dict)

recorder_properties = recording.get_property_keys()
excluded_properties = list(exclude) + ["contact_vector"]
excluded_properties = list(exclude) + ["offset_to_uV", "gain_to_uV", "contact_vector"]
properties_to_extract = [property for property in recorder_properties if property not in excluded_properties]

for property in properties_to_extract:
Expand Down Expand Up @@ -1307,12 +1312,12 @@ def add_waveforms(

# metrics properties (quality, template) are added as properties to the sorting copy
sorting_copy = sorting.select_units(unit_ids=sorting.unit_ids)
if waveform_extractor.is_extension("quality_metrics"):
if waveform_extractor.has_extension("quality_metrics"):
qm = waveform_extractor.load_extension("quality_metrics").get_data()
for prop in qm.columns:
if prop not in sorting_copy.get_property_keys():
sorting_copy.set_property(prop, qm[prop])
if waveform_extractor.is_extension("template_metrics"):
if waveform_extractor.has_extension("template_metrics"):
tm = waveform_extractor.load_extension("template_metrics").get_data()
for prop in tm.columns:
if prop not in sorting_copy.get_property_keys():
Expand Down Expand Up @@ -1422,7 +1427,7 @@ def write_waveforms(

add_waveforms(
waveform_extractor=waveform_extractor,
nwbfile=nwbfile,
nwbfile=nwbfile_out,
metadata=metadata,
recording=recording,
unit_ids=unit_ids,
Expand Down
4 changes: 3 additions & 1 deletion src/neuroconv/tools/testing/data_interface_mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,9 @@ def check_read_nwb(self, nwbfile_path: str):

# Spikeinterface behavior is to load the electrode table channel_name property as a channel_id
self.nwb_recording = NwbRecordingExtractor(
file_path=nwbfile_path, electrical_series_name=electrical_series_name
file_path=nwbfile_path,
electrical_series_name=electrical_series_name,
use_pynwb=True,
)

# Set channel_ids right for comparison
Expand Down
23 changes: 21 additions & 2 deletions tests/test_ecephys/test_tools_spikeinterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,11 @@
from hdmf.backends.hdf5.h5_utils import H5DataIO
from hdmf.data_utils import DataChunkIterator
from hdmf.testing import TestCase
from pynwb import NWBFile
from pynwb import NWBHDF5IO, NWBFile
from spikeinterface.core.generate import generate_recording, generate_sorting
from spikeinterface.extractors import NumpyRecording

from neuroconv.tools.nwb_helpers import get_module
from neuroconv.tools.nwb_helpers import get_default_nwbfile_metadata, get_module
from neuroconv.tools.spikeinterface import (
add_electrical_series,
add_electrodes,
Expand Down Expand Up @@ -1191,6 +1191,10 @@ def setUpClass(cls):
cls.we_recless = WaveformExtractor.load_from_folder(cls.waveform_recordingless_path, with_recording=False)
cls.we_recless_recording = single_segment_rec

cls.nwbfile_path = cls.tmpdir / "test.nwb"
if cls.nwbfile_path.exists():
cls.nwbfile_path.unlink()

@classmethod
def tearDownClass(cls):
rmtree(cls.tmpdir)
Expand Down Expand Up @@ -1261,6 +1265,21 @@ def test_write_recordingless(self):
write_electrical_series=True,
)

def test_write_waveforms_to_file(self):
    """This tests that the waveforms are written to file"""
    # session_start_time is a mandatory NWBFile field, so it must be filled in
    # before writing; the rest of the metadata defaults suffice here.
    metadata = get_default_nwbfile_metadata()
    metadata["NWBFile"]["session_start_time"] = datetime.now()
    # Write the single-segment waveform extractor to disk, including the raw
    # electrical series (write_electrical_series=True).
    write_waveforms(
        waveform_extractor=self.single_segment_we,
        nwbfile_path=self.nwbfile_path,
        write_electrical_series=True,
        metadata=metadata,
    )
    # Round-trip: read the file back and verify both the waveform content
    # (shared helper) and that the raw series landed in acquisition.
    with NWBHDF5IO(self.nwbfile_path, "r") as io:
        nwbfile = io.read()
        self._test_waveform_write(self.single_segment_we, nwbfile)
        self.assertIn("ElectricalSeriesRaw", nwbfile.acquisition)

def test_write_multiple_probes_without_electrical_series(self):
"""This tests that the waveforms are written to different electrode groups"""
"""This test that the waveforms are written to different electrode groups"""
# we write the first set of waveforms as belonging to group 0
Expand Down
15 changes: 14 additions & 1 deletion tests/test_on_data/test_gin_ecephys/test_raw_recordings.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,17 @@ class TestConverter(NWBConverter):
converter.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata)
recording = converter.data_interface_objects["TestRecording"].recording_extractor

# Read NWB file
from pynwb import NWBHDF5IO

with NWBHDF5IO(path=nwbfile_path, mode="r") as io:
nwbfile = io.read()
electrodes = nwbfile.electrodes
electrodes_columns = electrodes.colnames

assert "offset_to_uV" not in electrodes_columns
assert "gain_to_uV" not in electrodes_columns

es_key = converter.data_interface_objects["TestRecording"].es_key
electrical_series_name = metadata["Ecephys"][es_key]["name"] if es_key else None
if not isinstance(recording, BaseRecording):
Expand All @@ -244,7 +255,9 @@ class TestConverter(NWBConverter):
# NWBRecordingExtractor on spikeinterface does not yet support loading data written from multiple segment.
if recording.get_num_segments() == 1:
# Spikeinterface behavior is to load the electrode table channel_name property as a channel_id
nwb_recording = NwbRecordingExtractor(file_path=nwbfile_path, electrical_series_name=electrical_series_name)
nwb_recording = NwbRecordingExtractor(
file_path=nwbfile_path, electrical_series_name=electrical_series_name, use_pynwb=True
)
if "channel_name" in recording.get_property_keys():
renamed_channel_ids = recording.get_property("channel_name")
else:
Expand Down

0 comments on commit c29264e

Please sign in to comment.