From 41d7a6fee470234fe8aa49774420a661f4c4beaa Mon Sep 17 00:00:00 2001
From: "Jan C. Brammer"
Date: Tue, 22 Sep 2020 18:39:33 +0200
Subject: [PATCH] Run PPG benchmark directly on the Capnobase download (no
 need to unpack data first).

---
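Note: the records in the Capnobase download are MATLAB v7.3 files, i.e. HDF5
containers, which is why the new script below reads them directly with h5py
instead of expecting unpacked text files. If you want to verify the HDF5
paths the script uses, a minimal sketch (the record name is hypothetical;
any file from the download works):

    import h5py

    # Print every group/dataset path in the record,
    # e.g. signal/pleth/y and labels/pleth/peak/x.
    with h5py.File("0009_8min.mat", "r") as f:
        f.visit(print)
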
 biopeaks/benchmarks/benchmark_PPG.py       | 40 --------------------
 biopeaks/benchmarks/benchmark_PPG_local.py | 44 ++++++++++++++++++++++
 docs/tests.md                              | 17 +++++++--
 3 files changed, 58 insertions(+), 43 deletions(-)
 delete mode 100644 biopeaks/benchmarks/benchmark_PPG.py
 create mode 100644 biopeaks/benchmarks/benchmark_PPG_local.py

diff --git a/biopeaks/benchmarks/benchmark_PPG.py b/biopeaks/benchmarks/benchmark_PPG.py
deleted file mode 100644
index ad6fdb2..0000000
--- a/biopeaks/benchmarks/benchmark_PPG.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import numpy as np
-from biopeaks.heart import ppg_peaks
-from wfdb.processing import compare_annotations
-
-
-record_dir = r"C:\Users\JohnDoe\surfdrive\Beta\example_data\PPG\signal"
-annotation_dir = r"C:\Users\JohnDoe\surfdrive\Beta\example_data\PPG\annotations"
-records = os.listdir(record_dir)
-annotations = os.listdir(annotation_dir)
-subjects = list(zip(records, annotations))
-
-sfreq = 300
-# Set tolerance to 50 milliseconds (Elgendi et al., 2013)
-tolerance = int(np.rint(.05 * sfreq))    # tolerance must be in samples for wfdb
-print(f"Setting tolerance for match between algorithmic and manual annotation"
-      f" to {tolerance} samples, corresponding to 50 milliseconds at a sampling rate of {sfreq}.")
-
-sensitivity = []
-precision = []
-
-for subject in subjects:
-
-    data = np.loadtxt(os.path.join(record_dir, subject[0]))
-    annotation = np.loadtxt(os.path.join(annotation_dir, subject[1]))
-    peaks = ppg_peaks(data, sfreq)
-
-    comparitor = compare_annotations(peaks, annotation, tolerance)
-    tp = comparitor.tp
-    fp = comparitor.fp
-    fn = comparitor.fn
-
-    sensitivity.append(float(tp) / (tp + fn))
-    precision.append(float(tp) / (tp + fp))
-    print(f"sensitivity = {sensitivity[-1]}, precision = {precision[-1]}")
-
-print(f"mean precision = {np.mean(precision)}, std precision = {np.std(precision)}")
-print(f"mean sensitivity = {np.mean(sensitivity)}, std sensitivity = {np.std(sensitivity)}")
diff --git a/biopeaks/benchmarks/benchmark_PPG_local.py b/biopeaks/benchmarks/benchmark_PPG_local.py
new file mode 100644
index 0000000..066393b
--- /dev/null
+++ b/biopeaks/benchmarks/benchmark_PPG_local.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import h5py
+import numpy as np
+from pathlib import Path
+from biopeaks.heart import ppg_peaks
+from wfdb.processing import compare_annotations
+
+
+data_dir = Path(".../TBME2013-PPGRR-Benchmark_R3/data")    # replace with your local "data" directory once you've downloaded the database
+
+sfreq = 300
+tolerance = int(np.rint(.05 * sfreq))    # in samples; 50 milliseconds in accordance with Elgendi et al., 2013, doi:10.1371/journal.pone.0076585
+print(f"Setting tolerance for match between algorithmic and manual annotation"
+      f" to {tolerance} samples, corresponding to 50 milliseconds at a sampling rate of {sfreq}.")
+
+sensitivity = []
+precision = []
+
+for subject in data_dir.iterdir():
+
+    with h5py.File(subject, "r") as f:    # close each record once signal and annotation are read
+        record = np.ravel(f["signal"]["pleth"]["y"])
+        annotation = np.ravel(f["labels"]["pleth"]["peak"]["x"])
+
+    peaks = ppg_peaks(record, sfreq)
+
+    comparitor = compare_annotations(annotation, peaks, tolerance)    # reference annotation first, detector output second
+    tp = comparitor.tp
+    fp = comparitor.fp
+    fn = comparitor.fn
+
+    sensitivity.append(float(tp) / (tp + fn))
+    precision.append(float(tp) / (tp + fp))
+
+    print(f"\nResults {subject}")
+    print("-" * len(str(subject)))
+    print(f"sensitivity = {sensitivity[-1]}")
+    print(f"precision = {precision[-1]}")
+
+print(f"\nAverage results over {len(precision)} records")
+print("-" * 31)
+print(f"sensitivity: mean = {np.mean(sensitivity)}, std = {np.std(sensitivity)}")
+print(f"precision: mean = {np.mean(precision)}, std = {np.std(precision)}")
diff --git a/docs/tests.md b/docs/tests.md
index 3759f69..6675f7b 100644
--- a/docs/tests.md
+++ b/docs/tests.md
@@ -17,6 +17,8 @@ pytest -v
 ```
 
 ## Extrema detection benchmarks
+
+### ECG
 To validate the performance of the ECG peak detector `heart.ecg_peaks()`, please install the [wfdb](https://github.com/MIT-LCP/wfdb-python) and [aiohttp](https://github.com/aio-libs/aiohttp):
 ```
 conda install -c conda-forge wfdb
@@ -24,8 +26,17 @@ conda install -c conda-forge aiohttp
 ```
 You can then run the `benchmark_ECG_stream` script in the `benchmarks` folder.
 The script streams ECG and annotation files from the [Glasgow University Database (GUDB)](http://researchdata.gla.ac.uk/716/).
-You can select an experiment, ECG channel, and annotation file.
+You can select an experiment, ECG channel, and annotation file (for details, have a look at the docstring of `BenchmarkDetectorGUDB.benchmark_records()` in `benchmarks/benchmark_utils`).
+
+Alternatively, you can download the GUDB and run the `benchmark_ECG_local` script in the `benchmarks` folder.
+In the script, replace the `data_dir` with your local directory (see comments in the script).
 
+### PPG
 To validate the performance of the PPG peak detector `heart.ppg_peaks()`
-please download the [Capnobase IEEE TBME benchmark dataset](http://www.capnobase.org/index.php?id=857).
-After extracting the PPG signals and peak annotations you can run the `benchmark_PPG` script in the `benchmarks` folder.
\ No newline at end of file
+please download the [Capnobase IEEE TBME benchmark dataset](http://www.capnobase.org/index.php?id=857) and install [wfdb](https://github.com/MIT-LCP/wfdb-python) and [h5py](https://www.h5py.org/):
+```
+conda install -c conda-forge wfdb
+conda install -c conda-forge h5py
+```
+
+You can then run the `benchmark_PPG_local` script in the `benchmarks` folder. In the script, replace the `data_dir` with your local directory (see comments in the script).
\ No newline at end of file