diff --git a/slab/binaural.py b/slab/binaural.py
index 78b566b..5536c46 100644
--- a/slab/binaural.py
+++ b/slab/binaural.py
@@ -18,6 +18,9 @@ class Binaural(Sound):
         data (slab.Signal | numpy.ndarray | list | str): see documentation of slab.Sound for details. the `data`
             must have either one or two channels. If it has one, that channel is duplicated
         samplerate (int): samplerate in Hz, must only be specified when creating an instance from an array.
+        name (str): A string label for the Sound object. The inbuilt sound generating functions will automatically
+            set .name to the name of the method used. Useful for logging during experiments.
+
     Attributes:
         .left: the first data channel, containing the sound for the left ear.
         .right: the second data channel, containing the sound for the right ear
@@ -25,7 +28,8 @@ class Binaural(Sound):
         .n_channels: the number of channels in `data`. Must be 2 for a binaural sound.
         .n_samples: the number of samples in `data`. Equals `duration` * `samplerate`.
         .duration: the duration of the sound in seconds. Equals `n_samples` / `samplerate`.
-    """
+        .name: string label of the sound.
+    """
     # instance properties
     def _set_left(self, other):
         if hasattr(other, 'samplerate'):  # probably an slab object
@@ -44,8 +48,9 @@ def _set_right(self, other):
     right = property(fget=lambda self: Sound(self.channel(1)), fset=_set_right,
                      doc='The right channel for a stereo sound.')

-    def __init__(self, data, samplerate=None):
+    def __init__(self, data, samplerate=None, name='unnamed'):
         if isinstance(data, (Sound, Signal)):
+            self.name = data.name
             if data.n_channels == 1:  # if there is only one channel, duplicate it.
                 self.data = numpy.tile(data.data, 2)
             elif data.n_channels == 2:
@@ -53,24 +58,24 @@ def __init__(self, data, samplerate=None):
             else:
                 raise ValueError("Data must have one or two channel!")
             self.samplerate = data.samplerate
-        elif isinstance(data, (list, tuple)):
+        elif isinstance(data, (list, tuple)):  # list of Sounds
             if isinstance(data[0], (Sound, Signal)):
                 if data[0].n_samples != data[1].n_samples:
                     raise ValueError('Sounds must have same number of samples!')
                 if data[0].samplerate != data[1].samplerate:
                     raise ValueError('Sounds must have same samplerate!')
-                super().__init__([data[0].data[:, 0], data[1].data[:, 0]], data[0].samplerate)
-            else:
-                super().__init__(data, samplerate)
-        elif isinstance(data, str):
+                super().__init__([data[0].data[:, 0], data[1].data[:, 0]], data[0].samplerate, name=data[0].name)
+            else:  # list of samples
+                super().__init__(data, samplerate, name=name)
+        elif isinstance(data, str):  # file name
             super().__init__(data, samplerate)
             if self.n_channels == 1:
                 self.data = numpy.tile(self.data, 2)  # duplicate channel if monaural file
-        else:
-            super().__init__(data, samplerate)
+        else:  # anything but Sound, list, or file name
+            super().__init__(data, samplerate, name=name)
             if self.n_channels == 1:
                 self.data = numpy.tile(self.data, 2)  # duplicate channel if monaural file
-        if self.n_channels != 2:
+        if self.n_channels != 2:  # bail if unable to enforce 2 channels
             ValueError('Binaural sounds must have two channels!')

     def itd(self, duration=None, max_lag=0.001):
@@ -96,7 +101,9 @@ def itd(self, duration=None, max_lag=0.001):
         """
         if duration is None:
             return self._get_itd(max_lag)
-        return self._apply_itd(duration)
+        out = copy.deepcopy(self)
+        out.name = f'{str(duration)}-itd_{self.name}'
+        return out._apply_itd(duration)

     def _get_itd(self, max_lag):
         max_lag = Sound.in_samples(max_lag, self.samplerate)
@@ -135,11 +142,12 @@ def itd(self, duration=None, max_lag=0.001):
         """
         if dB is None:
             return self.right.level - self.left.level
-        new = copy.deepcopy(self)  # so that we can return a new sound
+        out = copy.deepcopy(self)  # so that we can return a new sound
         level = numpy.mean(self.level)
-        new_levels = (level - dB/2, level + dB/2)
-        new.level = new_levels
-        return new
+        out_levels = (level - dB/2, level + dB/2)
+        out.level = out_levels
+        out.name = f'{str(dB)}-ild_{self.name}'
+        return out

     def itd_ramp(self, from_itd=-6e-4, to_itd=6e-4):
         """
@@ -158,7 +166,7 @@ def itd_ramp(self, from_itd=-6e-4, to_itd=6e-4):
             moving = sig.itd_ramp(from_itd=-0.001, to_itd=0.01)
             moving.play()
         """
-        new = copy.deepcopy(self)
+        out = copy.deepcopy(self)
         # make the ITD ramps
         left_ramp = numpy.linspace(from_itd / 2, to_itd / 2, self.n_samples)
         right_ramp = numpy.linspace(-from_itd / 2, -to_itd / 2, self.n_samples)
@@ -168,9 +176,10 @@ def itd_ramp(self, from_itd=-6e-4, to_itd=6e-4):
             filter_length = self.n_samples // 16 * 2  # 1/8th of n_samples, always even
         else:
             raise ValueError('Signal too short! (min 512 samples)')
-        new = new.delay(duration=left_ramp, channel=0, filter_length=filter_length)
-        new = new.delay(duration=right_ramp, channel=1, filter_length=filter_length)
-        return new
+        out = out.delay(duration=left_ramp, channel=0, filter_length=filter_length)
+        out = out.delay(duration=right_ramp, channel=1, filter_length=filter_length)
+        out.name = f'itd-ramp_{self.name}'
+        return out

     def ild_ramp(self, from_ild=-50, to_ild=50):
         """
@@ -190,16 +199,17 @@ def ild_ramp(self, from_ild=-50, to_ild=50):
             moving = sig.ild_ramp(from_ild=-10, to_ild=10)
             move.play()
         """
-        new = self.ild(0)  # set ild to zero
+        out = self.ild(0)  # set ild to zero
         # make ramps
         left_ramp = numpy.linspace(-from_ild / 2, -to_ild / 2, self.n_samples)
         right_ramp = numpy.linspace(from_ild / 2, to_ild / 2, self.n_samples)
         left_ramp = 10**(left_ramp/20.)
         right_ramp = 10**(right_ramp/20.)
         # multiply channels with ramps
-        new.data[:, 0] *= left_ramp
-        new.data[:, 1] *= right_ramp
-        return new
+        out.data[:, 0] *= left_ramp
+        out.data[:, 1] *= right_ramp
+        out.name = f'ild-ramp_{self.name}'
+        return out

     @staticmethod
     def azimuth_to_itd(azimuth, frequency=2000, head_radius=8.75):
@@ -275,6 +285,7 @@ def at_azimuth(self, azimuth=0, ils=None):
         itd = Binaural.azimuth_to_itd(azimuth, frequency=centroid)
         ild = Binaural.azimuth_to_ild(azimuth, frequency=centroid, ils=ils)
         out = self.itd(duration=itd)
+        out.name = f'{azimuth}-azi_{self.name}'
         return out.ild(dB=ild)

     def externalize(self, hrtf=None):
@@ -301,9 +312,10 @@ def externalize(self, hrtf=None):
         # if sound and HRTF has different samplerates, resample the sound, apply the HRTF, and resample back:
         resampled_signal = resampled_signal.resample(hrtf.data[0].samplerate)  # resample to hrtf rate
         filt = Filter(10**(h/20), fir='TF', samplerate=hrtf.data[0].samplerate)
-        filtered_signal = filt.apply(resampled_signal)
-        filtered_signal = filtered_signal.resample(self.samplerate)
-        return filtered_signal
+        out = filt.apply(resampled_signal)
+        out = out.resample(self.samplerate)
+        out.name = f'externalized_{self.name}'
+        return out

     @staticmethod
     def make_interaural_level_spectrum(hrtf=None):
@@ -397,6 +409,7 @@ def interaural_level_spectrum(self, azimuth, ils=None):
         out_left = Filter.collapse_subbands(subbands_left, filter_bank=fbank)
         out_right = Filter.collapse_subbands(subbands_right, filter_bank=fbank)
         out = Binaural([out_left, out_right])
+        out.name = f'ils_{self.name}'
         return out.resample(samplerate=original_samplerate)

     def drr(self, winlength=0.0025):
@@ -465,6 +478,7 @@ def whitenoise(kind='diotic', **kwargs):
             out.left = out.right
         else:
             raise ValueError("kind must be 'dichotic' or 'diotic'.")
+        out.name = f'{kind}-{out.name}'
         return out

     @staticmethod
@@ -473,7 +487,9 @@ def pinknoise(kind='diotic', **kwargs):
         Generate binaural pink noise. `kind`='diotic' produces the same noise samples in both channels,
         `kind`='dichotic' produces uncorrelated noise. The rest is identical to `slab.Sound.pinknoise`.
         """
-        return Binaural.powerlawnoise(alpha=1, kind=kind, **kwargs)
+        out = Binaural.powerlawnoise(alpha=1, kind=kind, **kwargs)
+        out.name = f'{kind}-pinknoise'
+        return out

     @staticmethod
     def powerlawnoise(kind='diotic', **kwargs):
@@ -489,6 +505,7 @@ def powerlawnoise(kind='diotic', **kwargs):
             out.left = out.right
         else:
             raise ValueError("kind must be 'dichotic' or 'diotic'.")
+        out.name = f'{kind}-{out.name}'
         return out

     @staticmethod
@@ -502,6 +519,7 @@ def irn(kind='diotic', **kwargs):
             out.left = out.right
         else:
             raise ValueError("kind must be 'dichotic' or 'diotic'.")
+        out.name = f'{kind}-{out.name}'
         return out

     @staticmethod
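The labels introduced above compose as plain strings. A minimal sketch of the intended behaviour, with illustrative parameter values; the resulting strings follow the f-strings in this file, and whether a label survives the internal delay filtering in _apply_itd is an assumption:

    import slab

    noise = slab.Binaural.pinknoise(kind='diotic')  # .name == 'diotic-pinknoise'
    lateral = noise.itd(duration=0.0006)            # intended label: '0.0006-itd_diotic-pinknoise'
    louder_right = noise.ild(dB=6)                  # .name == '6-ild_diotic-pinknoise'
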
diff --git a/slab/signal.py b/slab/signal.py
index 023909b..22b09c9 100644
--- a/slab/signal.py
+++ b/slab/signal.py
@@ -32,11 +32,13 @@ class Signal:
             it must have a .data attribute containing an array. If it's a list, the elements can be arrays or objects.
             The output will be a multi-channel sound with each channel corresponding to an element of the list.
         samplerate (int | None): the samplerate of the sound. If None, use the default samplerate.
+        name (str): a string label for the signal object. Default is 'unnamed'.
     Attributes:
         .duration: duration of the sound in seconds
         .n_samples: duration of the sound in samples
         .n_channels: number of channels in the sound
         .times: list with the time point of each sample
+        .name: string label

     Examples::
         import slab, numpy
@@ -57,7 +59,7 @@ class Signal:
                          doc='The number of channels in the Signal.')

     # __methods (class creation, printing, and slice functionality)
-    def __init__(self, data, samplerate=None):
+    def __init__(self, data, samplerate=None, name='unnamed'):
         if hasattr(data, 'samplerate') and samplerate is not None:
             warnings.warn('First argument has a samplerate property. Ignoring given samplerate.')
         if isinstance(data, numpy.ndarray):
@@ -90,19 +92,24 @@ def __init__(self, data, samplerate=None):
             self.samplerate = _default_samplerate
         else:
             self.samplerate = samplerate
+        if hasattr(data, 'name') and name == 'unnamed':  # carry over name if source object has one and no new name provided
+            self.name = data.name
+        else:
+            self.name = name
+
     def __repr__(self):
-        return f'{type(self)} (\n{repr(self.data)}\n{repr(self.samplerate)})'
+        return f'{type(self)} (\n{repr(self.data)}\n{repr(self.samplerate)}\n{repr(self.name)})'

     def __str__(self):
-        return f'{type(self)} duration {self.duration}, samples {self.n_samples}, channels {self.n_channels},' \
+        return f'{type(self)} ({self.name}) duration {self.duration}, samples {self.n_samples}, channels {self.n_channels},' \
               f'samplerate {self.samplerate}'

     def _repr_html_(self):
         'HTML image representation for Jupyter notebook support'
         elipses = '\u2026'
         class_name = str(type(self))[8:-2]
         html = [f'<table><tr><th>#</th>']
         samps, chans = self.data.shape
         html += (f'<th>channel {j}</th>' for j in range(chans))
@@ -336,7 +343,7 @@ def _get_envelope(self, kind, cutoff):
             envs = 20 * numpy.log10(envs)  # convert amplitude to dB
         elif not kind == 'gain':
             raise ValueError('Kind must be either "gain" or "dB"!')
-        return Signal(envs, samplerate=self.samplerate)
+        return Signal(envs, samplerate=self.samplerate, name='envelope')

     def _apply_envelope(self, envelope, times, kind):
         # TODO: write tests for the new options!
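The default/carry-over branch added to Signal.__init__ above can be summarised in a short sketch (object names and values are illustrative only):

    import slab, numpy

    sig = slab.Signal(numpy.ones((1000, 2)), samplerate=44100)  # no name given -> 'unnamed'
    probe = slab.Signal(sig, name='probe')                      # explicit name overrides the source's name
    copy_of_probe = slab.Signal(probe)                          # no name given -> carries over 'probe'
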
diff --git a/slab/sound.py b/slab/sound.py
index 8cda06b..dcc8a62 100644
--- a/slab/sound.py
+++ b/slab/sound.py
@@ -66,15 +66,19 @@ class Sound(Signal):
     samplerate or by using one of the sound-generating methods of the class (all of the @staticmethods).

     Arguments:
-        data ( str | pathlib.Path | numpy.ndarray | slab.Signal | list): Given a string or Path pointing to the
+        data (str | pathlib.Path | numpy.ndarray | slab.Signal | list): Given a string or Path pointing to the
            .wav file, the `data` and `samplerate` will be loaded from the file. Given an array, and instance of
            a `Signal` or a list, the data will be passed to the super class (see documentation of slab.Signal).
-        samplerate(int | float): must only be defined when creating a `Sound` from an array.
+        samplerate (int | float): must only be defined when creating a `Sound` from an array.
+        name (str): A string label for the Sound object. The inbuilt sound generating functions will automatically
+            set .name to the name of the method used (f.i. Sound.pinknoise will set the name of the resulting sound
+            object to 'pinknoise'). Useful for logging during experiments.
     Attributes:
         .data: the data-array of the Sound object which has the shape `n_samples` x `n_channels`.
         .n_channels: the number of channels in `data`.
         .n_samples: the number of samples in `data`. Equals `duration` * `samplerate`.
         .duration: the duration of the sound in seconds. Equals `n_samples` / `samplerate`.
+        .name: string label of the sound.
     Examples::
         import slab, numpy
@@ -136,7 +140,7 @@ def _set_level(self, level):
        :meth:`slab.calibrate` to make the computed level reflect output intensity.
        """)

-    def __init__(self, data, samplerate=None):
+    def __init__(self, data, samplerate=None, name='unnamed'):
         if isinstance(data, pathlib.Path):  # Sound initialization from a file name (pathlib object)
             data = str(data)
         if isinstance(data, str):  # Sound initialization from a file name (string)
@@ -145,9 +149,10 @@ def __init__(self, data, samplerate=None):
             _ = Sound.read(data)
             self.data = _.data
             self.samplerate = _.samplerate
+            self.name = _.name
         else:
             # delegate to the baseclass init
-            super().__init__(data, samplerate)
+            super().__init__(data, samplerate, name)

     # static methods (creating sounds)
     @staticmethod
@@ -164,7 +169,7 @@ def read(filename):
            raise ImportError(
                'Reading wav files requires SoundFile (pip install git+https://github.com/bastibe/SoundFile.git')
        data, samplerate = soundfile.read(filename)
-        return Sound(data, samplerate=samplerate)
+        return Sound(data, samplerate=samplerate, name=filename)

     @staticmethod
     def tone(frequency=500, duration=1., phase=0, samplerate=None, level=None, n_channels=1):
@@ -187,21 +192,22 @@ def tone(frequency=500, duration=1., phase=0, samplerate=None, level=None, n_cha
         if samplerate is None:
             samplerate = slab.get_default_samplerate()
         duration = Sound.in_samples(duration, samplerate)
-        frequency = numpy.array(frequency)
+        freq = numpy.array(frequency)
         phase = numpy.array(phase)
-        if frequency.size > n_channels == 1:
-            n_channels = frequency.size
+        if freq.size > n_channels == 1:
+            n_channels = freq.size
         if phase.size > n_channels == 1:
             n_channels = phase.size
-        if frequency.size == n_channels:
-            frequency.shape = (1, n_channels)
+        if freq.size == n_channels:
+            freq.shape = (1, n_channels)
         if phase.size == n_channels:
             phase.shape = (1, n_channels)
         t = numpy.arange(0, duration, 1) / samplerate
         t.shape = (t.size, 1)  # ensures C-order
-        x = numpy.sin(phase + 2 * numpy.pi * frequency * numpy.tile(t, (1, n_channels)))
+        x = numpy.sin(phase + 2 * numpy.pi * freq * numpy.tile(t, (1, n_channels)))
         out = Sound(x, samplerate)
         out.level = level
+        out.name = f'tone_{str(frequency)}'
         return out

     @staticmethod
@@ -243,6 +249,7 @@ def dynamic_tone(frequencies=None, times=None, phase=0, samplerate=None, level=N
         x = numpy.sin(phase + 2 * numpy.pi * numpy.cumsum(frequencies) / samplerate)
         out = Sound(x, samplerate)
         out.level = level
+        out.name = 'dynamic_tone'
         return out

     @staticmethod
@@ -300,6 +307,7 @@ def harmoniccomplex(f0=500, duration=1., amplitude=0, phase=0, samplerate=None,
             tmp.level = lvl + amplitudes[i]
             out += tmp
         out.level = level
+        out.name = f'harmonic_{str(f0)}'
         return out

     @staticmethod
@@ -326,6 +334,7 @@ def whitenoise(duration=1.0, samplerate=None, level=None, n_channels=1):
         x = numpy.random.randn(duration, n_channels)
         out = Sound(x, samplerate)
         out.level = level
+        out.name = 'whitenoise'
         return out

     @staticmethod
@@ -380,6 +389,7 @@ def powerlawnoise(duration=1.0, alpha=1, samplerate=None, level=None, n_channels
         x.shape = (n, n_channels)
         out = Sound(x, samplerate)
         out.level = level
+        out.name = f'powerlawnoise_{str(alpha)}'
         return out

     @staticmethod
@@ -393,7 +403,9 @@ def pinknoise(duration=1.0, samplerate=None, level=None, n_channels=1):
         Returns:
             (slab.Sound): power law noise generated from the parameters with exponent alpha==1.
         """
-        return Sound.powerlawnoise(duration=duration, alpha=1, samplerate=samplerate, level=level, n_channels=n_channels)
+        out = Sound.powerlawnoise(duration=duration, alpha=1, samplerate=samplerate, level=level, n_channels=n_channels)
+        out.name = 'pinknoise'
+        return out

     @staticmethod
     def irn(frequency=100, gain=1, n_iter=4, duration=1.0, samplerate=None, level=None, n_channels=1):
@@ -436,6 +448,7 @@ def irn(frequency=100, gain=1, n_iter=4, duration=1.0, samplerate=None, level=No
             out.append(x)
         out = Sound(out, samplerate)
         out.level = level
+        out.name = f'irn_{str(frequency)}'
         return out

     @staticmethod
@@ -457,6 +470,7 @@ def click(duration=0.0001, samplerate=None, level=None, n_channels=1):
         duration = Sound.in_samples(duration, samplerate)
         out = Sound(numpy.ones((duration, n_channels)), samplerate)
         out.level = level
+        out.name = 'click'
         return out

     @staticmethod
@@ -485,6 +499,7 @@ def clicktrain(duration=1.0, frequency=500, clickduration=0.0001, level=None, sa
         oneclick = oneclick.resize(interval)
         oneclick = oneclick.repeat(n)
         oneclick.level = level
+        oneclick.name = f'clicktrain_{str(frequency)}'
         return oneclick

     @staticmethod
@@ -517,6 +532,7 @@ def chirp(duration=1.0, from_frequency=100, to_frequency=None, samplerate=None,
             t, from_frequency, t[-1], to_frequency, method=kind, vertex_zero=True)
         out = Sound(chirp, samplerate=samplerate)
         out.level = level
+        out.name = 'chirp'
         return out

     @staticmethod
@@ -535,6 +551,7 @@ def silence(duration=1.0, samplerate=None, n_channels=1):
             samplerate = slab.get_default_samplerate()
         duration = Sound.in_samples(duration, samplerate)
         out = Sound(numpy.zeros((duration, n_channels)), samplerate)
+        out.name = 'silence'
         return out

     @staticmethod
@@ -595,10 +612,11 @@ def vowel(vowel='a', gender=None, glottal_pulse_time=12, formant_multiplier=1, f
                 * numpy.mod(times, glottal_pulse_time))
         if n_channels > 1:
             out = numpy.tile(out, (n_channels, 1))
-        vowel = Sound(data=out, samplerate=samplerate)
-        vowel.filter(frequency=0.75 * samplerate / 2, kind='lp')
-        vowel.level = level
-        return vowel
+        out = Sound(data=out, samplerate=samplerate)
+        out.filter(frequency=0.75 * samplerate / 2, kind='lp')
+        out.level = level
+        out.name = f'vowel_{str(formants)}'
+        return out

     @staticmethod
@@ -634,6 +652,7 @@ def multitone_masker(duration=1.0, low_cutoff=125, high_cutoff=4000, bandwidth=1
         data = numpy.sum(sig.data, axis=1) / len(freqs)  # collapse across channels
         out = Sound(data, samplerate=samplerate)
         out.level = level
+        out.name = 'multitone_masker'
         return out

     @staticmethod
@@ -676,6 +695,7 @@ def equally_masking_noise(duration=1.0, low_cutoff=125, high_cutoff=4000, sample
         fnoise = fnoise[:duration]
         out = Sound(data=fnoise, samplerate=samplerate)
         out.level = level
+        out.name = 'equally_masking_noise'
         return out

     @staticmethod
@@ -696,9 +716,10 @@ def sequence(*sounds):
         for sound in sounds:
             if sound.samplerate != samplerate:
                 raise ValueError('All sounds must have the same sample rate.')
-        sounds = tuple(s.data for s in sounds)
-        x = numpy.vstack(sounds)
-        return Sound(x, samplerate)
+        data = tuple(s.data for s in sounds)
+        data = numpy.vstack(data)
+        name = '_x_'.join([s.name for s in sounds])
+        return Sound(data, samplerate, name)

     # instance methods
     def write(self, filename, normalise=True, fmt='WAV'):
@@ -756,6 +777,7 @@ def repeat(self, n):
         """
         sound = copy.deepcopy(self)
         sound.data = numpy.vstack((sound.data,) * int(n))
+        sound.name = f'{n}x_{sound.name}'
         return sound

     @staticmethod
@@ -799,18 +821,23 @@ def crossfade(*sounds, overlap=0.01):
             n_silence_after = n_total - n_silence_before - sound.n_samples
             if i == len(sounds) - 1:
                 sound = sound.ramp(duration=overlap, when="onset")  # for the last sound only add onset ramp
+                name = sound.name
                 sounds[i] = Sound.sequence(
                     Sound.silence(n_silence_before, samplerate=sound.samplerate, n_channels=sound.n_channels),
                     sound)
+                sounds[i].name = name  # avoid adding _x_silence to the name
             else:
                 sound = sound.ramp(duration=overlap, when="both")  # for all other sounds add both
+                name = sound.name
                 sounds[i] = Sound.sequence(
                     Sound.silence(n_silence_before, samplerate=sound.samplerate, n_channels=sound.n_channels),
                     sound,
                     Sound.silence(n_silence_after, samplerate=sound.samplerate, n_channels=sound.n_channels))
+                sounds[i].name = name  # avoid adding _x_silence to the name
             n_previous += n_samples
-        sound = sum(sounds)
-        return sound
+        out = sum(sounds)
+        out.name = '_x_'.join([s.name for s in sounds])
+        return out

     def pulse(self, frequency=4, duty=0.75, gate_time=0.005):
         """
@@ -825,24 +852,25 @@ def pulse(self, frequency=4, duty=0.75, gate_time=0.005):
         Returns:
             slab.Sound: pulsed copy of the instance.
         """
-        sound = copy.deepcopy(self)
+        out = copy.deepcopy(self)
         pulse_period = 1 / frequency
-        n_pulses = round(sound.duration / pulse_period)  # number of pulses in the stimulus
-        pulse_period = sound.duration / n_pulses  # period in s, fits into stimulus duration
-        pulse_samples = Sound.in_samples(pulse_period * duty, sound.samplerate)
-        fall_samples = Sound.in_samples(gate_time, sound.samplerate)  # 5ms rise/fall time
+        n_pulses = round(out.duration / pulse_period)  # number of pulses in the stimulus
+        pulse_period = out.duration / n_pulses  # period in s, fits into stimulus duration
+        pulse_samples = Sound.in_samples(pulse_period * duty, out.samplerate)
+        fall_samples = Sound.in_samples(gate_time, out.samplerate)  # 5ms rise/fall time
         if (pulse_samples - 2 * fall_samples) < 0:
             raise ValueError(f'The pulse duration {pulse_samples} is shorter than the combined ramps'
                              f'({fall_samples} each). Reduce ´pulse_frequency´ or `gate_time`!')
         fall = numpy.cos(numpy.pi * numpy.arange(fall_samples) / (2 * fall_samples)) ** 2
         pulse = numpy.concatenate((1 - fall, numpy.ones(pulse_samples - 2 * fall_samples), fall))
         pulse = numpy.concatenate(
-            (pulse, numpy.zeros(Sound.in_samples(pulse_period, sound.samplerate) - len(pulse))))
+            (pulse, numpy.zeros(Sound.in_samples(pulse_period, out.samplerate) - len(pulse))))
         envelope = numpy.tile(pulse, n_pulses)
         envelope = envelope[:, None]  # add an empty axis to get to the same shape as sound.data
         # if data is 2D (>1 channel) broadcast the envelope to fit
-        sound.data *= numpy.broadcast_to(envelope, sound.data.shape)
-        return sound
+        out.data *= numpy.broadcast_to(envelope, out.data.shape)
+        out.name = f'{frequency}Hz-pulsed_{out.name}'
+        return out

     def am(self, frequency=10, depth=1, phase=0):
         """
@@ -855,11 +883,12 @@ def am(self, frequency=10, depth=1, phase=0):
         Returns:
             slab.Sound: amplitude modulated copy of the instance.
         """
-        sound = copy.deepcopy(self)
-        envelope = (1 + depth * numpy.sin(2 * numpy.pi * frequency * sound.times + phase))
+        out = copy.deepcopy(self)
+        envelope = (1 + depth * numpy.sin(2 * numpy.pi * frequency * out.times + phase))
         envelope = envelope[:, None]
-        sound.data *= numpy.broadcast_to(envelope, sound.data.shape)
-        return sound
+        out.data *= numpy.broadcast_to(envelope, out.data.shape)
+        out.name = f'{frequency}Hz-am_{out.name}'
+        return out

     def filter(self, frequency=100, kind='hp'):
         """
@@ -873,12 +902,13 @@ def filter(self, frequency=100, kind='hp'):
         Returns:
             slab.Sound: filtered copy of the instance.
         """
-        sound = copy.deepcopy(self)
+        out = copy.deepcopy(self)
         n = min(1000, self.n_samples)
         filt = Filter.band(
             frequency=frequency, kind=kind, samplerate=self.samplerate, length=n)
-        sound.data = filt.apply(self).data
-        return sound
+        out.data = filt.apply(self).data
+        out.name = f'{frequency}Hz-{kind}_{out.name}'
+        return out

     def aweight(self):
         """
@@ -901,6 +931,7 @@ def aweight(self):
         b, a = scipy.signal.filter_design.bilinear(numerators, denominators, self.samplerate)
         out = copy.deepcopy(self)
         out.data = scipy.signal.lfilter(b, a, self.data, axis=0)
+        out.name = f'aweighted_{out.name}'
         return out

     @staticmethod
@@ -935,6 +966,7 @@ def record(duration=1.0, samplerate=None):
                               'Windows: see SoX website: http://sox.sourceforge.net/)')
            time.sleep(duration/samplerate+0.1)  # add 100ms to make sure the tmp file is written
        out = Sound(filename)
+        out.name = 'recorded'
         return out

     def play(self):
@@ -1257,7 +1289,9 @@ def vocode(self, bandwidth=1 / 3):
         subbands_noise = fbank.apply(noise)  # divide into same subbands as sound
         subbands_noise *= envs  # apply envelopes
         subbands_noise.level = subbands.level
-        return Sound(Filter.collapse_subbands(subbands=subbands_noise, filter_bank=fbank))
+        out = Sound(Filter.collapse_subbands(subbands=subbands_noise, filter_bank=fbank))
+        out.name = f'vocoded_{self.name}'
+        return out

     def crest_factor(self):
         """
diff --git a/tests/test_binaural.py b/tests/test_binaural.py
index 81ca90d..7a8ae71 100644
--- a/tests/test_binaural.py
+++ b/tests/test_binaural.py
@@ -100,13 +100,16 @@ def test_interaural_level_spectrum():
     subbands_right = fbank.apply(lateral.right)
     assert -1 < (level_differences - (subbands_left.level - subbands_right.level)).mean() < 1

+
 def test_overloaded_sound_generators():
-    methods = [attribute for attribute in dir(slab.Sound) if callable(getattr(slab.Sound, attribute)) and attribute.startswith('__') is False]
+    methods = ['chirp', 'click', 'clicktrain', 'dynamic_tone', 'equally_masking_noise',
+               'harmoniccomplex', 'irn', 'multitone_masker', 'pinknoise', 'powerlawnoise',
+               'silence', 'tone', 'vowel', 'whitenoise']
     for method in methods:
-        func = getattr(slab.Sound, method)
-        args, _, _ = inspect.getargs(func.__code__)
-        if 'n_channels' in args:
-            assert getattr(slab.Binaural, method)().n_channels == 2
+        func = getattr(slab.Binaural, method)
+        assert func().n_channels == 2
+        assert func().name != 'unnamed'
+

 def test_drr():
     for _ in range(10):
diff --git a/tests/test_signal.py b/tests/test_signal.py
index 1e84f45..6c5b7e7 100644
--- a/tests/test_signal.py
+++ b/tests/test_signal.py
@@ -14,6 +14,7 @@ def test_signal_generation():
     assert sig.samplerate == samplerate
     assert len(sig.times) == len(sig.data)
     numpy.testing.assert_almost_equal(sig.times.max()*samplerate, n_samples, decimal=-1)
+    assert sig.name == 'unnamed'


 def test_arithmetics():
diff --git a/tests/test_sounds.py b/tests/test_sounds.py
index d8adc64..a7ef886 100644
--- a/tests/test_sounds.py
+++ b/tests/test_sounds.py
@@ -8,7 +8,7 @@ def test_sound_generation():
     # numpy.ndarray | str | pathlib.Path | list
-    for _ in range(100):
+    for _ in range(20):
         data = numpy.ones([10, 2])
         sound = slab.Sound(data, samplerate=10)  # generate sound from array
         sound1 = slab.Sound([data, data], samplerate=10)  # generate sound from list of arrays
@@ -27,10 +27,11 @@ def test_sound_generation():
         loaded2 = slab.Sound(str(tmpdir/"sound.wav"))
         numpy.testing.assert_equal(loaded1.data, loaded2.data)
         numpy.testing.assert_equal(loaded1.times, loaded2.times)
-
+        # test if .name attribute of loaded sound object is the path string
+        assert loaded1.name == f"{tmpdir / 'sound.wav'}"

 def test_read_write():
-    for _ in range(100):
+    for _ in range(20):
         for normalize in [True, False]:
             sound = slab.Sound(numpy.random.randn(1000, 2), samplerate=numpy.random.randint(100, 1000))
             if normalize is False:
@@ -42,24 +43,27 @@ def test_tone():
-    for freq in range(50, 20000, 100):
+    for freq in range(50, 20000, 500):
         sound = slab.Sound.tone(duration=numpy.random.randint(1000, 5000), frequency=freq, samplerate=44100)
         Z, freqs = sound.spectrum(show=False)
         assert numpy.abs(freqs[numpy.where(Z == Z.max())[0][0]] - freq) < 50
-    for freq in range(500, 5000, 100):
+        assert sound.name == f'tone_{str(freq)}'
+    for freq in range(500, 5000, 200):
         harmonic = slab.Sound.harmoniccomplex(duration=numpy.random.randint(1000, 5000), f0=freq, samplerate=44100)
         Z, freqs = harmonic.spectrum(show=False)
         peaks = scipy.signal.find_peaks(Z.flatten())[0]
         peak_freqs = freqs[peaks]
         peak_freqs = peak_freqs/freq
         numpy.testing.assert_almost_equal(peak_freqs, numpy.linspace(1, len(peaks), len(peaks)), decimal=0)
+        assert harmonic.name == f'harmonic_{str(freq)}'


 def test_powerlawnoise():
-    for _ in range(100):
+    for _ in range(20):
         centroids = []
         for alpha in numpy.linspace(.5, 1., 3):
             sound = slab.Sound.powerlawnoise(alpha=alpha, samplerate=44100)
+            assert sound.name == f'powerlawnoise_{str(alpha)}'
             centroids.append(sound.spectral_feature("centroid"))
         assert all([centroids[i] > centroids[i+1] for i in range(len(centroids)-1)])
@@ -80,10 +84,12 @@ def test_crossfade():
         if overlap == 0:  # crossfade with overlap 0 should be the same as sequence
             noise2vowel_seq = slab.Sound.sequence(vowel, noise, vowel)
             assert all(noise2vowel.data == noise2vowel_seq.data)
+    noise_x_noise = slab.Sound.crossfade(noise, noise)
+    assert noise_x_noise.name == f'{noise.name}_x_{noise.name}'


 def test_frames():
-    for _ in range(100):
+    for _ in range(20):
         frame_dur = numpy.random.randint(10, 5000)
         sound_dur = numpy.abs(numpy.random.randn())+0.1
         sound = slab.Sound.whitenoise(duration=sound_dur)
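Taken together, the generator and processing methods build up readable labels. A sketch of how they compose, following the f-strings added in sound.py and the carry-over rule in signal.py (parameter values are illustrative):

    import slab

    tone = slab.Sound.tone(frequency=500, duration=0.5)  # .name == 'tone_500'
    copy_of_tone = slab.Sound(tone)                      # no name given -> carries over 'tone_500'
    pulsed = tone.pulse(frequency=4)                     # .name == '4Hz-pulsed_tone_500'
    seq = slab.Sound.sequence(tone, pulsed)              # .name == 'tone_500_x_4Hz-pulsed_tone_500'
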
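Finally, the "useful for logging during experiments" motivation in the docstrings amounts to reading .name at presentation time; a minimal, hypothetical trial loop:

    import slab

    stimuli = [slab.Sound.pinknoise(), slab.Sound.whitenoise()]  # names: 'pinknoise', 'whitenoise'
    for trial, stimulus in enumerate(stimuli):
        stimulus.play()
        print(f'trial {trial}: played {stimulus.name}')  # e.g. "trial 0: played pinknoise"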