nikl_m.py
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import os
import re

import numpy as np

import audio
from hparams import hparams


def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    '''Preprocesses the NIKL multi-speaker dataset from a given input path into a given output directory.

    Args:
      in_dir: The directory where you have downloaded the NIKL dataset
      out_dir: The directory to write the output into
      num_workers: Optional number of worker processes to parallelize across
      tqdm: You can optionally pass tqdm to get a nice progress bar

    Returns:
      A list of tuples describing the training examples. This should be written to train.txt
    '''
    # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization
    # and you can omit it and just call _process_utterance on each input if you want.
    # You will need to reformat the NIKL transcription file into UTF-8 first;
    # see https://github.com/homink/deepspeech.pytorch.ko/blob/master/data/local/clean_corpus.sh
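    # Assumed input layout (inferred from the parsing below, not documented here):
    # metadata.txt holds one 'wav_path|text' pair per line, and speaker.mid lists one
    # speaker code per line, e.g. 'fv01' (hypothetical example).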
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []

    # Map each speaker code in speaker.mid to a numeric speaker id (its line index).
    spk_id = {}
    with open(os.path.join(in_dir, 'speaker.mid'), encoding='utf-8') as f:
        for i, line in enumerate(f):
            spk_id[line.rstrip()] = i

    index = 1
    with open(os.path.join(in_dir, 'metadata.txt'), encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('|')
            wav_path = parts[0]
            text = parts[1]
            # Extract the speaker code from the wav filename.
            uid = re.search(r'([a-z][a-z][0-9][0-9]_t)', wav_path)
            uid = uid.group(1).replace('_t', '')
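            # Hypothetical example: a path like '/data/nikl/fv01_t01_s01.wav' matches
            # 'fv01_t', so uid becomes 'fv01', which must be a key in spk_id.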
            futures.append(executor.submit(
                partial(_process_utterance, out_dir, index, spk_id[uid], wav_path, text)))
            index += 1

    return [future.result() for future in tqdm(futures)]


def _process_utterance(out_dir, index, speaker_id, wav_path, text):
    '''Preprocesses a single utterance audio/text pair.

    This writes the mel and linear scale spectrograms to disk and returns a tuple to write
    to the train.txt file.

    Args:
      out_dir: The directory to write the spectrograms into
      index: The numeric index to use in the spectrogram filenames.
      speaker_id: The numeric id of the speaker who uttered this example
      wav_path: Path to the audio file containing the speech input
      text: The text spoken in the input audio file

    Returns:
      A (spectrogram_filename, mel_filename, n_frames, text, speaker_id) tuple to write to train.txt
    '''
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)

    # Optionally rescale so the waveform peaks at hparams.rescaling_max:
    if hparams.rescaling:
        wav = wav / np.abs(wav).max() * hparams.rescaling_max

    # Compute the linear-scale spectrogram from the wav:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]

    # Compute a mel-scale spectrogram from the wav:
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)

    # Write the spectrograms to disk:
    spectrogram_filename = 'nikl-multi-spec-%05d.npy' % index
    mel_filename = 'nikl-multi-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)

    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, text, speaker_id)
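

# ---------------------------------------------------------------------------
# Minimal driver sketch (not part of the original module): shows how
# build_from_path is typically invoked by a preprocessing script. The CLI
# flags, default behavior, and the train.txt column order below are
# assumptions, not guaranteed by the repo.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--in_dir', required=True,
                        help='Directory containing metadata.txt and speaker.mid')
    parser.add_argument('--out_dir', required=True,
                        help='Directory to write spectrograms and train.txt into')
    parser.add_argument('--num_workers', type=int, default=1)
    args = parser.parse_args()

    os.makedirs(args.out_dir, exist_ok=True)
    metadata = build_from_path(args.in_dir, args.out_dir, args.num_workers)

    # Assumed row format: spectrogram_filename|mel_filename|n_frames|text|speaker_id
    with open(os.path.join(args.out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
        for m in metadata:
            f.write('|'.join([str(x) for x in m]) + '\n')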