train_usd.py
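"""Train a CNN classifier on UrbanSound8K mel spectrograms.

Loads the UrbanSound8K annotations and audio, turns each clip into a mel
spectrogram with torchaudio, trains CNNetwork with Adam and cross-entropy
loss, and saves the trained weights to cnnet.pth.
"""
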
import torch
from torch import nn
import torchaudio
from torch.utils.data import DataLoader
from urbansounddataset import UrbanSoundDataset
from cnn import CNNetwork

# training hyperparameters
BATCH_SIZE = 128
EPOCHS = 10
LEARNING_RATE = 0.001

# dataset configuration: clips are handled at 22.05 kHz and trimmed/padded
# to NUM_SAMPLES samples (one second of audio at that rate)
ANNOTATIONS_FILE = "data/UrbanSounds8K/UrbanSound8K.csv"
AUDIO_DIR = "data/UrbanSounds8K"
SAMPLE_RATE = 22050
NUM_SAMPLES = 22050


def create_data_loader(train_data, batch_size):
    train_dataloader = DataLoader(train_data, batch_size=batch_size)
    return train_dataloader


def train_single_epoch(model, data_loader, loss_fn, optimiser, device):
    for inputs, targets in data_loader:
        inputs, targets = inputs.to(device), targets.to(device)

        # calculate loss
        predictions = model(inputs)
        loss = loss_fn(predictions, targets)

        # backpropagate error and update weights
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    # report the loss of the last batch in the epoch
    print(f"loss: {loss.item()}")


def train(model, data_loader, loss_fn, optimiser, device, epochs):
    for i in range(epochs):
        print(f"Epoch {i+1}")
        train_single_epoch(model, data_loader, loss_fn, optimiser, device)
        print("---------------------------")
    print("Finished training")


if __name__ == "__main__":
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    print(f"Using {device}")

    # instantiate the dataset object and create the data loader
    mel_spectrogram = torchaudio.transforms.MelSpectrogram(
        sample_rate=SAMPLE_RATE,
        n_fft=1024,
        hop_length=512,
        n_mels=64)
    # ms = mel_spectrogram(signal)
    usd = UrbanSoundDataset(ANNOTATIONS_FILE,
                            AUDIO_DIR,
                            mel_spectrogram,
                            SAMPLE_RATE,
                            NUM_SAMPLES,
                            device)
    train_dataloader = create_data_loader(usd, BATCH_SIZE)

    # construct model and assign it to device
    cnn = CNNetwork().to(device)
    print(cnn)

    # initialise loss function + optimiser
    loss_fn = nn.CrossEntropyLoss()
    optimiser = torch.optim.Adam(cnn.parameters(),
                                 lr=LEARNING_RATE)

    # train model
    train(cnn, train_dataloader, loss_fn, optimiser, device, EPOCHS)

    # save model
    torch.save(cnn.state_dict(), "cnnet.pth")
    print("Trained CNN saved at cnnet.pth")