pytorch_tensorboard_.py
"""
Example code of how to use the TensorBoard in PyTorch.
This code uses a lot of different functions from TensorBoard
and tries to have them all in a compact way, it might not be
super clear exactly what calls does what, for that I recommend
watching the YouTube video.
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-04-17 Initial coding
* 2022-12-19 Small revision of code, checked that it works with latest PyTorch version
"""
# Imports
import torch
import torchvision
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
import torch.nn.functional as F # All functions that don't have any parameters
import torchvision.datasets as datasets # Has standard datasets we can import in a nice way
import torchvision.transforms as transforms # Transformations we can perform on our dataset
from torch.utils.data import (
    DataLoader,
)  # Gives easier dataset management and creates mini batches
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
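
# To inspect the logged runs, launch TensorBoard pointed at the log directory
# used by the SummaryWriter calls further down (they write under "runs/"):
#   tensorboard --logdir runs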

# Simple CNN
class CNN(nn.Module):
    def __init__(self, in_channels=1, num_classes=10):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels, out_channels=8, kernel_size=3, stride=1, padding=1
        )
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(
            in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1
        )
        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x
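
# Quick shape sanity check: a minimal sketch assuming MNIST-sized 1x28x28 inputs.
# Two rounds of 2x2 max pooling turn the 28x28 feature maps into 7x7 maps, which
# is why fc1 expects 16 * 7 * 7 input features.
_sample = torch.randn(4, 1, 28, 28)
assert CNN()(_sample).shape == (4, 10)
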
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
in_channels = 1
num_classes = 10
num_epochs = 3
# Load Data
train_dataset = datasets.MNIST(
root="dataset/", train=True, transform=transforms.ToTensor(), download=True
)
# To do a hyperparameter search, add more batch sizes and
# learning rates that you want to try!
batch_sizes = [32, 256]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5]
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
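
# Each (batch_size, learning_rate) pair below gets its own SummaryWriter log
# directory, so the individual runs show up side by side in TensorBoard's
# SCALARS tab, and the summary metrics logged via add_hparams appear in the
# HPARAMS tab.
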
for batch_size in batch_sizes:
    for learning_rate in learning_rates:
        step = 0

        # Initialize network
        model = CNN(in_channels=in_channels, num_classes=num_classes)
        model.to(device)
        model.train()
        criterion = nn.CrossEntropyLoss()
        train_loader = DataLoader(
            dataset=train_dataset, batch_size=batch_size, shuffle=True
        )
        optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0)
        writer = SummaryWriter(
            f"runs/MNIST/MiniBatchSize {batch_size} LR {learning_rate}"
        )

        # Visualize model in TensorBoard
        images, _ = next(iter(train_loader))
        writer.add_graph(model, images.to(device))
        writer.close()

        for epoch in range(num_epochs):
            losses = []
            accuracies = []

            for batch_idx, (data, targets) in enumerate(train_loader):
                # Get data to cuda if possible
                data = data.to(device=device)
                targets = targets.to(device=device)

                # forward
                scores = model(data)
                loss = criterion(scores, targets)
                losses.append(loss.item())

                # backward
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Calculate 'running' training accuracy
                features = data.reshape(data.shape[0], -1)
                img_grid = torchvision.utils.make_grid(data)
                _, predictions = scores.max(1)
                num_correct = (predictions == targets).sum()
                running_train_acc = float(num_correct) / float(data.shape[0])
                accuracies.append(running_train_acc)

                # Plot things to tensorboard
                class_labels = [classes[label] for label in predictions]
                writer.add_image("mnist_images", img_grid)
                writer.add_histogram("fc1", model.fc1.weight)
                writer.add_scalar("Training loss", loss, global_step=step)
                writer.add_scalar(
                    "Training Accuracy", running_train_acc, global_step=step
                )

                if batch_idx == 230:
                    writer.add_embedding(
                        features,
                        metadata=class_labels,
                        label_img=data,
                        global_step=batch_idx,
                    )
                step += 1

        writer.add_hparams(
            {"lr": learning_rate, "bsize": batch_size},
            {
                "accuracy": sum(accuracies) / len(accuracies),
                "loss": sum(losses) / len(losses),
            },
        )
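
# Note: the "accuracy" and "loss" values passed to add_hparams are averaged over
# lists that are reset at the start of each epoch, so they summarize only the
# final epoch of each run.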