# flake8: noqa
# Original example from: https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pl_examples/basic_examples/mnist.py
# NOTE: no more argparse:
# from argparse import ArgumentParser
from dataclasses import dataclass
from typing import Any

import hydra
import pytorch_lightning as pl
import torch
from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision import transforms
from torchvision.datasets.mnist import MNIST

from hydra_configs.pytorch_lightning.trainer import TrainerConf
from hydra_configs.torch.optim import AdamConf
from hydra_configs.torch.utils.data import DataLoaderConf


# ====== NOTE: HYDRA BLOCK =========
# structured config imports
@dataclass
class LitClassifierConf:
    trainer: TrainerConf = TrainerConf()
    # `dataset` must be initialized to `None` in the conf.
    # We populate it later in `instantiate()`:
    dataloader: DataLoaderConf = DataLoaderConf(dataset=None)
    # similarly, `params` must be initialized to `None`:
    optim: Any = AdamConf(params=None)
    hidden_dim: int = 128
    data_shape: Any = 1 * 28 * 28
    target_shape: Any = 1 * 10
    root_dir: str = "."
    seed: int = 1234


cs = ConfigStore.instance()
cs.store(name="litconf", node=LitClassifierConf)
# ====== / HYDRA BLOCK =========
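
# NOTE (illustrative, not from the original): because the config is registered
# as a structured config, every field above can be overridden from the command
# line with Hydra's dotted syntax. Field names on the nested confs are assumed
# to mirror their target classes (e.g. `DataLoaderConf.batch_size`,
# `TrainerConf.max_epochs`, `AdamConf.lr`):
#
#   python mnist_00.py hidden_dim=256 seed=42
#   python mnist_00.py dataloader.batch_size=64 trainer.max_epochs=3 optim.lr=1e-2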


class LitClassifier(pl.LightningModule):
    def __init__(
        self,
        data_shape: int = 1 * 28 * 28,
        hidden_dim: int = 128,
        target_shape: int = 1 * 10,
        learning_rate: float = 1e-3,
        **kwargs  # NOTE: important if you want hparams to contain/log your whole cfg
    ):
        super().__init__()
        self.save_hyperparameters()
        self.l1 = torch.nn.Linear(data_shape, self.hparams.hidden_dim)
        self.l2 = torch.nn.Linear(self.hparams.hidden_dim, target_shape)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = torch.relu(self.l1(x))
        # Return raw logits: `F.cross_entropy` applies log-softmax internally,
        # so the output layer gets no activation.
        return self.l2(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("valid_loss", loss)

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        self.log("test_loss", loss)

    def configure_optimizers(self):
        return hydra.utils.instantiate(self.hparams.optim, params=self.parameters())
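
    # NOTE (illustrative, not from the original): with the default AdamConf above,
    # the `instantiate` call is roughly equivalent to the plain PyTorch call
    # below, assuming AdamConf mirrors `torch.optim.Adam`'s signature:
    #
    #   return torch.optim.Adam(self.parameters(), lr=1e-3)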

    # NOTE: This is no longer needed as the __init__ defines this interface
    # @staticmethod
    # def add_model_specific_args(parent_parser):
    #     parser = ArgumentParser(parents=[parent_parser], add_help=False)
    #     parser.add_argument('--hidden_dim', type=int, default=128)
    #     parser.add_argument('--learning_rate', type=float, default=0.0001)
    #     return parser


@hydra.main(config_name="litconf")
def cli_main(cfg):
    # NOTE: This is needed so that data is only downloaded once.
    # It keeps the data directory at the project root.
    cfg.root_dir = hydra.utils.get_original_cwd()
    print(OmegaConf.to_yaml(cfg))
    pl.seed_everything(cfg.seed)

    # ------------
    # args
    # ------------
    # NOTE: These are no longer needed
    # parser = ArgumentParser()
    # parser.add_argument('--batch_size', default=32, type=int)
    # parser = pl.Trainer.add_argparse_args(parser)
    # parser = LitClassifier.add_model_specific_args(parser)
    # args = parser.parse_args()

    # ------------
    # data
    # ------------
    dataset = MNIST(
        root=cfg.root_dir, train=True, download=True, transform=transforms.ToTensor()
    )
    mnist_test = MNIST(
        root=cfg.root_dir, train=False, download=True, transform=transforms.ToTensor()
    )
    mnist_train, mnist_val = random_split(dataset, [55000, 5000])
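
    # NOTE (illustrative, not from the original): `pl.seed_everything` above already
    # makes this split reproducible; to decouple it from the global RNG one could
    # instead pass an explicit generator, e.g.:
    #   mnist_train, mnist_val = random_split(
    #       dataset, [55000, 5000], generator=torch.Generator().manual_seed(cfg.seed)
    #   )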

    # NOTE: We use `hydra.utils.instantiate(cfg.dataloader, dataset=<split>)`
    # because DataLoaderConf is an autogenerated hydra-torch config.
    # Passing `dataset=<split>` lets Hydra's `instantiate` hand each previously
    # created dataset to the DataLoader constructor.
    train_loader = hydra.utils.instantiate(cfg.dataloader, dataset=mnist_train)
    val_loader = hydra.utils.instantiate(
        cfg.dataloader, dataset=mnist_val, _recursive_=False
    )
    test_loader = hydra.utils.instantiate(
        cfg.dataloader, dataset=mnist_test, _recursive_=False
    )

    # ------------
    # model
    # ------------
    # NOTE: Here we use `LitClassifier(**cfg)` since LitClassifier is not an
    # autogenerated hydra-lightning config class and therefore has no `_target_`
    # field. One could add a `_target_` in order to instantiate the model with
    # `hydra.utils.instantiate` as well.
    model = LitClassifier(**cfg)
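
    # NOTE (illustrative, not from the original): `**cfg` works because DictConfig
    # is a Mapping; the expansion is roughly:
    #
    #   model = LitClassifier(
    #       data_shape=cfg.data_shape,
    #       hidden_dim=cfg.hidden_dim,
    #       target_shape=cfg.target_shape,
    #       # remaining keys (trainer, dataloader, optim, ...) are swallowed by
    #       # **kwargs and end up in `self.hparams`
    #   )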

    # ------------
    # training
    # ------------
    # NOTE: Again, we use `hydra.utils.instantiate(cfg.trainer)` because the
    # trainer conf is an autogenerated hydra-lightning config. This is additionally
    # useful as it supports recursive instantiation (for example, adding Callbacks).
    trainer = hydra.utils.instantiate(cfg.trainer)
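    # NOTE (illustrative, not from the original): this resolves to roughly
    # `pl.Trainer(**trainer_kwargs)`, with `trainer_kwargs` drawn from the
    # TrainerConf fields (assumed to mirror `pl.Trainer`'s signature).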
    trainer.fit(model, train_loader, val_loader)

    # ------------
    # testing
    # ------------
    trainer.test(test_dataloaders=test_loader)


if __name__ == "__main__":
    cli_main()