forked from lucidrains/denoising-diffusion-pytorch
train.py
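"""Training script for a denoising diffusion model, forked from
lucidrains/denoising-diffusion-pytorch.

Builds a U-Net backbone, wraps it in GaussianDiffusion, and trains it on the
image folder given by --dataset_path using the library's Trainer class.
"""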
import torch
from torch.utils.data import DataLoader
from datetime import datetime
import logging
import torchio as tio
import argparse
from denoising_diffusion_pytorch import Trainer, Unet, GaussianDiffusion


def str2bool(v):
    """Parse a boolean value from a command-line string."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="3D single-cell generative models")
    parser.add_argument(
        "--dataset_path",
        default="/run/user/1128299809/gvfs/smb-share:server=rds.icr.ac.uk,share=data/DBI/DUDBI/DYNCESYS/mvries/Kaggle/hpa-single-cell-image-classification",
        type=str,
        help="Please provide the path to the dataset of 3D tif images.",
    )
    parser.add_argument(
        "--dataframe",
        default="all_data_removedwrong_ori_removedTwo.csv",
        type=str,
        help="Please provide the path to the dataframe "
        "containing information on the dataset.",
    )
    parser.add_argument(
        "--output_dir",
        default="/home/mvries/Documents/DiffusionHPA/",
        type=str,
        help="Please provide the path for where to save output.",
    )
    parser.add_argument(
        "--learning_rate",
        default=8e-5,
        type=float,
        help="Please provide the learning rate for the diffusion model training.",
    )
    parser.add_argument(
        "--batch_size",
        default=16,
        type=int,
        help="Please provide the batch size.",
    )
    parser.add_argument(
        "--pretrained_path",
        default="/run/user/1128299809/gvfs/smb-share:server=rds.icr.ac.uk,share=data/DBI/DUDBI"
        "/DYNCESYS/mvries/Projects/TearingNetNew/Reconstruct_dgcnn_cls_k20_plane/models/shapenetcorev2_250.pkl",
        type=str,
        help="Please provide the path to a pretrained autoencoder.",
    )
    parser.add_argument(
        "--num_steps",
        default=700000,
        type=int,
        help="Please provide the total number of training steps for the diffusion model.",
    )
    parser.add_argument(
        "--seed",
        default=1,
        type=int,
        help="Random seed.",
    )
    parser.add_argument(
        "--image_size",
        default=128,
        type=int,
        help="Input image size.",
    )
    args = parser.parse_args()
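
    # Assumed addition: apply the --seed argument (it is parsed above but not
    # otherwise used) so that training runs are reproducible.
    torch.manual_seed(args.seed)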

    # Denoising U-Net backbone with 3 input channels (RGB images).
    model = Unet(dim=64, channels=3).cuda()

    diffusion = GaussianDiffusion(
        model,
        image_size=args.image_size,
        timesteps=1000,  # number of diffusion timesteps
        loss_type="l1",  # L1 or L2
    ).cuda()

    trainer = Trainer(
        diffusion,
        folder=args.dataset_path,
        train_batch_size=args.batch_size,
        train_lr=args.learning_rate,
        train_num_steps=args.num_steps,  # total training steps
        gradient_accumulate_every=2,  # gradient accumulation steps
        ema_decay=0.995,  # exponential moving average decay
        amp=False,  # turn on mixed precision
        results_folder=args.output_dir + "/results/",
    )

    trainer.train()
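
# Example invocation (the paths below are placeholders, not the defaults above):
#   python train.py \
#       --dataset_path /path/to/image/folder \
#       --output_dir /path/to/output \
#       --batch_size 16 \
#       --num_steps 700000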