torchAdamTest.py
# -*- coding: utf-8 -*-
# Benchmark: time the forward pass, loss/backward pass, and Adam update of a
# small fully-connected network on the GPU.
import time

import torch

N = 128  # batch size

print(torch.__version__)
print(torch.cuda.is_available())
device = torch.device("cuda:0")
print(torch.cuda.get_device_name(0))
# Layer sizes: 10 inputs -> 600 -> 300 -> 2 outputs.
D_in, H1, H2, D_out = 10, 600, 300, 2

# Random input batch and random targets, created directly on the GPU.
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)

sq = torch.nn.Sequential(
    torch.nn.Linear(D_in, H1),
    torch.nn.ReLU(),
    torch.nn.Linear(H1, H2),
    torch.nn.ReLU(),
    torch.nn.Linear(H2, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
model = sq.cuda()

learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Running totals used to report the average time per stage across iterations.
sumV = 0
sumModelTime = 0
sumLossTime = 0
sumOptiTime = 0
for t in range(500):
    start = time.time()
    lstart = start

    # Forward pass.
    y_pred = model(x)
    end = time.time()
    sumModelTime = sumModelTime + (end - start)
    print('\tmodelTime:', (sumModelTime / (t + 1)))
    start = end

    # Loss and backward pass (gradients are cleared first).
    loss = loss_fn(y_pred, y)
    # if t % 100 == 99:
    #     print(t, loss.item())
    optimizer.zero_grad()
    loss.backward()
    end = time.time()
    sumLossTime = sumLossTime + (end - start)
    print('\tlossTime :', (sumLossTime / (t + 1)))
    start = end

    # Adam parameter update.
    optimizer.step()
    end = time.time()
    sumOptiTime = sumOptiTime + (end - start)
    print('\toptiTime :', (sumOptiTime / (t + 1)))

    # Total time for this iteration and its running average.
    end = time.time()
    sumV = sumV + (end - lstart)
    print('\tfTime:', (end - lstart), '|', (sumV / (t + 1)), '\n')

print(sumV / 500)
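
# Added sketch (not from the original script): time.time() returns as soon as
# CUDA kernels are launched, so the per-stage averages above may understate
# the actual GPU work. torch.cuda.Event records timestamps on the CUDA stream
# itself; the helper below and its name "timed_step" are illustrative
# assumptions, not part of the original benchmark.
def timed_step(model, loss_fn, optimizer, x, y):
    """Run one forward/backward/step cycle and return its GPU time in ms."""
    start_evt = torch.cuda.Event(enable_timing=True)
    end_evt = torch.cuda.Event(enable_timing=True)

    start_evt.record()
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    end_evt.record()

    # elapsed_time() is only valid after both events have completed on the GPU.
    torch.cuda.synchronize()
    return start_evt.elapsed_time(end_evt)


if torch.cuda.is_available():
    # Example usage: average the event-based timing over a few extra iterations.
    total_ms = sum(timed_step(model, loss_fn, optimizer, x, y) for _ in range(10))
    print('event-timed iteration (ms):', total_ms / 10)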