-- main_train_matterport.lua
require 'nn'
require 'cutorch'
require 'cunn'
require 'cudnn'
require 'nngraph'
require 'optim'
require 'BatchIterator'
require 'utils'
require 'utils_matterport'
require 'BatchIterator_matterport'
require 'hdf5'
-- config
local config = dofile('config_matterport.lua')
config = config.parse(arg)
cutorch.setDevice(config.gpuid)
print("Start: " .. config.ps)
-- model
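-- Two networks are built: config.model, used only to load pretrained weights for
-- finetuning, and config.model_nobn, the one actually trained (presumably a variant
-- without batch normalization, given the name). The slice copy below assumes the
-- trained model's flattened parameter vector is a prefix of the first model's.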
local model_old = dofile(config.model)(config)
parameters_old, gradParameters_old = model_old:getParameters()
if not isempty(config.finetune_model) then
    print('finetune from saved model weight...')
    parameters_old:copy(torch.load(config.finetune_model))
    print('set up learning rate...')
    config.optim_state.learningRate = config.finetune_init_lr
end
local model = dofile(config.model_nobn)(config)
parameters, gradParameters = model:getParameters()
if not isempty(config.finetune_model) then
    parameters:copy(parameters_old[{{1,parameters:size(1)}}])
end
-- modify model
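-- Re-initialize the two convolutions inside forward node 24 so the output head
-- predicts a 3-channel normal map: 128->64 and 64->3, 3x3 kernels, stride 1, pad 1.
-- The node index is specific to this model definition; __init reallocates the
-- weights, so getParameters() is called again afterwards.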
model.forwardnodes[24].data.module.modules[1]:__init(128,64,3,3,1,1,1,1)
model.forwardnodes[24].data.module.modules[3]:__init(64,3,3,3,1,1,1,1)
model:cuda()
parameters, gradParameters = model:getParameters()
-- criterion
local criterion_n = nn.CosineEmbeddingCriterion():cuda()
-- dataset
if config.use_render_normal_gt then
    print('training with RENDERED ground truth surface normal...')
    train_data = loadMatterportRender(config.train_file, config.root_path)
    test_data = loadMatterportRender(config.test_file, config.root_path)
else
    print('training with RAW ground truth surface normal...')
    train_data = loadMatterport(config.train_file, config.root_path)
    test_data = loadMatterport(config.test_file, config.root_path)
end
local batch_iterator = BatchIterator(config, train_data, test_data)
-- logger
local logger = optim.Logger(config.log_path .. 'log', true)
-- main training
for it_batch = 1, math.floor(config.nb_epoch * #batch_iterator.train.data / config.batch_size) do
    local batch = batch_iterator:nextBatchMatterport('train', config)
    -- inputs and targets
    local inputs = batch.pr_color
    inputs = inputs:contiguous():cuda()
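    -- feval(x): compute the loss f and the gradient w.r.t. the flattened parameters
    -- for the current batch; handed to optim.rmsprop below.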
    local feval = function(x)
        -- prepare
        collectgarbage()
        if x ~= parameters then
            parameters:copy(x)
        end
        -- forward propagation
        local est = model:forward(inputs)
        local valid = batch.norm_valid
        valid = valid:cuda()
        local gnd = batch.cam_normal
        gnd = gnd:cuda()
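        -- Flatten the B x C x H x W prediction into (B*H*W) x C per-pixel vectors and
        -- L2-normalize each predicted normal; the ground truth is reshaped the same way.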
        bz, ch, h, w = est:size(1), est:size(2), est:size(3), est:size(4)
        est = est:permute(1,3,4,2):contiguous():view(-1,ch)
        local normalize_layer = nn.Normalize(2):cuda()
        est_n = normalize_layer:forward(est)
        gnd = gnd:permute(1,3,4,2):contiguous():view(-1,ch)
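        -- CosineEmbeddingCriterion with an all-ones target: for y = 1 the per-pixel loss
        -- is 1 - cos(est_n, gnd), i.e. the angle between predicted and ground-truth normals.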
        f = criterion_n:forward({est_n, gnd}, torch.Tensor(est_n:size(1)):cuda():fill(1))
        df = criterion_n:backward({est_n, gnd}, torch.Tensor(est_n:size(1)):cuda():fill(1))
        df = df[1]
        df = normalize_layer:backward(est, df)
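        -- Zero the gradient at pixels without a valid ground-truth normal, then reshape
        -- it back to B x C x H x W before backpropagating through the network.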
        valid = valid:view(-1,1):expandAs(df)
        df[torch.eq(valid,0)] = 0
        df = df:view(-1, h, w, ch)
        df = df:permute(1, 4, 2, 3):contiguous()
        gradParameters:zero()
        model:backward(inputs, df)
        -- print
        if it_batch % config.print_iters == 0 then
            print(it_batch, f)
        end
        -- log
        if it_batch % config.log_iters == 0 then
            logger:add{ f }
        end
        return f, gradParameters
    end
    -- optimizer
    optim.rmsprop(feval, parameters, config.optim_state)
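    -- Snapshots store only the flattened parameter vector, matching how
    -- config.finetune_model is loaded above.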
    -- save
    if it_batch % config.snapshot_iters == 0 then
        print('saving model weight...')
        local filename = config.ps .. '_iter_' .. it_batch .. '.t7'
        torch.save(filename, parameters)
    end
    -- lr
    if it_batch % config.lr_decay == 0 then
        config.optim_state.learningRate = config.optim_state.learningRate / config.lr_decay_t
        config.optim_state.learningRate = math.max(config.optim_state.learningRate, config.optim_state.learningRateMin)
        print('decreasing lr... new lr:', config.optim_state.learningRate)
    end
end
print('saving model weight...')
local filename = config.ps .. 'final' .. '.t7'
torch.save(filename, parameters)