from __future__ import print_function
import argparse, os
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import torch.optim as optim
import torch
import torch.utils.data as data_utils
import SimpleITK as sitk
from utils import *
from ganComponents import *
from nnBuildUnits import CrossEntropy2d
from nnBuildUnits import computeSampleAttentionWeight
from nnBuildUnits import adjust_learning_rate
import time
from morpologicalTransformation import denoiseImg_closing, denoiseImg_isolation
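
# Testing script: load a trained 3D segmentation network, run patch-wise inference on
# pelvic MR volumes, build a thresholded probability-based segmentation as well as the
# network's direct label prediction, denoise both morphologically, and report Dice
# scores for bladder, prostate and rectum against the ground-truth label maps.
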
# Training settings
parser = argparse.ArgumentParser(description="PyTorch InfantSeg")
parser.add_argument("--NDim", type=int, default=3, help="the dimension of the shape, 1D, 2D or 3D?")
parser.add_argument("--in_channels", type=int, default=1, help="the input channels ?")
parser.add_argument("--out_channels", type=int, default=4, help="the output channels (num of classes)?")
# parser.add_argument("--in_slices", type=int, default=3, help="the num of consecutive slices for input unit?")
# parser.add_argument("--out_slices", type=int, default=1, help="the num of consecutive slices for output unit?")
parser.add_argument("--input_sz", type=arg_as_list, default=[16,64,64], help="the input patch size of list")
parser.add_argument("--output_sz", type=arg_as_list, default=[16,64,64], help="the output patch size of list")
parser.add_argument("--test_step_sz", type=arg_as_list, default=[2,8,8], help="the step size at testing one subject")
parser.add_argument("--isSegReg", action="store_true", help="is Seg and Reg?", default=False)
parser.add_argument("--isDiceLoss", action="store_true", help="is Dice Loss used?", default=True)
parser.add_argument("--isSoftmaxLoss", action="store_true", help="is Softmax Loss used?", default=True)
parser.add_argument("--isContourLoss", action="store_true", help="is Contour Loss used?", default=False)
parser.add_argument("--isResidualEnhancement", action="store_true", help="is residual learning operation enhanced?", default=False)
parser.add_argument("--isViewExpansion", action="store_true", help="is view expanded?", default=True)
parser.add_argument("--isAdLoss", action="store_true", help="is adversarial loss used?", default=False)
parser.add_argument("--isSpatialDropOut", action="store_true", help="is spatial dropout used?", default=False)
parser.add_argument("--isFocalLoss", action="store_true", help="is focal loss used?", default=False)
parser.add_argument("--isSampleImportanceFromAd", action="store_true", help="is sample importance from adversarial network used?", default=False)
parser.add_argument("--dropoutRate", type=float, default=0.25, help="Spatial Dropout Rate. Default=0.25")
parser.add_argument("--lambdaAD", type=float, default=0, help="loss coefficient for AD loss. Default=0")
parser.add_argument("--adImportance", type=float, default=0, help="Sample importance from AD network. Default=0")
parser.add_argument("--batchSize", type=int, default=10, help="training batch size")
parser.add_argument("--numofIters", type=int, default=200000, help="number of iterations to train for")
parser.add_argument("--showTrainLossEvery", type=int, default=100, help="number of iterations to show train loss")
parser.add_argument("--saveModelEvery", type=int, default=5000, help="number of iterations to save the model")
parser.add_argument("--showTestPerformanceEvery", type=int, default=1000, help="number of iterations to show test performance")
parser.add_argument("--lr", type=float, default=1e-4, help="Learning Rate. Default=1e-4")
parser.add_argument("--decLREvery", type=int, default=100000, help="Sets the learning rate to the initial LR decayed by momentum every n iterations, Default: n=40000")
parser.add_argument("--cuda", action="store_true", help="Use cuda?", default=True)
parser.add_argument("--resume", default="", type=str, help="Path to checkpoint (default: none)")
parser.add_argument("--start_epoch", default=1, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--threads", type=int, default=1, help="Number of threads for data loader to use, Default: 1")
parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
parser.add_argument("--weight-decay", "--wd", default=1e-4, type=float, help="weight decay, Default: 1e-4")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained model (default: none)")
parser.add_argument("--prefixModelName", default="/shenlab/lab_stor5/dongnie/pelvic/Seg3D_wce_viewExp_resEnhance_iter50000_0224_ourData", type=str, help="prefix of the to-be-saved model name")
parser.add_argument("--test_input_file_name",default='img1_nocrop.nii.gz',type=str, help="the input file name for testing subject")
parser.add_argument("--test_gt_file_name",default='img1_label_nie_nocrop.nii.gz',type=str, help="the ground-truth file name for testing subject")
parser.add_argument("--modelPath", default="/shenlab/lab_stor5/dongnie/Seg3D_wce_viewExp_resEnhance_0224_ourData_50000.pt", type=str, help="prefix of the to-be-saved model name")
parser.add_argument("--prefixPredictedFN", default="Seg3D_wce_viewExp_resEnhance_iter50000_0224_ourData", type=str, help="prefix of the to-be-saved predicted filename")
parser.add_argument("--how2normalize", type=int, default=4, help="how to normalize the data")
parser.add_argument("--resType", type=int, default=2, help="resType: 0: segmentation map (integer); 1: regression map (continuous); 2: segmentation map + probability map")
def main():
    global opt
    opt = parser.parse_args()
    print(opt)

    path_test = '/home/dongnie/warehouse/pelvicSeg/prostateChallenge/data/'
    path_test = '/shenlab/lab_stor5/dongnie/challengeData/data/'
    path_test = '/shenlab/lab_stor5/dongnie/pelvic/'
    if opt.isSegReg:
        netG = ResSegRegNet(opt.in_channels, opt.out_channels, nd=opt.NDim)
    elif opt.isContourLoss:
        netG = ResSegContourNet(opt.in_channels, opt.out_channels, nd=opt.NDim, isRandomConnection=opt.isResidualEnhancement, isSmallDilation=opt.isViewExpansion, isSpatialDropOut=opt.isSpatialDropOut, dropoutRate=opt.dropoutRate)
    else:
        netG = ResSegNet(opt.in_channels, opt.out_channels, nd=opt.NDim, isRandomConnection=opt.isResidualEnhancement, isSmallDilation=opt.isViewExpansion, isSpatialDropOut=opt.isSpatialDropOut, dropoutRate=opt.dropoutRate)
    # netG.apply(weights_init)
    netG.cuda()
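    # Restore the generator weights; the checkpoint is expected to be a dict whose
    # "model" entry holds a state_dict (presumably saved by the companion training script).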
    checkpoint = torch.load(opt.modelPath)
    # netG.load_state_dict(checkpoint["model"].state_dict())
    netG.load_state_dict(checkpoint["model"])
    # netG.load_state_dict(torch.load(opt.modelPath))
    ids = [1,2,3,4,6,7,8,10,11,12,13]
    # ids = [45,46,47,48,49]
    ids = [1,2,3,4,13,29]

    for ind in ids:
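        # For each test subject: read the MR volume and its label map, normalize the
        # intensities, run patch-wise inference, and evaluate the predictions.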
        # mr_test_itk = sitk.ReadImage(os.path.join(path_test, 'Case%d.nii.gz'%ind))
        # ct_test_itk = sitk.ReadImage(os.path.join(path_test, 'Case%d_segmentation.nii.gz'%ind))
        mr_test_itk = sitk.ReadImage(os.path.join(path_test, 'img%d_nocrop.nii.gz'%ind))
        ct_test_itk = sitk.ReadImage(os.path.join(path_test, 'img%d_label_nie_nocrop.nii.gz'%ind))

        mrnp = sitk.GetArrayFromImage(mr_test_itk)
        mu = np.mean(mrnp)
        ctnp = sitk.GetArrayFromImage(ct_test_itk)
        # for training data in pelvicSeg
        if opt.how2normalize == 1:
            maxV, minV = np.percentile(mrnp, [99, 1])
            print('maxV,', maxV, ' minV, ', minV)
            mrnp = (mrnp - mu) / (maxV - minV)
            print('unique value: ', np.unique(ctnp))
        # for training data in pelvicSeg
        elif opt.how2normalize == 2:
            maxV, minV = np.percentile(mrnp, [99, 1])
            print('maxV,', maxV, ' minV, ', minV)
            mrnp = (mrnp - mu) / (maxV - minV)
            print('unique value: ', np.unique(ctnp))
        # for training data in pelvicSegRegH5
        elif opt.how2normalize == 3:
            std = np.std(mrnp)
            mrnp = (mrnp - mu) / std
            print('maxV,', np.ndarray.max(mrnp), ' minV, ', np.ndarray.min(mrnp))
        elif opt.how2normalize == 4:
            maxV, minV = np.percentile(mrnp, [99.2, 1])
            print('maxV is: ', np.ndarray.max(mrnp))
            mrnp[np.where(mrnp > maxV)] = maxV
            print('maxV is: ', np.ndarray.max(mrnp))
            mu = np.mean(mrnp)
            std = np.std(mrnp)
            mrnp = (mrnp - mu) / std
            print('maxV,', np.ndarray.max(mrnp), ' minV, ', np.ndarray.min(mrnp))
        # full image version with average over the overlapping regions
        # ct_estimated = testOneSubject(mrnp, ctnp, [3,168,112], [1,168,112], [1,8,8], netG, 'Segmentor_model_%d.pt'%iter)

        # the attention regions
        # x1 = 80
        # x2 = 192
        # y1 = 35
        # y2 = 235
        # matFA = mrnp[:, y1:y2, x1:x2]  # note: matFA and matFAOut have the same size
        # matGT = ctnp[:, y1:y2, x1:x2]
        matFA = mrnp
        matGT = ctnp
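        # Sliding-window inference: testOneSubject (from utils) is assumed to tile the
        # volume into input_sz patches with stride test_step_sz and fuse overlapping
        # predictions; with resType=2 it additionally returns per-class probability maps.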
        if opt.resType == 2:
            matOut, matProb, _ = testOneSubject(matFA, matGT, opt.out_channels, opt.input_sz, opt.output_sz, opt.test_step_sz, netG, opt.modelPath, resType=opt.resType, nd=opt.NDim)
        else:
            matOut, _ = testOneSubject(matFA, matGT, opt.out_channels, opt.input_sz, opt.output_sz, opt.test_step_sz, netG, opt.modelPath, resType=opt.resType, nd=opt.NDim)
        # matOut, _ = testOneSubject(matFA, matGT, opt.out_channels, opt.input_sz, opt.output_sz, opt.test_step_sz, netG, opt.modelPath, nd=opt.NDim)
        ct_estimated = np.zeros([ctnp.shape[0], ctnp.shape[1], ctnp.shape[2]])
        ct_prob = np.zeros([opt.out_channels, ctnp.shape[0], ctnp.shape[1], ctnp.shape[2]])
        # print('matOut shape: ', matOut.shape)
        # ct_estimated[:, y1:y2, x1:x2] = matOut
        ct_estimated = matOut
        ct_prob = matProb  # note: the probability-map path below assumes resType == 2, where matProb is returned
        matProb_Bladder = np.squeeze(ct_prob[1, :, :, :])
        matProb_Prostate = np.squeeze(ct_prob[2, :, :, :])
        matProb_Rectum = np.squeeze(ct_prob[3, :, :, :])
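        # Threshold-based segmentation: voxels whose class probability exceeds the
        # threshold are labeled 1 (bladder), 2 (prostate) or 3 (rectum); where regions
        # overlap, the later assignment wins.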
        threshold = 0.9
        tmat_prob = np.zeros(matProb_Bladder.shape)
        tmat = np.zeros(matProb_Bladder.shape)
        # for bladder
        inds1 = np.where(matProb_Bladder > threshold)
        tmat_prob[inds1] = matProb_Bladder[inds1]
        tmat[inds1] = 1
        # for prostate
        inds2 = np.where(matProb_Prostate > threshold)
        tmat_prob[inds2] = matProb_Prostate[inds2]
        tmat[inds2] = 2
        # for rectum
        inds3 = np.where(matProb_Rectum > threshold)
        tmat_prob[inds3] = matProb_Rectum[inds3]
        tmat[inds3] = 3

        tmat = denoiseImg_closing(tmat, kernel=np.ones((20, 20, 20)))
        tmat = denoiseImg_isolation(tmat, struct=np.ones((3, 3, 3)))

        diceBladder = dice(tmat, ctnp, 1)
        diceProstate = dice(tmat, ctnp, 2)
        diceRectum = dice(tmat, ctnp, 3)
        print('sub%d'%ind, 'dice1 = ', diceBladder, ' dice2 = ', diceProstate, ' dice3 = ', diceRectum)

        # volout = sitk.GetImageFromArray(matProb_Prostate)
        # sitk.WriteImage(volout, opt.prefixPredictedFN + 'probmap_Prostate_sub{:02d}'.format(ind) + '.nii.gz')
        volout = sitk.GetImageFromArray(tmat_prob)
        sitk.WriteImage(volout, opt.prefixPredictedFN + 'threshold_probmap_sub{:02d}'.format(ind) + '.nii.gz')
        volout = sitk.GetImageFromArray(tmat)
        sitk.WriteImage(volout, opt.prefixPredictedFN + 'threshold_segmap_sub{:02d}'.format(ind) + '.nii.gz')
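        # Second evaluation path: the label map predicted directly by the network is
        # rounded to integer labels, denoised with the same morphological operations,
        # and scored against the ground truth for comparison with the thresholded result.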
        ct_estimated = np.rint(ct_estimated)
        ct_estimated = denoiseImg_closing(ct_estimated, kernel=np.ones((20, 20, 20)))
        ct_estimated = denoiseImg_isolation(ct_estimated, struct=np.ones((3, 3, 3)))
        # diceProstate = dice(ct_estimated, ctnp, 2)
        # diceRectum = dice(ct_estimated, ctnp, 3)
        # print('pred: ', ct_estimated.dtype, ' shape: ', ct_estimated.shape)
        # print('gt: ', ctnp.dtype, ' shape: ', ctnp.shape)
        # print('sub%d'%ind, 'dice1 = ', diceBladder, ' dice2 = ', diceProstate, ' dice3 = ', diceRectum)
        diceBladder = dice(ct_estimated, ctnp, 1)
        diceProstate = dice(ct_estimated, ctnp, 2)
        diceRectum = dice(ct_estimated, ctnp, 3)
        print('sub%d'%ind, 'dice1 = ', diceBladder, ' dice2 = ', diceProstate, ' dice3 = ', diceRectum)

        volout = sitk.GetImageFromArray(ct_estimated)
        sitk.WriteImage(volout, opt.prefixPredictedFN + 'sub{:02d}'.format(ind) + '.nii.gz')
        volgt = sitk.GetImageFromArray(ctnp)
        sitk.WriteImage(volgt, 'gt_sub{:02d}'.format(ind) + '.nii.gz')
if __name__ == '__main__':
    # testGradients()
    os.environ['CUDA_VISIBLE_DEVICES'] = '7'
    main()