-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathconvert_pth_to_trtpth.py
45 lines (30 loc) · 1.29 KB
/
convert_pth_to_trtpth.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
"""Convert a pretrained FireNet checkpoint to a TensorRT-optimized fp16 model.

Standalone conversion script: loads ``./pretrained/firenet_1000.pth.tar``,
builds a TensorRT engine via ``torch2trt`` traced with fixed 1x{5,16,16}x240x320
fp16 example inputs, runs one smoke-test forward pass, and saves the converted
state dict to ``firenet_trt_fp16_half.pth``.
"""
from utils.loading_utils import load_model, get_device
import torch
from model.model import *  # model classes must be importable for load_model to unpickle
from torch2trt import torch2trt

# Checkpoint to convert.
CHECKPOINT_PATH = './pretrained/firenet_1000.pth.tar'

model = load_model(CHECKPOINT_PATH)

# Conversion/inference only: freeze all parameters so no autograd state is traced.
for param in model.parameters():
    param.requires_grad = False

device = get_device('True')  # NOTE(review): helper appears to take a string flag, not a bool — confirm API
model = model.to(device)
model.half()  # fp16 weights to match the fp16_mode engine below
model.eval()

# Fixed-shape fp16 example inputs (batch=1, H=240, W=320) used to trace the network.
# Assumes the model takes a 5-channel input tensor plus two 16-channel recurrent
# state tensors — TODO confirm against the FireNet forward() signature.
curr_input = torch.zeros((1, 5, 240, 320), dtype=torch.float16, device=device)
prev_state1 = torch.zeros((1, 16, 240, 320), dtype=torch.float16, device=device)
prev_state2 = torch.zeros((1, 16, 240, 320), dtype=torch.float16, device=device)

model_trt = torch2trt(
    model,
    [curr_input, prev_state1, prev_state2],
    input_names=['input', 'prev_state_1', 'prev_state_2'],
    output_names=['output', 'new_state_1', 'new_state_2'],
    use_onnx=True,
    fp16_mode=True,
)

# Smoke test: one forward pass through the converted engine.
with torch.no_grad():
    output, new_state1, new_state2 = model_trt(curr_input, prev_state1, prev_state2)

torch.save(model_trt.state_dict(), 'firenet_trt_fp16_half.pth')