prediction.py
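"""Caption an image with a trained EncoderCNN / DecoderRNN pair.

Loads saved encoder/decoder weights and the pickled vocabulary, preprocesses
the input image, and decodes a caption word by word until the '<end>' token.
"""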
import pickle
import PIL.Image
import torch
from torchvision import transforms
from .models import EncoderCNN, DecoderRNN

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def process(image, encoder_path, decoder_path, vocab_path, embedding_dim, hidden_dim, n_layers):
    # preprocessing pipeline matching the ImageNet statistics the encoder was trained with
    data_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
    # load the vocabulary (word <-> index mappings)
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)
    # build models
    encoder = EncoderCNN(embedding_dim)
    decoder = DecoderRNN(embedding_dim, hidden_dim, len(vocab), n_layers)
    # switch to evaluation mode (disables dropout and batch-norm updates)
    encoder.eval()
    decoder.eval()
    # move to the available device (GPU if present, else CPU)
    encoder.to(device)
    decoder.to(device)
    # load trained weights, remapping them onto the current device so a
    # GPU-saved checkpoint also loads on a CPU-only machine
    encoder.load_state_dict(torch.load(encoder_path, map_location=device))
    decoder.load_state_dict(torch.load(decoder_path, map_location=device))
    # load and preprocess the image; convert to RGB so grayscale/RGBA inputs also work
    img = PIL.Image.open(image).convert('RGB')
    img = data_transforms(img)
    img = img.unsqueeze(0).to(device)  # add batch dimension: [B, C, H, W]
    # generate a caption from the image without tracking gradients
    with torch.no_grad():
        extracted_features = encoder(img)
        out_idx = decoder.sample(extracted_features)
    out_idx = out_idx[0].cpu().numpy()
    # convert word ids back to words, stopping at the end token
    sampled_caption = []
    for word_id in out_idx:
        word = vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word == '<end>':
            break
    output = ' '.join(sampled_caption)
    print(output)
    return output
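

# A minimal usage sketch. All file names and hyperparameter values below are
# placeholders (not this repo's actual defaults) and must match whatever was
# used at training time. Because of the relative import above, run this as a
# module (python -m <package>.prediction) rather than as a standalone script.
if __name__ == '__main__':
    caption = process(
        image='example.jpg',          # placeholder input image
        encoder_path='encoder.ckpt',  # placeholder trained encoder weights
        decoder_path='decoder.ckpt',  # placeholder trained decoder weights
        vocab_path='vocab.pkl',       # placeholder pickled vocabulary
        embedding_dim=256,            # placeholder; must match training
        hidden_dim=512,               # placeholder; must match training
        n_layers=1,                   # placeholder; must match training
    )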