# frontend.py
import collections
import os
import pickle
import sys

import cv2
import numpy as np
import torch

from model import EmotionNet
def loader(path):
    """Read an image from disk as an H x W x C uint8 array in BGR order."""
    image = cv2.imread(path)  # returns None if the file is missing or unreadable
    if image is None:
        raise IOError("Could not read image: %s" % path)
    return np.asarray(image, dtype=np.uint8).copy()
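
# The two inference functions below load weights from a pickle of the model's
# parameter dict rather than via torch.load. A minimal sketch of how such a
# file could be produced (the "weights.pkl" name is an assumption, not part of
# this repository):
#
#     model = EmotionNet(5)
#     ...  # train the model
#     with open("weights.pkl", "wb") as f:
#         pickle.dump(dict(model.state_dict()), f)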
def files_inference(weights, data_folder, class_labels, device='cpu'):
    """Classify the facial expression in every image in data_folder and write
    annotated copies to ./data/inference."""
    face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    classifier = EmotionNet(5)
    # Weights are stored as a pickled dict of parameter tensors.
    with open(weights, "rb") as weightfile:
        data = pickle.load(weightfile)
    classifier.load_state_dict(collections.OrderedDict(data))
    classifier.eval()
    try:
        files = os.listdir(data_folder)
    except OSError:
        print("No such file or directory: %s" % data_folder)
        return
    inference_folder = os.path.join("./data", "inference")
    os.makedirs(inference_folder, exist_ok=True)
    invalid_files = []
    for file in files:
        try:
            sample = loader(os.path.join(data_folder, file))
        except (IOError, OSError):
            invalid_files.append(os.path.join(data_folder, file))
            continue
        # Haar cascades operate on single-channel images.
        gray = sample.copy()
        if len(gray.shape) == 3 and gray.shape[-1] != 1:
            gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(sample, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi = gray[y:y + h, x:x + w]
            roi = cv2.resize(roi, (48, 48), interpolation=cv2.INTER_AREA)
            if np.sum(roi) != 0:
                # Scale to [0, 1], add batch and channel dims, then standardize
                # with the training-set mean and standard deviation.
                roi = roi.astype('float') / 255
                roi = torch.from_numpy(roi.copy()).unsqueeze(0).unsqueeze(0)
                roi = roi.type(torch.FloatTensor).to(device)
                roi = (roi - 0.5076) / 0.0647
                with torch.no_grad():
                    pred = classifier(roi).squeeze()
                _, ind = torch.max(pred, dim=0)
                label = class_labels[ind.item()]
                cv2.putText(sample, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
            else:
                cv2.putText(sample, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
        cv2.imwrite(os.path.join(inference_folder, file), sample)
    if invalid_files:
        print("The following %d files could not be processed:" % len(invalid_files))
        for name in invalid_files:
            print(name)
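
# Usage sketch for files_inference. The weights path, data folder, and label
# list below are assumptions; the labels must match the index order the
# classifier was trained with:
#
#     files_inference("weights.pkl", "./data/test",
#                     ["Angry", "Happy", "Neutral", "Sad", "Surprise"],
#                     device="cuda" if torch.cuda.is_available() else "cpu")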
def camfeed_inference(weights, class_labels, device='cpu'):
    """Classify facial expressions live on the default webcam feed.
    Press Esc to quit."""
    face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    classifier = EmotionNet(5)
    # Weights are stored as a pickled dict of parameter tensors.
    with open(weights, "rb") as weightfile:
        data = pickle.load(weightfile)
    classifier.load_state_dict(collections.OrderedDict(data))
    classifier.eval()
    cap = cv2.VideoCapture(0)
    try:
        while True:
            # Grab a single frame of video.
            ret, frame = cap.read()
            if not ret:
                print("Could not read a frame from the camera.")
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_classifier.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi = gray[y:y + h, x:x + w]
                roi = cv2.resize(roi, (48, 48), interpolation=cv2.INTER_AREA)
                if np.sum(roi) != 0:
                    # Same preprocessing as files_inference: scale, add batch
                    # and channel dims, standardize with training statistics.
                    roi = roi.astype('float') / 255
                    roi = torch.from_numpy(roi.copy()).unsqueeze(0).unsqueeze(0)
                    roi = roi.type(torch.FloatTensor).to(device)
                    roi = (roi - 0.5076) / 0.0647
                    with torch.no_grad():
                        pred = classifier(roi).squeeze()
                    _, ind = torch.max(pred, dim=0)
                    label = class_labels[ind.item()]
                    cv2.putText(frame, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
                else:
                    cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
            cv2.imshow('Fast Face Expression', frame)
            if cv2.waitKey(1) == 27:  # Esc key
                break
    finally:
        # Release the camera and close the window on any exit, including errors.
        cap.release()
        cv2.destroyAllWindows()
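
# Minimal command-line entry point, added as a sketch: the label list and
# weights filename are assumptions and must match how the model was trained.
if __name__ == "__main__":
    CLASS_LABELS = ["Angry", "Happy", "Neutral", "Sad", "Surprise"]  # assumed order
    WEIGHTS = "weights.pkl"  # hypothetical filename
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if len(sys.argv) > 1:
        # python frontend.py <folder>  -> annotate every image in <folder>
        files_inference(WEIGHTS, sys.argv[1], CLASS_LABELS, device=device)
    else:
        # python frontend.py           -> live webcam inference (Esc to quit)
        camfeed_inference(WEIGHTS, CLASS_LABELS, device=device)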