-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathFaceDetection_Module.py
121 lines (81 loc) · 4.09 KB
/
FaceDetection_Module.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import cv2
import numpy as np
from PIL import Image
import torch
def feats_extract(face_crop, model, device):
    """Run the facial-keypoint model on one cropped face and display the result.

    Parameters
    ----------
    face_crop : np.ndarray
        Face crop as a 2-D grayscale array — presumably single-channel, since
        it is reshaped to (1, 1, 96, 96) below; TODO confirm with callers.
    model : torch.nn.Module
        Network mapping a (1, 1, 96, 96) float tensor to 30 values,
        i.e. 15 (x, y) keypoint pairs.
    device : torch.device or str
        Device the model lives on.
    """
    # Resize to the 96x96 input the model was trained on.
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    face_96 = Image.fromarray(face_crop).resize((96, 96), Image.LANCZOS)
    # convert back to numpy so we can draw on a copy
    test_face = np.array(face_96)
    test_face_copy = test_face.copy()
    # Move to torch on the target device; the model expects NCHW layout.
    test_face_torch = torch.from_numpy(test_face).float().to(device)
    test_face = test_face_torch.reshape(1, 1, 96, 96)
    # Inference only — no gradient graph needed.
    with torch.no_grad():
        test_predictions = model(test_face)
    test_predictions = test_predictions.cpu().numpy()
    # This is the list with the face keypoints we are detecting
    #keypts_labels_plantilla = train_data.columns.tolist()
    # Pair the 30 outputs into 15 (x, y) rows.
    coord = np.vstack(np.split(test_predictions[0], 15))
    for (x, y) in coord:
        # cv2.circle requires integer pixel coordinates; the raw predictions
        # are floats, so round them explicitly.
        cv2.circle(test_face_copy, (int(round(float(x))), int(round(float(y)))),
                   2, (0, 255, 0), -1)  # plotted directly onto the cropped image
    cv2.imshow("Prova", test_face_copy)
# NOTE: the two classes below are obsolete; kept because they may be useful in the future.
class FaceDetection():
    """Detect faces in an image file using an OpenCV Haar cascade.

    NOTE(review): marked obsolete by the original author but kept around
    because it may be useful in the future.
    """

    def __init__(self, path2img='AbccEAc.jpg', path2class='haarcascade_frontalface_default.xml'):
        # Load image from disk. cv2.imread returns None on failure instead of
        # raising, which would otherwise surface as an opaque cv2.error in
        # cvtColor below — fail fast with a clear message here.
        self.img_original = cv2.imread(path2img)
        if self.img_original is None:
            raise FileNotFoundError("Could not read image: %r" % path2img)
        # Convert BGR (OpenCV's load order) to RGB colorspace.
        self.img_original = self.convertToRGB(self.img_original)
        # Keep a copy so drawn rectangles don't overwrite the original pixels.
        self.img_with_detections = np.copy(self.img_original)
        # The cascade classifier expects a grayscale image.
        self.gray_img = self.convertToGray(self.img_original)
        # Load the cascade classifier (haarcascade) training file.
        # CascadeClassifier does not raise on a bad path; check explicitly.
        self.haar_face_cascade = cv2.CascadeClassifier(path2class)
        if self.haar_face_cascade.empty():
            raise ValueError("Could not load cascade classifier: %r" % path2class)
        # Detect faces at multiple scales; each detection is (x, y, w, h).
        self.faces = self.haar_face_cascade.detectMultiScale(
            self.gray_img, scaleFactor=1.1, minNeighbors=5)

    def number_faces(self):
        """Print the number of faces found."""
        print('Faces found: ', len(self.faces))

    def convertToGray(self, img):
        """Return *img* converted from RGB to grayscale."""
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    def convertToRGB(self, img):
        """Return *img* converted from BGR (OpenCV default) to RGB."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def detection(self):
        """Return the cropped face regions; also draw green boxes on the copy."""
        faces_crop = []
        for (x, y, w, h) in self.faces:
            faces_crop.append(self.img_original[y:y + h, x:x + w])
            cv2.rectangle(self.img_with_detections, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return faces_crop
class FaceDetection2():
    """Detect faces in an already-loaded image array using a Haar cascade.

    NOTE(review): marked obsolete by the original author but kept around
    because it may be useful in the future.
    """

    def __init__(self, img=None, path2class='haarcascade_frontalface_default.xml'):
        # The original default was a mutable list ([]): a Python anti-pattern
        # and an invalid image that crashed deep inside cvtColor. Use None as
        # the sentinel and fail fast with a clear error instead.
        if img is None or len(img) == 0:
            raise ValueError("FaceDetection2 requires a non-empty image array")
        self.img_original = img
        # Convert BGR (OpenCV's load order) to RGB colorspace.
        self.img_original = self.convertToRGB(self.img_original)
        # Keep a copy so drawn rectangles don't overwrite the original pixels.
        self.img_with_detections = np.copy(self.img_original)
        # The cascade classifier expects a grayscale image.
        self.gray_img = self.convertToGray(self.img_original)
        # Load the cascade classifier (haarcascade) training file.
        # CascadeClassifier does not raise on a bad path; check explicitly.
        self.haar_face_cascade = cv2.CascadeClassifier(path2class)
        if self.haar_face_cascade.empty():
            raise ValueError("Could not load cascade classifier: %r" % path2class)
        # Detect faces at multiple scales; each detection is (x, y, w, h).
        self.faces = self.haar_face_cascade.detectMultiScale(
            self.gray_img, scaleFactor=1.1, minNeighbors=5)

    def number_faces(self):
        """Print the number of faces found."""
        print('Faces found: ', len(self.faces))

    def convertToGray(self, img):
        """Return *img* converted from RGB to grayscale."""
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    def convertToRGB(self, img):
        """Return *img* converted from BGR (OpenCV default) to RGB."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def detection(self):
        """Return the cropped face regions; also draw green boxes on the copy."""
        faces_crop = []
        for (x, y, w, h) in self.faces:
            faces_crop.append(self.img_original[y:y + h, x:x + w])
            cv2.rectangle(self.img_with_detections, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return faces_crop