-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathfaceRecog.py
204 lines (158 loc) · 6.93 KB
/
faceRecog.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import cv2
import numpy as np
from PIL import Image
import os
import requests as req
import json
from datetime import *
import base64
#import gpiozero # The GPIO library for Raspberry Pi
import time # Enables Python to manage timing
#led = gpiozero.LED(17) # Reference GPIO17
'''https://rmsf-smartlock.ew.r.appspot.com/'''
service_id = 54321
class ImageRecogn:
    """LBPH face recognition over a directory of labelled face images.

    Trains an OpenCV LBPH recognizer from images named
    ``<anything>.<id>.<name>.<ext>`` found under *path*, then runs a
    realtime webcam loop that labels detected faces and notifies a remote
    smart-lock service.  Relies on the module-level imports: cv2 (with the
    ``face`` contrib module), PIL, numpy, requests, and the module
    constant ``service_id``.
    """

    def __init__(self, path='dataset'):
        # Path for the face image database.  (Bug fix: the original
        # ignored the `path` argument and always used 'dataset'.)
        self.path = path
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
        # Maps numeric label id -> person name; id 0 is the "no face" slot.
        self.names = {0: 'none'}

    def getImagesAndLabels(self, path):
        """Collect (face crop, id, name) training triples from *path*.

        File names are parsed as ``<anything>.<id>.<name>.<ext>``.
        Returns three parallel lists: face samples, ids, names.
        """
        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        faceSamples = []
        ids = []
        names = []
        for imagePath in imagePaths:
            try:
                PIL_img = Image.open(imagePath).convert('L')  # grayscale
                img_numpy = np.array(PIL_img, 'uint8')
                filename_parts = os.path.split(imagePath)[1].split(".")
                face_id = int(filename_parts[1])
                name = str(filename_parts[2])
                faces = self.detector.detectMultiScale(img_numpy)
                for (x, y, w, h) in faces:
                    faceSamples.append(img_numpy[y:y + h, x:x + w])
                    ids.append(face_id)
                    names.append(name)
            except (OSError, ValueError, IndexError):
                # Narrowed from a bare except: unreadable image, or a file
                # name that does not match the expected pattern.
                print('*Alert* Fail to read ' + imagePath)
        return faceSamples, ids, names

    def fit(self):
        """Train the LBPH model from self.path and save trainer/trainer.yml."""
        print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
        faces, ids, names = self.getImagesAndLabels(self.path)
        self.recognizer.train(faces, np.array(ids))
        # Attach each person's name to their label.  (Bug fix: iterate the
        # ids actually present instead of assuming a contiguous 1..N range,
        # which raised ValueError at `ids.index(i)` on any numbering gap.)
        for label_id in np.unique(ids):
            first_index = ids.index(label_id)
            self.recognizer.setLabelInfo(int(label_id), str(names[first_index]))
        # recognizer.save() worked on Mac, but not on Pi -- use write().
        self.recognizer.write('trainer/trainer.yml')
        print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))

    def getLabels(self, max=100):
        """Populate self.names from the label info stored in the model.

        ``max`` (name kept for interface compatibility even though it
        shadows the builtin) bounds how many label slots are probed;
        probing stops at the first empty slot.
        """
        print("Getting labels %i \n" % (max))
        for i in range(1, max):
            retval = self.recognizer.getLabelInfo(i)
            if retval == "":
                print('*Alert* empty labels')
                break
            self.names[i] = retval

    def classify(self, id, confidence=101):
        """Map a predicted id + LBPH confidence to a display label.

        LBPH confidence is a distance: 0 is a perfect match.
        < 65 -> the person's name; 65-100 -> "<name> no match";
        otherwise (or for an id not in self.names) -> "unknown".
        """
        name = self.names.get(id)
        if name is None:
            # Bug fix: an unknown id used to raise KeyError.
            return "unknown"
        if confidence < 65:
            return name
        if confidence < 101:
            # Bug fix: the original `> 65 and < 101` dropped exactly 65
            # into "unknown".
            return str(name) + " no match"
        return "unknown"

    def _notify(self, label, img, timestamp):
        """Best-effort POST of the annotated frame to the lock service."""
        try:
            _, imdata = cv2.imencode('.JPG', img)
            payload = {
                "image": base64.b64encode(imdata).decode('utf-8'),
                "time": timestamp,
                "token": 12345,
                "name": label,
            }
            # Bug fix: the original json.dumps()'d the payload and then
            # passed the resulting *string* to `json=`, double-encoding
            # the request body.  `json=` serializes the dict itself.
            # Also uses the module constant instead of a hard-coded 54321.
            req.put(
                "https://rmsf-smartlock.ew.r.appspot.com/add/%d" % service_id,
                headers={'Content-type': 'application/json'},
                json=payload,
            )
        except Exception as exc:
            # Network failures must not kill the recognition loop, but
            # don't swallow them silently either.
            print("*Alert* notify failed:", exc)

    def _check_door(self):
        """Poll the remote door state; placeholder hook for the GPIO LED."""
        try:
            body = req.get(
                "https://rmsf-smartlock.ew.r.appspot.com/door/%d" % service_id
            ).text
            print("DOOR: ", body)
            # Bug fix: the original indexed the raw response text with
            # ["door"], a TypeError on a str (silently swallowed by a
            # bare except) -- parse the JSON first.
            door = json.loads(body)
            if door["door"] == 1:
                # led.on()  # Turn on the LED
                pass
            elif door["door"] == 0:
                # led.off()
                pass
        except Exception as exc:
            print("*Alert* door poll failed:", exc)

    def preditct(self):
        """Run the realtime webcam recognition loop until ESC is pressed.

        (Misspelled name kept for backwards compatibility with existing
        callers; see the correctly spelled `predict` alias below.)
        """
        self.recognizer.read('trainer/trainer.yml')
        font = cv2.FONT_HERSHEY_SIMPLEX
        historic = {}  # label -> datetime of the last notification sent
        # Names related to ids, loaded from the trained model.
        self.getLabels(100)
        # Initialize and start realtime video capture.
        cam = cv2.VideoCapture(0)
        cam.set(3, 640)  # video width
        cam.set(4, 480)  # video height
        # Minimum window size to be recognized as a face.
        minW = 0.1 * cam.get(3)
        minH = 0.1 * cam.get(4)
        try:
            while True:
                ret, img = cam.read()
                if not ret:
                    break  # camera unavailable -- stop instead of crashing
                img = cv2.flip(img, 1)  # mirror horizontally (flip code 1)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = self.detector.detectMultiScale(
                    gray,
                    scaleFactor=1.2,
                    minNeighbors=5,
                    minSize=(int(minW), int(minH)),
                )
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    face_id, confidence = self.recognizer.predict(
                        gray[y:y + h, x:x + w])
                    label = self.classify(face_id, confidence)
                    now = datetime.now()
                    stamp = now.strftime("%d/%m/%Y %H:%M:%S")
                    # Notify at most once every 5 s per label.  (Bug fix:
                    # the original never notified a *new* label while a
                    # different label's record existed, and it clobbered
                    # the whole history dict on every send.)
                    last = historic.get(label)
                    if last is None or (now - last).total_seconds() > 5:
                        historic[label] = now
                        print("Preparing to send: " + label)
                        self._notify(label, img, stamp)
                    if label == self.names.get(face_id):
                        # Confident match: placeholder for the LED blink.
                        # led.on()
                        time.sleep(0.5)
                        # led.off()  # Turn the LED off
                    self._check_door()
                    cv2.putText(img, label, (x + 5, y - 5), font, 1,
                                (255, 255, 255), 2)
                    cv2.putText(img, " {0}%".format(confidence),
                                (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
                cv2.imshow('camera', img)
                if (cv2.waitKey(10) & 0xff) == 27:  # ESC exits
                    break
        finally:
            # Always release the camera, even on an unexpected exception.
            print("\n [INFO] Exiting Program and cleanup stuff")
            cam.release()
            cv2.destroyAllWindows()

    # Correctly spelled, backwards-compatible alias for the public loop.
    predict = preditct
# Script entry point.  Guarded so that importing this module for its
# ImageRecogn class does not immediately open the webcam.
if __name__ == "__main__":
    a = ImageRecogn()
    # Run a.fit() once to (re)train trainer/trainer.yml before recognizing.
    # a.fit()
    a.preditct()