from_camera.py
# Tutorial: https://youtu.be/PdkPI92KSIs?list=PLQVvvaa0QuDcDqgpLLJJM15NpIGNfrKY5
import os

import cv2
import face_recognition

# Expected layout: known_faces/<person_name>/<image files>; each folder name
# is used as the label drawn on matching faces.
KNOWN_FACES_DIR = "known_faces"
TOLERANCE = 0.5          # lower = stricter matching (library default is 0.6)
FRAME_THICKNESS = 1
FONT_THICKNESS = 1
MODEL = "hog"            # "hog" (CPU) or "cnn" (GPU-accelerated)

video = cv2.VideoCapture(0)

print("[*] Loading known faces...")
known_faces = []
known_names = []

for name in os.listdir(KNOWN_FACES_DIR):
    for filename in os.listdir(f"{KNOWN_FACES_DIR}/{name}"):
        image = face_recognition.load_image_file(f"{KNOWN_FACES_DIR}/{name}/{filename}")
        # Assumes each known image contains exactly one face; [0] raises
        # IndexError if no face is detected in the image.
        encoding = face_recognition.face_encodings(image)[0]
        known_faces.append(encoding)
        known_names.append(name)

print("[*] Processing camera feed...")
while True:
    ret, image = video.read()
    if not ret:
        break

    # face_recognition expects RGB, while OpenCV captures frames as BGR.
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    locations = face_recognition.face_locations(rgb_image, model=MODEL)
    encodings = face_recognition.face_encodings(rgb_image, locations)

    for face_encoding, face_location in zip(encodings, locations):
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
        match = None
        if True in results:
            match = known_names[results.index(True)]

            # face_location is (top, right, bottom, left).
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])
            color = [0, 255, 0]
            cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)

            # Filled label bar just below the face box.
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
            cv2.putText(
                image,
                match,
                (face_location[3] + 10, face_location[2] + 15),
                cv2.FONT_HERSHEY_TRIPLEX,
                0.35,
                (0, 0, 0),
                FONT_THICKNESS,
            )

    cv2.imshow("Camera", image)
    # Press "q" to quit.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

video.release()
cv2.destroyAllWindows()