Demo code
ljy6712 committed May 24, 2024
1 parent d35dd1e commit d4dcd18
Showing 24 changed files with 212 additions and 7 deletions.
Binary file modified __pycache__/createTarget.cpython-310.pyc
Binary file added __pycache__/deep2.cpython-310.pyc
Binary file modified __pycache__/mosaic.cpython-310.pyc
Binary file modified __pycache__/mosaic_jiyeon.cpython-310.pyc
Binary file modified __pycache__/yoona_target.cpython-310.pyc
Binary file removed __pycache__/yoona_target.cpython-311.pyc
9 changes: 6 additions & 3 deletions app.py
@@ -1,8 +1,9 @@
import os
from createTarget import extract_and_identify_faces_from_video
from yoona_target import yoona_test
import mosaic
from yoona_target import arcface_recognition, group_and_save_faces
# import mosaic
import mosaic_jiyeon
# import deep2


from flask import (Flask, request, send_file, jsonify)
@@ -25,7 +26,8 @@ def process_video():
@app.route('/target2', methods=['POST'])
def yoona():
    video_path = './cutVideo.mp4'
    identified_faces = yoona_test(video_path)
    identified_faces = arcface_recognition(video_path)
    base64_faces = group_and_save_faces(identified_faces)
    # face_base64_arrays = save_faces(identified_faces)  # returns the images as Base64-encoded strings
    return jsonify({"images": base64_faces})  # return the Base64-encoded face groups as a JSON object

@@ -50,6 +52,7 @@ def handle_video():

    # output_video_path = mosaic.mosaic(video_file.filename, image_paths)
    output_video_path = mosaic_jiyeon.mosaic(video_file.filename, image_paths)
    # output_video_path = deep2.mosaic(video_file.filename, image_paths)
    print(output_video_path)
    return send_file(output_video_path, mimetype='video/mp4', as_attachment=True, download_name='output_video.mp4')

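For quick manual testing, a minimal client sketch for the /target2 route above. The host and port are assumptions (Flask's defaults); the response shape follows the jsonify call in the diff, and /target2 reads ./cutVideo.mp4 on the server side, so the POST body can be empty.

# Hypothetical smoke test for the /target2 route shown above.
import requests

resp = requests.post("http://localhost:5000/target2")  # assumed host/port
resp.raise_for_status()
face_groups = resp.json()["images"]  # one list of Base64 JPEG strings per person
print(f"{len(face_groups)} face group(s) returned")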
Binary file removed cutVideo.mp4
2 changes: 1 addition & 1 deletion deep2.py
@@ -99,5 +99,5 @@ def mosaic(video_path, image_paths):
if __name__ == "__main__":
    import sys
    video_path = sys.argv[1]
    image_paths = ["save/train/Gongyoo/1.jpeg","save/train/Gongyoo/2.jpeg","save/train/Gongyoo/3.jpeg","save/train/Gongyoo/4.jpeg","save/train/Gongyoo/5.jpeg","save/train/Gongyoo/6.jpeg"]
    image_paths = ["save/train/bbo/bbo.png","save/train/bbo/bbo2.png","save/train/bbo/bbo3.png","save/train/bbo/bbo4.png"]
    mosaic(video_path, image_paths)
107 changes: 107 additions & 0 deletions mosaic.py
@@ -0,0 +1,107 @@
import cv2
import os
from deepface import DeepFace
import torch


def mosaic(video_path, image_paths):
    model_name = "Facenet"

    output_video_path = os.path.join('tmp', 'output2.mp4')
    cap = cv2.VideoCapture(video_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter.fourcc(*'mp4v'), fps, (frame_width, frame_height))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total number of frames in the video

    print(f"Total number of frames in the video: {total_frames}")

    faces_dir = os.path.join('tmp', 'faces')
    if not os.path.exists(faces_dir):
        os.makedirs(faces_dir)

    face_count = 0
    current_frame_count = 0

    # Load the local YOLOv5 face detector from best.pt
    model = torch.hub.load('./yolov5', 'custom', path='best.pt', force_reload=True, source='local')

    # Precompute an embedding for each reference image of the target person
    embedding_list = []
    for image_path in image_paths:
        embedding_result = DeepFace.create_verification_result(
            img1_path=image_path,
            detector_backend='retinaface',
            model_name=model_name,
            enforce_detection=False
        )
        embedding_list.append(embedding_result["embeddings"][0])

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # detections = RetinaFace.detect_faces(img_path=frame)
        detections = model(frame)

        print(f"Frame {current_frame_count}: starting detection")

        # Use stricter thresholds when more than two faces are detected in the frame
        if len(detections.xyxy[0]) > 2:
            threshold = 0.27
            not_threshold = 0.2851
        else:
            threshold = 0.3
            not_threshold = 0.47

        for face_id in detections.xyxy[0]:
            x1, y1, x2, y2 = face_id[:4].int().tolist()
            if y2 - y1 > 50 and x2 - x1 > 50:  # ignore very small detections
                face_image = frame[y1:y2, x1:x2]
                for ref_face in embedding_list:
                    result = DeepFace.verify(img1_path=face_image, img2_path=ref_face, model_name=model_name,
                                             detector_backend='retinaface', enforce_detection=False)
                    distance = result['distance']

                    # Borderline distance: treat as a different person, mosaic it, and save the crop
                    if not_threshold >= distance >= threshold:
                        face_filename = f"face_{face_count}.jpg"
                        verified_str = 'Different'
                        distance_str = '(%.4f >= %.4f)' % (distance, threshold)
                        print(face_filename, verified_str, distance_str)
                        face = cv2.resize(face_image, (10, 10))
                        face = cv2.resize(face, (x2 - x1, y2 - y1), interpolation=cv2.INTER_AREA)
                        frame[y1:y2, x1:x2] = face
                        face_filepath = os.path.join(faces_dir, face_filename)
                        cv2.imwrite(face_filepath, face_image)
                        break

                    # Confident match with a reference face: leave unmosaicked and save the crop
                    if distance < threshold:
                        face_filename = f"face_{face_count}.jpg"
                        verified_str = 'Same'
                        distance_str = '(%.4f >= %.4f)' % (distance, threshold)
                        print(face_filename, verified_str, distance_str)
                        face_filepath = os.path.join(faces_dir, face_filename)
                        cv2.imwrite(face_filepath, face_image)
                        break

                    # Clearly a different person: mosaic the face
                    if distance > not_threshold:
                        face = cv2.resize(face_image, (10, 10))
                        face = cv2.resize(face, (x2 - x1, y2 - y1), interpolation=cv2.INTER_AREA)
                        frame[y1:y2, x1:x2] = face
                        break

            face_count += 1
        current_frame_count += 1
        out.write(frame)

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    return output_video_path


if __name__ == "__main__":
    import sys
    video_path = sys.argv[1]
    image_paths = ["save/train/Gongyoo/1.jpeg","save/train/Gongyoo/2.jpeg","save/train/Gongyoo/3.jpeg","save/train/Gongyoo/4.jpeg","save/train/Gongyoo/5.jpeg","save/train/Gongyoo/6.jpeg"]
    mosaic(video_path, image_paths)
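The pixelation in mosaic.py is a plain downscale/upscale. A standalone sketch of just that step, with a placeholder input file:

# Standalone sketch of the pixelation used in mosaic.py: shrink the region to a
# 10x10 grid, then scale it back up so each cell becomes a visible block.
# "face.jpg" is a placeholder input.
import cv2

img = cv2.imread("face.jpg")
h, w = img.shape[:2]
small = cv2.resize(img, (10, 10))  # collapse detail into a 10x10 grid
blocky = cv2.resize(small, (w, h), interpolation=cv2.INTER_AREA)  # same call as mosaic.py
cv2.imwrite("face_mosaic.jpg", blocky)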
6 changes: 3 additions & 3 deletions mosaic_jiyeon.py
@@ -57,7 +57,7 @@ def mosaic(video_path, image_paths):
        # Detect objects with YOLOv5
        results = model(frame)

        threshold = 0.45
        threshold = 0.6

        # Apply a mosaic to each detected face
        for result in results.xyxy[0]:
@@ -112,7 +112,7 @@ def mosaic(video_path, image_paths):
    # Close the files once processing is complete
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    # cv2.destroyAllWindows()

    end_time = time.time()  # record the end time
    elapsed_time = end_time - start_time  # compute the elapsed time
@@ -123,5 +123,5 @@ def mosaic(video_path, image_paths):
if __name__ == "__main__":
    import sys
    video_path = sys.argv[1]
    image_paths = ["save/train/Gongyoo/1.jpeg","save/train/Gongyoo/2.jpeg","save/train/Gongyoo/3.jpeg","save/train/Gongyoo/4.jpeg","save/train/Gongyoo/5.jpeg","save/train/Gongyoo/6.jpeg"]
    image_paths = ["save/train/yoo/yoo1.png","save/train/yoo/yoo2.png","save/train/yoo/yoo3.png"]
    mosaic(video_path, image_paths)
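Besides the __main__ block, the module can be driven from another script, as app.py does above. A minimal sketch mirroring that call; the video path is a placeholder:

# Minimal driver mirroring app.py's call into mosaic_jiyeon.
# "cutVideo.mp4" is a placeholder path.
import mosaic_jiyeon

image_paths = ["save/train/yoo/yoo1.png", "save/train/yoo/yoo2.png", "save/train/yoo/yoo3.png"]
output_path = mosaic_jiyeon.mosaic("cutVideo.mp4", image_paths)
print(output_path)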
Binary file removed pisik.mp4
Binary file added save/train/bbo/bbo.png
Binary file added save/train/bbo/bbo2.png
Binary file added save/train/bbo/bbo3.png
Binary file added save/train/bbo/bbo4.png
Binary file added save/train/image1.jpeg
Binary file added save/train/image2.jpeg
Binary file added save/train/image3.jpeg
Binary file added save/train/image4.jpeg
Binary file added save/train/image5.jpeg
Binary file added save/train/image6.jpeg
Binary file modified tmp/cutVideo.mp4
95 changes: 95 additions & 0 deletions yoona_target.py
@@ -1,3 +1,97 @@

import cv2
import numpy as np
import os
import base64
from io import BytesIO
from insightface.app import FaceAnalysis
from PIL import Image

def arcface_recognition(video_path):
    app = FaceAnalysis(providers=['CPUExecutionProvider'])  # run on CPU
    app.prepare(ctx_id=0, det_size=(640, 640))

    video_capture = cv2.VideoCapture(video_path)
    identified_faces = []

    while video_capture.isOpened():
        success, frame = video_capture.read()
        if not success:
            break

        faces = app.get(frame)
        for face in faces:
            # Crop the face image from the frame
            box = face.bbox.astype(int)
            face_image = frame[box[1]:box[3], box[0]:box[2]]
            if face_image.size == 0:  # skip empty face crops
                continue
            embedding = face.normed_embedding
            identified_faces.append((face_image, embedding))

    video_capture.release()
    return identified_faces

def group_and_save_faces(identified_faces, save_dir='saved_faces', threshold=0.6):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    face_groups = []
    for face_image, encoding in identified_faces:
        if not face_groups:
            face_groups.append([(face_image, encoding)])
        else:
            matched = False
            for face_group in face_groups:
                group_encodings = [enc for _, enc in face_group]
                avg_encoding = np.mean(group_encodings, axis=0)

                # cosine similarity between the group's mean embedding and this face
                sim = np.dot(avg_encoding, encoding) / (np.linalg.norm(avg_encoding) * np.linalg.norm(encoding))

                if sim > threshold:
                    face_group.append((face_image, encoding))
                    matched = True
                    break

            if not matched:
                face_groups.append([(face_image, encoding)])
    # Sort the groups by number of faces, descending
    face_groups.sort(key=lambda x: len(x), reverse=True)

    return save_faces(face_groups)

def save_faces(face_groups):
    face_base64_arrays = []

    for face_group in face_groups:
        encoded_faces = []
        count = 0  # counts the images kept for this group
        for face_image, _ in face_group:
            # OpenCV reads images in BGR, so convert to RGB
            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
            # Convert the array to a PIL image object
            pil_img = Image.fromarray(face_image)
            # Create an in-memory buffer to hold the encoded image
            buf = BytesIO()
            # Save the image in JPEG format
            pil_img.save(buf, format="JPEG")
            # Convert the buffer's bytes to a Base64-encoded string
            base64_string = base64.b64encode(buf.getvalue()).decode('utf-8')
            # Add this person's encoded image
            encoded_faces.append(base64_string)
            count += 1
            if count == 3:  # keep at most 3 images per person
                break
        # Add each person's encoded images to the result array
        face_base64_arrays.append(encoded_faces)

    return face_base64_arrays



# Original implementation (kept for reference)
"""
import base64
from io import BytesIO
import cv2
@@ -85,3 +179,4 @@ def save_faces(identified_faces):
return face_base64_arrays
"""
