-
Notifications
You must be signed in to change notification settings - Fork 97
/
head_pose_estimation.hpp
110 lines (80 loc) · 2.9 KB
/
head_pose_estimation.hpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
#ifndef HEAD_POSE_ESTIMATION_HPP
#define HEAD_POSE_ESTIMATION_HPP
// NOTE: guard renamed from __HEAD_POSE_ESTIMATION — identifiers containing a
// double underscore are reserved for the implementation ([lex.name]), so the
// old macro technically invoked undefined behavior.

#include <opencv2/core/core.hpp>
#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>

#include <vector>
#include <array>
#include <string>
// Anthropometric model for an adult male head.
// Each constant is the 3D position of a facial feature relative to the
// sellion (the deepest point of the nasal bridge, between the eyes), which
// serves as the model origin.
// Values taken from https://en.wikipedia.org/wiki/Human_head
// Axes: X points forward (out of the face); +Y is toward the subject's left
// (LEFT_EYE/LEFT_EAR carry +Y) and -Z is downward (MENTON is below sellion).
// Units are presumably millimetres -- TODO confirm against the pose solver
// in the .cpp.
const static cv::Point3f P3D_SELLION(0., 0.,0.);
const static cv::Point3f P3D_RIGHT_EYE(-20., -65.5,-5.);
const static cv::Point3f P3D_LEFT_EYE(-20., 65.5,-5.);
const static cv::Point3f P3D_RIGHT_EAR(-100., -77.5,-6.);
const static cv::Point3f P3D_LEFT_EAR(-100., 77.5,-6.);
const static cv::Point3f P3D_NOSE(21.0, 0., -48.0);
const static cv::Point3f P3D_STOMMION(10.0, 0., -75.0);  // mouth center
const static cv::Point3f P3D_MENTON(0., 0.,-133.0);      // chin tip

// Cap on the number of feature points tracked at once -- presumably bounds
// the optical-flow point set used by quantityOfMovement(); confirm in .cpp.
static const int MAX_FEATURES_TO_TRACK=100;
// Facial features of interest, each mapped to its landmark index in the
// fitted shape. Indices follow the 68-point annotation scheme implied by the
// default "shape_predictor_68_face_landmarks.dat" model used by
// HeadPoseEstimation -- presumably the iBUG 300-W markup; TODO confirm.
// Kept as a plain enum (not enum class) because values are used as integer
// landmark indices.
enum FACIAL_FEATURE {
    NOSE=30,
    RIGHT_EYE=36,
    LEFT_EYE=45,
    RIGHT_SIDE=0,           // right edge of the jaw contour
    LEFT_SIDE=16,           // left edge of the jaw contour
    EYEBROW_RIGHT=21,       // inner end of the right eyebrow
    EYEBROW_LEFT=22,        // inner end of the left eyebrow
    MOUTH_UP=51,
    MOUTH_DOWN=57,
    MOUTH_RIGHT=48,
    MOUTH_LEFT=54,
    SELLION=27,             // nasal bridge root -- origin of the 3D model above
    MOUTH_CENTER_TOP=62,
    MOUTH_CENTER_BOTTOM=66,
    MENTON=8                // chin tip
};
/// A head pose: a 4x4 homogeneous rigid-transformation matrix.
/// Modernized from `typedef` to a C++11 alias declaration; source-compatible
/// for all existing uses of the `head_pose` name.
using head_pose = cv::Matx44d;
/** Estimates 6-DoF head pose (and related face metrics) from video frames,
 * using a dlib face detector / shape predictor and the anthropometric 3D
 * model defined above. All member bodies live in the .cpp.
 */
class HeadPoseEstimation {

public:

    /** Builds the estimator.
     *
     * @param face_detection_model path to a dlib shape-predictor model file
     *        (default: the standard 68-landmark predictor).
     * @param focalLength camera focal length -- presumably in pixels, with
     *        455 tuned for a particular camera; TODO confirm in the .cpp.
     */
    HeadPoseEstimation(const std::string& face_detection_model = "shape_predictor_68_face_landmarks.dat", float focalLength=455.);

    /** Ingests a new frame -- presumably runs face detection and landmark
     * fitting, refreshing `faces` and `shapes` (confirm in the .cpp).
     * cv::Mat is a cheap reference-counted header, so by-value is fine.
     */
    void update(cv::Mat image);

    /** Returns true if the face at `face_idx` (index into the faces found by
     * the last update()) is judged to be smiling; criterion is in the .cpp.
     */
    bool smileDetector(size_t face_idx);

    /** Novelty score for the given faces/gaze flags; the roles of `mu`,
     * `eps` and `threshold` are defined by the implementation in the .cpp --
     * NOTE(review): confirm semantics there before relying on this doc.
     */
    float novelty(std::vector<bool> lookAt,
                  std::vector<dlib::rectangle> faces,
                  float mu, float eps, float threshold);

    /** 4x4 transform (head_pose == cv::Matx44d) of the head at `face_idx`. */
    head_pose pose(size_t face_idx);

    /** Poses of every face found by the last update(). */
    std::vector<head_pose> poses();

    /** Amount of inter-frame movement -- presumably computed from sparse
     * optical flow between `prevGrayFrame` and `grayFrames`, with
     * `points1`/`points2` the tracked point sets (updated in place) and
     * `needToInit` requesting re-seeding of the points; confirm in the .cpp.
     */
    float quantityOfMovement(cv::Mat rgbFrames,
                             cv::Mat grayFrames,
                             cv::Mat prevGrayFrame,
                             cv::Mat opticalFlow,
                             std::vector<cv::Point2f> &points1,
                             std::vector<cv::Point2f> &points2,
                             bool needToInit);

    // Camera intrinsics (focal length and principal point).
    float focalLength;
    float opticalCenterX;
    float opticalCenterY;

#ifdef HEAD_POSE_ESTIMATION_DEBUG
    cv::Mat _debug;  // debug visualisation frame, only in debug builds
#endif

private:

    // dlib view of the current frame -- presumably wraps the cv::Mat last
    // passed to update() without copying pixels.
    dlib::cv_image<dlib::bgr_pixel> current_image;

    dlib::frontal_face_detector detector;
    dlib::shape_predictor pose_model;

    std::vector<dlib::rectangle> faces;               // detected face boxes
    std::vector<dlib::full_object_detection> shapes;  // fitted landmarks per face

    /** Return the point corresponding to the dictionary marker.
     */
    cv::Point2f coordsOf(size_t face_idx, FACIAL_FEATURE feature);

    /** Returns true if the lines intersect (and set r to the intersection
     * coordinates), false otherwise.
     */
    bool intersection(cv::Point2f o1, cv::Point2f p1,
                      cv::Point2f o2, cv::Point2f p2,
                      cv::Point2f &r);
};
#endif // end of include guard