-
Notifications
You must be signed in to change notification settings - Fork 3
/
blender_camera_util.py
198 lines (172 loc) · 7.04 KB
/
blender_camera_util.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
import bpy
import bpy_extras
from mathutils import Matrix, Vector

import numpy as np
#---------------------------------------------------------------
# 3x4 P matrix from Blender camera
#---------------------------------------------------------------
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
def get_calibration_matrix_K_from_blender(camd):
    """Build the 3x3 intrinsic calibration matrix K from Blender camera data.

    Args:
        camd: a Blender camera data-block (``bpy.types.Camera``) — note this
            is ``cam_object.data``, not the camera object itself.

    Returns:
        mathutils.Matrix (3x3)::

            K = [[alpha_u, skew, u_0],
                 [0,   alpha_v,  v_0],
                 [0,        0,    1]]

        where alpha_u/alpha_v are the focal lengths in pixels and
        (u_0, v_0) is the principal point, assumed to be the image centre.
    """
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camd.sensor_fit == 'VERTICAL'):
        # The sensor height is fixed (sensor fit is vertical);
        # the sensor width is effectively changed with the pixel aspect ratio.
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # The sensor width is fixed (sensor fit is horizontal);
        # the sensor height is effectively changed with the pixel aspect ratio.
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    # Parameters of the intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0  # only use rectangular pixels

    K = Matrix(
        ((alpha_u, skew,    u_0),
         (0,       alpha_v, v_0),
         (0,       0,       1)))
    return K
# Returns camera rotation and translation matrices from Blender.
#
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def get_3x4_RT_matrix_from_blender(cam):
    '''Return the 3x4 world-to-CV extrinsic matrix [R|t] for *cam*.

    NOTE: this function returns the world-to-cv transform, NOT the camera
    pose matrix.
    '''
    # Fixed change of basis from Blender camera coordinates (y up,
    # negative-z look-at) to computer-vision camera coordinates
    # (y down, positive-z look-at).
    bcam_to_cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    # Read pose from matrix_world (rather than rotation_euler/location)
    # so that constraints and parenting are taken into account.
    loc, rot = cam.matrix_world.decompose()[0:2]

    # The object rotation transposed gives the coordinate (world->camera)
    # rotation; the matching translation is -R * location.
    rot_world_to_bcam = rot.to_matrix().transposed()
    t_world_to_bcam = -1 * rot_world_to_bcam * loc

    # Compose with the basis change to land in CV camera coordinates.
    rot_world_to_cv = bcam_to_cv * rot_world_to_bcam
    t_world_to_cv = bcam_to_cv * t_world_to_bcam

    # Stack rotation and translation into a single 3x4 [R|t] matrix.
    return Matrix((
        rot_world_to_cv[0][:] + (t_world_to_cv[0],),
        rot_world_to_cv[1][:] + (t_world_to_cv[1],),
        rot_world_to_cv[2][:] + (t_world_to_cv[2],),
    ))
def get_3x4_P_matrix_from_blender(cam):
    """Return (P, K, RT): the full 3x4 projection matrix and its two factors."""
    intrinsics = get_calibration_matrix_K_from_blender(cam.data)
    extrinsics = get_3x4_RT_matrix_from_blender(cam)
    return intrinsics * extrinsics, intrinsics, extrinsics
def get_bcam2world_RT_matrix_from_blender(cam):
    """Return the 4x4 camera-to-world (bcam -> world) transform as a numpy array."""
    loc, rot = cam.matrix_world.decompose()[0:2]
    # world->bcam rotation is the transposed object rotation;
    # the matching translation is -R * location.
    rot_w2b = rot.to_matrix().transposed()
    trans_w2b = -1 * rot_w2b * loc
    # Assemble the homogeneous world->bcam matrix, then invert it to get
    # the bcam->world (camera pose) matrix.
    world2bcam = np.eye(4)
    world2bcam[:3, :3] = np.array(rot_w2b)
    world2bcam[:3, 3] = np.array(trans_w2b)
    return np.linalg.inv(world2bcam)
def get_world2bcam_R_matrix_from_blender(cam):
    """Return the 4x4 homogeneous world->bcam rotation (no translation) as numpy.

    The upper-left 3x3 is the world-to-camera rotation; the translation
    column is left zero.
    """
    # Only the rotation part of matrix_world is needed here
    # (the original also unpacked the unused location).
    rotation = cam.matrix_world.decompose()[1]
    R_world2bcam = rotation.to_matrix().transposed()
    R = np.zeros((4, 4))
    R[3, 3] = 1
    R[:3, :3] = np.array(R_world2bcam)
    return R
def get_world2bcam_RT_matrix_from_blender(cam):
    """Return the 4x4 homogeneous world->bcam transform [[R, t], [0, 1]] as numpy."""
    loc, rot = cam.matrix_world.decompose()[0:2]
    # Coordinate rotation is the transposed object rotation; t = -R * loc.
    rot_w2b = rot.to_matrix().transposed()
    trans_w2b = -1 * rot_w2b * loc
    # np.eye already supplies the [0, 0, 0, 1] bottom row.
    rt = np.eye(4)
    rt[:3, :3] = np.array(rot_w2b)
    rt[:3, 3] = np.array(trans_w2b)
    return rt
# ----------------------------------------------------------
# Alternate 3D coordinates to 2D pixel coordinate projection code
# adapted from https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex?lq=1
# to have the y axes pointing up and origin at the top-left corner
def project_by_object_utils(cam, point):
    """Project a world-space *point* (mathutils.Vector) into pixel coordinates.

    Uses ``bpy_extras.object_utils.world_to_camera_view`` and then flips y
    so the origin is at the top-left corner (image convention) instead of
    Blender's bottom-left.

    NOTE: requires ``Vector`` from ``mathutils`` at file scope — the original
    file only imported ``Matrix``, so this raised NameError at runtime.
    """
    scene = bpy.context.scene
    co_2d = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
    render_scale = scene.render.resolution_percentage / 100
    render_size = (
        int(scene.render.resolution_x * render_scale),
        int(scene.render.resolution_y * render_scale),
    )
    return Vector((co_2d.x * render_size[0], render_size[1] - co_2d.y * render_size[1]))
# ----------------------------------------------------------
if __name__ == "__main__":
    # Insert your camera name here
    cam = bpy.data.objects['Camera.001']
    P, K, RT = get_3x4_P_matrix_from_blender(cam)
    print("K")
    print(K)
    print("RT")
    print(RT)
    print("P")
    print(P)

    print("==== Tests ====")
    # Project the three unit axes and the world origin with both methods;
    # the two results for each point should agree.
    e1 = Vector((1, 0, 0, 1))
    e2 = Vector((0, 1, 0, 1))
    e3 = Vector((0, 0, 1, 1))
    O = Vector((0, 0, 0, 1))
    for label, pt in (("e1", e1), ("e2", e2), ("e3", e3), ("world origin", O)):
        p = P * pt
        p /= p[2]  # perspective divide
        print("Projected " + label)
        print(p)
        print("proj by object_utils")
        print(project_by_object_utils(cam, Vector(pt[0:3])))

    # Bonus code: save the 3x4 P matrix into a plain text file.
    # BUGFIX: the original referenced the undefined name `numpy`
    # (the module is imported as `np` at the top of this file).
    nP = np.array(P)
    np.savetxt("/tmp/P3x4.txt", nP)  # to select precision, use e.g. fmt='%.2f'