process_arkit_data.py
# This file is derived from [NeuralRecon](https://github.com/zju3dv/NeuralRecon).
# Originating Author: Yiming Xie, Jiaming Sun
# Original header:
# Copyright SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
import sys

import numpy as np
from tqdm import tqdm

sys.path.append('.')
from tools.kp_reproject import *
from tools.sync_poses import *

# params
project_path = './data/demo/2022-05-09T17-21-06'


def process_data(data_path, data_source='ARKit', window_size=9, min_angle=15, min_distance=0.1, ori_size=(1920, 1440), size=(640, 480)):
    # save image
    print('Extract images from video...')
    video_path = os.path.join(data_path, 'Frames.m4v')
    image_path = os.path.join(data_path, 'images')
    if not os.path.exists(image_path):
        os.mkdir(image_path)
        extract_frames(video_path, out_folder=image_path, size=size)

    # load intrin and extrin
    print('Load intrinsics and extrinsics')
    sync_intrinsics_and_poses(os.path.join(data_path, 'Frames.txt'), os.path.join(data_path, 'ARposes.txt'),
                              os.path.join(data_path, 'SyncedPoses.txt'))

    path_dict = path_parser(data_path, data_source=data_source)
    cam_intrinsic_dict = load_camera_intrinsic(
        path_dict['cam_intrinsic'], data_source=data_source)

    # rescale intrinsics from the original capture resolution to the extracted image size
    for k, v in tqdm(cam_intrinsic_dict.items(), desc='Processing camera intrinsics...'):
        cam_intrinsic_dict[k]['K'][0, :] /= (ori_size[0] / size[0])
        cam_intrinsic_dict[k]['K'][1, :] /= (ori_size[1] / size[1])

    cam_pose_dict = load_camera_pose(
        path_dict['camera_pose'], data_source=data_source)

    # save intrinsics and extrinsics
    if not os.path.exists(os.path.join(data_path, 'poses')):
        os.mkdir(os.path.join(data_path, 'poses'))
    for k, v in tqdm(cam_pose_dict.items(), desc='Saving camera extrinsics...'):
        np.savetxt(os.path.join(data_path, 'poses', '{}.txt'.format(k)), v, delimiter=' ')

    if not os.path.exists(os.path.join(data_path, 'intrinsics')):
        os.mkdir(os.path.join(data_path, 'intrinsics'))
    for k, v in tqdm(cam_intrinsic_dict.items(), desc='Saving camera intrinsics...'):
        np.savetxt(os.path.join(data_path, 'intrinsics', '{}.txt'.format(k)), v['K'], delimiter=' ')

    # generate fragments
    fragments = []
    all_ids = []
    ids = []
    count = 0
    last_pose = None

    for id in tqdm(cam_intrinsic_dict.keys(), desc='Keyframes selection...'):
        cam_intrinsic = cam_intrinsic_dict[id]
        cam_pose = cam_pose_dict[id]
        if count == 0:
            ids.append(id)
            last_pose = cam_pose
            count += 1
        else:
            # select a new keyframe once the camera has rotated or translated enough
            angle = np.arccos(
                ((np.linalg.inv(cam_pose[:3, :3]) @ last_pose[:3, :3] @ np.array([0, 0, 1]).T) * np.array(
                    [0, 0, 1])).sum())
            dis = np.linalg.norm(cam_pose[:3, 3] - last_pose[:3, 3])
            if angle > (min_angle / 180) * np.pi or dis > min_distance:
                ids.append(id)
                last_pose = cam_pose
                # Compute camera view frustum and extend convex hull
                count += 1
                if count == window_size:
                    all_ids.append(ids)
                    ids = []
                    count = 0

    # save fragments
    for i, ids in enumerate(tqdm(all_ids, desc='Saving fragments file...')):
        poses = []
        intrinsics = []
        for id in ids:
            # Moving down the X-Y plane in the ARKit coordinate to meet the training settings in ScanNet.
            cam_pose_dict[id][2, 3] += 1.5
            poses.append(cam_pose_dict[id])
            intrinsics.append(cam_intrinsic_dict[id]['K'])
        fragments.append({
            'scene': data_path.split('/')[-1],
            'fragment_id': i,
            'image_ids': ids,
            'extrinsics': poses,
            'intrinsics': intrinsics
        })

    with open(os.path.join(data_path, 'fragments.pkl'), 'wb') as f:
        pickle.dump(fragments, f)


if __name__ == '__main__':
    process_data(project_path)
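
Example usage: a minimal sketch of calling process_data on a different capture folder. The path below is hypothetical; it is assumed to follow the same layout as the demo scene, i.e. it must contain Frames.m4v, Frames.txt and ARposes.txt before the script is run.

    # Hypothetical capture directory (assumption): Frames.m4v, Frames.txt and ARposes.txt inside.
    scene_path = './data/demo/my-own-scene'
    process_data(scene_path, window_size=9, min_angle=15, min_distance=0.1)

The script then writes the resized images, per-frame poses/ and intrinsics/ text files, and fragments.pkl back into that same directory.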