diff --git a/configs/hmr/resnet50_hmr_pw3d.py b/configs/hmr/resnet50_hmr_pw3d.py
index 1fc2ff83..8c3d0167 100644
--- a/configs/hmr/resnet50_hmr_pw3d.py
+++ b/configs/hmr/resnet50_hmr_pw3d.py
@@ -170,14 +170,51 @@
                 ann_file='cmu_mosh.npz')),
     test=dict(
         type=dataset_type,
+        body_model=dict(
-            type='GenderedSMPL',
+            type='SMPL',
             keypoint_src='h36m',
             keypoint_dst='h36m',
             model_path='data/body_models/smpl',
             joints_regressor='data/body_models/J_regressor_h36m.npy'),
-        dataset_name='pw3d',
+        dataset_name='humman',
+        convention='coco_wholebody',
         data_prefix='data',
         pipeline=test_pipeline,
-        ann_file='pw3d_test.npz'),
+        ann_file='humman_test_kinect_ds10_smpl.npz'),
+
+        # body_model=dict(
+        #     type='SMPL',
+        #     keypoint_src='h36m',
+        #     keypoint_dst='h36m',
+        #     model_path='data/body_models/smpl',
+        #     joints_regressor='data/body_models/J_regressor_h36m.npy'),
+        # dataset_name='humman',
+        # convention='coco_wholebody',
+        # data_prefix='data',
+        # pipeline=test_pipeline,
+        # ann_file='humman_test_iphone_ds10_smpl.npz'),
+
+        # body_model=dict(
+        #     type='GenderedSMPL',
+        #     keypoint_src='h36m',
+        #     keypoint_dst='h36m',
+        #     model_path='data/body_models/smpl',
+        #     joints_regressor='data/body_models/J_regressor_h36m.npy'),
+        # dataset_name='pw3d',
+        # data_prefix='data',
+        # pipeline=test_pipeline,
+        # ann_file='pw3d_test.npz'),
+
+        # body_model=dict(
+        #     type='SMPL',
+        #     keypoint_src='h36m',
+        #     keypoint_dst='h36m',
+        #     model_path='data/body_models/smpl',
+        #     joints_regressor='data/body_models/J_regressor_h36m.npy'),
+        # dataset_name='h36m',
+        # convention='h36m',  # convert keypoints to h36m
+        # data_prefix='data',
+        # pipeline=test_pipeline,
+        # ann_file='h36m_valid_protocol2.npz'),
 )
diff --git a/mmhuman3d/data/data_converters/humman.py b/mmhuman3d/data/data_converters/humman.py
index 4249fa5d..ccb42ca9 100644
--- a/mmhuman3d/data/data_converters/humman.py
+++ b/mmhuman3d/data/data_converters/humman.py
@@ -52,6 +52,7 @@ def __init__(self, *args, **kwargs):
         self.device = torch.device(
             'cuda') if torch.cuda.is_available() else torch.device('cpu')

+        # Body model used for keypoint computation
         self.smpl = build_body_model(
             dict(
                 type='SMPL',
@@ -61,6 +62,18 @@ def __init__(self, *args, **kwargs):
                 extra_joints_regressor='data/body_models/J_regressor_extra.npy'
             )).to(self.device)

+        # Body model used for pelvis computation in SMCReader
+        self.smpl_smc = build_body_model(
+            dict(
+                type='SMPL',
+                gender='neutral',
+                num_betas=10,
+                keypoint_src='smpl_45',
+                keypoint_dst='smpl_45',
+                model_path='data/body_models/smpl',
+                batch_size=1,
+            )).to(self.device)
+
     def _derive_keypoints(self, global_orient, body_pose, betas, transl,
                           focal_length, image_size, camera_center):
         """Get SMPL-derived keypoints."""
@@ -241,6 +254,12 @@ def convert_by_mode(self, dataset_path: str, out_path: str,

         ann_paths = sorted(glob.glob(os.path.join(dataset_path, '*.smc')))

+        # temp action
+        if mode != 'test': return
+        view = 10
+        # body_part = 'lower_limb'
+        # with open(os.path.join(dataset_path, f'{body_part}.txt'), 'r') as f:
+        #     split = set(f.read().splitlines())
         with open(os.path.join(dataset_path, f'{mode}.txt'), 'r') as f:
             split = set(f.read().splitlines())

@@ -250,7 +269,7 @@
                 continue

             try:
-                smc_reader = SMCReader(ann_path)
+                smc_reader = SMCReader(ann_path, body_model=self.smpl_smc)
             except OSError:
                 print(f'Unable to load {ann_path}.')
                 continue
@@ -268,6 +287,9 @@ def convert_by_mode(self, dataset_path: str, out_path: str,
                 [('iPhone', i) for i in range(num_iphone)]
             assert len(device_list) == num_kinect + num_iphone

+            # temp
+            device_list = [('Kinect', view)]
+
             for device, device_id in device_list:
                 assert device in {
                     'Kinect', 'iPhone'
@@ -399,17 +421,20 @@ def convert_by_mode(self, dataset_path: str, out_path: str,
                 kinect_keypoints3d_smpl_, kinect_keypoints2d_humman_,
                 kinect_keypoints3d_humman_)

-            file_name = f'humman_{mode}_kinect_ds{self.downsample_ratio}_smpl.npz'
+            # temp
+            # file_name = f'humman_{mode}_kinect_ds{self.downsample_ratio}_smpl.npz'
+            file_name = f'humman_{mode}_kinect_ds10_view{view}.npz'
             out_file = os.path.join(out_path, file_name)
             kinect_human_data.dump(out_file)

-            # make iphone human data
-            iphone_human_data = self._make_human_data(
-                iphone_smpl, iphone_image_path_, iphone_image_id_,
-                iphone_bbox_xywh_, iphone_keypoints2d_smpl_,
-                iphone_keypoints3d_smpl_, iphone_keypoints2d_humman_,
-                iphone_keypoints3d_humman_)
-
-            file_name = f'humman_{mode}_iphone_ds{self.downsample_ratio}_smpl.npz'
-            out_file = os.path.join(out_path, file_name)
-            iphone_human_data.dump(out_file)
+            # temp
+            # # make iphone human data
+            # iphone_human_data = self._make_human_data(
+            #     iphone_smpl, iphone_image_path_, iphone_image_id_,
+            #     iphone_bbox_xywh_, iphone_keypoints2d_smpl_,
+            #     iphone_keypoints3d_smpl_, iphone_keypoints2d_humman_,
+            #     iphone_keypoints3d_humman_)
+            #
+            # file_name = f'humman_{mode}_iphone_ds{self.downsample_ratio}_smpl.npz'
+            # out_file = os.path.join(out_path, file_name)
+            # iphone_human_data.dump(out_file)
diff --git a/mmhuman3d/data/data_structures/smc_reader.py b/mmhuman3d/data/data_structures/smc_reader.py
index 25a503bd..6c470514 100644
--- a/mmhuman3d/data/data_structures/smc_reader.py
+++ b/mmhuman3d/data/data_structures/smc_reader.py
@@ -3,19 +3,25 @@
 import cv2
 import h5py
 import numpy as np
+import torch
 import tqdm

-from mmhuman3d.utils.transforms import aa_to_rotmat, rotmat_to_aa
+from mmhuman3d.models.body_models.utils import batch_transform_to_camera_frame
+from mmhuman3d.models.builder import build_body_model


 class SMCReader:

-    def __init__(self, file_path):
+    def __init__(self, file_path, body_model=None):
         """Read SenseMocapFile endswith ".smc".

         Args:
             file_path (str): Path to an SMC file.
+            body_model (nn.Module or dict):
+                Only needed for SMPL transformation to device frame
+                if nn.Module: a body_model instance
+                if dict: a body_model config
         """
         self.smc = h5py.File(file_path, 'r')
         self.__calibration_dict__ = None
@@ -47,6 +53,26 @@
             self.smpl_num_frames = self.smc['SMPL'].attrs['num_frame']
             self.smpl_created_time = self.smc['SMPL'].attrs['created_time']

+        # initialize body model
+        if isinstance(body_model, torch.nn.Module):
+            self.body_model = body_model
+        elif isinstance(body_model, dict):
+            self.body_model = build_body_model(body_model)
+        else:
+            # in most cases, SMCReader is instantiated for image reading
+            # only. Hence, it is wasteful to initialize a body model until
+            # really needed in get_smpl()
+            self.body_model = None
+            self.default_body_model_config = dict(
+                type='SMPL',
+                gender='neutral',
+                num_betas=10,
+                keypoint_src='smpl_45',
+                keypoint_dst='smpl_45',
+                model_path='data/body_models/smpl',
+                batch_size=1,
+            )
+
     def get_kinect_color_extrinsics(self, kinect_id, homogeneous=True):
         """Get extrinsics(cam2world) of a kinect RGB camera by kinect id.
@@ -837,12 +863,6 @@ def get_keypoints3d(self,
         if device_id is not None:
             assert device_id >= 0

-        kps3d_dict = self.smc['Keypoints3D']
-
-        # keypoints3d are in world coordinate system
-        keypoints3d_world = kps3d_dict['keypoints3d'][...]
-        keypoints3d_mask = kps3d_dict['keypoints3d_mask'][...]
-
         if frame_id is None:
             frame_list = range(self.get_keypoints_num_frames())
         elif isinstance(frame_id, list):
@@ -854,7 +874,12 @@
         else:
             raise TypeError('frame_id should be int, list or None.')

+        kps3d_dict = self.smc['Keypoints3D']
+
+        # keypoints3d are in world coordinate system
+        keypoints3d_world = kps3d_dict['keypoints3d'][...]
         keypoints3d_world = keypoints3d_world[frame_list, ...]
+        keypoints3d_mask = kps3d_dict['keypoints3d_mask'][...]

         # return keypoints3d in world coordinate system
         if device is None:
@@ -923,12 +948,21 @@
         body_pose = smpl_dict['body_pose'][...]
         transl = smpl_dict['transl'][...]
         betas = smpl_dict['betas'][...]
-        if frame_id is not None:
-            if isinstance(frame_id, int):
-                frame_id = [frame_id]
-            body_pose = body_pose[frame_id, ...]
-            global_orient = global_orient[frame_id, ...]
-            transl = transl[frame_id, ...]
+
+        if frame_id is None:
+            frame_list = range(self.get_smpl_num_frames())
+        elif isinstance(frame_id, list):
+            frame_list = frame_id
+        elif isinstance(frame_id, int):
+            assert frame_id < self.get_smpl_num_frames(),\
+                'Index out of range...'
+            frame_list = [frame_id]
+        else:
+            raise TypeError('frame_id should be int, list or None.')
+
+        body_pose = body_pose[frame_list, ...]
+        global_orient = global_orient[frame_list, ...]
+        transl = transl[frame_list, ...]

         # return SMPL parameters in world coordinate system
         if device is None:
@@ -942,36 +976,44 @@
         # return SMPL parameters in device coordinate system
         else:
+
+            if self.body_model is None:
+                self.body_model = \
+                    build_body_model(self.default_body_model_config)
+            torch_device = self.body_model.global_orient.device
+
+            assert device in {
+                'Kinect', 'iPhone'
+            }, f'Undefined device: {device}, should be "Kinect" or "iPhone"'
+            assert device_id >= 0
+
             if device == 'Kinect':
-                cam2world = self.get_kinect_color_extrinsics(
+                T_cam2world = self.get_kinect_color_extrinsics(
                     kinect_id=device_id, homogeneous=True)
             else:
-                cam2world = self.get_iphone_extrinsics(
+                T_cam2world = self.get_iphone_extrinsics(
                     iphone_id=device_id, vertical=vertical)

-            num_frames = global_orient.shape[0]
-
-            T_smpl2world = np.repeat(
-                np.eye(4).reshape(1, 4, 4), num_frames, axis=0)
-            assert T_smpl2world.shape == (num_frames, 4, 4)
-
-            T_smpl2world[:, :3, :3] = aa_to_rotmat(global_orient)
-            T_smpl2world[:, :3, 3] = transl
+            T_world2cam = np.linalg.inv(T_cam2world)

-            T_world2cam = np.linalg.inv(cam2world)
-            T_world2cam = np.repeat(
-                T_world2cam.reshape(1, 4, 4), num_frames, axis=0)
-            assert T_world2cam.shape == (num_frames, 4, 4)
+            output = self.body_model(
+                global_orient=torch.tensor(global_orient, device=torch_device),
+                body_pose=torch.tensor(body_pose, device=torch_device),
+                transl=torch.tensor(transl, device=torch_device),
+                betas=torch.tensor(betas, device=torch_device))
+            joints = output['joints'].detach().cpu().numpy()
+            pelvis = joints[:, 0, :]

-            T_smpl2cam = T_world2cam @ T_smpl2world
-
-            global_orient = rotmat_to_aa(T_smpl2cam[:, :3, :3])
-            transl = T_smpl2world[:, :3, 3]
+            new_global_orient, new_transl = batch_transform_to_camera_frame(
+                global_orient=global_orient,
+                transl=transl,
+                pelvis=pelvis,
+                extrinsic=T_world2cam)

             smpl_dict = dict(
-                global_orient=global_orient,
+                global_orient=new_global_orient,
                 body_pose=body_pose,
-                transl=transl,
+                transl=new_transl,
                 betas=betas)

             return smpl_dict
diff --git a/mmhuman3d/data/datasets/pipelines/loading.py b/mmhuman3d/data/datasets/pipelines/loading.py
index ef15f3f5..30129649 100644
--- a/mmhuman3d/data/datasets/pipelines/loading.py
+++ b/mmhuman3d/data/datasets/pipelines/loading.py
@@ -4,7 +4,7 @@
 import mmcv
 import numpy as np

-from mmhuman3d.data.data_structures import SMCReader
+import mmhuman3d.data.data_structures as data_structures

 from ..builder import PIPELINES

@@ -50,7 +50,7 @@ def __call__(self, results):
             assert 'image_id' in results, 'Load image from .smc, ' \
                 'but image_id is not provided.'
             device, device_id, frame_id = results['image_id']
-            smc_reader = SMCReader(filename)
+            smc_reader = data_structures.SMCReader(filename)
             img = smc_reader.get_color(
                 device, device_id, frame_id, disable_tqdm=True)
             img = img.squeeze()  # (1, H, W, 3) -> (H, W, 3)
diff --git a/mmhuman3d/models/body_models/__init__.py b/mmhuman3d/models/body_models/__init__.py
index 38d276a4..fc7881e9 100644
--- a/mmhuman3d/models/body_models/__init__.py
+++ b/mmhuman3d/models/body_models/__init__.py
@@ -2,5 +2,9 @@
 from .smpl import SMPL, GenderedSMPL, HybrIKSMPL
 from .smplx import SMPLX
+from .utils import batch_transform_to_camera_frame, transform_to_camera_frame

-__all__ = ['SMPL', 'GenderedSMPL', 'HybrIKSMPL', 'SMPLX']
+__all__ = [
+    'SMPL', 'GenderedSMPL', 'HybrIKSMPL', 'SMPLX', 'transform_to_camera_frame',
+    'batch_transform_to_camera_frame'
+]
diff --git a/mmhuman3d/models/body_models/utils.py b/mmhuman3d/models/body_models/utils.py
new file mode 100644
index 00000000..5e8de2df
--- /dev/null
+++ b/mmhuman3d/models/body_models/utils.py
@@ -0,0 +1,118 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+import numpy as np
+
+from mmhuman3d.utils.transforms import aa_to_rotmat, rotmat_to_aa
+
+
+def transform_to_camera_frame(global_orient, transl, pelvis, extrinsic):
+    """Transform body model parameters to camera frame.
+
+    Args:
+        global_orient (np.ndarray): shape (3, ). Only global_orient and
+            transl need to be updated in the rigid transformation
+        transl (np.ndarray): shape (3, ).
+        pelvis (np.ndarray): shape (3, ). 3D joint location of pelvis.
+            This is necessary to eliminate the offset from SMPL
+            canonical space origin to pelvis, because the global orient
+            is conducted around the pelvis, not the canonical space origin
+        extrinsic (np.ndarray): shape (4, 4). Transformation matrix
+            from world frame to camera frame
+    Returns:
+        (new_global_orient, new_transl)
+            new_global_orient: transformed global orient
+            new_transl: transformed transl
+    """
+
+    # take out the small offset from smpl origin to pelvis
+    transl_offset = pelvis - transl
+    T_p2w = np.eye(4)
+    T_p2w[:3, 3] = transl_offset
+
+    # camera extrinsic: transformation from world frame to camera frame
+    T_w2c = extrinsic
+
+    # smpl transformation: from vertex frame to world frame
+    T_v2p = np.eye(4)
+    global_orient_mat = aa_to_rotmat(global_orient)
+    T_v2p[:3, :3] = global_orient_mat
+    T_v2p[:3, 3] = transl
+
+    # compute combined transformation from vertex to world
+    T_v2w = T_p2w @ T_v2p
+
+    # compute transformation from vertex to camera
+    T_v2c = T_w2c @ T_v2w
+
+    # decompose vertex to camera transformation
+    # np: new pelvis frame
+    # T_v2c = T_np2c x T_v2np
+    T_np2c = T_p2w
+    T_v2np = np.linalg.inv(T_np2c) @ T_v2c
+
+    # decompose into new global orient and new transl
+    new_global_orient_mat = T_v2np[:3, :3]
+    new_global_orient = rotmat_to_aa(new_global_orient_mat)
+    new_transl = T_v2np[:3, 3]
+
+    return new_global_orient, new_transl
+
+
+def batch_transform_to_camera_frame(global_orient, transl, pelvis, extrinsic):
+    """Transform body model parameters to camera frame by batch.
+
+    Args:
+        global_orient (np.ndarray): shape (N, 3). Only global_orient and
+            transl need to be updated in the rigid transformation
+        transl (np.ndarray): shape (N, 3).
+        pelvis (np.ndarray): shape (N, 3). 3D joint location of pelvis.
+            This is necessary to eliminate the offset from SMPL
+            canonical space origin to pelvis, because the global orient
+            is conducted around the pelvis, not the canonical space origin
+        extrinsic (np.ndarray): shape (4, 4). Transformation matrix
+            from world frame to camera frame
+    Returns:
+        (new_global_orient, new_transl)
+            new_global_orient: transformed global orient
+            new_transl: transformed transl
+    """
+    N = len(global_orient)
+    assert global_orient.shape == (N, 3)
+    assert transl.shape == (N, 3)
+    assert pelvis.shape == (N, 3)
+
+    # take out the small offset from smpl origin to pelvis
+    transl_offset = pelvis - transl
+    T_p2w = np.eye(4).reshape(1, 4, 4).repeat(N, axis=0)
+    T_p2w[:, :3, 3] = transl_offset
+
+    # camera extrinsic: transformation from world frame to camera frame
+    T_w2c = extrinsic
+
+    # smpl transformation: from vertex frame to world frame
+    T_v2p = np.eye(4).reshape(1, 4, 4).repeat(N, axis=0)
+    global_orient_mat = aa_to_rotmat(global_orient)
+    T_v2p[:, :3, :3] = global_orient_mat
+    T_v2p[:, :3, 3] = transl
+
+    # compute combined transformation from vertex to world
+    T_v2w = T_p2w @ T_v2p
+
+    # compute transformation from vertex to camera
+    T_v2c = T_w2c @ T_v2w
+
+    # decompose vertex to camera transformation
+    # np: new pelvis frame
+    # T_v2c = T_np2c x T_v2np
+    T_np2c = T_p2w
+    T_v2np = np.linalg.inv(T_np2c) @ T_v2c
+
+    # decompose into new global orient and new transl
+    new_global_orient_mat = T_v2np[:, :3, :3]
+    new_global_orient = rotmat_to_aa(new_global_orient_mat)
+    new_transl = T_v2np[:, :3, 3]
+
+    assert new_global_orient.shape == (N, 3)
+    assert new_transl.shape == (N, 3)
+
+    return new_global_orient, new_transl
diff --git a/tests/test_models/test_body_models/test_smpl.py b/tests/test_models/test_body_models/test_smpl.py
new file mode 100644
index 00000000..df93c869
--- /dev/null
+++ b/tests/test_models/test_body_models/test_smpl.py
@@ -0,0 +1,99 @@
+import torch
+
+from mmhuman3d.models.builder import build_body_model
+
+body_model_load_dir = 'data/body_models/smpl'
+extra_joints_regressor = 'data/J_regressor_extra.npy'
+
+
+def test_smpl():
+
+    random_betas = torch.rand((1, 10))
+
+    # test SMPL
+    smpl_54 = build_body_model(
+        dict(
+            type='SMPL',
+            keypoint_src='smpl_54',
+            keypoint_dst='smpl_54',
+            model_path=body_model_load_dir,
+            extra_joints_regressor=extra_joints_regressor))
+
+    smpl_54_output = smpl_54(betas=random_betas)
+    smpl_54_joints = smpl_54_output['joints']
+
+    smpl_49 = build_body_model(
+        dict(
+            type='SMPL',
+            keypoint_src='smpl_54',
+            keypoint_dst='smpl_49',
+            model_path=body_model_load_dir,
+            extra_joints_regressor=extra_joints_regressor))
+
+    smpl_49_output = smpl_49(betas=random_betas)
+    smpl_49_joints = smpl_49_output['joints']
+
+    joint_mapping = [
+        24, 12, 17, 19, 21, 16, 18, 20, 0, 2, 5, 8, 1, 4, 7, 25, 26, 27, 28,
+        29, 30, 31, 32, 33, 34, 8, 5, 45, 46, 4, 7, 21, 19, 17, 16, 18, 20, 47,
+        48, 49, 50, 51, 52, 53, 24, 26, 25, 28, 27
+    ]
+
+    assert torch.isclose(smpl_54_joints[:, joint_mapping, :],
+                         smpl_49_joints).all()
+
+
+def test_gendered_smpl():
+    random_betas_neutral = torch.rand((1, 10))
+    random_betas_male = torch.rand((1, 10))
+    random_betas_female = torch.rand((1, 10))
+    gender = torch.Tensor([-1, 0, 1])
+
+    smpl_neutral = build_body_model(
+        dict(
+            type='SMPL',
+            gender='neutral',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path=body_model_load_dir,
+        ))
+
+    smpl_male = build_body_model(
+        dict(
+            type='SMPL',
+            gender='male',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path=body_model_load_dir,
+        ))
+
+    smpl_female = build_body_model(
+        dict(
+            type='SMPL',
+            gender='female',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path=body_model_load_dir,
+        ))
+
+    gendered_smpl = build_body_model(
+        dict(
+            type='GenderedSMPL',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path=body_model_load_dir))
+
+    smpl_neutral_output = smpl_neutral(betas=random_betas_neutral)
+    smpl_male_output = smpl_male(betas=random_betas_male)
+    smpl_female_output = smpl_female(betas=random_betas_female)
+
+    betas_concat = torch.cat(
+        [random_betas_neutral, random_betas_male, random_betas_female])
+    joint_concat = torch.cat([
+        smpl_neutral_output['joints'], smpl_male_output['joints'],
+        smpl_female_output['joints']
+    ])
+
+    gendered_smpl_output = gendered_smpl(betas=betas_concat, gender=gender)
+
+    assert torch.isclose(joint_concat, gendered_smpl_output['joints']).all()
diff --git a/tests/test_models/test_body_models/test_utils.py b/tests/test_models/test_body_models/test_utils.py
new file mode 100644
index 00000000..7570b291
--- /dev/null
+++ b/tests/test_models/test_body_models/test_utils.py
@@ -0,0 +1,132 @@
+import torch
+
+from mmhuman3d.models.body_models import (
+    batch_transform_to_camera_frame,
+    transform_to_camera_frame,
+)
+from mmhuman3d.models.builder import build_body_model
+from mmhuman3d.utils.transforms import ee_to_rotmat
+
+
+def test_transform_to_camera_frame():
+
+    # initialize body model
+    body_model = build_body_model(
+        dict(
+            type='SMPL',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path='data/body_models/smpl',
+        ))
+
+    # generate random values
+    random_transl = torch.rand((1, 3))
+    random_rotation = torch.rand((1, 3))
+    random_rotmat = ee_to_rotmat(random_rotation)
+
+    random_extrinsic = torch.eye(4)
+    random_extrinsic[:3, :3] = random_rotmat
+    random_extrinsic[:3, 3] = random_transl
+
+    random_global_orient = torch.rand((1, 3))
+    random_body_pose = torch.rand((1, 69))
+    random_transl = torch.rand((1, 3))
+    random_betas = torch.rand((1, 10))
+
+    random_output = body_model(
+        global_orient=random_global_orient,
+        body_pose=random_body_pose,
+        transl=random_transl,
+        betas=random_betas)
+
+    random_joints = random_output['joints']
+    random_pelvis = random_joints[:, 0, :]
+
+    # transform params
+    transformed_global_orient, transformed_transl = \
+        transform_to_camera_frame(
+            global_orient=random_global_orient.numpy().squeeze(),  # (3, )
+            transl=random_transl.numpy().squeeze(),  # (3, )
+            pelvis=random_pelvis.numpy().squeeze(),  # (3, )
+            extrinsic=random_extrinsic.numpy().squeeze()  # (4, 4)
+        )
+
+    transformed_output = body_model(
+        global_orient=torch.tensor(transformed_global_orient.reshape(1, 3)),
+        transl=torch.tensor(transformed_transl.reshape(1, 3)),
+        body_pose=random_body_pose,
+        betas=random_betas)
+
+    transformed_joints = transformed_output['joints']
+
+    # check validity
+    random_joints = random_joints.squeeze()  # (45, 3)
+    random_joints = torch.cat([random_joints, torch.ones(45, 1)],
+                              dim=1)  # (45, 4)
+    test_joints = torch.einsum('ij,kj->ki', random_extrinsic,
+                               random_joints)  # (45, 4)
+    test_joints = test_joints[:, :3]  # (45, 3)
+    assert torch.allclose(transformed_joints, test_joints)
+
+
+def test_batch_transform_to_camera_frame():
+    # batch size
+    N = 2
+
+    # initialize body model
+    body_model = build_body_model(
+        dict(
+            type='SMPL',
+            keypoint_src='smpl_45',
+            keypoint_dst='smpl_45',
+            model_path='data/body_models/smpl',
+        ))
+
+    # generate random values
+    random_transl = torch.rand((1, 3))
+    random_rotation = torch.rand((1, 3))
+    random_rotmat = ee_to_rotmat(random_rotation)
+
+    random_extrinsic = torch.eye(4)
+    random_extrinsic[:3, :3] = random_rotmat
+    random_extrinsic[:3, 3] = random_transl
+
+    random_global_orient = torch.rand((N, 3))
+    random_body_pose = torch.rand((N, 69))
+    random_transl = torch.rand((N, 3))
+    random_betas = torch.rand((N, 10))
+
+    random_output = body_model(
+        global_orient=random_global_orient,
+        body_pose=random_body_pose,
+        transl=random_transl,
+        betas=random_betas)
+
+    random_joints = random_output['joints']
+    random_pelvis = random_joints[:, 0, :]
+
+    # transform params
+    transformed_global_orient, transformed_transl = \
+        batch_transform_to_camera_frame(
+            global_orient=random_global_orient.numpy(),  # (N, 3)
+            transl=random_transl.numpy(),  # (N, 3)
+            pelvis=random_pelvis.numpy(),  # (N, 3)
+            extrinsic=random_extrinsic.numpy()  # (4, 4)
+        )
+
+    transformed_output = body_model(
+        global_orient=torch.tensor(transformed_global_orient.reshape(N, 3)),
+        transl=torch.tensor(transformed_transl.reshape(N, 3)),
+        body_pose=random_body_pose,
+        betas=random_betas)
+
+    transformed_joints = transformed_output['joints']
+
+    # check validity
+    random_joints = random_joints  # (N, 45, 3)
+    random_joints = torch.cat(
+        [random_joints, torch.ones(N, 45, 1)], dim=2)  # (N, 45, 4)
+    test_joints = torch.einsum('ij,bkj->bki', random_extrinsic,
+                               random_joints)  # (N, 45, 4)
+    test_joints = test_joints[:, :, :3]  # (N, 45, 3)
+    assert torch.allclose(transformed_joints, test_joints)
diff --git a/tests/test_smc_reader.py b/tests/test_smc_reader.py
index 765e33c9..adbf48cf 100644
--- a/tests/test_smc_reader.py
+++ b/tests/test_smc_reader.py
@@ -345,7 +345,7 @@ def test_get_keypoints3d_by_device():
     with pytest.raises(KeyError):
         _ = smc.get_keypoints3d(device='Kinect', device_id=10)
     with pytest.raises(TypeError):
-        _ = smc.get_color(device='Kinect', device_id=0, frame_id=0.0)
+        _ = smc.get_keypoints3d(device='Kinect', device_id=0, frame_id=0.0)

     # get by frame_id
     with pytest.raises(AssertionError):
@@ -385,10 +385,9 @@ def test_get_all_smpl():
     body_pose = smpl['body_pose']
     transl = smpl['transl']
     betas = smpl['betas']
-    assert body_pose.shape[0] == smpl_num_frames
-    assert global_orient.shape == (1, 3)
-    assert body_pose.shape == (1, 69)
-    assert transl.shape == (1, 3)
+    assert global_orient.shape == (smpl_num_frames, 3)
+    assert body_pose.shape == (smpl_num_frames, 69)
+    assert transl.shape == (smpl_num_frames, 3)
     assert betas.shape == (1, 10)
     assert isinstance(smpl_created_time, str)
     assert isinstance(global_orient, np.ndarray)
@@ -415,6 +414,45 @@ def test_get_smpl_by_frame():
     assert isinstance(betas, np.ndarray)


+def test_get_smpl_by_device():
+    smc = SMCReader(TEST_SMC_PATH)
+
+    with pytest.raises(AssertionError):
+        _ = smc.get_smpl(device='kinect', device_id=0)
+    with pytest.raises(AssertionError):
+        _ = smc.get_smpl(device='Kinect', device_id=-1)
+    with pytest.raises(KeyError):
+        _ = smc.get_smpl(device='Kinect', device_id=10)
+    with pytest.raises(TypeError):
+        _ = smc.get_smpl(device='Kinect', device_id=0, frame_id=0.0)
+
+    with pytest.raises(AssertionError):
+        _ = smc.get_smpl(device='iphone', device_id=0)
+    with pytest.raises(AssertionError):
+        _ = smc.get_smpl(device='iPhone', device_id=-1)
+    with pytest.raises(KeyError):
+        _ = smc.get_smpl(device='iPhone', device_id=10)
+    with pytest.raises(TypeError):
+        _ = smc.get_smpl(device='iPhone', device_id=0, frame_id=0.0)
+
+    smpl = smc.get_smpl(device='Kinect', device_id=0)
+    smpl_num_frames = smc.get_smpl_num_frames()
+    smpl_created_time = smc.get_smpl_created_time()
+    global_orient = smpl['global_orient']
+    body_pose = smpl['body_pose']
+    transl = smpl['transl']
+    betas = smpl['betas']
+    assert global_orient.shape == (smpl_num_frames, 3)
+    assert body_pose.shape == (smpl_num_frames, 69)
+    assert transl.shape == (smpl_num_frames, 3)
+    assert betas.shape == (1, 10)
+    assert isinstance(smpl_created_time, str)
+    assert isinstance(global_orient, np.ndarray)
+    assert isinstance(body_pose, np.ndarray)
+    assert isinstance(transl, np.ndarray)
+    assert isinstance(betas, np.ndarray)
+
+
 def test_iphone_rotation():
     smc = SMCReader(TEST_SMC_PATH)
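
Note (not part of the patch): a minimal usage sketch of the new batch_transform_to_camera_frame helper, mirroring what SMCReader.get_smpl(device=...) does in the hunk above. The .smc file name is a placeholder, and the sketch assumes the SMPL model files under data/body_models/smpl are available locally.

    import numpy as np
    import torch

    from mmhuman3d.data.data_structures import SMCReader
    from mmhuman3d.models.body_models.utils import \
        batch_transform_to_camera_frame
    from mmhuman3d.models.builder import build_body_model

    # same config as SMCReader's default_body_model_config
    body_model = build_body_model(
        dict(
            type='SMPL',
            gender='neutral',
            num_betas=10,
            keypoint_src='smpl_45',
            keypoint_dst='smpl_45',
            model_path='data/body_models/smpl',
            batch_size=1))

    # placeholder .smc file name
    smc = SMCReader('p000001_a000001.smc', body_model=body_model)

    # SMPL parameters in the world frame
    smpl = smc.get_smpl()

    # pelvis locations are needed because global_orient rotates the body
    # around the pelvis, not around the SMPL canonical-space origin
    output = body_model(
        global_orient=torch.tensor(smpl['global_orient']),
        body_pose=torch.tensor(smpl['body_pose']),
        transl=torch.tensor(smpl['transl']),
        betas=torch.tensor(smpl['betas']))
    pelvis = output['joints'].detach().numpy()[:, 0, :]

    # world frame -> Kinect-0 camera frame
    T_world2cam = np.linalg.inv(
        smc.get_kinect_color_extrinsics(kinect_id=0, homogeneous=True))
    global_orient, transl = batch_transform_to_camera_frame(
        global_orient=smpl['global_orient'],
        transl=smpl['transl'],
        pelvis=pelvis,
        extrinsic=T_world2cam)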