Bringing cropping_h36m feature up to speed. #52

Merged
merged 1 commit on Apr 12, 2022
121 changes: 79 additions & 42 deletions in datasets/h36m/export_to_yarp.py
@@ -3,7 +3,6 @@
"""
Copyright (C) 2021 Event-driven Perception for Robotics
Author: Franco Di Pietro and Gaurvi Goyal

LICENSE GOES HERE
"""
# Reading the GT and videos from h36m and generating YARP-compatible formats
@@ -14,37 +13,46 @@
import os

from datasets.utils.constants import HPECoreSkeleton
from datasets.utils.export import skeleton_to_yarp_row
from datasets.utils.export import skeleton_to_yarp_row, format_crop_file
from datasets.utils.export import crop_pose, crop_frame
from utils import parsing
from os.path import join, isfile
from tqdm import tqdm

########################
# Configuration values #
########################
WRITE_POSE = False
WRITE_FRAMES = False
CROP_DATA = True
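# WRITE_POSE / WRITE_FRAMES toggle which outputs are written; CROP_DATA enables per-sequence cropping via crop_file below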

# paths and parameters
dataset_path = '/home/ggoyal/data/h36m/extracted'
data_output_path = '/home/ggoyal/data/h36m/yarp/'
output_width = 346
output_height = 260
# subs = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
subs = ['S1']
crop_file = '/media/Data/data/h36m/cropping_data.txt' # file left right top bottom
output_width = 640 # 346
output_height = 480 # 260
subs = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
# subs = ['S1']
all_cameras = {1: '54138969', 2: '55011271', 3: '58860488', 4: '60457274'}
cam = 2 # And maybe later camera 4.
cams = [2, 4] # The two front facing cameras
errorlog = '/home/ggoyal/data/h36m/errorlog.txt'
OUTPUT_FRAMES = False




def write_video_and_pose(video_path, gt_path, directory_frames, directory_skl, write_frames=True, write_pose=True,
overwrite=False):
overwrite=False, crop=None):
# Convert the video and annotations to yarp formats.
counter = 0
frame_lines = []
pose_lines = []
vid = cv2.VideoCapture(video_path)
cdf_file = cdflib.CDF(gt_path)
data = (cdf_file.varget("Pose")).squeeze()
dim = (output_width, output_height)

if not overwrite:
if isfile(join(dir_pose, 'data.log')):
if isfile(join(directory_skl, 'data.log')):
write_pose = False
if write_frames:
if not os.path.exists(directory_frames):
Expand All @@ -58,6 +66,7 @@ def write_video_and_pose(video_path, gt_path, directory_frames, directory_skl, w
return 1
while vid.isOpened():
frame_exists, frame = vid.read()

if frame_exists:
timestamp = vid.get(cv2.CAP_PROP_POS_MSEC) / 1000 # convert timestamp to seconds
else:
@@ -73,17 +82,22 @@

filename = 'frame_' + str(counter).zfill(10) + '.png'
filename_full = os.path.join(directory_frames, filename)

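# crop the frame (crop may be None) and record its post-crop size; the skeleton is rescaled with these dimensions below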
frame = crop_frame(frame, crop)
dim_input = frame.shape

if write_frames:
frame_resized = cv2.resize(src=frame, dsize=dim, interpolation=cv2.INTER_AREA)
frame_resized = cv2.resize(src=frame, dsize=(output_width, output_height), interpolation=cv2.INTER_AREA)
cv2.imwrite(filename_full, frame_resized) # create the images

# convert h3.6m joints order to hpecore one
skeleton = skeleton.reshape(-1, 2)
skeleton = parsing.h36m_to_hpecore_skeleton(skeleton)

# rescale skeleton
skeleton[:, 0] = skeleton[:, 0] * output_width / 1000
skeleton[:, 1] = skeleton[:, 1] * output_height / 1000
skeleton = crop_pose(skeleton, crop)
skeleton[:, 0] = skeleton[:, 0] * output_width / dim_input[1]
skeleton[:, 1] = skeleton[:, 1] * output_height / dim_input[0]
skeleton = np.rint(skeleton).astype(int)

torso_size = HPECoreSkeleton.compute_torso_sizes(skeleton)
@@ -103,47 +117,70 @@ def write_video_and_pose(video_path, gt_path, directory_frames, directory_skl, w
parsing.writer(directory_frames, frame_lines, frame_linesInfo)
if write_pose:
parsing.writer(directory_skl, pose_lines, pose_linesInfo)
# print(pose_lines)
# print(pose_linesInfo)

return 0


all_files = []
if __name__ == '__main__':

for sub in subs:
files = os.listdir(join(dataset_path, sub, 'Videos'))
for file in files:
if all_cameras[cam] in file:
all_files.append("%s^%s" % (sub, file))
# List all the relevant files
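# each entry is packed as 'cam^sub^file' and unpacked again in the export loop below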
all_files = []
for sub in subs:
files = os.listdir(join(dataset_path, sub, 'Videos'))
for file in files:
for cam in cams:
if '_ALL' in file:
continue
if all_cameras[cam] in file:
all_files.append("%s^%s^%s" % (cam, sub, file))

for i in tqdm(range(len(all_files))):
sub, file = all_files[i].split('^')
video_file = (join(dataset_path, sub, 'Videos', file))
pose_file = (join(dataset_path, sub, "Poses_D2_Positions", file.replace('mp4', 'cdf')))
output_folder = ("%s_%s" % (sub, file.split('.')[0].replace(' ', '_')))
dir_frames = join(data_output_path, output_folder, f'ch{cam}frames')
dir_pose = join(data_output_path, output_folder, f'ch{cam}GT50Hzskeleton')
# if isfile(join(dir_pose,'data.log')):
# continue
# print((isfile(video_file),isfile(pose_file),dir_frames,dir_pose))
if not isfile(pose_file):
continue
exitcode = write_video_and_pose(video_file, pose_file, dir_frames, dir_pose, write_frames=OUTPUT_FRAMES,
write_pose=True,overwrite=True)
# Read the cropping data
crop_values = None
if CROP_DATA:
try:
f = open(crop_file, "r")
crop_lines = f.readlines()
drop_dict = format_crop_file(crop_lines)
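# format_crop_file is expected to return a dict keyed by output folder name, used for the per-sequence lookup below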
except FileNotFoundError:
print("Cropping file does not exist.")
exit()

# Process and export the relevant data to YARP
for i in tqdm(range(len(all_files))):
cam, sub, file = all_files[i].split('^')
video_file = (join(dataset_path, sub, 'Videos', file))
pose_file = (join(dataset_path, sub, "Poses_D2_Positions", file.replace('mp4', 'cdf')))
output_folder = ("cam%s_%s_%s" % (cam, sub, file.split('.')[0].replace(' ', '_')))
dir_frames = join(data_output_path, output_folder, f'ch{cam}frames')
dir_pose = join(data_output_path, output_folder, f'ch{cam}GT50Hzskeleton')

if CROP_DATA:
try:
crop_values = drop_dict[output_folder]
except KeyError:
print("Cropping values not present in file for %s. Proceeding without cropping" % output_folder)
crop_values = None

if exitcode:
with open(errorlog, 'a') as f:
f.write("%s" % all_files[i])
if isfile(join(dir_pose, 'data.log')):
continue
print((isfile(video_file), isfile(pose_file), dir_frames, dir_pose))
if not isfile(pose_file):
continue
exitcode = write_video_and_pose(video_file, pose_file, dir_frames, dir_pose, write_frames=WRITE_FRAMES,
write_pose=WRITE_POSE, overwrite=False, crop=crop_values)

if exitcode:
with open(errorlog, 'a') as f:
f.write("%s" % all_files[i])

# sub, file = all_files[10].split('^')
# video_file = (join(dataset_path, sub, 'Videos', file))
# pose_file = (join(dataset_path, sub, "Poses_D2_Positions", file.replace('mp4', 'cdf')))
# output_folder = ("%s_%s" % (sub, file.split('.')[0].replace(' ', '_')))
# dir_frames = join(data_output_path, output_folder, 'ch0frames')
# dir_pose = join(data_output_path, output_folder, 'ch0GT50Hzskeleton')
# exitcode = write_video_and_pose(video_file, pose_file, dir_frames, dir_pose, write_frames=OUTPUT_FRAMES,
# write_pose=True,overwrite=True)
# exitcode = write_video_and_pose(video_file, pose_file, dir_frames, dir_pose, write_frames=WRITE_FRAMES,
# write_pose=WRITE_POSE, overwrite=True, crop=CROP_DATA)

# limbs = {0: 'PelvisC', 1: 'PelvisR', 2: 'KneeR', 3: 'AnkleR', 4: 'ToeR', 5: 'ToeROther', 6: 'PelvisL', 7: 'KneeL',
# 8: 'AnkleR', 9: 'ToeR', 10: 'ToeROther', 11: 'Spine', 12: 'SpineM', 13: 'Neck', 14: 'Head', 15: 'HeadOther',
@@ -155,4 +192,4 @@ def write_video_and_pose(video_path, gt_path, directory_frames, directory_skl, w
# 19 = 20
# 22 = 23
# 26 = 27
# 30 = 31
# 30 = 31
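
The cropping helpers imported from datasets.utils.export (format_crop_file, crop_frame, crop_pose) are not part of this diff, so only their call sites are visible above. Below is a minimal sketch of the behaviour the script appears to assume, based on the crop_file comment ("file left right top bottom") and the lookups in the main loop; the file layout and the bodies are illustrative assumptions, not the actual implementation in datasets/utils/export.py.

# Illustrative sketch only: the real helpers live in datasets/utils/export.py.
# Assumed crop_file layout, one sequence per line:
#   <output_folder> <left> <right> <top> <bottom>

def format_crop_file(lines):
    # Assumed: map each output folder name to its (left, right, top, bottom) bounds.
    crop_dict = {}
    for line in lines:
        parts = line.split()
        if len(parts) < 5:
            continue  # skip blank or malformed lines
        crop_dict[parts[0]] = [int(v) for v in parts[1:5]]
    return crop_dict

def crop_frame(frame, crop):
    # Assumed: return the sub-image inside the bounds; pass the frame through when crop is None.
    if crop is None:
        return frame
    left, right, top, bottom = crop
    return frame[top:bottom, left:right]

def crop_pose(skeleton, crop):
    # Assumed: shift the 2D joints into the cropped frame's coordinate system.
    if crop is None:
        return skeleton
    left, _, top, _ = crop
    shifted = skeleton.copy()
    shifted[:, 0] -= left
    shifted[:, 1] -= top
    return shifted

With helpers of this shape, crop_frame(frame, None) and crop_pose(skeleton, None) leave their inputs untouched, which matches the main loop falling back to crop_values = None when a sequence is missing from the cropping file.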