read_bag_example.py
#####################################################
## Read bag from file ##
#####################################################
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
# Import OpenCV for easy image rendering
import cv2
# Import argparse for command-line options
import argparse
# Import os.path for file path manipulation
import os.path
# Create object for parsing command-line options
parser = argparse.ArgumentParser(description="Read recorded bag file and display depth stream in jet colormap. "
                                             "Remember to change the stream fps and format to match the recorded.")
# Add argument which takes path to a bag file as an input
parser.add_argument("-i", "--input", type=str, help="Path to the bag file")
# Parse the command line arguments to an object
args = parser.parse_args()
# Safety check in case no parameter has been given
if not args.input:
    print("No input parameter has been given.")
    print("For help type --help")
    exit()
# Check if the given file has the .bag extension
if os.path.splitext(args.input)[1] != ".bag":
    print("The given file is not of the correct file format.")
    print("Only .bag files are accepted")
    exit()
try:
    # Create pipeline
    pipeline = rs.pipeline()

    # Create a config object
    config = rs.config()

    # Tell config that we will use a recorded device from file to be used by the pipeline through playback.
    rs.config.enable_device_from_file(config, args.input)

    # Configure the pipeline to stream the depth stream
    # Change these parameters according to the recorded bag file resolution
    config.enable_stream(rs.stream.depth, rs.format.z16, 30)

    # Start streaming from file
    pipeline.start(config)

    # Create opencv window to render image in
    cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)

    # Create colorizer object
    colorizer = rs.colorizer()

    # Streaming loop
    while True:
        # Get frameset of depth
        frames = pipeline.wait_for_frames()

        # Get depth frame
        depth_frame = frames.get_depth_frame()

        # Colorize depth frame to jet colormap
        depth_color_frame = colorizer.colorize(depth_frame)

        # Convert depth_frame to numpy array to render image in opencv
        depth_color_image = np.asanyarray(depth_color_frame.get_data())

        # Render image in opencv window
        cv2.imshow("Depth Stream", depth_color_image)
        key = cv2.waitKey(1)

        # If escape is pressed, exit the program
        if key == 27:
            cv2.destroyAllWindows()
            break

finally:
    pass
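
# ---------------------------------------------------------------------------
# Usage sketch (assumption: the script is saved as read_bag_example.py and a
# recording exists at ./recording.bag -- both names are placeholders; record a
# bag file with the Intel RealSense Viewer first if you do not have one):
#
#   python read_bag_example.py -i recording.bag
#
# A "Depth Stream" window opens and renders the colorized depth frames played
# back from the file in a loop; press Esc in the window to close it and exit.
# ---------------------------------------------------------------------------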