diff --git a/PythonClient/airsim/pfm.py b/PythonClient/airsim/pfm.py index d3acbd8912..6f9f963a8a 100644 --- a/PythonClient/airsim/pfm.py +++ b/PythonClient/airsim/pfm.py @@ -49,7 +49,6 @@ def read_pfm(file): data = np.reshape(data, shape) # DEY: I don't know why this was there. - #data = np.flipud(data) file.close() return data, scale @@ -64,8 +63,6 @@ def write_pfm(file, image, scale=1): if image.dtype.name != 'float32': raise Exception('Image dtype must be float32.') - image = np.flipud(image) - if len(image.shape) == 3 and image.shape[2] == 3: # color image color = True elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale diff --git a/PythonClient/airsim/utils.py b/PythonClient/airsim/utils.py index 8e8533e640..cd56292417 100644 --- a/PythonClient/airsim/utils.py +++ b/PythonClient/airsim/utils.py @@ -157,7 +157,6 @@ def read_pfm(file): data = np.reshape(data, shape) # DEY: I don't know why this was there. - #data = np.flipud(data) file.close() return data, scale @@ -172,11 +171,9 @@ def write_pfm(file, image, scale=1): if image.dtype.name != 'float32': raise Exception('Image dtype must be float32.') - image = np.flipud(image) - if len(image.shape) == 3 and image.shape[2] == 3: # color image color = True - elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale + elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # grayscale color = False else: raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.') @@ -206,9 +203,9 @@ def write_png(filename, image): height = image.shape[0] # reverse the vertical line order and add null bytes at the start - width_byte_4 = width * 4 - raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4] - for span in range((height - 1) * width_byte_4, -1, - width_byte_4)) + width_byte_3 = width * 3 + raw_data = b''.join(b'\x00' + buf[span:span + width_byte_3] + for span in range((height - 1) * width_byte_3, -1, - 
width_byte_3)) def png_pack(png_tag, data): chunk_head = png_tag + data diff --git a/PythonClient/car/drive_straight.py b/PythonClient/car/drive_straight.py index 9c583f96c4..6783ef8038 100644 --- a/PythonClient/car/drive_straight.py +++ b/PythonClient/car/drive_straight.py @@ -28,9 +28,8 @@ def get_image(): image = client.simGetImages([airsim.ImageRequest("0", airsim.ImageType.Scene, False, False)])[0] image1d = np.fromstring(image.image_data_uint8, dtype=np.uint8) - image_rgba = image1d.reshape(image.height, image.width, 4) - image_rgba = np.flipud(image_rgba) - return image_rgba[:, :, 0:3] + image_rgb = image1d.reshape(image.height, image.width, 3) + return image_rgb while (True): car_state = client.getCarState() @@ -42,7 +41,6 @@ def get_image(): else: car_controls.throttle = 0.0 - #image_buf[0] = get_image() #state_buf[0] = np.array([car_controls.steering, car_controls.throttle, car_controls.brake, car_state.speed]) #model_output = model.predict([image_buf, state_buf]) #car_controls.steering = float(model_output[0][0]) @@ -50,5 +48,4 @@ def get_image(): print('Sending steering = {0}, throttle = {1}'.format(car_controls.steering, car_controls.throttle)) - client.setCarControls(car_controls) - + client.setCarControls(car_controls) \ No newline at end of file diff --git a/PythonClient/car/hello_car.py b/PythonClient/car/hello_car.py index 9e11c37939..a1402b1f5d 100644 --- a/PythonClient/car/hello_car.py +++ b/PythonClient/car/hello_car.py @@ -1,9 +1,9 @@ -import setup_path import airsim - -import time -import os +import cv2 import numpy as np +import os +import setup_path +import time # connect to the AirSim simulator client = airsim.CarClient() @@ -53,7 +53,7 @@ airsim.ImageRequest("0", airsim.ImageType.DepthVis), #depth visualization image airsim.ImageRequest("1", airsim.ImageType.DepthPerspective, True), #depth in perspective projection airsim.ImageRequest("1", airsim.ImageType.Scene), #scene vision image in png format - airsim.ImageRequest("1", 
airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGB array print('Retrieved images: %d', len(responses)) for response in responses: @@ -68,12 +68,9 @@ airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8) else: #uncompressed array print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) - img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is flipped vertically - img_rgba[:,:,1:2] = 100 #just for fun add little bit of green in all pixels - airsim.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) #write to png - + img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # get numpy array + img_rgb = img1d.reshape(response.height, response.width, 3) # reshape array to 3 channel image array H X W X 3 + cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png #restore to original state client.reset() diff --git a/PythonClient/car/legacy_hello_car.py b/PythonClient/car/legacy_hello_car.py index da3f13ab21..1c37bd1b5f 100644 --- a/PythonClient/car/legacy_hello_car.py +++ b/PythonClient/car/legacy_hello_car.py @@ -40,7 +40,7 @@ ImageRequest(0, airsim.AirSimImageType.DepthVis), #depth visualiztion image ImageRequest(1, airsim.AirSimImageType.DepthPerspective, True), #depth in perspective projection ImageRequest(1, airsim.AirSimImageType.Scene), #scene vision image in png format - ImageRequest(1, airsim.AirSimImageType.Scene, False, False)]) #scene vision image in uncompressed RGBA array + ImageRequest(1, airsim.AirSimImageType.Scene, False, False)]) #scene vision image in uncompressed RGB array print('Retrieved images: %d' % len(responses)) tmp_dir = 
os.path.join(tempfile.gettempdir(), "airsim_drone") @@ -64,10 +64,8 @@ else: #uncompressed array print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is fliped vertically - img_rgba[:,:,1:2] = 100 #just for fun add little bit of green in all pixels - AirSimClientBase.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) #write to png + img_rgb = img1d.reshape(response.height, response.width, 3) #reshape array to 3 channel image array H X W X 3 + AirSimClientBase.write_png(os.path.normpath(filename + '.png'), img_rgb) #write to png AirSimClientBase.wait_key('Press any key to reset to original state') diff --git a/PythonClient/car/multi_agent_car.py b/PythonClient/car/multi_agent_car.py index aa355c0425..d696d32dc1 100644 --- a/PythonClient/car/multi_agent_car.py +++ b/PythonClient/car/multi_agent_car.py @@ -1,9 +1,9 @@ -import setup_path import airsim - -import time -import os +import cv2 import numpy as np +import os +import setup_path +import time # Use below in settings.json with blocks environment """ @@ -91,11 +91,11 @@ # get camera images from the car responses1 = client.simGetImages([ airsim.ImageRequest("0", airsim.ImageType.DepthVis), #depth visualization image - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], "Car1") #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], "Car1") #scene vision image in uncompressed RGB array print('Car1: Retrieved images: %d' % (len(responses1))) responses2 = client.simGetImages([ airsim.ImageRequest("0", airsim.ImageType.Segmentation), #depth visualization image - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], "Car2") #scene vision image in 
uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], "Car2") #scene vision image in uncompressed RGB array print('Car2: Retrieved images: %d' % (len(responses2))) for response in responses1 + responses2: @@ -109,12 +109,9 @@ airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8) else: #uncompressed array print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) - img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is flipped vertically - img_rgba[:,:,1:2] = 100 #just for fun add little bit of green in all pixels - airsim.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) #write to png - + img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # get numpy array + img_rgb = img1d.reshape(response.height, response.width, 3) # reshape array to 3 channel image array H X W X 3 + cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png #restore to original state client.reset() diff --git a/PythonClient/computer_vision/segmentation.py b/PythonClient/computer_vision/segmentation.py index de2eee1d99..520dfa0270 100644 --- a/PythonClient/computer_vision/segmentation.py +++ b/PythonClient/computer_vision/segmentation.py @@ -1,10 +1,10 @@ # In settings.json first activate computer vision mode: # https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode -import setup_path import airsim - +import cv2 import numpy as np +import setup_path client = airsim.VehicleClient() client.confirmConnection() @@ -54,20 +54,10 @@ else: #uncompressed array - numpy demo print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = 
img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is flipped vertically - #airsim.write_png(os.path.normpath(filename + '.numpy.png'), img_rgba) #write to png + img_rgb = img1d.reshape(response.height, response.width, 3) #reshape array to 3 channel image array H X W X 3 + # cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png #find unique colors - print(np.unique(img_rgba[:,:,0], return_counts=True)) #red - print(np.unique(img_rgba[:,:,1], return_counts=True)) #green - print(np.unique(img_rgba[:,:,2], return_counts=True)) #blue - print(np.unique(img_rgba[:,:,3], return_counts=True)) #blue - - - - - - - - + print(np.unique(img_rgb[:,:,0], return_counts=True)) #red + print(np.unique(img_rgb[:,:,1], return_counts=True)) #green + print(np.unique(img_rgb[:,:,2], return_counts=True)) #blue \ No newline at end of file diff --git a/PythonClient/imitation_learning/drive_model.py b/PythonClient/imitation_learning/drive_model.py index 41fb4ac722..25a67f780f 100644 --- a/PythonClient/imitation_learning/drive_model.py +++ b/PythonClient/imitation_learning/drive_model.py @@ -39,8 +39,8 @@ def get_image(): """ image_response = client.simGetImages([airsim.ImageRequest("0", airsim.ImageType.Scene, False, False)])[0] image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8) - image_rgba = image1d.reshape(image_response.height, image_response.width, 4) - return image_rgba[78:144,27:227,0:3].astype(float) + image_rgb = image1d.reshape(image_response.height, image_response.width, 3) + return image_rgb[78:144,27:227,0:3].astype(float) while True: # Update throttle value according to steering angle diff --git a/PythonClient/multirotor/hello_drone.py b/PythonClient/multirotor/hello_drone.py index 4ea656ba41..dec37a25d2 100644 --- a/PythonClient/multirotor/hello_drone.py +++ b/PythonClient/multirotor/hello_drone.py @@ -5,6 +5,7 @@ import os import tempfile 
import pprint +import cv2 # connect to the AirSim simulator client = airsim.MultirotorClient() @@ -59,11 +60,9 @@ airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8) else: #uncompressed array print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) - img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is flipped vertically - img_rgba[:,:,1:2] = 100 #just for fun add little bit of green in all pixels - airsim.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) #write to png + img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # get numpy array + img_rgb = img1d.reshape(response.height, response.width, 3) # reshape array to 3 channel image array H X W X 3 + cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png airsim.wait_key('Press any key to reset to original state') diff --git a/PythonClient/multirotor/kinect_publisher.py b/PythonClient/multirotor/kinect_publisher.py index f334bcec1c..18da0d15b6 100644 --- a/PythonClient/multirotor/kinect_publisher.py +++ b/PythonClient/multirotor/kinect_publisher.py @@ -40,8 +40,8 @@ def getDepthImage(self,response_d): def getRGBImage(self,response_rgb): img1d = np.fromstring(response_rgb.image_data_uint8, dtype=np.uint8) - img_rgba = img1d.reshape(response_rgb.height, response_rgb.width, 4) - img_rgb = img_rgba[..., :3][..., ::-1] + img_rgb = img1d.reshape(response_rgb.height, response_rgb.width, 3) + img_rgb = img_rgb[..., :3][..., ::-1] return img_rgb def enhanceRGB(self,img_rgb): diff --git a/PythonClient/multirotor/multi_agent_drone.py b/PythonClient/multirotor/multi_agent_drone.py index 4df97dd577..bfd044c3f2 100644 --- a/PythonClient/multirotor/multi_agent_drone.py +++ b/PythonClient/multirotor/multi_agent_drone.py @@ -1,10 +1,10 @@ 
-import setup_path import airsim - +import cv2 import numpy as np import os -import tempfile import pprint +import setup_path +import tempfile # Use below in settings.json with Blocks environment """ @@ -59,11 +59,11 @@ # get camera images from the car responses1 = client.simGetImages([ airsim.ImageRequest("0", airsim.ImageType.DepthVis), #depth visualization image - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], vehicle_name="Drone1") #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], vehicle_name="Drone1") #scene vision image in uncompressed RGB array print('Drone1: Retrieved images: %d' % len(responses1)) responses2 = client.simGetImages([ airsim.ImageRequest("0", airsim.ImageType.DepthVis), #depth visualization image - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], vehicle_name="Drone2") #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)], vehicle_name="Drone2") #scene vision image in uncompressed RGB array print('Drone2: Retrieved images: %d' % len(responses2)) tmp_dir = os.path.join(tempfile.gettempdir(), "airsim_drone") @@ -87,10 +87,8 @@ else: #uncompressed array print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8))) img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array - img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 - img_rgba = np.flipud(img_rgba) #original image is flipped vertically - img_rgba[:,:,1:2] = 100 #just for fun add little bit of green in all pixels - airsim.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) #write to png + img_rgb = img1d.reshape(response.height, response.width, 3) #reshape array to 3 channel image array H X W X 3 + cv2.imwrite(os.path.normpath(filename + '.png'), img_rgb) # write to png airsim.wait_key('Press any key to reset to 
original state') diff --git a/PythonClient/ros/car_image_raw.py b/PythonClient/ros/car_image_raw.py index 973acf303b..4ffe5bece3 100644 --- a/PythonClient/ros/car_image_raw.py +++ b/PythonClient/ros/car_image_raw.py @@ -22,21 +22,21 @@ def airpub(): while not rospy.is_shutdown(): # get camera images from the car responses = client.simGetImages([ - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGB array for response in responses: - img_rgba_string = response.image_data_uint8 + img_rgb_string = response.image_data_uint8 # Populate image message msg=Image() msg.header.stamp = rospy.Time.now() msg.header.frame_id = "frameId" - msg.encoding = "rgba8" + msg.encoding = "rgb8" msg.height = 360 # resolution should match values in settings.json msg.width = 640 - msg.data = img_rgba_string + msg.data = img_rgb_string msg.is_bigendian = 0 - msg.step = msg.width * 4 + msg.step = msg.width * 3 # log time and size of published image rospy.loginfo(len(response.image_data_uint8)) diff --git a/PythonClient/ros/drone_image_raw.py b/PythonClient/ros/drone_image_raw.py index f7df87914b..b41ba6248c 100644 --- a/PythonClient/ros/drone_image_raw.py +++ b/PythonClient/ros/drone_image_raw.py @@ -23,21 +23,21 @@ def airpub(): while not rospy.is_shutdown(): # get camera images from the car responses = client.simGetImages([ - airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGBA array + airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)]) #scene vision image in uncompressed RGB array for response in responses: - img_rgba_string = response.image_data_uint8 + img_rgb_string = response.image_data_uint8 # Populate image message msg=Image() msg.header.stamp = rospy.Time.now() msg.header.frame_id = "frameId" - msg.encoding = "rgba8" + msg.encoding = "rgb8" msg.height = 
360 # resolution should match values in settings.json msg.width = 640 - msg.data = img_rgba_string + msg.data = img_rgb_string msg.is_bigendian = 0 - msg.step = msg.width * 4 + msg.step = msg.width * 3 # log time and size of published image rospy.loginfo(len(response.image_data_uint8)) diff --git a/Unreal/Plugins/AirSim/Source/PIPCamera.cpp b/Unreal/Plugins/AirSim/Source/PIPCamera.cpp index 250ec0e640..ff4a9a66b4 100644 --- a/Unreal/Plugins/AirSim/Source/PIPCamera.cpp +++ b/Unreal/Plugins/AirSim/Source/PIPCamera.cpp @@ -24,6 +24,15 @@ APIPCamera::APIPCamera() "", LogDebugLevel::Failure); PrimaryActorTick.bCanEverTick = true; + + image_type_to_pixel_format_map_.Add(0, EPixelFormat::PF_B8G8R8A8); + image_type_to_pixel_format_map_.Add(1, EPixelFormat::PF_DepthStencil); // not used. init_auto_format is called in setupCameraFromSettings() + image_type_to_pixel_format_map_.Add(2, EPixelFormat::PF_DepthStencil); // not used for same reason as above + image_type_to_pixel_format_map_.Add(3, EPixelFormat::PF_DepthStencil); // not used for same reason as above + image_type_to_pixel_format_map_.Add(4, EPixelFormat::PF_DepthStencil); // not used for same reason as above + image_type_to_pixel_format_map_.Add(5, EPixelFormat::PF_B8G8R8A8); + image_type_to_pixel_format_map_.Add(6, EPixelFormat::PF_B8G8R8A8); + image_type_to_pixel_format_map_.Add(7, EPixelFormat::PF_B8G8R8A8); } void APIPCamera::PostInitializeComponents() @@ -260,8 +269,12 @@ void APIPCamera::setupCameraFromSettings(const APIPCamera::CameraSetting& camera const auto& noise_setting = camera_setting.noise_settings.at(image_type); if (image_type >= 0) { //scene capture components - updateCaptureComponentSetting(captures_[image_type], render_targets_[image_type], - capture_setting, ned_transform); + if (image_type==0 || image_type==5 || image_type==6 || image_type==7) + updateCaptureComponentSetting(captures_[image_type], render_targets_[image_type], false, + image_type_to_pixel_format_map_[image_type], 
capture_setting, ned_transform); + else + updateCaptureComponentSetting(captures_[image_type], render_targets_[image_type], true, + image_type_to_pixel_format_map_[image_type], capture_setting, ned_transform); setNoiseMaterial(image_type, captures_[image_type], captures_[image_type]->PostProcessSettings, noise_setting); } @@ -274,9 +287,17 @@ void APIPCamera::setupCameraFromSettings(const APIPCamera::CameraSetting& camera } void APIPCamera::updateCaptureComponentSetting(USceneCaptureComponent2D* capture, UTextureRenderTarget2D* render_target, - const CaptureSetting& setting, const NedTransform& ned_transform) + bool auto_format, const EPixelFormat& pixel_format, const CaptureSetting& setting, const NedTransform& ned_transform) { - render_target->InitAutoFormat(setting.width, setting.height); //256 X 144, X 480 + if (auto_format) + { + render_target->InitAutoFormat(setting.width, setting.height); //256 X 144, X 480 + } + else + { + render_target->InitCustomFormat(setting.width, setting.height, pixel_format, false); + } + if (!std::isnan(setting.target_gamma)) render_target->TargetGamma = setting.target_gamma; diff --git a/Unreal/Plugins/AirSim/Source/PIPCamera.h b/Unreal/Plugins/AirSim/Source/PIPCamera.h index 813134a8f3..27e7d29eef 100644 --- a/Unreal/Plugins/AirSim/Source/PIPCamera.h +++ b/Unreal/Plugins/AirSim/Source/PIPCamera.h @@ -4,7 +4,7 @@ #include "Components/SceneCaptureComponent2D.h" #include "Camera/CameraActor.h" #include "Materials/Material.h" - +#include "Runtime/Core/Public/PixelFormat.h" #include "common/ImageCaptureBase.hpp" #include "common/common_utils/Utils.hpp" #include "common/AirSimSettings.hpp" @@ -65,6 +65,7 @@ class AIRSIM_API APIPCamera : public ACameraActor FRotator gimbald_rotator_; float gimbal_stabilization_; const NedTransform* ned_transform_; + TMap image_type_to_pixel_format_map_; private: //methods typedef common_utils::Utils Utils; @@ -73,8 +74,8 @@ class AIRSIM_API APIPCamera : public ACameraActor static unsigned int 
imageTypeCount(); void enableCaptureComponent(const ImageType type, bool is_enabled); - static void updateCaptureComponentSetting(USceneCaptureComponent2D* capture, UTextureRenderTarget2D* render_target, const CaptureSetting& setting, - const NedTransform& ned_transform); + static void updateCaptureComponentSetting(USceneCaptureComponent2D* capture, UTextureRenderTarget2D* render_target, + bool auto_format, const EPixelFormat& pixel_format, const CaptureSetting& setting, const NedTransform& ned_transform); void setNoiseMaterial(int image_type, UObject* outer, FPostProcessSettings& obj, const NoiseSetting& settings); static void updateCameraPostProcessingSetting(FPostProcessSettings& obj, const CaptureSetting& setting); static void updateCameraSetting(UCameraComponent* camera, const CaptureSetting& setting, const NedTransform& ned_transform); diff --git a/Unreal/Plugins/AirSim/Source/RenderRequest.cpp b/Unreal/Plugins/AirSim/Source/RenderRequest.cpp index afeec3bcca..3b6017a346 100644 --- a/Unreal/Plugins/AirSim/Source/RenderRequest.cpp +++ b/Unreal/Plugins/AirSim/Source/RenderRequest.cpp @@ -106,16 +106,15 @@ void RenderRequest::getScreenshot(std::shared_ptr params[], std::v for (unsigned int i = 0; i < req_size; ++i) { if (!params[i]->pixels_as_float) { if (results[i]->width != 0 && results[i]->height != 0) { - results[i]->image_data_uint8.SetNumUninitialized(results[i]->width * results[i]->height * 4, false); + results[i]->image_data_uint8.SetNumUninitialized(results[i]->width * results[i]->height * 3, false); if (params[i]->compress) UAirBlueprintLib::CompressImageArray(results[i]->width, results[i]->height, results[i]->bmp, results[i]->image_data_uint8); else { uint8* ptr = results[i]->image_data_uint8.GetData(); for (const auto& item : results[i]->bmp) { - *ptr++ = item.R; - *ptr++ = item.G; *ptr++ = item.B; - *ptr++ = item.A; + *ptr++ = item.G; + *ptr++ = item.R; } } } diff --git a/docs/image_apis.md b/docs/image_apis.md index bdf559aa0e..b4d87a8ee5 100644 
--- a/docs/image_apis.md +++ b/docs/image_apis.md @@ -51,7 +51,7 @@ client = airsim.MultirotorClient() responses = client.simGetImages([ # png format airsim.ImageRequest(0, airsim.ImageType.Scene), - # uncompressed RGBA array bytes + # uncompressed RGB array bytes airsim.ImageRequest(1, airsim.ImageType.Scene, False, False), # floating point uncompressed image airsim.ImageRequest(1, airsim.ImageType.DepthPlanner, True)]) @@ -61,7 +61,7 @@ responses = client.simGetImages([ #### Using AirSim Images with NumPy -If you plan to use numpy for image manipulation, you should get uncompressed RGBA image and then convert to numpy like this: +If you plan to use numpy for image manipulation, you should get uncompressed RGB image and then convert to numpy like this: ```python responses = client.simGetImages([ImageRequest("0", airsim.ImageType.Scene, False, False)]) @@ -71,16 +71,13 @@ response = responses[0] img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) # reshape array to 4 channel image array H X W X 4 -img_rgba = img1d.reshape(response.height, response.width, 4) +img_rgb = img1d.reshape(response.height, response.width, 3) # original image is fliped vertically -img_rgba = np.flipud(img_rgba) - -# just for fun add little bit of green in all pixels -img_rgba[:,:,1:2] = 100 +img_rgb = np.flipud(img_rgb) # write to png -airsim.write_png(os.path.normpath(filename + '.greener.png'), img_rgba) +airsim.write_png(os.path.normpath(filename + '.png'), img_rgb) ``` #### Quick Tips @@ -112,7 +109,7 @@ int getStereoAndDepthImages() vector request = { //png format ImageRequest("0", ImageType::Scene), - //uncompressed RGBA array bytes + //uncompressed RGB array bytes ImageRequest("1", ImageType::Scene, false, false), //floating point uncompressed image ImageRequest("1", ImageType::DepthPlanner, true) @@ -246,13 +243,13 @@ It is recommended that you request uncompressed image using this API to ensure y ```python responses = client.simGetImages([ImageRequest(0, 
AirSimImageType.Segmentation, False, False)]) img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8) #get numpy array -img_rgba = img1d.reshape(response.height, response.width, 4) #reshape array to 4 channel image array H X W X 4 -img_rgba = np.flipud(img_rgba) #original image is fliped vertically +img_rgb = img1d.reshape(response.height, response.width, 3) #reshape array to 3 channel image array H X W X 3 +img_rgb = np.flipud(img_rgb) #original image is flipped vertically #find unique colors -print(np.unique(img_rgba[:,:,0], return_counts=True)) #red -print(np.unique(img_rgba[:,:,1], return_counts=True)) #green -print(np.unique(img_rgba[:,:,2], return_counts=True)) #blue +print(np.unique(img_rgb[:,:,0], return_counts=True)) #red +print(np.unique(img_rgb[:,:,1], return_counts=True)) #green +print(np.unique(img_rgb[:,:,2], return_counts=True)) #blue ``` A complete ready-to-run example can be found in [segmentation.py](https://github.com/Microsoft/AirSim/tree/master/PythonClient//computer_vision/segmentation.py).