From a10905f461eaad9ba7ee29701c449834fec6c01f Mon Sep 17 00:00:00 2001 From: Chao Li Date: Sun, 5 May 2019 17:53:51 +0800 Subject: [PATCH 01/63] fix README and config issues Signed-off-by: Chao Li --- doc/BINARY_VERSION_README.md | 4 ++-- script/environment_setup.sh | 3 +-- script/environment_setup_binary.sh | 3 +-- vino_launch/param/pipeline_object_oss.yaml | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/doc/BINARY_VERSION_README.md b/doc/BINARY_VERSION_README.md index f4d731c8..63b11f76 100644 --- a/doc/BINARY_VERSION_README.md +++ b/doc/BINARY_VERSION_README.md @@ -177,7 +177,7 @@ sudo ln -s ~/catkin_ws/src/ros_openvino_toolkit /opt/openvino_toolkit/ros_openvi ```bash roslaunch vino_launch pipeline_image.launch ``` -* run object detection sample code input from RealSenseCamera.(connect Intel® Neural Compute Stick 2) +* run object detection sample code input from RealSenseCamera. ```bash roslaunch vino_launch pipeline_object.launch ``` @@ -185,7 +185,7 @@ sudo ln -s ~/catkin_ws/src/ros_openvino_toolkit /opt/openvino_toolkit/ros_openvi ```bash roslaunch vino_launch pipeline_object_topic.launch ``` -* run object segmentation sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) +* run object segmentation sample code input from RealSenseCameraTopic. ```bash roslaunch vino_launch pipeline_segmentation.launch ``` diff --git a/script/environment_setup.sh b/script/environment_setup.sh index bcd0808a..903e7fb0 100755 --- a/script/environment_setup.sh +++ b/script/environment_setup.sh @@ -63,14 +63,13 @@ echo "Set OTHER_DEPENDENCY to $OTHER_DEPENDENCY" # Clean Existing Directories if [ "$CLEAN" == "1" ]; then read -n1 -p "The clean operation will delete some manually created directories, - including ~/code, ~/catkin_ws, /opt/intel, /opt/openvino_toolkit, and OpenVINO tar ball. + including ~/code, /opt/intel, /opt/openvino_toolkit, and OpenVINO tar ball. Do you want to clean existing directories[Y/N]?" 
answer case $answer in Y|y) echo echo "===================Cleaning...====================================" echo $ROOT_PASSWD | sudo -S rm -rf ~/code echo $ROOT_PASSWD | sudo -S rm -rf /opt/intel - rm -rf ~/catkin_ws echo $ROOT_PASSWD | sudo -S rm -rf /opt/openvino_toolkit if [[ $system_ver = "16.04" && -L "/usr/lib/x86_64-linux-gnu/libboost_python3.so" ]]; then echo $ROOT_PASSWD | sudo -S rm /usr/lib/x86_64-linux-gnu/libboost_python3.so diff --git a/script/environment_setup_binary.sh b/script/environment_setup_binary.sh index d5d6e2cb..32d54e48 100755 --- a/script/environment_setup_binary.sh +++ b/script/environment_setup_binary.sh @@ -59,13 +59,12 @@ if [ "$CLEAN" == "1" ]; then echo "===================Clean Existing Directories...====================================" read -n1 -p "The clean operation will delete some manually created directories, - including ~/code, ~/catkin_ws, /opt/intel, /opt/openvino_toolkit, and OpenVINO tar ball. + including ~/code, /opt/intel, /opt/openvino_toolkit, and OpenVINO tar ball. Do you want to clean existing directories[Y/N]?" 
answer case $answer in Y|y) echo echo "===================Cleaning...====================================" echo $ROOT_PASSWD | sudo -S rm -rf ~/code - rm -rf ~/catkin_ws echo $ROOT_PASSWD | sudo -S rm -rf /opt/intel echo $ROOT_PASSWD | sudo -S rm -rf /opt/openvino_toolkit if [[ $system_ver = "16.04" && -L "/usr/lib/x86_64-linux-gnu/libboost_python3.so" ]]; then diff --git a/vino_launch/param/pipeline_object_oss.yaml b/vino_launch/param/pipeline_object_oss.yaml index 4fe4ca7a..9b656fc1 100644 --- a/vino_launch/param/pipeline_object_oss.yaml +++ b/vino_launch/param/pipeline_object_oss.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 16 From 0e0cdeabda863e2ec73b894f3e83520ff5baff7b Mon Sep 17 00:00:00 2001 From: RachelRen05 Date: Wed, 8 May 2019 19:27:38 +0800 Subject: [PATCH 02/63] support multi-pipeline from different input & a visualization tool in viewer folder --- dynamic_vino_lib/CMakeLists.txt | 2 + .../inputs/realsense_camera.h | 1 + .../dynamic_vino_lib/inputs/standard_camera.h | 1 + .../outputs/ros_service_output.h | 3 +- .../outputs/ros_topic_output.h | 4 +- .../dynamic_vino_lib/outputs/rviz_output.h | 2 +- .../include/dynamic_vino_lib/pipeline.h | 5 +- .../dynamic_vino_lib/pipeline_manager.h | 19 ++- .../services/frame_processing_server.h | 24 ++-- .../services/pipeline_processing_server.h | 74 +++++++++++ .../src/inputs/realsense_camera.cpp | 16 +++ .../src/inputs/standard_camera.cpp | 12 +- .../src/outputs/ros_service_output.cpp | 2 - .../src/outputs/ros_topic_output.cpp | 18 +-- dynamic_vino_lib/src/outputs/rviz_output.cpp | 7 +- dynamic_vino_lib/src/pipeline_manager.cpp | 31 +++-- 
.../src/services/frame_processing_server.cpp | 15 ++- .../services/pipeline_processing_server.cpp | 115 ++++++++++++++++ dynamic_vino_lib/src/services/test.cpp | 0 pipeline_srv_msgs/CMakeLists.txt | 88 ++++++++++++ pipeline_srv_msgs/msg/Pipeline.msg | 16 +++ pipeline_srv_msgs/msg/PipelineRequest.msg | 17 +++ pipeline_srv_msgs/msg/Pipelines.msg | 18 +++ pipeline_srv_msgs/package.xml | 39 ++++++ pipeline_srv_msgs/srv/PipelineSrv.srv | 17 +++ sample/CMakeLists.txt | 18 +++ sample/src/pipeline_service_client.cpp | 118 +++++++++++++++++ script/viewer/pipeTree.py | 125 ++++++++++++++++++ script/viewer/service.py | 59 +++++++++ script/viewer/viewer.py | 115 ++++++++++++++++ .../launch/multi_pipeline_service.launch | 11 ++ .../param/multi_pipleine_service_object.yaml | 46 +++++++ vino_param_lib/package.xml | 2 +- 33 files changed, 982 insertions(+), 58 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h create mode 100644 dynamic_vino_lib/src/services/pipeline_processing_server.cpp create mode 100644 dynamic_vino_lib/src/services/test.cpp create mode 100644 pipeline_srv_msgs/CMakeLists.txt create mode 100644 pipeline_srv_msgs/msg/Pipeline.msg create mode 100644 pipeline_srv_msgs/msg/PipelineRequest.msg create mode 100644 pipeline_srv_msgs/msg/Pipelines.msg create mode 100644 pipeline_srv_msgs/package.xml create mode 100644 pipeline_srv_msgs/srv/PipelineSrv.srv create mode 100644 sample/src/pipeline_service_client.cpp create mode 100644 script/viewer/pipeTree.py create mode 100644 script/viewer/service.py create mode 100644 script/viewer/viewer.py create mode 100644 vino_launch/launch/multi_pipeline_service.launch create mode 100644 vino_launch/param/multi_pipleine_service_object.yaml diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 86c0679f..db0d3c7f 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -134,6 +134,8 @@ set(DEPENDENCIES realsense2 
${OpenCV_LIBS} cpu_extension) add_library(${PROJECT_NAME} SHARED src/services/frame_processing_server.cpp + src/services/pipeline_processing_server.cpp + src/services/test.cpp src/factory.cpp src/pipeline.cpp src/pipeline_params.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h index f4a87d0f..4ec9456d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h @@ -66,6 +66,7 @@ class RealSenseCamera : public BaseInputDevice rs2::config cfg_; rs2::pipeline pipe_; bool first_read_ = true; + static int rscamera_count; }; } // namespace Input diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h index 8928a741..9a43d6b1 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h @@ -61,6 +61,7 @@ class StandardCamera : public BaseInputDevice private: cv::VideoCapture cap; + static int camera_count_; }; } // namespace Input #endif // DYNAMIC_VINO_LIB_INPUTS_STANDARD_CAMERA_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index 3ce30e74..7195e6c4 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -61,7 +61,7 @@ namespace Outputs class RosServiceOutput : public RosTopicOutput { public: - RosServiceOutput() {} + RosServiceOutput(std::string pipeline_name): pipeline_name_(pipeline_name) {} /** * @brief Publish all the detected infomations generated by the accept @@ -81,6 +81,7 @@ class RosServiceOutput : public RosTopicOutput private: const std::string service_name_; + 
const std::string pipeline_name_; }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index 583c9afd..a9eeb215 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -54,7 +54,8 @@ namespace Outputs class RosTopicOutput : public BaseOutput { public: - RosTopicOutput(); + RosTopicOutput(){}; + RosTopicOutput(std::string pipeline_name); /** * @brief Calculate the camera matrix of a frame. * @param[in] A frame. @@ -112,6 +113,7 @@ class RosTopicOutput : public BaseOutput private: std_msgs::Header getHeader(); + const std::string pipeline_name_; const std::string topic_name_; cv::Mat frame_; ros::NodeHandle nh_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h index 595e1864..f301a6f3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h @@ -38,7 +38,7 @@ namespace Outputs class RvizOutput : public BaseOutput { public: - RvizOutput(); + RvizOutput(std::string); /** * @brief Construct frame for rviz * @param[in] A frame. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h index bf7394f2..1c5654c7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h @@ -126,7 +126,10 @@ class Pipeline { return input_device_; } - + const std::multimap getPipelineDetail() + { + return next_; + } /** * @brief Get real time FPS (frames per second). 
*/ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 3deb2aa9..974ac2cf 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -58,13 +58,15 @@ class PipelineManager { void runAll(); void stopAll(); void joinAll(); - + enum PipelineState { - PipelineState_ThreadNotCreated, - PipelineState_ThreadStopped, - PipelineState_ThreadRunning, - PipelineState_Error + PipelineState_ThreadNotCreated = 0, + PipelineState_ThreadStopped = 1, + PipelineState_ThreadRunning = 2, + PipelineState_ThreadPasued = 3, + PipelineState_Error = 4 }; + struct PipelineData { Params::ParamManager::PipelineParams params; std::shared_ptr pipeline; @@ -72,12 +74,15 @@ class PipelineManager { std::shared_ptr thread; PipelineState state; }; - + void runService(); std::map getPipelines() { return pipelines_; } - + std::map* getPipelinesPtr() + { + return &pipelines_; + } private: PipelineManager(){}; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h index 0284a47c..fbc4ca64 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -40,31 +41,22 @@ namespace vino_service { template -class FrameProcessingServer //: public ros::NodeHandle +class FrameProcessingServer { public: explicit FrameProcessingServer( const std::string & service_name, const std::string & config_path); - void initService(const std::string & config_path); - -//private: + void initService(); +private: + std::string service_name_; + std::string config_path_; std::shared_ptr nh_; + std::shared_ptr service_; -/* - void cbService( - std::shared_ptr 
request, - std::shared_ptr response); -*/ bool cbService(ros::ServiceEvent& event); - // rclcpp::Service::SharedPtr service_; - //ros::NodeHandle nh_; - std::shared_ptr service_; - ros::ServiceServer srv; - std::string service_name_; - std::string config_path_; - bool add3(ros::ServiceEvent &); + }; } // namespace vino_service #endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h new file mode 100644 index 00000000..e79eb63a --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h @@ -0,0 +1,74 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#ifndef DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#define DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "dynamic_vino_lib/pipeline_manager.h" + +#include +#include +#include +#include + +namespace vino_service +{ +template +class PipelineProcessingServer +{ +public: + explicit PipelineProcessingServer( + const std::string & service_name); + +private: + + void initPipelineService(); + + std::shared_ptr nh_; + + bool cbService(ros::ServiceEvent& event); + + void setResponse( + ros::ServiceEvent& event); + + void setPipelineByRequest(std::string pipeline_name, PipelineManager::PipelineState state); + + std::shared_ptr service_; + + std::map * pipelines_; + + std::string service_name_; + + + +}; +} // namespace vino_service +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/dynamic_vino_lib/src/inputs/realsense_camera.cpp index 8ebd273c..1a0cc197 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera.cpp @@ -24,6 +24,17 @@ // RealSenseCamera bool Input::RealSenseCamera::initialize() { + static int rscamera_count = 0; + // Get all devices connected + rs2::context cxt; + auto device = cxt.query_devices(); + size_t device_count = device.size(); + slog::info << "Find RealSense num:"<< device_count << slog::endl; + auto hardware = device[rscamera_count]; + auto devSerialNumber = hardware.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER); + //std::cout << "Camera " << rscamera_count << ": " << hardware.get_info(RS2_CAMERA_INFO_NAME) << std::endl; + slog::info << "RealSense Serial number : " << devSerialNumber << slog::endl; + cfg_.enable_device(devSerialNumber); cfg_.enable_stream(RS2_STREAM_COLOR, 640, 
480, RS2_FORMAT_BGR8, 30); setInitStatus(pipe_.start(cfg_)); setWidth(640); @@ -49,11 +60,14 @@ bool Input::RealSenseCamera::initialize() } first_read_ = false; } + rscamera_count ++; + return true; } bool Input::RealSenseCamera::initialize(size_t width, size_t height) { + static int rscamera_count = 0; if (3 * width != 4 * height) { slog::err << "The aspect ratio must be 4:3 when using RealSense camera" @@ -86,6 +100,8 @@ bool Input::RealSenseCamera::initialize(size_t width, size_t height) } first_read_ = false; } + rscamera_count ++; + return true; } diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index d8649144..b21025c8 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -23,30 +23,38 @@ // StandardCamera bool Input::StandardCamera::initialize() { - setInitStatus(cap.open(0)); + static int camera_count_ = 0; + setInitStatus(cap.open(camera_count_)); setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); setHeight((size_t)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); + camera_count_ ++; return isInit(); } bool Input::StandardCamera::initialize(int camera_num) { + static int camera_count_ = 0; setInitStatus(cap.open(camera_num)); setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); setHeight((size_t)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); + + camera_count_ ++; return isInit(); } bool Input::StandardCamera::initialize(size_t width, size_t height) { + static int camera_count_ = 0; setWidth(width); setHeight(height); - setInitStatus(cap.open(0)); + setInitStatus(cap.open(camera_count_)); if (isInit()) { cap.set(CV_CAP_PROP_FRAME_WIDTH, width); cap.set(CV_CAP_PROP_FRAME_HEIGHT, height); } + + camera_count_ ++; return isInit(); } diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index f05c21d7..a47ad751 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ 
b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -23,8 +23,6 @@ #include "dynamic_vino_lib/outputs/ros_service_output.h" #include "cv_bridge/cv_bridge.h" #include -// Outputs::RosServiceOutput::RosServiceOutput() - void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index 3dab5174..e726a94d 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -25,22 +25,24 @@ #include #include -Outputs::RosTopicOutput::RosTopicOutput() +Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): + pipeline_name_(pipeline_name) { pub_face_ = - nh_.advertise("/openvino_toolkit/faces", 16); + nh_.advertise( + "/openvino_toolkit/" + pipeline_name_ + "/faces", 16); pub_emotion_ = nh_.advertise( - "/openvino_toolkit/emotions", 16); + "/openvino_toolkit/" + pipeline_name_ + "/emotions", 16); pub_age_gender_ = nh_.advertise( - "/openvino_toolkit/age_genders", 16); + "/openvino_toolkit/" + pipeline_name_ + "/age_genders", 16); pub_headpose_ = nh_.advertise( - "/openvino_toolkit/headposes", 16); + "/openvino_toolkit/" + pipeline_name_ + "/headposes", 16); pub_object_ = nh_.advertise( - "/openvino_toolkit/detected_objects", 16); + "/openvino_toolkit/" + pipeline_name_ + "/detected_objects", 16); pub_person_reid_ = nh_.advertise( - "/openvino_toolkit/reidentified_persons", 16); + "/openvino_toolkit/" + pipeline_name_ + "/reidentified_persons", 16); pub_segmented_object_ = nh_.advertise( - "/openvino_toolkit/segmented_obejcts", 16); + "/openvino_toolkit/" + pipeline_name_ + "/segmented_obejcts", 16); emotions_msg_ptr_ = NULL; faces_msg_ptr_ = NULL; diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index 64f99b92..957ed9f0 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ 
b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -25,11 +25,11 @@ #include "dynamic_vino_lib/pipeline.h" #include "dynamic_vino_lib/outputs/rviz_output.h" -Outputs::RvizOutput::RvizOutput() +Outputs::RvizOutput::RvizOutput(std::string pipeline_name) { image_topic_ = nullptr; - pub_image_ = nh_.advertise("/openvino_toolkit/images", 16); - image_window_output_ = std::make_shared("WindowForRviz", 950); + pub_image_ = nh_.advertise("/openvino_toolkit/"+pipeline_name+"/images", 16); + image_window_output_ = std::make_shared(pipeline_name, 950); } void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) @@ -83,7 +83,6 @@ void Outputs::RvizOutput::handleOutput() std::make_shared(header, "bgr8", frame); sensor_msgs::Image image_msg; image_topic_ = cv_ptr->toImageMsg(); -// image_topic_ = std::make_shared(image_msg); pub_image_.publish(image_topic_); } std::shared_ptr image_topic_; diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 0e066d8f..9e00e506 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -45,7 +45,7 @@ #include "dynamic_vino_lib/pipeline.h" #include "dynamic_vino_lib/pipeline_manager.h" #include "dynamic_vino_lib/pipeline_params.h" - +#include "dynamic_vino_lib/services/pipeline_processing_server.h" std::shared_ptr PipelineManager::createPipeline( const Params::ParamManager::PipelineParams& params) { if (params.name == "") { @@ -139,13 +139,13 @@ PipelineManager::parseOutput( slog::info << "Parsing Output: " << name << slog::endl; std::shared_ptr object = nullptr; if (name == kOutputTpye_RosTopic) { - object = std::make_shared(); + object = std::make_shared(params.name); } else if (name == kOutputTpye_ImageWindow) { - object = std::make_shared("Results"); + object = std::make_shared(params.name); } else if (name == kOutputTpye_RViz) { - object = std::make_shared(); + object = std::make_shared(params.name); } else if (name == kOutputTpye_RosService) { 
- object = std::make_shared(); + object = std::make_shared(params.name); } if (object != nullptr) { outputs.insert({name, object}); @@ -327,11 +327,13 @@ PipelineManager::createPersonReidentification( void PipelineManager::threadPipeline(const char* name) { PipelineData& p = pipelines_[name]; - while (p.state == PipelineState_ThreadRunning && p.pipeline != nullptr && ros::ok()) { - for (auto& node : p.spin_nodes) { + while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { + + if(p.state != PipelineState_ThreadPasued) + { ros::spinOnce(); + p.pipeline->runOnce(); } - p.pipeline->runOnce(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); } } @@ -344,6 +346,8 @@ void PipelineManager::runAll() { it->second.thread = std::make_shared(&PipelineManager::threadPipeline, this, it->second.params.name.c_str()); } } + + } void PipelineManager::stopAll() @@ -353,9 +357,20 @@ void PipelineManager::stopAll() it->second.state = PipelineState_ThreadStopped; } } + } +void PipelineManager::runService() +{ + auto node = std::make_shared>("pipeline_service"); + ros::spin();//hold the thread waiting for pipeline service +} void PipelineManager::joinAll() { + + auto service_thread = std::make_shared(&PipelineManager::runService,this); + service_thread->join();// pipeline service + for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { if(it->second.thread != nullptr && it->second.state == PipelineState_ThreadRunning) { it->second.thread->join(); diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index baaaae67..9434a979 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -40,15 +41,14 @@ FrameProcessingServer::FrameProcessingServer( service_name_(service_name), config_path_(config_path) { - 
nh_=std::make_shared(); - initService(config_path); + nh_=std::make_shared(service_name_); + } template -void FrameProcessingServer::initService( - const std::string & config_path) +void FrameProcessingServer::initService() { - Params::ParamManager::getInstance().parse(config_path); + Params::ParamManager::getInstance().parse(config_path_); Params::ParamManager::getInstance().print(); auto pcommon = Params::ParamManager::getInstance().getCommon(); @@ -64,9 +64,10 @@ void FrameProcessingServer::initService( ros::ServiceServer srv = nh_->advertiseService >("/openvino_toolkit/service",std::bind(&FrameProcessingServer::cbService,this,std::placeholders::_1)); service_ = std::make_shared(srv); -} +} + template bool FrameProcessingServer::cbService( @@ -95,6 +96,8 @@ bool FrameProcessingServer::cbService( return false; } + + template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp new file mode 100644 index 00000000..80c378ca --- /dev/null +++ b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp @@ -0,0 +1,115 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "dynamic_vino_lib/services/pipeline_processing_server.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline_manager.h" +#include "dynamic_vino_lib/pipeline.h" +#include "dynamic_vino_lib/inputs/base_input.h" +#include "dynamic_vino_lib/inputs/image_input.h" +#include "dynamic_vino_lib/slog.h" + +namespace vino_service +{ +template +PipelineProcessingServer::PipelineProcessingServer( + const std::string & service_name): + service_name_(service_name) +{ + nh_=std::make_shared(); + pipelines_ = PipelineManager::getInstance().getPipelinesPtr(); + initPipelineService(); +} + +template +void PipelineProcessingServer::initPipelineService() +{ + ros::ServiceServer srv= nh_->advertiseService >("/openvino_toolkit/pipeline/service",std::bind(&PipelineProcessingServer::cbService,this,std::placeholders::_1)); + service_ = std::make_shared(srv); +} + +template void PipelineProcessingServer::setResponse( + ros::ServiceEvent& event) +{ + for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) { + pipeline_srv_msgs::Pipelines pipeline_msg; + pipeline_msg.name = it->first; + pipeline_msg.running_status = std::to_string(it->second.state); + + auto connection_map = it->second.pipeline->getPipelineDetail(); + for (auto& current_pipe : connection_map) + { + pipeline_srv_msgs::Pipeline connection; + connection.input = current_pipe.first.c_str(); + connection.output = current_pipe.second.c_str(); + pipeline_msg.connections.push_back(connection); + } + event.getResponse().pipelines.push_back(pipeline_msg); + } +} + +template +void PipelineProcessingServer::setPipelineByRequest(std::string pipeline_name, + PipelineManager::PipelineState state){ + for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) + { + if(pipeline_name == it->first) + { + it->second.state = state; + } + } +} + +template +bool 
PipelineProcessingServer::cbService( + ros::ServiceEvent& event) +{ + std::string req_cmd = event.getRequest().pipeline_request.cmd; + std::string req_val = event.getRequest().pipeline_request.value; + slog::info <<"[PipelineProcessingServer] Pipeline Service get request cmd: "<< req_cmd << + " val:"<< req_val<< slog::endl ; + + PipelineManager::PipelineState state; + if( req_cmd != "GET_PIPELINE")//not only get pipeline but also set pipeline by request + { + if(req_cmd == "STOP_PIPELINE") state = PipelineManager::PipelineState_ThreadStopped; + else if(req_cmd == "RUN_PIPELINE") state = PipelineManager::PipelineState_ThreadRunning; + else if(req_cmd == "PAUSE_PIPELINE") state = PipelineManager::PipelineState_ThreadPasued; + + setPipelineByRequest(req_val,state); + } + else //fill in pipeline status into service response + { + setResponse(event); + } + + return true; +} +template class PipelineProcessingServer; +} // namespace vino_service diff --git a/dynamic_vino_lib/src/services/test.cpp b/dynamic_vino_lib/src/services/test.cpp new file mode 100644 index 00000000..e69de29b diff --git a/pipeline_srv_msgs/CMakeLists.txt b/pipeline_srv_msgs/CMakeLists.txt new file mode 100644 index 00000000..f169b913 --- /dev/null +++ b/pipeline_srv_msgs/CMakeLists.txt @@ -0,0 +1,88 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 2.8.3) + +project(pipeline_srv_msgs) + +find_package(catkin REQUIRED COMPONENTS + std_msgs + sensor_msgs + geometry_msgs + message_generation +) + +add_message_files(DIRECTORY msg FILES + PipelineRequest.msg + Pipeline.msg + Pipelines.msg +) + +add_service_files(FILES + PipelineSrv.srv +) +generate_messages(DEPENDENCIES + std_msgs + sensor_msgs + geometry_msgs + object_msgs +) + +catkin_package( + CATKIN_DEPENDS + std_msgs + sensor_msgs + geometry_msgs + message_runtime +) + +# Flags +if(UNIX OR APPLE) + # Linker flags. + if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel") + # GCC specific flags. ICC is compatible with them. + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now") + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") + # In Clang, -z flags are not compatible, they need to be passed to linker via -Wl. + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now") + endif() + + # Compiler flags. + if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") + # GCC specific flags. + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector") + endif() + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") + # Clang is compatbile with some of the flags. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector") + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel" ) + # Same as above, with exception that ICC compilation crashes with -fPIE option, even + # though it uses -pie linker option that require -fPIE during compilation. 
Checksec + # shows that it generates correct PIE anyway if only -pie is provided. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector") + endif() + + # Generic flags. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + # Dot not forward c++11 flag to GPU beucause it is not supported + set( CUDA_PROPAGATE_HOST_FLAGS OFF ) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") +endif() diff --git a/pipeline_srv_msgs/msg/Pipeline.msg b/pipeline_srv_msgs/msg/Pipeline.msg new file mode 100644 index 00000000..80f63a18 --- /dev/null +++ b/pipeline_srv_msgs/msg/Pipeline.msg @@ -0,0 +1,16 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +string input # From +string output # To diff --git a/pipeline_srv_msgs/msg/PipelineRequest.msg b/pipeline_srv_msgs/msg/PipelineRequest.msg new file mode 100644 index 00000000..565ea4fc --- /dev/null +++ b/pipeline_srv_msgs/msg/PipelineRequest.msg @@ -0,0 +1,17 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +std_msgs/Header header # Header +string cmd # Name of a request action +string value # value of an action \ No newline at end of file diff --git a/pipeline_srv_msgs/msg/Pipelines.msg b/pipeline_srv_msgs/msg/Pipelines.msg new file mode 100644 index 00000000..b2928b38 --- /dev/null +++ b/pipeline_srv_msgs/msg/Pipelines.msg @@ -0,0 +1,18 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +std_msgs/Header header # Header +string name # Name of pipeline +Pipeline[] connections # connection map of a pipeline +string running_status # Pipeline running state \ No newline at end of file diff --git a/pipeline_srv_msgs/package.xml b/pipeline_srv_msgs/package.xml new file mode 100644 index 00000000..6d174e93 --- /dev/null +++ b/pipeline_srv_msgs/package.xml @@ -0,0 +1,39 @@ + + + + + + pipeline_srv_msgs + 0.3.0 + A package containing pipeline service message definitions. 
+ Yang Lu + Weizhi Liu + Chao Li + Hongkun Chen + Weizhi Liu + Weizhi Li + Apache License 2.0 + + catkin + + std_msgs + sensor_msgs + geometry_msgs + message_generation + + std_msgs + sensor_msgs + geometry_msgs + message_runtime + diff --git a/pipeline_srv_msgs/srv/PipelineSrv.srv b/pipeline_srv_msgs/srv/PipelineSrv.srv new file mode 100644 index 00000000..f3e3f952 --- /dev/null +++ b/pipeline_srv_msgs/srv/PipelineSrv.srv @@ -0,0 +1,17 @@ +# Copyright (c) 2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +PipelineRequest pipeline_request # request content of pipeline service +--- +Pipelines[] pipelines # return all pipeline status diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index f91efb5c..d5712f5f 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -272,6 +272,24 @@ target_link_libraries(image_reidentification_server ${OpenCV_LIBRARIES} ) +add_executable(pipeline_service_client + src/pipeline_service_client.cpp +) + +add_dependencies(pipeline_service_client + ${${PROJECT_NAME}_EXPORTED_TARGETS} + ${catkin_EXPORTED_TARGETS} + ${dynamic_vino_lib_TARGETS} +) + +target_link_libraries(pipeline_service_client + ${catkin_LIBRARIES} + cpu_extension + gflags + ${vino_param_lib_LIBRARIES} + ${InferenceEngine_LIBRARIES} + ${OpenCV_LIBRARIES} +) if(UNIX OR APPLE) # Linker flags. 
diff --git a/sample/src/pipeline_service_client.cpp b/sample/src/pipeline_service_client.cpp new file mode 100644 index 00000000..fb657a8b --- /dev/null +++ b/sample/src/pipeline_service_client.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** +* \brief A sample for this library. This sample performs face detection, + * emotions detection, age gender detection and head pose estimation. +* \file sample/main.cpp +*/ + +#include +#include +#include "opencv2/opencv.hpp" + +#include +#include + + +pipeline_srv_msgs::PipelineSrv getRequestMsg(std::string cmd, std::string value){ + pipeline_srv_msgs::PipelineSrv srv; + srv.request.pipeline_request.cmd = cmd; + srv.request.pipeline_request.value = value; + return srv; +} + +bool stopPipelineByName(ros::ServiceClient& client,std::string name){ + + auto srv = getRequestMsg("STOP_PIPELINE",name);//object_pipeline1 + bool result = client.call(srv) ? true : false; + return result; +} + +bool pausePipelineByName(ros::ServiceClient& client,std::string name){ + + auto srv = getRequestMsg("PAUSE_PIPELINE",name);//object_pipeline1 + bool result = client.call(srv) ? true : false; + return result; +} + +bool runPipelineByName(ros::ServiceClient& client,std::string name){ + + auto srv = getRequestMsg("RUN_PIPELINE",name);//object_pipeline1 + bool result = client.call(srv) ? 
true : false; + return result; +} + +bool request(ros::ServiceClient& client,std::string cmd_name, std::string cmd_value){ + + auto srv = getRequestMsg(cmd_name, cmd_value);//object_pipeline1 + bool result = client.call(srv) ? true : false; + return result; +} + +bool getAllPipelines(ros::ServiceClient& client, std::vector & pipelines){ + auto srv = getRequestMsg("GET_PIPELINE",""); + bool success = client.call(srv) ? true : false; + if(success) + { + for (auto it = srv.response.pipelines.begin(); it != srv.response.pipelines.end(); ++it) + { + pipelines.push_back(*it); + std::cout<name<<" status:"<running_status << std::endl; + for( auto connect = it->connections.begin(); connect!= it->connections.end(); ++connect) + { + printf("%s --> %s\n", connect->input.c_str(), + connect->output.c_str()); + } + } + } + return success; +} + + +int main(int argc, char ** argv) +{ + if (argc != 3) { + ROS_INFO("Usage: rosrun dynamic_vino_sample pipeline_service_client "); + return -1; + } + std::string cmd_name = argv[1]; + std::string cmd_value = argv[2]; + + + ros::init(argc, argv, "pipeline_service_client"); + + ros::NodeHandle n; + + ros::ServiceClient client = n.serviceClient("/openvino_toolkit/pipeline/service"); + + std::vector pipelines; + + auto success = getAllPipelines(client,pipelines) ? 
true : false; + + if(!success) { ROS_ERROR("Failed to request service."); return -1;} + + success = request(client,cmd_name, cmd_value); + if(!success) { ROS_ERROR("Failed to request service."); return -1;} + //stopPipelineByName(client,"object_pipeline1"); + + + + +} + + + diff --git a/script/viewer/pipeTree.py b/script/viewer/pipeTree.py new file mode 100644 index 00000000..d5cd4a36 --- /dev/null +++ b/script/viewer/pipeTree.py @@ -0,0 +1,125 @@ +#!/usr/bin/python + +from __future__ import unicode_literals # at top of module +from __future__ import division, print_function, with_statement +from PyQt5.QtGui import QPainter,QPen,QBrush,QColor +from PyQt5.QtCore import QRect + +class TreeNode(object): + """The basic node of tree structure""" + + def __init__(self, name, parent=None): + super(TreeNode, self).__init__() + self.name = name + self.parent = parent + self.child = {} + + def __repr__(self) : + return 'TreeNode(%s)' % self.name + + + def __contains__(self, item): + return item in self.child + + + def __len__(self): + """return number of children node""" + return len(self.child) + + def __bool__(self, item): + """always return True for exist node""" + return True + + @property + def path(self): + """return path string (from root to current node)""" + if self.parent: + return '%s %s' % (self.parent.path.strip(), self.name) + else: + return self.name + + def get_child(self, name, defval=None): + """get a child node of current node""" + return self.child.get(name, defval) + + def add_child(self, name, obj=None): + """add a child node to current node""" + if obj and not isinstance(obj, TreeNode): + raise ValueError('TreeNode only add another TreeNode obj as child') + if obj is None: + obj = TreeNode(name) + obj.parent = self + self.child[name] = obj + return obj + def del_child(self, name): + """remove a child node from current node""" + if name in self.child: + del self.child[name] + + def find_child(self, path, create=False): + """find child node by path/name, 
return None if not found""" + # convert path to a list if input is a string + path = path if isinstance(path, list) else path.split() + cur = self + obj = None + for sub in path: + # search + obj = cur.get_child(sub) + if obj is None and create: + # create new node if need + obj = cur.add_child(sub) + # check if search done + if obj is None: + break + cur = obj + return obj + def find_node(self,name): + for name,obj in self.items(): + if name == name: + return obj + return obj.find_node(name) + def items(self): + return self.child.items() + + def dump(self, indent=0): + """dump tree to string""" + tab = ' '*(indent-1) + ' |- ' if indent > 0 else '' + print('%s%s' % (tab, self.name)) + for name, obj in self.items(): + obj.dump(indent+1) + + def dump_graphics(self,convas,name,state,x,y,w,h): + rect_width = 160 + rect_height = 30 + p = QPainter() + if state == '2': + color = QColor(0, 200, 0) + + elif state == '3': + color = QColor(255,255,0) + else: + color = QColor(200, 0, 0) + draw_x = x + (rect_width + 40) + for i,(name,obj) in enumerate(self.items()): + p.begin(convas) + p.setPen(QPen()) + p.setBrush(QBrush(color)) + draw_y = y + i * (rect_height + 10) + rect = QRect(draw_x ,draw_y,rect_width,rect_height) + p.drawRect(rect) + p.drawText(draw_x+2,draw_y + rect_height/2,name) + if self.name != 'root': + p.drawLine(x+rect_width,y+rect_height/2,draw_x,draw_y+rect_height/2) + p.end() + + obj.dump_graphics(convas,name,state,draw_x,draw_y,w,h) + + + def depth(self,depth = 0): + if not self.items(): + return depth + max_depth = 0 + for name,obj in self.items(): + + max_depth = max(max_depth, obj.depth(depth+1)) + return max_depth diff --git a/script/viewer/service.py b/script/viewer/service.py new file mode 100644 index 00000000..a9a475b1 --- /dev/null +++ b/script/viewer/service.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +from pipeline_srv_msgs.srv import * +import rospy +import sys +from pipeTree import TreeNode + +def getMaping(pipeline): + map_dict = dict() + for 
pipeline in pipeline.connections: + if pipeline.input not in map_dict.keys(): + map_dict[pipeline.input] = list() + map_dict[pipeline.input].append(pipeline.output) + + return map_dict + +def getTreeByMap(parent,input,map): + if input not in map.keys(): + return parent + for output in map[input]: + child = parent.add_child(output) + getTreeByMap(child,output,map) + return parent + + +def getTree(parent,input,pipeline): + map = getMaping(pipeline) + return getTreeByMap(parent,input,map) + +def reqPipelineService(cmd, value): + rospy.wait_for_service('/openvino_toolkit/pipeline/service') + try: + + req = rospy.ServiceProxy('/openvino_toolkit/pipeline/service', PipelineSrv) + req_msg = PipelineSrvRequest() + req_msg.pipeline_request.cmd = cmd + req_msg.pipeline_request.value = value + res = req(req_msg) + return res + except rospy.ServiceException, e: + print "Service call failed: %s"%e + +def usage(): + return "%s "%sys.argv[0] + +if __name__ == "__main__": + if len(sys.argv) == 3: + cmd = str(sys.argv[1]) + value = str(sys.argv[2]) + else: + print usage() + sys.exit(1) + + print("Requesting cmd:%s with value:%s"%(cmd, value)) + response = reqPipelineService(cmd, value) + #print(response) + for pipeline in response.pipelines: + root = getTree(TreeNode(pipeline.name),'',pipeline) + root.dump() + diff --git a/script/viewer/viewer.py b/script/viewer/viewer.py new file mode 100644 index 00000000..93b71918 --- /dev/null +++ b/script/viewer/viewer.py @@ -0,0 +1,115 @@ +import sys +from PyQt5.QtWidgets import QWidget, QDesktopWidget, QApplication,QPushButton,QLabel +from PyQt5.QtGui import QPainter,QPen,QBrush,QColor +from PyQt5.QtCore import QRect +from service import reqPipelineService,getTree +from pipeline_srv_msgs.srv import * +from pipeTree import TreeNode + + + + +class PipelineWidget(object): + def __init__(self,convas,name,x,y): + self.name = name + self.convas = convas + self.name_label = QLabel(self.name,convas) + self.name_label.move(x,y) + self.run_btn = 
QPushButton('run pipeline ',convas) + self.run_btn.move(x, y+20) + self.run_btn.clicked.connect(lambda:self.onClick(self.run_btn)) + + + + self.pause_btn = QPushButton('pause pipeline',convas) + + self.pause_btn.move(x, y+50) + self.pause_btn.clicked.connect(lambda:self.onClick(self.pause_btn)) + + + self.stop_btn = QPushButton('stop pipeline ',convas) + self.stop_btn.move(x, y+80) + self.stop_btn.clicked.connect(lambda:self.onClick(self.stop_btn)) + + self.refresh() + + def onClick(self, whichbtn): + if whichbtn == self.run_btn: + reqPipelineService("RUN_PIPELINE",self.name) + elif whichbtn == self.pause_btn: + reqPipelineService("PAUSE_PIPELINE",self.name) + elif whichbtn == self.stop_btn: + reqPipelineService("STOP_PIPELINE",self.name) + self.refresh() + + def refresh(self): + response = reqPipelineService("GET_PIPELINE","") + self.convas.response = response + for id,pipeline in enumerate(response.pipelines): + if self.name == pipeline.name: + if pipeline.running_status == '1': + self.run_btn.setEnabled(False) + self.pause_btn.setEnabled(False) + self.stop_btn.setEnabled(False) + elif pipeline.running_status == '2': + self.run_btn.setEnabled(False) + self.pause_btn.setEnabled(True) + self.stop_btn.setEnabled(True) + elif pipeline.running_status == '3': + self.run_btn.setEnabled(True) + self.pause_btn.setEnabled(False) + self.stop_btn.setEnabled(True) + self.convas.update() + + + +class Window(QWidget): + + + def __init__(self,window_width=1000,window_height=800): + super(Window,self).__init__() + self.window_width = window_width + self.window_height = window_height + + self.initWindow() + + self.show() + + def initWindow(self): + #set window param + print('Waiting for pipeling service...') + self.response = reqPipelineService("GET_PIPELINE","") + self.window_height = len(self.response.pipelines) * 200 + self.resize(self.window_width,self.window_height) + self.center() + self.setWindowTitle('Pipeline Viewer') + + for id,pipeline in 
enumerate(self.response.pipelines): + PipelineWidget(self,pipeline.name,10,id*self.window_height/(len(self.response.pipelines)+1)+30) + + + def paintEvent(self,event): + response = self.response + #response = reqPipelineService("GET_PIPELINE","") + for id,pipeline in enumerate(self.response.pipelines): + pipe_root = getTree(TreeNode('root'),'',pipeline) + pipe_root.dump_graphics(self,pipeline.name,pipeline.running_status,10,id*self.window_height/(len(response.pipelines)+1) + 30,self.window_width,self.window_height) + + + + def center(self): + + + qr = self.frameGeometry() + + cp = QDesktopWidget().availableGeometry().center() + + qr.moveCenter(cp) + self.move(qr.topLeft()) + + +if __name__ == '__main__': + + app = QApplication(sys.argv) + ex = Window() + sys.exit(app.exec_()) \ No newline at end of file diff --git a/vino_launch/launch/multi_pipeline_service.launch b/vino_launch/launch/multi_pipeline_service.launch new file mode 100644 index 00000000..60e2b180 --- /dev/null +++ b/vino_launch/launch/multi_pipeline_service.launch @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/vino_launch/param/multi_pipleine_service_object.yaml b/vino_launch/param/multi_pipleine_service_object.yaml new file mode 100644 index 00000000..06d2059d --- /dev/null +++ b/vino_launch/param/multi_pipleine_service_object.yaml @@ -0,0 +1,46 @@ +Pipelines: + +- name: pipe1 + inputs: [StandardCamera] + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [ImageWindow, RosTopic, RViz] + confidence_threshold: 0.5 + connects: + - left: StandardCamera + right: [ObjectDetection] + - left: ObjectDetection + right: [ImageWindow] + - left: ObjectDetection + right: [RosTopic] + - left: ObjectDetection + right: [RViz] + + +- name: pipe2 + inputs: [RealSenseCamera] + infers: + - name: ObjectDetection + model: 
/opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [ImageWindow, RosTopic, RViz] + confidence_threshold: 0.5 + connects: + - left: RealSenseCamera + right: [ObjectDetection] + - left: ObjectDetection + right: [ImageWindow] + - left: ObjectDetection + right: [RosTopic] + - left: ObjectDetection + right: [RViz] + + + +OpenvinoCommon: diff --git a/vino_param_lib/package.xml b/vino_param_lib/package.xml index 7023ef36..5c1d6ead 100644 --- a/vino_param_lib/package.xml +++ b/vino_param_lib/package.xml @@ -19,7 +19,7 @@ limitations under the License. vino_param_lib 0.3.0 - Library for ROS2 OpenVINO parameter management + Library for ROS OpenVINO parameter management Weizhi Liu Chao Li Hongkun Chen From dc532209d5df52575c881fc1a244cea527797121 Mon Sep 17 00:00:00 2001 From: Chao Li Date: Tue, 21 May 2019 11:54:15 +0800 Subject: [PATCH 03/63] support yolov2 and more networks Signed-off-by: Chao Li --- README.md | 2 +- dynamic_vino_lib/CMakeLists.txt | 6 +- .../inferences/base_inference.h | 45 ++- .../inferences/object_detection_ssd.h | 106 ++++++ .../inferences/object_detection_yolov2_voc.h | 111 ++++++ .../dynamic_vino_lib/models/base_model.h | 27 +- ...n_model.h => object_detection_ssd_model.h} | 18 +- .../models/object_detection_yolov2voc_model.h | 53 +++ .../dynamic_vino_lib/outputs/base_output.h | 3 +- .../dynamic_vino_lib/pipeline_params.h | 2 + dynamic_vino_lib/src/factory.cpp | 21 +- .../src/inferences/base_inference.cpp | 8 +- .../src/inferences/object_detection_ssd.cpp | 173 ++++++++++ .../object_detection_yolov2_voc.cpp | 326 ++++++++++++++++++ dynamic_vino_lib/src/models/base_model.cpp | 15 +- ...del.cpp => object_detection_ssd_model.cpp} | 19 +- .../object_detection_yolov2voc_model.cpp | 110 ++++++ .../src/outputs/image_window_output.cpp | 3 - dynamic_vino_lib/src/pipeline.cpp | 11 +- 
dynamic_vino_lib/src/pipeline_manager.cpp | 30 +- sample/CMakeLists.txt | 2 +- sample/src/pipeline_with_params.cpp | 4 +- script/environment_setup.sh | 0 script/environment_setup_binary.sh | 0 vino_launch/param/pipeline_object.yaml | 3 + vino_launch/param/pipeline_object_oss.yaml | 9 +- .../include/vino_param_lib/param_manager.h | 1 + vino_param_lib/src/param_manager.cpp | 1 + 28 files changed, 1045 insertions(+), 64 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h rename dynamic_vino_lib/include/dynamic_vino_lib/models/{object_detection_model.h => object_detection_ssd_model.h} (73%) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h create mode 100644 dynamic_vino_lib/src/inferences/object_detection_ssd.cpp create mode 100644 dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp rename dynamic_vino_lib/src/models/{object_detection_model.cpp => object_detection_ssd_model.cpp} (89%) create mode 100644 dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp mode change 100755 => 100644 script/environment_setup.sh mode change 100755 => 100644 script/environment_setup_binary.sh diff --git a/README.md b/README.md index d77adfcf..e500db2e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Introduction + The OpenVINO™ (Open visual inference and neural network optimization) toolkit provides a ROS-adaptered runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. 
* Enables CNN-based deep learning inference at the edge * Supports heterogeneous execution across computer vision accelerators—CPU, GPU, Intel® Movidius™ Neural Compute Stick, and FPGA—using a common API @@ -268,4 +269,3 @@ One-step installation scripts are provided for the dependencies' installation. P # More Information * ros OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw -###### *Any security issue should be reported using process at https://01.org/security* diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index db0d3c7f..47b0877f 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -146,7 +146,8 @@ add_library(${PROJECT_NAME} SHARED src/inferences/age_gender_detection.cpp src/inferences/face_detection.cpp src/inferences/head_pose_detection.cpp - src/inferences/object_detection.cpp + src/inferences/object_detection_ssd.cpp + src/inferences/object_detection_yolov2_voc.cpp src/inferences/object_segmentation.cpp src/inferences/person_reidentification.cpp src/inputs/realsense_camera.cpp @@ -159,7 +160,8 @@ add_library(${PROJECT_NAME} SHARED src/models/age_gender_detection_model.cpp src/models/face_detection_model.cpp src/models/head_pose_detection_model.cpp - src/models/object_detection_model.cpp + src/models/object_detection_ssd_model.cpp + src/models/object_detection_yolov2voc_model.cpp src/models/object_segmentation_model.cpp src/models/person_reidentification_model.cpp src/outputs/image_window_output.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index e2b569d9..57acffeb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -28,6 +28,9 @@ #include "dynamic_vino_lib/slog.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" 
+#include "dynamic_vino_lib/models/object_detection_ssd_model.h" +#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" + namespace Outputs { @@ -45,11 +48,8 @@ void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, float scale_factor = 1.0, int batch_index = 0) { InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); - // const size_t width = blob_size[3]; const int width = blob_size[3]; - // const size_t height = blob_size[2]; const int height = blob_size[2]; - // const size_t channels = blob_size[1]; const int channels = blob_size[1]; T* blob_data = blob->buffer().as(); @@ -118,7 +118,7 @@ class BaseInference */ inline const int getEnqueuedNum() const { - return enqueued_frames; + return enqueued_frames_; } /** * @brief Enqueue a frame to this class. @@ -172,18 +172,20 @@ class BaseInference bool enqueue(const cv::Mat& frame, const cv::Rect&, float scale_factor, int batch_index, const std::string& input_name) { - if (enqueued_frames == max_batch_size_) + if (enqueued_frames_ == max_batch_size_) { slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ << ") processed by inference" << slog::endl; return false; } + InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); matU8ToBlob(frame, input_blob, scale_factor, batch_index); - enqueued_frames += 1; + enqueued_frames_ += 1; return true; } + /** * @brief Set the max batch size for one inference. 
*/ @@ -191,13 +193,40 @@ class BaseInference { max_batch_size_ = max_batch_size; } + std::shared_ptr engine_; + int enqueued_frames_ = 0; private: - std::shared_ptr engine_; int max_batch_size_ = 1; - int enqueued_frames = 0; bool results_fetched_ = false; }; + +class ObjectDetectionResult : public Result { + public: + friend class ObjectDetection; + explicit ObjectDetectionResult(const cv::Rect& location); + std::string getLabel() const { return label_; } + /** + * @brief Get the confidence that the detected area is a face. + * @return The confidence value. + */ + float getConfidence() const { return confidence_; } + bool operator<(const ObjectDetectionResult &s2) const + { + return this->confidence_ > s2.confidence_; + } + + std::string label_ = ""; + float confidence_ = -1; +}; + +class ObjectDetection : public BaseInference +{ + public: + ObjectDetection() {}; + virtual void loadNetwork(std::shared_ptr) = 0; +}; + } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB_INFERENCES_BASE_INFERENCE_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h new file mode 100644 index 00000000..b6a86c84 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * @brief A header file with declaration for ObjectDetection Class + * @file object_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H +#define DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H +#include +#include +#include +#include +#include +#include +#include "dynamic_vino_lib/models/object_detection_ssd_model.h" +#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib { +/** + * @class ObjectDetection + * @brief Class to load face detection model and perform face detection. + */ +class ObjectDetectionSSD : public ObjectDetection { + public: + using Result = dynamic_vino_lib::ObjectDetectionResult; + explicit ObjectDetectionSSD(double); + ~ObjectDetectionSSD() override; + /** + * @brief Load the face detection model. + */ + void loadNetwork(std::shared_ptr) override; + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat&, const cv::Rect&) override; + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. 
+ */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr& output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + private: + std::shared_ptr valid_model_; + std::vector results_; + int width_ = 0; + int height_ = 0; + int max_proposal_count_; + int object_size_; + double show_output_thresh_ = 0; + //int enqueued_frames_ = 0; + int max_batch_size_ = 1; + + bool matToBlob(const cv::Mat& frame, const cv::Rect&, float scale_factor, + int batch_index, const std::string& input_name); +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h new file mode 100644 index 00000000..dc5537c9 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @brief A header file with declaration for ObjectDetection Class + * @file object_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H +#define DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H +#include +#include +#include +#include +#include +#include +#include "dynamic_vino_lib/models/object_detection_ssd_model.h" +#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib { +/** + * @class ObjectDetectionYolov2voc + * @brief Class to load face detection model and perform face detection. + */ +class ObjectDetectionYolov2voc : public ObjectDetection { + public: + using Result = dynamic_vino_lib::ObjectDetectionResult; + explicit ObjectDetectionYolov2voc(double); + ~ObjectDetectionYolov2voc() override; + /** + * @brief Load the face detection model. + */ + void loadNetwork(std::shared_ptr) override; + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat&, const cv::Rect&) override; + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. 
+ * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr& output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + + private: + std::shared_ptr valid_model_; + std::vector raw_results_; + std::vector results_; + int width_ = 0; + int height_ = 0; + int max_proposal_count_; + int object_size_; + double show_output_thresh_ = 0; + + bool matToBlob(const cv::Mat& frame, const cv::Rect&, float scale_factor, + int batch_index, const std::string& input_name); + int getEntryIndex(int side, int lcoords, int lclasses, int location, int entry); + double IntersectionOverUnion(const Result &box_1, const Result &box_2); + //int enqueued_frames_ = 0; + int max_batch_size_ = 1; + cv::Rect ROI_; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index 13b580d2..6977487a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -83,6 +83,7 @@ class BaseModel * @return The name of the model. 
*/ virtual const std::string getModelName() const = 0; + //InferenceEngine::CNNNetReader::Ptr net_reader_; protected: /** @@ -92,25 +93,39 @@ class BaseModel */ virtual void checkLayerProperty( const InferenceEngine::CNNNetReader::Ptr& network_reader) = 0; + + /** + * @brief Set the layer property (layer layout, layer precision, etc.). + * @param[in] network_reader The reader of the network to be set. + */ virtual void - /** - * @brief Set the layer property (layer layout, layer precision, etc.). - * @param[in] network_reader The reader of the network to be set. - */ setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; + InferenceEngine::CNNNetReader::Ptr net_reader_; + private: friend class Engines::Engine; - void checkNetworkSize(unsigned int, unsigned int, InferenceEngine::CNNNetReader::Ptr); - InferenceEngine::CNNNetReader::Ptr net_reader_; std::vector labels_; int input_num_; int output_num_; std::string model_loc_; int max_batch_size_; }; + +class ObjectDetectionModel : public BaseModel +{ + public: + ObjectDetectionModel(const std::string& a, int b, int c, int d); + virtual inline const int getMaxProposalCount() { return max_proposal_count_; } + virtual inline const int getObjectSize() { return object_size_; } + + protected: + int max_proposal_count_; + int object_size_; + //virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; +}; } // namespace Models #endif // DYNAMIC_VINO_LIB_MODELS_BASE_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.h rename to dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h index b0fe6c9f..76e257df 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h @@ -17,8 +17,8 @@ * @brief A header file with declaration for ObjectDetectionModel Class * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_MODEL_H -#define DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_MODEL_H +#ifndef DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H +#define DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H #include #include "dynamic_vino_lib/models/base_model.h" namespace Models { @@ -26,11 +26,11 @@ namespace Models { * @class ObjectDetectionModel * @brief This class generates the face detection model. */ -class ObjectDetectionModel : public BaseModel { +class ObjectDetectionSSDModel : public ObjectDetectionModel { public: - ObjectDetectionModel(const std::string&, int, int, int); - inline const int getMaxProposalCount() { return max_proposal_count_; } - inline const int getObjectSize() { return object_size_; } + ObjectDetectionSSDModel(const std::string&, int, int, int); + //inline const int getMaxProposalCount() { return max_proposal_count_; } + //inline const int getObjectSize() { return object_size_; } inline const std::string getInputName() { return input_; } inline const std::string getOutputName() { return output_; } /** @@ -42,10 +42,10 @@ class ObjectDetectionModel : public BaseModel { void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - int max_proposal_count_; - int object_size_; + //int max_proposal_count_; + //int object_size_; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_MODEL_H +#endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h new file mode 100644 index 
00000000..c383db53 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @brief A header file with declaration for ObjectDetectionModel Class + * @file face_detection_model.h + */ +#ifndef DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_YOLOV2VOC_MODEL_H +#define DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_YOLOV2VOC_MODEL_H +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models { +/** + * @class ObjectDetectionModel + * @brief This class generates the face detection model. + */ +class ObjectDetectionYolov2vocModel : public ObjectDetectionModel { + public: + ObjectDetectionYolov2vocModel(const std::string&, int, int, int); + inline const std::string getInputName() { return input_; } + inline const std::string getOutputName() { return output_; } + InferenceEngine::CNNLayerPtr getLayer() { return output_layer_; } + InferenceEngine::InputInfo::Ptr getInputInfo() { return input_info_; } + /** + * @brief Get the name of this detection model. + * @return Name of the model. 
+ */ + const std::string getModelName() const override; + + protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + + std::string input_; + std::string output_; + + InferenceEngine::CNNLayerPtr output_layer_; + InferenceEngine::InputInfo::Ptr input_info_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_YOLOV2VOC_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 00cafc1d..c65d7bc6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -30,7 +30,8 @@ #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/inferences/face_detection.h" #include "dynamic_vino_lib/inferences/head_pose_detection.h" -#include "dynamic_vino_lib/inferences/object_detection.h" +#include "dynamic_vino_lib/inferences/object_detection_ssd.h" +#include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/inferences/person_reidentification.h" #include "dynamic_vino_lib/services/frame_processing_server.h" diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 4344a48c..89a9ee1e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -55,6 +55,8 @@ const char kInferTpye_HeadPoseEstimation[] = "HeadPoseEstimation"; const char kInferTpye_ObjectDetection[] = "ObjectDetection"; const char kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; const char kInferTpye_PersonReidentification[] = "PersonReidentification"; +const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; 
+const char kInferTpye_ObjectDetectionTypeYolov2voc[] = "yolov2-voc"; /** * @class PipelineParams diff --git a/dynamic_vino_lib/src/factory.cpp b/dynamic_vino_lib/src/factory.cpp index d886124c..9560b67d 100644 --- a/dynamic_vino_lib/src/factory.cpp +++ b/dynamic_vino_lib/src/factory.cpp @@ -32,7 +32,6 @@ std::shared_ptr Factory::makeInputDeviceByName( const std::string& input_device_name, const std::string& input_file_path) { - std::cout << "InputDvice: " << input_device_name << std::endl; if (input_device_name == "RealSenseCamera") { return std::make_shared(); @@ -43,8 +42,6 @@ std::shared_ptr Factory::makeInputDeviceByName( } else if (input_device_name == "RealSenseCameraTopic") { - std::cout << "tring to create instance for " << input_device_name - << std::endl; return std::make_shared(); } else if (input_device_name == "Video") @@ -65,16 +62,17 @@ std::shared_ptr Factory::makePluginByName( const std::string& device_name, const std::string& custom_cpu_library_message, // FLAGS_l const std::string& custom_cldnn_message, // FLAGS_c - bool performance_message) -{ // FLAGS_pc - InferenceEngine::InferencePlugin plugin = InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}) - .getPluginByDevice(device_name); - /** Printing plugin version **/ - printPluginVersion(plugin, std::cout); + bool performance_message) // FLAGS_pc +{ + + InferenceEngine::InferencePlugin plugin = InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}).getPluginByDevice(device_name); + printPluginVersion(plugin, std::cout); + /** Load extensions for the CPU plugin **/ if ((device_name.find("CPU") != std::string::npos)) { plugin.AddExtension(std::make_shared()); + if (!custom_cpu_library_message.empty()) { // CPU(MKLDNN) extensions are loaded as a shared library and passed as a @@ -84,18 +82,21 @@ std::shared_ptr Factory::makePluginByName( custom_cpu_library_message); plugin.AddExtension(extension_ptr); } + } else if (!custom_cldnn_message.empty()) { - // Load Extensions for 
other plugins not CPU plugin.SetConfig( {{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); } + if (performance_message) { plugin.SetConfig( {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}); } + return std::make_shared( InferenceEngine::InferenceEnginePluginPtr(plugin)); + } diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index 4ca7d251..ceb0fbea 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -23,6 +23,10 @@ #include "dynamic_vino_lib/inferences/base_inference.h" +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( + const cv::Rect& location) + : Result(location){} + // Result dynamic_vino_lib::Result::Result(const cv::Rect& location) { @@ -43,8 +47,8 @@ void dynamic_vino_lib::BaseInference::loadEngine( bool dynamic_vino_lib::BaseInference::submitRequest() { if (engine_->getRequest() == nullptr) return false; - if (!enqueued_frames) return false; - enqueued_frames = 0; + if (!enqueued_frames_) return false; + enqueued_frames_ = 0; results_fetched_ = false; engine_->getRequest()->StartAsync(); return true; diff --git a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp new file mode 100644 index 00000000..b558cf8b --- /dev/null +++ b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @brief a header file with declaration of ObjectDetection class and + * ObjectDetectionResult class + * @file object_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/object_detection_ssd.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" +// ObjectDetectionResult +/* +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( + const cv::Rect& location) + : Result(location){} +*/ + +dynamic_vino_lib::ObjectDetectionSSD::ObjectDetectionSSD(double show_output_thresh) + : dynamic_vino_lib::ObjectDetection(), + show_output_thresh_(show_output_thresh){} + +dynamic_vino_lib::ObjectDetectionSSD::~ObjectDetectionSSD() = default; + +void dynamic_vino_lib::ObjectDetectionSSD::loadNetwork( + std::shared_ptr network) { + valid_model_ = std::dynamic_pointer_cast(network); + + max_proposal_count_ = network->getMaxProposalCount(); + object_size_ = network->getObjectSize(); + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::ObjectDetectionSSD::enqueue(const cv::Mat& frame, + const cv::Rect& input_frame_loc) { + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (!matToBlob(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + + Result r(input_frame_loc); + results_.clear(); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::ObjectDetectionSSD::matToBlob( + const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const 
std::string& input_name) +{ + if (enqueued_frames_ == max_batch_size_) + { + slog::warn << "Number of " << getName() << "input more than maximum(" + << max_batch_size_ << ") processed by inference" << slog::endl; + return false; + } + + InferenceEngine::Blob::Ptr input_blob = + engine_->getRequest()->GetBlob(input_name); + + InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); + const int width = blob_size[3]; + const int height = blob_size[2]; + const int channels = blob_size[1]; + u_int8_t * blob_data = input_blob->buffer().as(); + + cv::Mat resized_image(orig_image); + if (width != orig_image.size().width || height != orig_image.size().height) + { + cv::resize(orig_image, resized_image, cv::Size(width, height)); + } + int batchOffset = batch_index * width * height * channels; + + for (int c = 0; c < channels; c++) + { + for (int h = 0; h < height; h++) + { + for (int w = 0; w < width; w++) + { + blob_data[batchOffset + c * width * height + h * width + w] = + resized_image.at(h, w)[c] * scale_factor; + } + } + } + + enqueued_frames_ += 1; + return true; +} + +bool dynamic_vino_lib::ObjectDetectionSSD::submitRequest() { + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::ObjectDetectionSSD::fetchResults() { + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + + if (!can_fetch) return false; + bool found_result = false; + results_.clear(); + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + + std::string output = valid_model_->getOutputName(); + const float* detections = request->GetBlob(output)->buffer().as(); + + for (int i = 0; i < max_proposal_count_; i++) { + + float image_id = detections[i * object_size_ + 0]; + cv::Rect r; + auto label_num = static_cast(detections[i * object_size_ + 1]); + std::vector& labels = valid_model_->getLabels(); + r.x = static_cast(detections[i * object_size_ + 3] * width_); + r.y = static_cast(detections[i * object_size_ + 4] * 
height_); + r.width = static_cast(detections[i * object_size_ + 5] * width_ - r.x); + r.height = + static_cast(detections[i * object_size_ + 6] * height_ - r.y); + Result result(r); + result.label_ = label_num < labels.size() + ? labels[label_num] + : std::string("label #") + std::to_string(label_num); + result.confidence_ = detections[i * object_size_ + 2]; + if (result.confidence_ <= show_output_thresh_) { + continue; + } + if (image_id < 0) { + break; + } + found_result = true; + results_.emplace_back(result); + } + + if (!found_result) results_.clear(); + return true; +} + +const int dynamic_vino_lib::ObjectDetectionSSD::getResultsLength() const { + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result* +dynamic_vino_lib::ObjectDetectionSSD::getLocationResult(int idx) const { + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::ObjectDetectionSSD::getName() const { + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::ObjectDetectionSSD::observeOutput( + const std::shared_ptr& output) { + if (output != nullptr) { + output->accept(results_); + } +} diff --git a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp new file mode 100644 index 00000000..b683248d --- /dev/null +++ b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @brief a header file with declaration of ObjectDetection class and + * ObjectDetectionResult class + * @file object_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" +// ObjectDetectionResult +/* +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( + const cv::Rect& location) + : Result(location){} +*/ + +// ObjectDetection +dynamic_vino_lib::ObjectDetectionYolov2voc::ObjectDetectionYolov2voc(double show_output_thresh) + : dynamic_vino_lib::ObjectDetection(), + show_output_thresh_(show_output_thresh){} + +dynamic_vino_lib::ObjectDetectionYolov2voc::~ObjectDetectionYolov2voc() = default; + +void dynamic_vino_lib::ObjectDetectionYolov2voc::loadNetwork( + std::shared_ptr network) { + valid_model_ = std::dynamic_pointer_cast(network); + if (valid_model_ == NULL) + { + std::cout << "ERROR" << std::endl; + } + max_proposal_count_ = network->getMaxProposalCount(); + object_size_ = network->getObjectSize(); + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::ObjectDetectionYolov2voc::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (!matToBlob(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + + Result r(input_frame_loc); + results_.clear(); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::ObjectDetectionYolov2voc::matToBlob( + const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::string& input_name) +{ + if (enqueued_frames_ == max_batch_size_) + { + slog::warn << "Number of " << getName() << "input more than maximum(" + << max_batch_size_ << ") processed by 
inference" << slog::endl; + return false; + } + InferenceEngine::Blob::Ptr input_blob = + engine_->getRequest()->GetBlob(input_name); + + InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); + const int width = blob_size[3]; + const int height = blob_size[2]; + const int channels = blob_size[1]; + float* blob_data = input_blob->buffer().as(); + + int dx = 0; + int dy = 0; + int srcw = 0; + int srch = 0; + + int IH = height; + int IW = width; + + cv::Mat image = orig_image.clone(); + cv::cvtColor(image, image, cv::COLOR_BGR2RGB); + + image.convertTo(image, CV_32F, 1.0/255.0, 0); + srcw = image.size().width; + srch = image.size().height; + + cv::Mat resizedImg (IH, IW, CV_32FC3); + resizedImg = cv::Scalar(0.5, 0.5, 0.5); + int imw = image.size().width; + int imh = image.size().height; + float resize_ratio = (float)IH / (float)std::max(imw, imh); + cv::resize(image, image, cv::Size(imw*resize_ratio, imh*resize_ratio)); + + int new_w = imw; + int new_h = imh; + if (((float)IW/imw) < ((float)IH/imh)) { + new_w = IW; + new_h = (imh * IW)/imw; + } else { + new_h = IH; + new_w = (imw * IW)/imh; + } + dx = (IW-new_w)/2; + dy = (IH-new_h)/2; + + imh = image.size().height; + imw = image.size().width; + + for(int row = 0; row < imh; row ++) + { + for(int col = 0; col < imw; col ++) + { + for(int ch = 0; ch < 3; ch ++) + { + resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; + } + } + } + + for (int c = 0; c < channels; c++) + { + for (int h = 0; h < height; h++) + { + for (int w = 0; w < width; w++) + { + blob_data[c * width * height + h*width + w] = resizedImg.at(h, w)[c]; + } + } + } + + ROI_.x = dx; + ROI_.y = dy; + ROI_.width = srcw; + ROI_.height = srch; + + enqueued_frames_ += 1; + return true; +} + +bool dynamic_vino_lib::ObjectDetectionYolov2voc::submitRequest() { + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::ObjectDetectionYolov2voc::fetchResults() +{ + bool can_fetch = 
dynamic_vino_lib::BaseInference::fetchResults(); + + if (!can_fetch) return false; + bool found_result = false; + results_.clear(); + + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + + std::string output = valid_model_->getOutputName(); + std::vector& labels = valid_model_->getLabels(); + + const float* detections = request->GetBlob(output)->buffer().as::value_type *>(); + InferenceEngine::CNNLayerPtr layer = valid_model_->getLayer(); + InferenceEngine::InputInfo::Ptr input_info = valid_model_->getInputInfo(); + + int input_height = input_info->getTensorDesc().getDims()[2]; + int input_width = input_info->getTensorDesc().getDims()[3]; + + // --------------------------- Validating output parameters ------------------------------------- + if (layer->type != "RegionYolo") + throw std::runtime_error("Invalid output type: " + layer->type + ". RegionYolo expected"); + + // --------------------------- Extracting layer parameters ------------------------------------- + const int num = layer->GetParamAsInt("num"); + const int coords = layer->GetParamAsInt("coords"); + const int classes = layer->GetParamAsInt("classes"); + + const int out_blob_h = layer->input()->dims[0]; + + std::vector anchors = { + 0.572730, 0.677385, + 1.874460, 2.062530, + 3.338430, 5.474340, + 7.882820, 3.527780, + 9.770520, 9.168280 + }; + float threshold = 0.5; + auto side = out_blob_h; + + auto side_square = side * side; + + // --------------------------- Parsing YOLO Region output ------------------------------------- + for (int i = 0; i < side_square; ++i) { + int row = i / side; + int col = i % side; + + for (int n = 0; n < num; ++n) { + int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); + int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); + + float scale = detections[obj_index]; + + if (scale < threshold) + continue; + + float x = (col + detections[box_index + 0 * side_square]) / side * input_width; + float y = 
(row + detections[box_index + 1 * side_square]) / side * input_height; + float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / side * input_height; + float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * input_width; + + for (int j = 0; j < classes; ++j) { + + int class_index = getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); + + float prob = scale * detections[class_index]; + if (prob < threshold) + continue; + + float x_min = x - width/2; + float y_min = y - height/2; + + float x_min_resized = x_min / input_width * ROI_.width; + float y_min_resized = y_min / input_height * ROI_.height; + float width_resized = width / input_width * ROI_.width; + float height_resized = height / input_height * ROI_.height; + + cv::Rect r(x_min_resized, y_min_resized, width_resized, height_resized); + Result result(r); + result.label_ = j; + result.label_ = labels[j]; + + result.confidence_ = prob; + found_result = true; + raw_results_.emplace_back(result); + } + } + } + + std::sort(raw_results_.begin(), raw_results_.end()); + for (unsigned int i = 0; i < raw_results_.size(); ++i) { + if (raw_results_[i].confidence_ == 0) + continue; + for (unsigned int j = i + 1; j < raw_results_.size(); ++j) + if (IntersectionOverUnion(raw_results_[i], raw_results_[j]) >= 0.45) + raw_results_[j].confidence_ = 0; + } + + for (auto &raw_result : raw_results_) { + if (raw_result.getConfidence() < 0.5) + continue; + + results_.push_back(raw_result); + } + + if (!found_result) { + results_.clear(); + } + raw_results_.clear(); + + return true; +} + +double dynamic_vino_lib::ObjectDetectionYolov2voc::IntersectionOverUnion(const Result &box_1, const Result &box_2) { + int xmax_1 = box_1.getLocation().x + box_1.getLocation().width; + int xmin_1 = box_1.getLocation().x; + int xmax_2 = box_2.getLocation().x + box_2.getLocation().width; + int xmin_2 = box_2.getLocation().x; + + int ymax_1 = box_1.getLocation().y + 
box_1.getLocation().height; + int ymin_1 = box_1.getLocation().y; + int ymax_2 = box_2.getLocation().y + box_2.getLocation().height; + int ymin_2 = box_2.getLocation().y; + + double width_of_overlap_area = fmin(xmax_1 , xmax_2) - fmax(xmin_1, xmin_2); + double height_of_overlap_area = fmin(ymax_1, ymax_2) - fmax(ymin_1, ymin_2); + double area_of_overlap; + if (width_of_overlap_area < 0 || height_of_overlap_area < 0) + area_of_overlap = 0; + else + area_of_overlap = width_of_overlap_area * height_of_overlap_area; + + double box_1_area = (ymax_1 - ymin_1) * (xmax_1 - xmin_1); + double box_2_area = (ymax_2 - ymin_2) * (xmax_2 - xmin_2); + double area_of_union = box_1_area + box_2_area - area_of_overlap; + + return area_of_overlap / area_of_union; +} + +int dynamic_vino_lib::ObjectDetectionYolov2voc::getEntryIndex(int side, int lcoords, int lclasses, int location, int entry) { + int n = location / (side * side); + int loc = location % (side * side); + return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; +} + +const int dynamic_vino_lib::ObjectDetectionYolov2voc::getResultsLength() const { + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result* +dynamic_vino_lib::ObjectDetectionYolov2voc::getLocationResult(int idx) const { + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::ObjectDetectionYolov2voc::getName() const { + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::ObjectDetectionYolov2voc::observeOutput( + const std::shared_ptr& output) { + if (output != nullptr) { + output->accept(results_); + } +} diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index 08153777..1721f140 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -39,29 +39,35 @@ Models::BaseModel::BaseModel(const std::string& model_loc, int input_num, { throw std::logic_error("model file name is empty!"); } + 
net_reader_ = std::make_shared(); } void Models::BaseModel::modelInit() { - slog::info << "Loading network files" << slog::endl; + slog::info << "model location is " << model_loc_ << slog::endl; + slog::info << "Loading network file" << slog::endl; // Read network model net_reader_->ReadNetwork(model_loc_); + // Set batch size to given max_batch_size_ slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; net_reader_->getNetwork().setBatchSize(max_batch_size_); + // Extract model name and load it's weights // remove extension size_t last_index = model_loc_.find_last_of("."); std::string raw_name = model_loc_.substr(0, last_index); std::string bin_file_name = raw_name + ".bin"; net_reader_->ReadWeights(bin_file_name); + // Read labels (if any) std::string label_file_name = raw_name + ".labels"; std::ifstream input_file(label_file_name); std::copy(std::istream_iterator(input_file), std::istream_iterator(), std::back_inserter(labels_)); checkNetworkSize(input_num_, output_num_, net_reader_); + checkLayerProperty(net_reader_); setLayerProperty(net_reader_); } @@ -87,5 +93,10 @@ void Models::BaseModel::checkNetworkSize( throw std::logic_error(getModelName() + "network should have only one output"); } - // InferenceEngine::DataPtr& output_data_ptr = output_info.begin()->second; } + +Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, + int input_num, int output_num, + int max_batch_size) + : BaseModel(model_loc, input_num, output_num, max_batch_size){} + diff --git a/dynamic_vino_lib/src/models/object_detection_model.cpp b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp similarity index 89% rename from dynamic_vino_lib/src/models/object_detection_model.cpp rename to dynamic_vino_lib/src/models/object_detection_ssd_model.cpp index e16ddc20..e0df0cdc 100644 --- a/dynamic_vino_lib/src/models/object_detection_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp @@ -14,19 +14,21 @@ * limitations 
under the License. */ /** - * @brief a header file with declaration of ObjectDetectionModel class + * @brief a header file with declaration of ObjectDetectionSSDModel class * @file object_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/object_detection_model.h" +#include "dynamic_vino_lib/models/object_detection_ssd_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, +Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel(const std::string& model_loc, int input_num, int output_num, int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} -void Models::ObjectDetectionModel::setLayerProperty( + : ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size){} + +void Models::ObjectDetectionSSDModel::setLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { + // set input property InferenceEngine::InputsDataMap input_info_map( net_reader->getNetwork().getInputsInfo()); @@ -49,8 +51,10 @@ void Models::ObjectDetectionModel::setLayerProperty( input_ = input_info_map.begin()->first; output_ = output_info_map.begin()->first; } -void Models::ObjectDetectionModel::checkLayerProperty( + +void Models::ObjectDetectionSSDModel::checkLayerProperty( const InferenceEngine::CNNNetReader::Ptr& net_reader) { + slog::info << "Checking Object Detection outputs" << slog::endl; InferenceEngine::OutputsDataMap output_info_map( net_reader->getNetwork().getOutputsInfo()); @@ -100,6 +104,7 @@ void Models::ObjectDetectionModel::checkLayerProperty( std::to_string(output_dims.size())); } } -const std::string Models::ObjectDetectionModel::getModelName() const { + +const std::string Models::ObjectDetectionSSDModel::getModelName() const { return "Object Detection"; } diff --git a/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp new file 
mode 100644 index 00000000..ce8c682f --- /dev/null +++ b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @brief a header file with declaration of ObjectDetectionYolov2vocModel class + * @file object_detection_model.cpp + */ +#include +#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" +#include "dynamic_vino_lib/slog.h" +// Validated Object Detection Network +Models::ObjectDetectionYolov2vocModel::ObjectDetectionYolov2vocModel(const std::string& model_loc, + int input_num, int output_num, + int max_batch_size) + : ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size){ +} + +void Models::ObjectDetectionYolov2vocModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) { + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + throw std::logic_error("This sample accepts networks having only one input"); + } + input_info_ = input_info_map.begin()->second; + input_info_->setPrecision(InferenceEngine::Precision::FP32); + input_info_->setLayout(InferenceEngine::Layout::NCHW); + + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) { + throw std::logic_error("This sample 
accepts networks having only one output"); + } + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); + //output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); + + // set input and output layer name + input_ = input_info_map.begin()->first; + output_ = output_info_map.begin()->first; +} + +void Models::ObjectDetectionYolov2vocModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr& net_reader) { + slog::info << "Checking Object Detection outputs" << slog::endl; + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + slog::info << "Checking Object Detection outputs ..." << slog::endl; + if (output_info_map.size() != 1) { + throw std::logic_error("This sample accepts networks having only one output"); + } + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + output_ = output_info_map.begin()->first; + slog::info << "Checking Object Detection output ... Name=" << output_ << slog::endl; + + output_layer_ = net_reader->getNetwork().getLayerByName(output_.c_str()); + slog::info << "Checking Object Detection num_classes" << slog::endl; + if (output_layer_->params.find("classes") == output_layer_->params.end()) { + throw std::logic_error("Object Detection network output layer (" + output_ + + ") should have num_classes integer attribute"); + } + // class number should be equal to size of label vector + // if network has default "background" class, fake is used + const unsigned int num_classes = output_layer_->GetParamAsInt("classes"); + + slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; + if (getLabels().size() != num_classes) { + + if (getLabels().size() == (num_classes - 1)) { + getLabels().insert(getLabels().begin(), "fake"); + } else { + getLabels().clear(); + } + } + + const InferenceEngine::SizeVector output_dims = + output_data_ptr->getTensorDesc().getDims(); + max_proposal_count_ = static_cast(output_dims[2]); + object_size_ = static_cast(output_dims[3]); + + if (object_size_ != 33) { + throw std::logic_error( + "Object Detection network output layer should have 33 as a last aaa" + "dimension"); + } + if (output_dims.size() != 2) { + throw std::logic_error( + "Object Detection network output dimensions not compatible shoulld be 2, " + "but was " + + std::to_string(output_dims.size())); + } +} + +const std::string Models::ObjectDetectionYolov2vocModel::getModelName() const { + return "Object Detection"; +} diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 3a6f2df1..472b6ee8 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -51,7 +51,6 @@ void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) void Outputs::ImageWindowOutput::accept( const std::vector& results) { - // std::cout<<"call face"<& results) { - // std::cout<<"call"<feedFrame(frame_); } - // auto t0 = std::chrono::high_resolution_clock::now(); + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { std::string detection_name = pos.first->second; auto detection_ptr = name_to_detection_map_[detection_name]; + detection_ptr->enqueue(frame_, cv::Rect(width_ / 2, height_ / 2, width_, height_)); increaseInferenceCounter(); detection_ptr->submitRequest(); } + std::unique_lock lock(counter_mutex_); cv_.wait(lock, [this]() { return this->counter_ == 0; }); - // auto t1 = std::chrono::high_resolution_clock::now(); - // typedef 
std::chrono::duration> ms; for (auto& pair : name_to_output_map_) { @@ -235,6 +235,7 @@ void Pipeline::setCallback() for (auto& pair : name_to_detection_map_) { std::string detection_name = pair.first; + std::function callb; callb = [detection_name, this]() { @@ -244,10 +245,12 @@ void Pipeline::setCallback() pair.second->getEngine()->getRequest()->SetCompletionCallback(callb); } } + void Pipeline::callback(const std::string& detection_name) { auto detection_ptr = name_to_detection_map_[detection_name]; detection_ptr->fetchResults(); + // set output for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; ++pos.first) @@ -293,11 +296,13 @@ void Pipeline::initInferenceCounter() counter_ = 0; cv_.notify_all(); } + void Pipeline::increaseInferenceCounter() { std::lock_guard lk(counter_mutex_); ++counter_; } + void Pipeline::decreaseInferenceCounter() { std::lock_guard lk(counter_mutex_); diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 9e00e506..08c16a72 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -38,6 +38,8 @@ #include "dynamic_vino_lib/models/emotion_detection_model.h" #include "dynamic_vino_lib/models/face_detection_model.h" #include "dynamic_vino_lib/models/head_pose_detection_model.h" +#include "dynamic_vino_lib/models/object_detection_ssd_model.h" +#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" #include "dynamic_vino_lib/outputs/rviz_output.h" @@ -94,7 +96,6 @@ std::shared_ptr PipelineManager::createPipeline( slog::info << "One Pipeline Created!" 
<< slog::endl; pipeline->printPipeline(); return pipeline; - } std::map> @@ -163,6 +164,8 @@ PipelineManager::parseInference( auto pcommon = Params::ParamManager::getInstance().getCommon(); std::string FLAGS_l = pcommon.custom_cpu_library; std::string FLAGS_c = pcommon.custom_cldnn_library; + //std::string FLAGS_c = "/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/lib/ubuntu_16.04/intel64/cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml"; + bool FLAGS_pc = pcommon.enable_performance_count; std::map> @@ -280,13 +283,30 @@ std::shared_ptr PipelineManager::createObjectDetection( const Params::ParamManager::InferenceParams & infer) { - auto object_detection_model = - std::make_shared(infer.model, 1, 1, 1); + std::shared_ptr object_detection_model; + std::shared_ptr object_inference_ptr; + + if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) + { + object_detection_model = + std::make_shared(infer.model, 1, 1, 1); + + object_inference_ptr = std::make_shared( + 0.5); // To-do theshold configuration + } + + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2voc) + { + object_detection_model = + std::make_shared(infer.model, 1, 1, 1); + + object_inference_ptr = std::make_shared( + 0.5); // To-do theshold configuration + } + object_detection_model->modelInit(); auto object_detection_engine = std::make_shared( plugins_for_devices_[infer.engine], object_detection_model); - auto object_inference_ptr = std::make_shared( - infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration object_inference_ptr->loadNetwork(object_detection_model); object_inference_ptr->loadEngine(object_detection_engine); diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index d5712f5f..cabe72b0 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -66,7 +66,7 @@ include_directories ( ${InferenceEngine_INCLUDE_DIRS}/../build/samples/thirdparty/gflags/include ${InferenceEngine_INCLUDE_DIRS}/../samples 
${InferenceEngine_DIR}/../src - ${dynamic_vino_lib_INCLUDE_DIRS} + #${dynamic_vino_lib_INCLUDE_DIRS} ${vino_param_lib_INCLUDE_DIRS} ) diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp index 475d6cb3..b23e160f 100644 --- a/sample/src/pipeline_with_params.cpp +++ b/sample/src/pipeline_with_params.cpp @@ -48,6 +48,8 @@ #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/inferences/face_detection.h" #include "dynamic_vino_lib/inferences/head_pose_detection.h" +#include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" +#include "dynamic_vino_lib/inferences/object_detection_ssd.h" #include "dynamic_vino_lib/inputs/realsense_camera_topic.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" @@ -88,7 +90,7 @@ int main(int argc, char** argv) signal(SIGINT, signalHandler); std::string FLAGS_config; - ros::param::param("~param_file", FLAGS_config, "/param/pipeline_people.yaml"); + ros::param::param("~param_file", FLAGS_config, "/param/pipeline_object_oss.yaml"); slog::info << "FLAGS_config=" << FLAGS_config << slog::endl; try diff --git a/script/environment_setup.sh b/script/environment_setup.sh old mode 100755 new mode 100644 diff --git a/script/environment_setup_binary.sh b/script/environment_setup_binary.sh old mode 100755 new mode 100644 diff --git a/vino_launch/param/pipeline_object.yaml b/vino_launch/param/pipeline_object.yaml index d4f5fffe..322bf1f7 100644 --- a/vino_launch/param/pipeline_object.yaml +++ b/vino_launch/param/pipeline_object.yaml @@ -4,6 +4,9 @@ Pipelines: infers: - name: ObjectDetection model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + #model: + model_type: SSD + #model_type: yolov2-voc engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/vino_launch/param/pipeline_object_oss.yaml 
b/vino_launch/param/pipeline_object_oss.yaml index 9b656fc1..a83b710f 100644 --- a/vino_launch/param/pipeline_object_oss.yaml +++ b/vino_launch/param/pipeline_object_oss.yaml @@ -3,7 +3,10 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + #model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /home/intel/Downloads/yolov2-voc.xml + #model_type: SSD + model_type: yolov2-voc engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -16,7 +19,7 @@ Pipelines: right: [ImageWindow] - left: ObjectDetection right: [RosTopic] - - left: ObjectDetection - right: [RViz] + # - left: ObjectDetection + # right: [RViz] OpenvinoCommon: diff --git a/vino_param_lib/include/vino_param_lib/param_manager.h b/vino_param_lib/include/vino_param_lib/param_manager.h index 1f0a5cc3..76c0b89f 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.h +++ b/vino_param_lib/include/vino_param_lib/param_manager.h @@ -66,6 +66,7 @@ class ParamManager // singleton std::string name; std::string engine; std::string model; + std::string model_type; std::string label; int batch; float confidence_threshold = 0.5; diff --git a/vino_param_lib/src/param_manager.cpp b/vino_param_lib/src/param_manager.cpp index 3d5d2bb6..e4c39bf4 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/vino_param_lib/src/param_manager.cpp @@ -100,6 +100,7 @@ void operator>>(const YAML::Node& node, ParamManager::InferenceParams& infer) { YAML_PARSE(node, "name", infer.name) YAML_PARSE(node, "model", infer.model) + YAML_PARSE(node, "model_type", infer.model_type) YAML_PARSE(node, "engine", infer.engine) YAML_PARSE(node, "label", infer.label) YAML_PARSE(node, "batch", infer.batch) From 2fc6785fdf9837b144d0064f39e00762e39a79e9 Mon Sep 17 00:00:00 2001 From: Chao Li Date: Tue, 21 May 
2019 17:31:37 +0800 Subject: [PATCH 04/63] result filter enabled --- dynamic_vino_lib/CMakeLists.txt | 2 + .../inferences/age_gender_detection.h | 7 +- .../dynamic_vino_lib/inferences/base_filter.h | 184 +++++++++++++++ .../inferences/base_inference.h | 40 +--- .../inferences/emotions_detection.h | 6 +- .../inferences/face_detection.h | 6 +- .../inferences/head_pose_detection.h | 6 +- .../inferences/inference_manager.h | 104 +++++++++ .../inferences/object_detection.h | 109 ++++----- .../inferences/object_detection_ssd.h | 10 +- .../inferences/object_detection_yolov2_voc.h | 9 +- .../inferences/object_segmentation.h | 6 +- .../inferences/person_reidentification.h | 6 +- .../include/dynamic_vino_lib/pipeline.h | 5 + .../dynamic_vino_lib/pipeline_manager.h | 26 +-- .../dynamic_vino_lib/pipeline_params.h | 12 +- .../src/inferences/age_gender_detection.cpp | 18 +- .../src/inferences/base_filter.cpp | 214 ++++++++++++++++++ .../src/inferences/base_inference.cpp | 4 - .../src/inferences/emotions_detection.cpp | 18 +- .../src/inferences/face_detection.cpp | 17 +- .../src/inferences/head_pose_detection.cpp | 17 +- .../src/inferences/object_detection.cpp | 130 +++++------ .../src/inferences/object_detection_ssd.cpp | 28 ++- .../object_detection_yolov2_voc.cpp | 25 +- .../src/inferences/object_segmentation.cpp | 17 +- .../inferences/person_reidentification.cpp | 18 +- .../src/inputs/standard_camera.cpp | 12 +- dynamic_vino_lib/src/inputs/video_input.cpp | 8 +- dynamic_vino_lib/src/pipeline.cpp | 48 ++-- dynamic_vino_lib/src/pipeline_manager.cpp | 22 +- dynamic_vino_lib/src/pipeline_params.cpp | 19 +- vino_launch/param/pipeline_object_oss.yaml | 2 +- .../include/vino_param_lib/param_manager.h | 62 ++--- vino_param_lib/src/param_manager.cpp | 93 +++++--- 35 files changed, 992 insertions(+), 318 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h create mode 100644 
dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h create mode 100644 dynamic_vino_lib/src/inferences/base_filter.cpp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 47b0877f..3a862958 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -141,11 +141,13 @@ add_library(${PROJECT_NAME} SHARED src/pipeline_params.cpp src/pipeline_manager.cpp src/engines/engine.cpp + src/inferences/base_filter.cpp src/inferences/base_inference.cpp src/inferences/emotions_detection.cpp src/inferences/age_gender_detection.cpp src/inferences/face_detection.cpp src/inferences/head_pose_detection.cpp + src/inferences/object_detection.cpp src/inferences/object_detection_ssd.cpp src/inferences/object_detection_yolov2_voc.cpp src/inferences/object_segmentation.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h index 6c1b2749..f20f5119 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h @@ -125,7 +125,12 @@ class AgeGenderDetection : public BaseInference * or ROS topic. 
*/ const void observeOutput( - const std::shared_ptr& output) override; + const std::shared_ptr& output, + const std::string filter_conditions) override; + + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h new file mode 100644 index 00000000..4c597c62 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h @@ -0,0 +1,184 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for BaseFilter Class + * @file base_filter.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ + +#include +#include +#include +#include +#include "dynamic_vino_lib/inferences/base_inference.h" + +namespace dynamic_vino_lib +{ + +/** + * @class BaseFilter + * @brief Base class for result filter. + */ +class BaseFilter +{ +public: + BaseFilter(); + /** + * @brief Initiate a result filter. + */ + virtual void init() = 0; + + /** + * @brief Accept the filter conditions for filtering. + * @param[in] Filter conditions. + */ + void acceptFilterConditions(const std::string &); + + /** + * @brief Decide whether the input string is a relational operator or not. 
+ * @param[in] A string to be decided. + * @return True if the input string is a relational operator, false if not. + */ + bool isRelationOperator(const std::string &); + + /** + * @brief Decide whether the input string is a logic operator or not. + * @param[in] A string to be decided. + * @return True if the input string is a logic operator, false if not. + */ + bool isLogicOperator(const std::string &); + + /** + * @brief Decide whether an operator has a higher priority than another. + * @param[in] The two operators. + * @return True if the first operator has higher priority, false if not. + */ + bool isPriorTo(const std::string &, const std::string &); + + /** + * @brief Convert the input bool variable to a string type. + * @param[in] A bool type to be converted. + * @return A converted string result. + */ + std::string boolToStr(bool); + + /** + * @brief Convert the input string variable to a bool type. + * @param[in] A string type to be converted. + * @return A converted bool result. + */ + bool strToBool(const std::string &); + + /** + * @brief Get the filter conditions in the suffix order. + * @return A vector with suffix-order filter conditions. + */ + const std::vector & getSuffixConditions() const; + + /** + * @brief Do logic operation with the given bool values and the operator. + * @param[in] A bool string, a logic operator, the other bool string. + * @return The logic operation result. + */ + bool logicOperation(const std::string &, const std::string &, const std::string &); + + /** + * @brief Compare two strings with a given relational operator. + * @param[in] A string, a relational operator, the other string. + * @return True if valid, false if not. + */ + static bool stringCompare(const std::string &, const std::string &, const std::string &); + + /** + * @brief Compare two floats with a given relational operator. + * @param[in] A float number, a relational operator, the other float number. + * @return True if valid, false if not. 
+ */ + static bool floatCompare(float, const std::string &, float); + + /** + * @brief Convert a string into a float number. + * @param[in] A string to be converted. + * @return The converted float number, 0 if string is invalid. + */ + static float stringToFloat(const std::string &); + + /** + * @brief A macro to decide whether a given result satisfies the filter condition. + * @param[in] A key to function mapping, a given result. + * @return True if valid, false if not. + */ + #define ISVALIDRESULT(key_to_function, result) \ + { \ + std::vector suffix_conditons = getSuffixConditions(); \ + std::stack result_stack; \ + for (auto elem : suffix_conditons) { \ + if (!isRelationOperator(elem) && !isLogicOperator(elem)) { \ + result_stack.push(elem); \ + } \ + else { \ + try { \ + std::string str1 = result_stack.top(); \ + result_stack.pop(); \ + std::string str2 = result_stack.top(); \ + result_stack.pop(); \ + if (key_to_function.count(str2)) { \ + result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \ + } \ + else { \ + result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \ + } \ + } \ + catch (...) { \ + slog::err << "Invalid filter conditions format!" << slog::endl; \ + } \ + } \ + } \ + if (result_stack.empty()) { \ + return true; \ + } \ + return strToBool(result_stack.top()); \ + } + +private: + /** + * @brief Parse the filter conditions and stores it into a vector. + * @param[in] A string form filter conditions. + * @return The vector form filter conditions. + */ + std::vector split(const std::string & filter_conditions); + + /** + * @brief Convert the infix expression into suffix expression. + * @param[in] The infix form filter conditions. + */ + void infixToSuffix(std::vector&infix_conditions); + + /** + * @brief Strip the extra space in a string. + * @param[in] A string to be striped. + * @return The striped string. 
+ */ + std::string strip(const std::string & str); + + std::string striped_conditions_ = ""; + std::vector suffix_conditons_; + std::vector relation_operators_ = {"==", "!=", "<=", ">=", "<", ">"}; + std::vector logic_operators_ = {"&&", "||"}; +}; +} // namespace dynamic_vino_lib + +#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index 57acffeb..ef4fc641 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -137,7 +137,8 @@ class BaseInference virtual bool submitRequest(); virtual const void observeOutput( - const std::shared_ptr& output) = 0; + const std::shared_ptr& output, + const std::string filter_conditions) = 0; /** * @brief This function will fetch the results of the previous inference and @@ -161,6 +162,16 @@ class BaseInference * @return The name of the Inference instance. */ virtual const std::string getName() const = 0; + /** + * @brief Get the max batch size for one inference. + */ + inline int getMaxBatchSize() + { + return max_batch_size_; + } + + virtual const std::vector getFilteredROIs( + const std::string filter_conditions) const = 0; protected: /** @@ -200,33 +211,6 @@ class BaseInference int max_batch_size_ = 1; bool results_fetched_ = false; }; - -class ObjectDetectionResult : public Result { - public: - friend class ObjectDetection; - explicit ObjectDetectionResult(const cv::Rect& location); - std::string getLabel() const { return label_; } - /** - * @brief Get the confidence that the detected area is a face. - * @return The confidence value. 
- */ - float getConfidence() const { return confidence_; } - bool operator<(const ObjectDetectionResult &s2) const - { - return this->confidence_ > s2.confidence_; - } - - std::string label_ = ""; - float confidence_ = -1; -}; - -class ObjectDetection : public BaseInference -{ - public: - ObjectDetection() {}; - virtual void loadNetwork(std::shared_ptr) = 0; -}; - } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB_INFERENCES_BASE_INFERENCE_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h index bb4e1f29..e753a7e6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h @@ -116,8 +116,12 @@ class EmotionsDetection : public BaseInference * or ROS topic. */ const void observeOutput( - const std::shared_ptr& output) override; + const std::shared_ptr& output, + const std::string filter_conditions) override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector results_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h index 7addb3a5..83b5ac56 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h @@ -116,13 +116,17 @@ class FaceDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output); + const void observeOutput(const std::shared_ptr& output, + const std::string filter_conditions); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector results_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h index 125d6207..1fdf4811 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h @@ -129,7 +129,11 @@ class HeadPoseDetection : public BaseInference or ROS topic. */ const void observeOutput( - const std::shared_ptr& output) override; + const std::shared_ptr& output, + const std::string filter_conditions) override; + + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h new file mode 100644 index 00000000..28991f90 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h @@ -0,0 +1,104 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of Inference Manager class + * @file inference_manager.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dynamic_vino_lib/pipeline.hpp" + +/** + * @class InferenceManager + * @brief This class manages inference resources. + */ +class InferenceManager +{ +public: + /** + * @brief Get the singleton instance of InferenceManager class. + * The instance will be created when first call. + * @return The reference of InferenceManager instance. + */ + static InferenceManager & getInstance() + { + static InferenceManager manager_; + return manager_; + } + + std::shared_ptr createPipeline( + const Params::ParamManager::PipelineRawData & params); + void removePipeline(const std::string & name); + InferenceManager & updatePipeline( + const std::string & name, + const Params::ParamManager::PipelineRawData & params); + + void runAll(); + void stopAll(); + void joinAll(); + + enum PipelineState + { + PipelineState_ThreadNotCreated, + PipelineState_ThreadStopped, + PipelineState_ThreadRunning, + PipelineState_Error + }; + struct PipelineData + { + Params::ParamManager::PipelineRawData params; + std::shared_ptr pipeline; + std::vector> spin_nodes; + std::shared_ptr thread; + PipelineState state; + }; + +private: + InferenceManager() {} + InferenceManager(InferenceManager const &); + void operator=(InferenceManager const &); + void threadPipeline(const char * name); + std::map> + parseInputDevice(const Params::ParamManager::PipelineRawData & params); + std::map> parseOutput( + const Params::ParamManager::PipelineRawData & params); + std::map> + parseInference(const Params::ParamManager::PipelineRawData & params); + std::shared_ptr createFaceDetection( + const Params::ParamManager::InferenceParams & infer); + std::shared_ptr createAgeGenderRecognition( + 
const Params::ParamManager::InferenceParams & infer); + std::shared_ptr createEmotionRecognition( + const Params::ParamManager::InferenceParams & infer); + std::shared_ptr createHeadPoseEstimation( + const Params::ParamManager::InferenceParams & infer); + std::shared_ptr createObjectDetection( + const Params::ParamManager::InferenceParams & infer); + + std::map pipelines_; + std::map plugins_for_devices_; +}; + +#endif // DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h index 6eb50efa..362f1001 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h @@ -25,20 +25,18 @@ #include #include #include -#include "dynamic_vino_lib/models/object_detection_model.h" +#include #include "dynamic_vino_lib/engines/engine.h" #include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/base_filter.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace namespace dynamic_vino_lib { -/** - * @class ObjectDetectionResult - * @brief Class for storing and processing face detection result. - */ + class ObjectDetectionResult : public Result { public: - friend class ObjectDetection; + ObjectDetectionResult(); explicit ObjectDetectionResult(const cv::Rect& location); std::string getLabel() const { return label_; } /** @@ -46,73 +44,76 @@ class ObjectDetectionResult : public Result { * @return The confidence value. */ float getConfidence() const { return confidence_; } - private: + bool operator<(const ObjectDetectionResult &s2) const + { + return this->confidence_ > s2.confidence_; + } + std::string label_ = ""; float confidence_ = -1; }; + /** - * @class ObjectDetection - * @brief Class to load face detection model and perform face detection. 
+ * @class ObjectDetectionResultFilter + * @brief Class for object detection result filter. */ -class ObjectDetection : public BaseInference { - public: +class ObjectDetectionResultFilter : public BaseFilter +{ +public: using Result = dynamic_vino_lib::ObjectDetectionResult; - explicit ObjectDetection(bool,double); - ~ObjectDetection() override; - /** - * @brief Load the face detection model. - */ - void loadNetwork(std::shared_ptr); + + ObjectDetectionResultFilter(); + /** - * @brief Enqueue a frame to this class. - * The frame will be buffered but not infered yet. - * @param[in] frame The frame to be enqueued. - * @param[in] input_frame_loc The location of the enqueued frame with respect - * to the frame generated by the input device. - * @return Whether this operation is successful. + * @brief Initiate the object detection result filter. */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; + void init() override; /** - * @brief Start inference for all buffered frames. - * @return Whether this operation is successful. + * @brief Set the object detection results into filter. + * @param[in] The object detection results. */ - bool submitRequest() override; + void acceptResults(const std::vector & results); /** - * @brief This function will fetch the results of the previous inference and - * stores the results in a result buffer array. All buffered frames will be - * cleared. - * @return Whether the Inference object fetches a result this time + * @brief Get the filtered results' ROIs. + * @return The filtered ROIs. */ - bool fetchResults() override; + std::vector getFilteredResults(); +private: /** - * @brief Get the length of the buffer result array. - * @return The length of the buffer result array. + * @brief Decide whether a result is valid for label filter condition. + * @param[in] Result to be decided, filter operator, target label value. + * @return True if valid, false if not. 
*/ - const int getResultsLength() const override; + static bool isValidLabel( + const Result & result, + const std::string & op, const std::string & target); /** - * @brief Get the location of result with respect - * to the frame generated by the input device. - * @param[in] idx The index of the result. + * @brief Decide whether a result is valid for confidence filter condition. + * @param[in] Result to be decided, filter operator, target confidence value. + * @return True if valid, false if not. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + static bool isValidConfidence( + const Result & result, + const std::string & op, const std::string & target); + /** - * @brief Show the observed detection result either through image window - or ROS topic. + * @brief Decide whether a result is valid. + * @param[in] Result to be decided. + * @return True if valid, false if not. */ - const void observeOutput(const std::shared_ptr& output); - /** - * @brief Get the name of the Inference instance. - * @return The name of the Inference instance. 
- */ - const std::string getName() const override; - private: - std::shared_ptr valid_model_; + bool isValidResult(const Result & result); + + std::map key_to_function_; std::vector results_; - int width_ = 0; - int height_ = 0; - int max_proposal_count_; - int object_size_; - double show_output_thresh_ = 0; +}; + + +class ObjectDetection : public BaseInference +{ + public: + ObjectDetection() {}; + virtual void loadNetwork(std::shared_ptr) = 0; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h index b6a86c84..6e910519 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h @@ -29,6 +29,7 @@ #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/engines/engine.h" #include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/object_detection.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace @@ -40,6 +41,7 @@ namespace dynamic_vino_lib { class ObjectDetectionSSD : public ObjectDetection { public: using Result = dynamic_vino_lib::ObjectDetectionResult; + using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; explicit ObjectDetectionSSD(double); ~ObjectDetectionSSD() override; /** @@ -82,15 +84,21 @@ class ObjectDetectionSSD : public ObjectDetection { * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output); + const void observeOutput(const std::shared_ptr& output, + const std::string filter_conditions); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; + + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector results_; + std::shared_ptr result_filter_; int width_ = 0; int height_ = 0; int max_proposal_count_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h index dc5537c9..f01fc7bf 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h @@ -29,6 +29,7 @@ #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/engines/engine.h" #include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/object_detection.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace @@ -40,6 +41,7 @@ namespace dynamic_vino_lib { class ObjectDetectionYolov2voc : public ObjectDetection { public: using Result = dynamic_vino_lib::ObjectDetectionResult; + using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; explicit ObjectDetectionYolov2voc(double); ~ObjectDetectionYolov2voc() override; /** @@ -82,17 +84,22 @@ class ObjectDetectionYolov2voc : public ObjectDetection { * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output); + const void observeOutput(const std::shared_ptr& output, + const std::string filter_conditions); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector raw_results_; std::vector results_; + std::shared_ptr result_filter_; int width_ = 0; int height_ = 0; int max_proposal_count_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h index e6100e1a..11c5b8fe 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h @@ -113,13 +113,17 @@ class ObjectSegmentation : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output); + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector results_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h index 72b9b7a0..5e9947da 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h @@ -93,7 +93,8 @@ class PersonReidentification : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. 
*/ - const void observeOutput(const std::shared_ptr & output); + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -112,6 +113,9 @@ class PersonReidentification : public BaseInference */ std::string findMatchPerson(const std::vector &); + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + private: std::shared_ptr valid_model_; std::vector results_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h index 1c5654c7..94e0a3a8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h @@ -142,6 +142,11 @@ class Pipeline return name_to_output_map_; } + std::string findFilterConditions(const std::string & input, const std::string & output) + { + return params_->findFilterConditions(input, output); + } + private: void initInferenceCounter(); void increaseInferenceCounter(); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 974ac2cf..459762e8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -49,11 +49,11 @@ class PipelineManager { }; std::shared_ptr createPipeline( - const Params::ParamManager::PipelineParams& params); + const Params::ParamManager::PipelineRawData & params); void removePipeline(const std::string& name); PipelineManager& updatePipeline( const std::string& name, - const Params::ParamManager::PipelineParams& params); + const Params::ParamManager::PipelineRawData & params); void runAll(); void stopAll(); @@ -68,7 +68,7 @@ class PipelineManager { }; struct PipelineData { - Params::ParamManager::PipelineParams params; + Params::ParamManager::PipelineRawData 
params; std::shared_ptr pipeline; std::vector> spin_nodes; std::shared_ptr thread; @@ -90,25 +90,25 @@ class PipelineManager { void operator=(PipelineManager const&); void threadPipeline(const char* name); std::map> - parseInputDevice(const Params::ParamManager::PipelineParams& params); + parseInputDevice(const Params::ParamManager::PipelineRawData& params); std::map> parseOutput( - const Params::ParamManager::PipelineParams& params); + const Params::ParamManager::PipelineRawData& params); std::map> - parseInference(const Params::ParamManager::PipelineParams& params); + parseInference(const Params::ParamManager::PipelineRawData& params); std::shared_ptr createFaceDetection( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createAgeGenderRecognition( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createEmotionRecognition( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createHeadPoseEstimation( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createObjectDetection( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createObjectSegmentation( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createPersonReidentification( - const Params::ParamManager::InferenceParams& infer); + const Params::ParamManager::InferenceRawData& infer); std::map pipelines_; std::map plugins_for_devices_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 89a9ee1e..18df1019 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -67,20 +67,20 @@ class PipelineParams { public: explicit PipelineParams(const std::string& name); - explicit PipelineParams(const Params::ParamManager::PipelineParams& params); - static Params::ParamManager::PipelineParams getPipeline( - const std::string& name); - PipelineParams& operator=(const Params::ParamManager::PipelineParams& params); + explicit PipelineParams(const Params::ParamManager::PipelineRawData & params); + Params::ParamManager::PipelineRawData getPipeline(const std::string & name); + PipelineParams & operator=(const Params::ParamManager::PipelineRawData & params); void update(); - void update(const Params::ParamManager::PipelineParams& params); + void update(const Params::ParamManager::PipelineRawData & params); bool isOutputTo(std::string& name); bool isGetFps(); + std::string findFilterConditions(const std::string & input, const std::string & output); const std::string kInputType_Image = "Image"; const std::string kOutputTpye_RViz = "RViz"; private: - Params::ParamManager::PipelineParams params_; + Params::ParamManager::PipelineRawData params_; }; #endif // DYNAMIC_VINO_LIB_PIPELINE_PARAMS_H diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp index b087ab2a..9737a642 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp @@ -21,6 +21,7 @@ #include #include +#include #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/outputs/base_output.h" @@ -102,10 +103,25 @@ const std::string dynamic_vino_lib::AgeGenderDetection::getName() const } const void dynamic_vino_lib::AgeGenderDetection::observeOutput( - const std::shared_ptr& output) + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { 
output->accept(results_); } } + +const std::vector dynamic_vino_lib::AgeGenderDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Age gender detection does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/base_filter.cpp b/dynamic_vino_lib/src/inferences/base_filter.cpp new file mode 100644 index 00000000..1bb83542 --- /dev/null +++ b/dynamic_vino_lib/src/inferences/base_filter.cpp @@ -0,0 +1,214 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief An implementation file with implementation for BaseFilter Class + * @file base_filter.cpp + */ + +#include "dynamic_vino_lib/inferences/base_filter.h" +#include +#include +#include + +dynamic_vino_lib::BaseFilter::BaseFilter() {} + +void dynamic_vino_lib::BaseFilter::acceptFilterConditions( + const std::string & filter_conditions) +{ + if (striped_conditions_.empty()) { + striped_conditions_ = strip(filter_conditions); + std::vector infix_conditions = split(striped_conditions_); + infixToSuffix(infix_conditions); + } +} + +bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string & str) +{ + if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != + relation_operators_.end()) + { + return true; + } + return false; +} + +bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string & str) +{ + if (std::find(logic_operators_.begin(), logic_operators_.end(), str) != + logic_operators_.end()) + { + return true; + } + return false; +} + +bool dynamic_vino_lib::BaseFilter::isPriorTo( + const std::string & operator1, const std::string & operator2) +{ + if (isRelationOperator(operator1) && isLogicOperator(operator2)) { + return true; + } + return false; +} + +std::string dynamic_vino_lib::BaseFilter::boolToStr(bool value) +{ + if (value) {return "true";} + return "false"; +} + +bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) +{ + if (!value.compare("true")) {return true;} else if (!value.compare("false")) { + return false; + } else { + slog::err << "Invalid string: " << value << " for bool conversion!" 
<< slog::endl; + } + return false; +} + +const std::vector & +dynamic_vino_lib::BaseFilter::getSuffixConditions() const +{ + return suffix_conditons_; +} + +bool dynamic_vino_lib::BaseFilter::logicOperation( + const std::string & logic1, const std::string & op, const std::string & logic2) +{ + if (!op.compare("&&")) { + return strToBool(logic1) && strToBool(logic2); + } else if (!op.compare("||")) { + return strToBool(logic1) || strToBool(logic2); + } else { + slog::err << "Invalid operator: " << op << " for logic operation!" << slog::endl; + return false; + } +} + +bool dynamic_vino_lib::BaseFilter::stringCompare( + const std::string & candidate, const std::string & op, const std::string & target) +{ + if (!op.compare("==")) { + return !target.compare(candidate); + } else if (!op.compare("!=")) { + return target.compare(candidate); + } else { + slog::err << "Invalid operator " << op << " for label comparsion" << slog::endl; + return false; + } +} + +bool dynamic_vino_lib::BaseFilter::floatCompare( + float candidate, const std::string & op, float target) +{ + if (!op.compare("<=")) { + return candidate <= target; + } else if (!op.compare(">=")) { + return candidate >= target; + } else if (!op.compare("<")) { + return candidate < target; + } else if (!op.compare(">")) { + return candidate > target; + } else { + slog::err << "Invalid operator " << op << " for confidence comparsion" << slog::endl; + return false; + } +} + +float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) +{ + float result = 0; + try { + result = std::stof(candidate); + } catch (...) 
{ + slog::err << "Failed when converting string " << candidate << " to float" << slog::endl; + } + return result; +} + +std::vector dynamic_vino_lib::BaseFilter::split( + const std::string & filter_conditions) +{ + std::vector seperators; + seperators.insert(seperators.end(), relation_operators_.begin(), relation_operators_.end()); + seperators.insert(seperators.end(), logic_operators_.begin(), logic_operators_.end()); + seperators.push_back("("); + seperators.push_back(")"); + std::vector infix_conditions; + int last_pos = 0, pos = 0; + for (pos = 0; pos < filter_conditions.length(); pos++) { + for (auto sep : seperators) { + if (!sep.compare(filter_conditions.substr(pos, sep.length()))) { + std::string elem = filter_conditions.substr(last_pos, pos - last_pos); + if (!elem.empty()) { + infix_conditions.push_back(elem); + } + elem = filter_conditions.substr(pos, sep.length()); + infix_conditions.push_back(elem); + last_pos = pos + sep.length(); + pos = last_pos - 1; + break; + } + } + } + if (last_pos != pos) { + infix_conditions.push_back(filter_conditions.substr(last_pos, pos - last_pos)); + } + return infix_conditions; +} + +void dynamic_vino_lib::BaseFilter::infixToSuffix( + std::vector & infix_conditions) +{ + std::stack operator_stack; + for (auto elem : infix_conditions) { + if (!elem.compare("(")) { + operator_stack.push(elem); + } else if (!elem.compare(")")) { + while (!operator_stack.empty() && operator_stack.top().compare("(")) { + suffix_conditons_.push_back(operator_stack.top()); + operator_stack.pop(); + } + if (operator_stack.empty()) { + slog::err << "Brackets mismatch in filter_conditions!" 
<< slog::endl; + } + operator_stack.pop(); + } else if (isRelationOperator(elem) || isLogicOperator(elem)) { + while (!operator_stack.empty() && isPriorTo(operator_stack.top(), elem)) { + suffix_conditons_.push_back(operator_stack.top()); + operator_stack.pop(); + } + operator_stack.push(elem); + } else { + suffix_conditons_.push_back(elem); + } + } + while (!operator_stack.empty()) { + suffix_conditons_.push_back(operator_stack.top()); + operator_stack.pop(); + } +} + +std::string dynamic_vino_lib::BaseFilter::strip(const std::string & str) +{ + std::string stripped_string = ""; + for (auto character : str) { + if (character != ' ') { + stripped_string += character; + } + } + return stripped_string; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index ceb0fbea..802391f7 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -23,10 +23,6 @@ #include "dynamic_vino_lib/inferences/base_inference.h" -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) - : Result(location){} - // Result dynamic_vino_lib::Result::Result(const cv::Rect& location) { diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp index 9e10e3d3..ef377d22 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/dynamic_vino_lib/src/inferences/emotions_detection.cpp @@ -22,6 +22,7 @@ #include #include + #include #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/outputs/base_output.h" @@ -130,10 +131,25 @@ const std::string dynamic_vino_lib::EmotionsDetection::getName() const } const void dynamic_vino_lib::EmotionsDetection::observeOutput( - const std::shared_ptr& output) + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { 
output->accept(results_); } } + +const std::vector dynamic_vino_lib::EmotionsDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Emotion detection does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/dynamic_vino_lib/src/inferences/face_detection.cpp index e7aa3c36..f47b0197 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/dynamic_vino_lib/src/inferences/face_detection.cpp @@ -139,10 +139,25 @@ const std::string dynamic_vino_lib::FaceDetection::getName() const } const void dynamic_vino_lib::FaceDetection::observeOutput( - const std::shared_ptr& output) + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { output->accept(results_); } } + +const std::vector dynamic_vino_lib::FaceDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Face detection does not support filtering now! 
" << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp index 490936cf..920bf83c 100644 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp @@ -105,10 +105,25 @@ const std::string dynamic_vino_lib::HeadPoseDetection::getName() const } const void dynamic_vino_lib::HeadPoseDetection::observeOutput( - const std::shared_ptr& output) + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { output->accept(results_); } } + +const std::vector dynamic_vino_lib::HeadPoseDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Headpose detection does not support filtering now! 
" << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/dynamic_vino_lib/src/inferences/object_detection.cpp index f25259bc..2486f233 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection.cpp @@ -21,92 +21,68 @@ #include #include #include +#include +#include #include "dynamic_vino_lib/inferences/object_detection.h" #include "dynamic_vino_lib/outputs/base_output.h" #include "dynamic_vino_lib/slog.h" // ObjectDetectionResult dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) - : Result(location){} -// ObjectDetection -dynamic_vino_lib::ObjectDetection::ObjectDetection(bool enable_roi_constraint, - double show_output_thresh) - : dynamic_vino_lib::BaseInference(), - show_output_thresh_(show_output_thresh){} -dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; -void dynamic_vino_lib::ObjectDetection::loadNetwork( - const std::shared_ptr network) { - valid_model_ = network; - max_proposal_count_ = network->getMaxProposalCount(); - object_size_ = network->getObjectSize(); - setMaxBatchSize(network->getMaxBatchSize()); + const cv::Rect& location) : Result(location){} + +// ObjectDetectionResultFilter +dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} + +void dynamic_vino_lib::ObjectDetectionResultFilter::init() +{ + key_to_function_.insert(std::make_pair("label", isValidLabel)); + key_to_function_.insert(std::make_pair("confidence", isValidConfidence)); } -bool dynamic_vino_lib::ObjectDetection::enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) { - if (width_ == 0 && height_ == 0) { - width_ = frame.cols; - height_ = frame.rows; - } - if 
(!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { - return false; - } - Result r(input_frame_loc); - results_.clear(); - results_.emplace_back(r); - return true; + +void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults( + const std::vector & results) +{ + results_ = results; } -bool dynamic_vino_lib::ObjectDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); -} -bool dynamic_vino_lib::ObjectDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; - bool found_result = false; - results_.clear(); - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string output = valid_model_->getOutputName(); - const float* detections = request->GetBlob(output)->buffer().as(); - for (int i = 0; i < max_proposal_count_; i++) { - float image_id = detections[i * object_size_ + 0]; - cv::Rect r; - auto label_num = static_cast(detections[i * object_size_ + 1]); - std::vector& labels = valid_model_->getLabels(); - r.x = static_cast(detections[i * object_size_ + 3] * width_); - r.y = static_cast(detections[i * object_size_ + 4] * height_); - r.width = static_cast(detections[i * object_size_ + 5] * width_ - r.x); - r.height = - static_cast(detections[i * object_size_ + 6] * height_ - r.y); - Result result(r); - result.label_ = label_num < labels.size() - ? 
labels[label_num] - : std::string("label #") + std::to_string(label_num); - result.confidence_ = detections[i * object_size_ + 2]; - if (result.confidence_ <= show_output_thresh_) { - continue; - } - if (image_id < 0) { - break; + +std::vector +dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredResults() +{ + std::vector results; + for (auto result : results_) { + if (isValidResult(result)) { + results.push_back(result); } - found_result = true; - results_.emplace_back(result); } - if (!found_result) results_.clear(); - return true; + return results; } -const int dynamic_vino_lib::ObjectDetection::getResultsLength() const { - return static_cast(results_.size()); + +// std::vector +// dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() +// { +// std::vector locations; +// for (auto result : results_) { +// if (isValidResult(result)) { +// locations.push_back(result.getLocation()); +// } +// } +// return locations; +// } + +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel( + const Result & result, const std::string & op, const std::string & target) +{ + return stringCompare(result.getLabel(), op, target); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const { - return &(results_[idx]); -} -const std::string dynamic_vino_lib::ObjectDetection::getName() const { - return valid_model_->getModelName(); -} -const void dynamic_vino_lib::ObjectDetection::observeOutput( - const std::shared_ptr& output) { - if (output != nullptr) { - output->accept(results_); - } + +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidConfidence( + const Result & result, const std::string & op, const std::string & target) +{ + return floatCompare(result.getConfidence(), op, stringToFloat(target)); } + +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult( + const Result & result) +{ + ISVALIDRESULT(key_to_function_, result); +} \ No newline at end of file diff --git 
a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp index b558cf8b..cdded2de 100644 --- a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp @@ -32,8 +32,12 @@ dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( */ dynamic_vino_lib::ObjectDetectionSSD::ObjectDetectionSSD(double show_output_thresh) - : dynamic_vino_lib::ObjectDetection(), - show_output_thresh_(show_output_thresh){} + : dynamic_vino_lib::ObjectDetection(), + show_output_thresh_(show_output_thresh) +{ + result_filter_ = std::make_shared(); + result_filter_->init(); +} dynamic_vino_lib::ObjectDetectionSSD::~ObjectDetectionSSD() = default; @@ -166,8 +170,24 @@ const std::string dynamic_vino_lib::ObjectDetectionSSD::getName() const { } const void dynamic_vino_lib::ObjectDetectionSSD::observeOutput( - const std::shared_ptr& output) { + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { - output->accept(results_); + result_filter_->acceptResults(results_); + result_filter_->acceptFilterConditions(filter_conditions); + output->accept(result_filter_->getFilteredResults()); } } + +const std::vector dynamic_vino_lib::ObjectDetectionSSD::getFilteredROIs( + const std::string filter_conditions) const +{ + result_filter_->acceptResults(results_); + result_filter_->acceptFilterConditions(filter_conditions); + std::vector results = result_filter_->getFilteredResults(); + std::vector locations; + for (auto result : results) { + locations.push_back(result.getLocation()); + } + return locations; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp index b683248d..3d9c1810 100644 --- a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp +++ 
b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp @@ -34,7 +34,10 @@ dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( // ObjectDetection dynamic_vino_lib::ObjectDetectionYolov2voc::ObjectDetectionYolov2voc(double show_output_thresh) : dynamic_vino_lib::ObjectDetection(), - show_output_thresh_(show_output_thresh){} + show_output_thresh_(show_output_thresh) { + result_filter_ = std::make_shared(); + result_filter_->init(); +} dynamic_vino_lib::ObjectDetectionYolov2voc::~ObjectDetectionYolov2voc() = default; @@ -319,8 +322,24 @@ const std::string dynamic_vino_lib::ObjectDetectionYolov2voc::getName() const { } const void dynamic_vino_lib::ObjectDetectionYolov2voc::observeOutput( - const std::shared_ptr& output) { + const std::shared_ptr& output, + const std::string filter_conditions) { if (output != nullptr) { - output->accept(results_); + result_filter_->acceptResults(results_); + result_filter_->acceptFilterConditions(filter_conditions); + output->accept(result_filter_->getFilteredResults()); } } + +const std::vector dynamic_vino_lib::ObjectDetectionYolov2voc::getFilteredROIs( + const std::string filter_conditions) const +{ + result_filter_->acceptResults(results_); + result_filter_->acceptFilterConditions(filter_conditions); + std::vector results = result_filter_->getFilteredResults(); + std::vector locations; + for (auto result : results) { + locations.push_back(result.getLocation()); + } + return locations; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index 69b7fd23..16146e53 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -146,9 +146,24 @@ const std::string dynamic_vino_lib::ObjectSegmentation::getName() const } const void dynamic_vino_lib::ObjectSegmentation::observeOutput( - const std::shared_ptr & output) + const std::shared_ptr & 
output, + const std::string filter_conditions) { if (output != nullptr) { output->accept(results_); } } + +const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Object segmentation does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/dynamic_vino_lib/src/inferences/person_reidentification.cpp index 005a128d..a31202e1 100644 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/person_reidentification.cpp @@ -145,9 +145,25 @@ const std::string dynamic_vino_lib::PersonReidentification::getName() const } const void dynamic_vino_lib::PersonReidentification::observeOutput( - const std::shared_ptr & output) + const std::shared_ptr & output, + const std::string filter_conditions) { if (output != nullptr) { output->accept(results_); } } + + +const std::vector dynamic_vino_lib::PersonReidentification::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Person reidentification does not support filtering now! 
" << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index b21025c8..2ed1b864 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -25,8 +25,8 @@ bool Input::StandardCamera::initialize() { static int camera_count_ = 0; setInitStatus(cap.open(camera_count_)); - setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); - setHeight((size_t)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); + setWidth((size_t)cap.get(cv::CAP_PROP_FRAME_WIDTH)); + setHeight((size_t)cap.get(cv::CAP_PROP_FRAME_HEIGHT)); camera_count_ ++; return isInit(); } @@ -35,8 +35,8 @@ bool Input::StandardCamera::initialize(int camera_num) { static int camera_count_ = 0; setInitStatus(cap.open(camera_num)); - setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); - setHeight((size_t)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); + setWidth((size_t)cap.get(cv::CAP_PROP_FRAME_WIDTH)); + setHeight((size_t)cap.get(cv::CAP_PROP_FRAME_HEIGHT)); camera_count_ ++; return isInit(); @@ -50,8 +50,8 @@ bool Input::StandardCamera::initialize(size_t width, size_t height) setInitStatus(cap.open(camera_count_)); if (isInit()) { - cap.set(CV_CAP_PROP_FRAME_WIDTH, width); - cap.set(CV_CAP_PROP_FRAME_HEIGHT, height); + cap.set(cv::CAP_PROP_FRAME_WIDTH, width); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); } camera_count_ ++; diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/dynamic_vino_lib/src/inputs/video_input.cpp index d493731b..b0f6eac3 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/dynamic_vino_lib/src/inputs/video_input.cpp @@ -32,8 +32,8 @@ Input::Video::Video(const std::string& video) bool Input::Video::initialize() { setInitStatus(cap.open(video_)); - 
setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); - setHeight((size_t)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); + setWidth((size_t)cap.get(cv::CAP_PROP_FRAME_WIDTH)); + setHeight((size_t)cap.get(cv::CAP_PROP_FRAME_HEIGHT)); return isInit(); } @@ -44,8 +44,8 @@ bool Input::Video::initialize(size_t width, size_t height) setInitStatus(cap.open(video_)); if (isInit()) { - cap.set(CV_CAP_PROP_FRAME_WIDTH, width); - cap.set(CV_CAP_PROP_FRAME_HEIGHT, height); + cap.set(cv::CAP_PROP_FRAME_WIDTH, width); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); } return isInit(); } diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 833ca4a2..b57ada09 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "dynamic_vino_lib/pipeline.h" @@ -246,41 +247,36 @@ void Pipeline::setCallback() } } -void Pipeline::callback(const std::string& detection_name) +void Pipeline::callback(const std::string & detection_name) { + // slog::info<<"Hello callback ----> " << detection_name <fetchResults(); - // set output - for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; - ++pos.first) - { + for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; ++pos.first) { std::string next_name = pos.first->second; + + std::string filter_conditions = findFilterConditions(detection_name, next_name); // if next is output, then print - if (output_names_.find(next_name) != output_names_.end()) - { - // name_to_output_map_[next_name]->accept(*detection_ptr->getResult()); - detection_ptr->observeOutput(name_to_output_map_[next_name]); - } - else - { + if (output_names_.find(next_name) != output_names_.end()) { + detection_ptr->observeOutput(name_to_output_map_[next_name], filter_conditions); + } else { auto detection_ptr_iter = name_to_detection_map_.find(next_name); - if (detection_ptr_iter != name_to_detection_map_.end()) - { + if 
(detection_ptr_iter != name_to_detection_map_.end()) { auto next_detection_ptr = detection_ptr_iter->second; - for (int i = 0; i < detection_ptr->getResultsLength(); ++i) - { - const dynamic_vino_lib::Result* prev_result = - detection_ptr->getLocationResult(i); - auto clippedRect = - prev_result->getLocation() & cv::Rect(0, 0, width_, height_); + size_t batch_size = next_detection_ptr->getMaxBatchSize(); + std::vector next_rois = detection_ptr->getFilteredROIs(filter_conditions); + for (size_t i = 0; i < next_rois.size(); i++) { + auto roi = next_rois[i]; + auto clippedRect = roi & cv::Rect(0, 0, width_, height_); cv::Mat next_input = frame_(clippedRect); - next_detection_ptr->enqueue(next_input, prev_result->getLocation()); - } - if (detection_ptr->getResultsLength() > 0) - { - increaseInferenceCounter(); - next_detection_ptr->submitRequest(); + next_detection_ptr->enqueue(next_input, roi); + if ((i + 1) == next_rois.size() || (i + 1) % batch_size == 0) { + increaseInferenceCounter(); + next_detection_ptr->submitRequest(); + auto request = next_detection_ptr->getEngine()->getRequest(); + request->Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + } } } } diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 08c16a72..28efcb5a 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -49,7 +49,7 @@ #include "dynamic_vino_lib/pipeline_params.h" #include "dynamic_vino_lib/services/pipeline_processing_server.h" std::shared_ptr PipelineManager::createPipeline( - const Params::ParamManager::PipelineParams& params) { + const Params::ParamManager::PipelineRawData& params) { if (params.name == "") { throw std::logic_error("The name of pipeline won't be empty!"); } @@ -100,7 +100,7 @@ std::shared_ptr PipelineManager::createPipeline( std::map> PipelineManager::parseInputDevice( - const Params::ParamManager::PipelineParams& params) { + const 
Params::ParamManager::PipelineRawData& params) { std::map> inputs; for (auto& name : params.inputs) { slog::info << "Parsing InputDvice: " << name << slog::endl; @@ -134,7 +134,7 @@ PipelineManager::parseInputDevice( std::map> PipelineManager::parseOutput( - const Params::ParamManager::PipelineParams& params) { + const Params::ParamManager::PipelineRawData& params) { std::map> outputs; for (auto& name : params.outputs) { slog::info << "Parsing Output: " << name << slog::endl; @@ -159,7 +159,7 @@ PipelineManager::parseOutput( std::map> PipelineManager::parseInference( - const Params::ParamManager::PipelineParams& params) { + const Params::ParamManager::PipelineRawData& params) { /**< update plugins for devices >**/ auto pcommon = Params::ParamManager::getInstance().getCommon(); std::string FLAGS_l = pcommon.custom_cpu_library; @@ -219,7 +219,7 @@ PipelineManager::parseInference( std::shared_ptr PipelineManager::createFaceDetection( - const Params::ParamManager::InferenceParams& infer) { + const Params::ParamManager::InferenceRawData& infer) { // TODO: add batch size in param_manager auto face_detection_model = std::make_shared(infer.model, 1, 1, 1); @@ -236,7 +236,7 @@ PipelineManager::createFaceDetection( std::shared_ptr PipelineManager::createAgeGenderRecognition( - const Params::ParamManager::InferenceParams& param) { + const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, 1, 2, 16); model->modelInit(); @@ -251,7 +251,7 @@ PipelineManager::createAgeGenderRecognition( std::shared_ptr PipelineManager::createEmotionRecognition( - const Params::ParamManager::InferenceParams& param) { + const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, 1, 1, 16); model->modelInit(); @@ -266,7 +266,7 @@ PipelineManager::createEmotionRecognition( std::shared_ptr PipelineManager::createHeadPoseEstimation( - const Params::ParamManager::InferenceParams& param) { + const 
Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, 1, 3, 16); model->modelInit(); @@ -281,7 +281,7 @@ PipelineManager::createHeadPoseEstimation( std::shared_ptr PipelineManager::createObjectDetection( -const Params::ParamManager::InferenceParams & infer) +const Params::ParamManager::InferenceRawData & infer) { std::shared_ptr object_detection_model; std::shared_ptr object_inference_ptr; @@ -314,7 +314,7 @@ const Params::ParamManager::InferenceParams & infer) } std::shared_ptr -PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceParams & infer) +PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) { auto obejct_segmentation_model = std::make_shared(infer.model, 1, 2, 1); @@ -330,7 +330,7 @@ PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceP std::shared_ptr PipelineManager::createPersonReidentification( - const Params::ParamManager::InferenceParams & infer) + const Params::ParamManager::InferenceRawData & infer) { auto person_reidentification_model = std::make_shared(infer.model, 1, 1, infer.batch); diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/dynamic_vino_lib/src/pipeline_params.cpp index 947688a5..49f7a30c 100644 --- a/dynamic_vino_lib/src/pipeline_params.cpp +++ b/dynamic_vino_lib/src/pipeline_params.cpp @@ -29,13 +29,13 @@ PipelineParams::PipelineParams(const std::string& name) { params_.name = name; } PipelineParams::PipelineParams( - const Params::ParamManager::PipelineParams& params) + const Params::ParamManager::PipelineRawData& params) { params_ = params; } PipelineParams& PipelineParams::operator=( - const Params::ParamManager::PipelineParams& params) + const Params::ParamManager::PipelineRawData& params) { params_.name = params.name; params_.infers = params.infers; @@ -46,7 +46,7 @@ PipelineParams& PipelineParams::operator=( return *this; } -Params::ParamManager::PipelineParams PipelineParams::getPipeline( 
+Params::ParamManager::PipelineRawData PipelineParams::getPipeline( const std::string& name) { return Params::ParamManager::getInstance().getPipeline(name); @@ -61,7 +61,7 @@ void PipelineParams::update() } void PipelineParams::update( - const Params::ParamManager::PipelineParams& params) { + const Params::ParamManager::PipelineRawData& params) { params_ = params; } @@ -82,3 +82,14 @@ bool PipelineParams::isGetFps() return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == params_.inputs.end(); } + +std::string PipelineParams::findFilterConditions( + const std::string & input, const std::string & output) +{ + for (auto filter : params_.filters) { + if (!input.compare(filter.input) && !output.compare(filter.output)) { + return filter.filter_conditions; + } + } + return ""; +} \ No newline at end of file diff --git a/vino_launch/param/pipeline_object_oss.yaml b/vino_launch/param/pipeline_object_oss.yaml index a83b710f..1a4c9a4a 100644 --- a/vino_launch/param/pipeline_object_oss.yaml +++ b/vino_launch/param/pipeline_object_oss.yaml @@ -16,7 +16,7 @@ Pipelines: - left: RealSenseCamera right: [ObjectDetection] - left: ObjectDetection - right: [ImageWindow] + right: [{ImageWindow: label == Person && confidence >= 0.8}] - left: ObjectDetection right: [RosTopic] # - left: ObjectDetection diff --git a/vino_param_lib/include/vino_param_lib/param_manager.h b/vino_param_lib/include/vino_param_lib/param_manager.h index 76c0b89f..8144a21c 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.h +++ b/vino_param_lib/include/vino_param_lib/param_manager.h @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief A header file with declaration for parameter management * @file param_manager.hpp @@ -61,7 +60,7 @@ class ParamManager // singleton */ void print() const; - struct InferenceParams + struct InferenceRawData { std::string name; std::string engine; @@ -72,16 +71,25 @@ class ParamManager // singleton float confidence_threshold = 0.5; bool enable_roi_constraint = false; }; - struct PipelineParams + + struct FilterRawData { + std::string input; + std::string output; + std::string filter_conditions; + }; + + struct PipelineRawData { std::string name; - std::vector infers; + std::vector infers; std::vector inputs; std::vector outputs; std::multimap connects; std::string input_meta; + std::vector filters; }; - struct CommonParams + + struct CommonRawData { std::string custom_cpu_library; std::string custom_cldnn_library; @@ -105,9 +113,9 @@ class ParamManager // singleton /** * @brief Retrieve pipeline parameters. 
- * @return A list of struct PipelineParams storing all pipeline parameters. + * @return A list of struct PipelineRawData storing all pipeline parameters. */ - std::vector getPipelines() const + std::vector getPipelines() const { return pipelines_; } @@ -118,13 +126,13 @@ class ParamManager // singleton * @param[in] name: the name of the pipeline to be retrieved. * @return The pipeline paratmeters, or throw a loginc error. */ - PipelineParams getPipeline(const std::string& name) const; + PipelineRawData getPipeline(const std::string& name) const; /** * @brief Retrieve common parameters. - * @return struct CommonParams storing all common parameters. + * @return struct CommonRawData storing all common parameters. */ - CommonParams getCommon() const + CommonRawData getCommon() const { return common_; } @@ -136,9 +144,9 @@ class ParamManager // singleton ParamManager(ParamManager const&); void operator=(ParamManager const&); - std::vector pipelines_; - CommonParams common_; + std::vector pipelines_; + CommonRawData common_; }; } // namespace Params -#endif // VINO_PARAM_LIB_PARAM_MANAGER_H +#endif // VINO_PARAM_LIB_PARAM_MANAGER_H \ No newline at end of file diff --git a/vino_param_lib/src/param_manager.cpp b/vino_param_lib/src/param_manager.cpp index e4c39bf4..e2d4195c 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/vino_param_lib/src/param_manager.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "vino_param_lib/param_manager.h" #include @@ -26,18 +24,17 @@ namespace Params { -void operator>>(const YAML::Node& node, ParamManager::PipelineParams& pipeline); -void operator>>(const YAML::Node& node, - std::vector& list); -void operator>>(const YAML::Node& node, ParamManager::InferenceParams& infer); +void operator>>(const YAML::Node & node, ParamManager::PipelineRawData & pipeline); +void operator>>(const YAML::Node & node, std::vector & list); +void operator>>(const YAML::Node & node, ParamManager::InferenceRawData & infer); void operator>>(const YAML::Node& node, std::vector& list); -void operator>>(const YAML::Node& node, - std::multimap& connect); +void operator>>(const YAML::Node& node, std::multimap& connect); +void operator>>(const YAML::Node & node, std::vector & filters); void operator>>(const YAML::Node& node, std::string& str); void operator>>(const YAML::Node& node, bool& val); void operator>>(const YAML::Node& node, int& val); void operator>>(const YAML::Node& node, float& val); -void operator>>(const YAML::Node& node, ParamManager::CommonParams& common); +void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common); #define YAML_PARSE(node, key, val) \ try \ { \ @@ -53,18 +50,18 @@ void operator>>(const YAML::Node& 
node, ParamManager::CommonParams& common); } void operator>>(const YAML::Node& node, - std::vector& list) + std::vector& list) { slog::info << "Pipeline size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { - ParamManager::PipelineParams temp; + ParamManager::PipelineRawData temp; node[i] >> temp; list.push_back(temp); } } -void operator>>(const YAML::Node& node, ParamManager::CommonParams& common) +void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common) { YAML_PARSE(node, "camera_topic", common.camera_topic) YAML_PARSE(node, "custom_cpu_library", common.custom_cpu_library) @@ -73,30 +70,31 @@ void operator>>(const YAML::Node& node, ParamManager::CommonParams& common) } void operator>>(const YAML::Node& node, - ParamManager::PipelineParams& pipeline) + ParamManager::PipelineRawData& pipeline) { YAML_PARSE(node, "name", pipeline.name) YAML_PARSE(node, "inputs", pipeline.inputs) YAML_PARSE(node, "infers", pipeline.infers) YAML_PARSE(node, "outputs", pipeline.outputs) YAML_PARSE(node, "connects", pipeline.connects) + YAML_PARSE(node, "connects", pipeline.filters) YAML_PARSE(node, "input_path", pipeline.input_meta) slog::info << "Pipeline Params:name=" << pipeline.name << slog::endl; } void operator>>(const YAML::Node& node, - std::vector& list) + std::vector& list) { slog::info << "Inferences size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { - ParamManager::InferenceParams temp_inf; + ParamManager::InferenceRawData temp_inf; node[i] >> temp_inf; list.push_back(temp_inf); } } -void operator>>(const YAML::Node& node, ParamManager::InferenceParams& infer) +void operator>>(const YAML::Node& node, ParamManager::InferenceRawData& infer) { YAML_PARSE(node, "name", infer.name) YAML_PARSE(node, "model", infer.model) @@ -126,11 +124,34 @@ void operator>>(const YAML::Node& node, { std::string left; node[i]["left"] >> left; - std::vector rights; - node[i]["right"] >> rights; - for (auto& r : 
rights) - { - connect.insert({left, r}); + YAML::Node rights = node[i]["right"]; + for (unsigned i = 0; i < rights.size(); i++) { + std::string right; + if (rights[i].Type() == YAML::NodeType::Map) { + rights[i].begin()->first >> right; + } + else { + rights[i] >> right; + } + connect.insert({left, right}); + } + } +} + +void operator>>(const YAML::Node & node, std::vector & filters) +{ + for (unsigned i = 0; i < node.size(); i++) { + std::string left; + node[i]["left"] >> left; + YAML::Node rights = node[i]["right"]; + for (unsigned i = 0; i < rights.size(); i++) { + if (rights[i].Type() == YAML::NodeType::Map) { + ParamManager::FilterRawData filter; + filter.input = left; + rights[i].begin()->first >> filter.output; + rights[i].begin()->second >> filter.filter_conditions; + filters.push_back(filter); + } } } } @@ -230,7 +251,7 @@ std::vector ParamManager::getPipelineNames() const return names; } -ParamManager::PipelineParams ParamManager::getPipeline( +ParamManager::PipelineRawData ParamManager::getPipeline( const std::string& name) const { for (auto& p : pipelines_) @@ -242,4 +263,4 @@ ParamManager::PipelineParams ParamManager::getPipeline( } throw std::logic_error("No parameters found for pipeline [" + name + "]"); } -} // namespace Params +} // namespace Params \ No newline at end of file From bfe86abaebbad9d9541a0e830d54f82a051fa25f Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 23 Jul 2019 09:58:02 +0800 Subject: [PATCH 05/63] support face reidentification model --- dynamic_vino_lib/CMakeLists.txt | 3 + .../inferences/base_reidentification.h | 100 ++++++++++ .../inferences/face_reidentification.h | 114 +++++++++++ .../models/face_reidentification_model.h | 48 +++++ .../dynamic_vino_lib/outputs/base_output.h | 7 + .../outputs/image_window_output.h | 9 + .../outputs/ros_service_output.h | 2 +- .../outputs/ros_topic_output.h | 8 + .../dynamic_vino_lib/outputs/rviz_output.h | 6 + .../dynamic_vino_lib/pipeline_manager.h | 2 + 
.../dynamic_vino_lib/pipeline_params.h | 2 +- .../src/inferences/base_reidentification.cpp | 181 ++++++++++++++++++ .../src/inferences/face_reidentification.cpp | 125 ++++++++++++ .../models/face_reidentification_model.cpp | 53 +++++ .../src/outputs/image_window_output.cpp | 11 ++ .../src/outputs/ros_topic_output.cpp | 28 +++ dynamic_vino_lib/src/outputs/rviz_output.cpp | 5 + dynamic_vino_lib/src/pipeline_manager.cpp | 22 +++ .../pipeline_face_reidentification.launch | 13 ++ .../param/pipeline_face_reidentification.yaml | 36 ++++ 20 files changed, 773 insertions(+), 2 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h create mode 100644 dynamic_vino_lib/src/inferences/base_reidentification.cpp create mode 100644 dynamic_vino_lib/src/inferences/face_reidentification.cpp create mode 100644 dynamic_vino_lib/src/models/face_reidentification_model.cpp create mode 100644 vino_launch/launch/pipeline_face_reidentification.launch create mode 100644 vino_launch/param/pipeline_face_reidentification.yaml diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 3a862958..dcbc0260 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -152,6 +152,8 @@ add_library(${PROJECT_NAME} SHARED src/inferences/object_detection_yolov2_voc.cpp src/inferences/object_segmentation.cpp src/inferences/person_reidentification.cpp + src/inferences/face_reidentification.cpp + src/inferences/base_reidentification.cpp src/inputs/realsense_camera.cpp src/inputs/realsense_camera_topic.cpp src/inputs/standard_camera.cpp @@ -166,6 +168,7 @@ add_library(${PROJECT_NAME} SHARED src/models/object_detection_yolov2voc_model.cpp src/models/object_segmentation_model.cpp 
src/models/person_reidentification_model.cpp + src/models/face_reidentification_model.cpp src/outputs/image_window_output.cpp src/outputs/ros_topic_output.cpp src/outputs/rviz_output.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h new file mode 100644 index 00000000..17fd90f0 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h @@ -0,0 +1,100 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for BaseReidentification Class + * @file base_reidentification.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#include +#include +#include +#include +#include +#include + +// namespace +namespace dynamic_vino_lib +{ +/** + * @class Tracker + * @brief Class for storing and tracking the detected objects. + */ +class Tracker +{ +public: + explicit Tracker(int, double, double); + /** + * @brief Process the new detected track. + * @param[in] feature The new detected track feature. + * @return The detected track ID. + */ + int processNewTrack(const std::vector & feature); + +private: + /** + * @brief Find the most similar track from the recorded tracks. + * @param[in] feature The input track's feature. 
+ * @param[in] most similar track's ID to be recorded. + * @return similarity with the most similar track. + */ + double findMostSimilarTrack(const std::vector & feature, int & most_similar_id); + /** + * @brief Update the matched track's feature by the new track. + * @param[in] track_id The matched track ID. + * @param[in] feature The matched track's feature + */ + void updateMatchTrack(int track_id, const std::vector & feature); + /** + * @brief Remove the earlest track from the recorded tracks. + */ + void removeEarlestTrack(); + /** + * @brief Add a new track to the recorded tracks, remove oldest track if needed. + * @param[in] feature A track's feature. + * @return new added track's ID. + */ + int addNewTrack(const std::vector & feature); + /** + * @brief Calculate the cosine similarity between two features. + * @return The simlarity result. + */ + double calcSimilarity( + const std::vector & feature_a, const std::vector & feature_b); + /** + * @brief get the current millisecond count since epoch. + * @return millisecond count since epoch. 
+ */ + int64_t getCurrentTime(); + + bool saveTracksToFile(std::string filepath); + bool loadTracksFromFile(std::string filepath); + + struct Track + { + int64_t lastest_update_time; + std::vector feature; + }; + + int max_record_size_ = 1000; + int max_track_id_ = -1; + double same_track_thresh_ = 0.9; + double new_track_thresh_ = 0.3; + std::mutex tracks_mtx_; + std::unordered_map recorded_tracks_; +}; + +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h new file mode 100644 index 00000000..2c68a97e --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h @@ -0,0 +1,114 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for FaceReidentification Class + * @file face_reidentification.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_H_ +#define DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_H_ +#include +#include +#include +#include "dynamic_vino_lib/models/face_reidentification_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/base_reidentification.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib +{ +/** + * @class FaceReidentificationResult + * @brief Class for storing and processing face reidentification result. + */ +class FaceReidentificationResult : public Result +{ +public: + friend class FaceReidentification; + explicit FaceReidentificationResult(const cv::Rect & location); + std::string getFaceID() const {return face_id_;} + +private: + std::string face_id_ = "No.#"; +}; + +/** + * @class FaceReidentification + * @brief Class to load face reidentification model and perform face reidentification. + */ +class FaceReidentification : public BaseInference +{ +public: + using Result = dynamic_vino_lib::FaceReidentificationResult; + explicit FaceReidentification(double); + ~FaceReidentification() override; + /** + * @brief Load the face reidentification model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. 
+ */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed reidentification result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + std::shared_ptr face_tracker_; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h new file mode 100644 index 00000000..a2efc962 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for FaceReidentificationModel Class + * @file person_reidentification_model.h + */ +#ifndef DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_H_ +#define DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_H_ +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models +{ +/** + * @class FaceReidentificationModel + * @brief This class generates the face reidentification model. + */ +class FaceReidentificationModel : public BaseModel +{ +public: + FaceReidentificationModel(const std::string &, int, int, int); + inline const std::string getInputName() {return input_;} + inline const std::string getOutputName() {return output_;} + /** + * @brief Get the name of this detection model. + * @return Name of the model. 
+ */ + const std::string getModelName() const override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + std::string input_; + std::string output_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index c65d7bc6..846924ed 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -34,6 +34,7 @@ #include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/inferences/person_reidentification.h" +#include "dynamic_vino_lib/inferences/face_reidentification.h" #include "dynamic_vino_lib/services/frame_processing_server.h" #include "opencv2/opencv.hpp" @@ -53,6 +54,12 @@ class BaseOutput BaseOutput() { }; + /** + * @brief Generate output content according to the face reidentification result. + */ + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the face detection result. */ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index 2bdca7c2..564cbf9c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -52,11 +52,20 @@ class ImageWindowOutput : public BaseOutput * functions with image window. */ void handleOutput() override; + /** + * @brief Generate image window output content according to + * the landmarks detetection result. + * the face reidentification result. + * @param[in] A face reidentification result objetc. 
+ */ + void accept( + const std::vector &) override; /** * @brief Generate image window output content according to * the face detection result. * @param[in] A face detection result objetc. */ + void accept( const std::vector&) override; /** diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index 7195e6c4..e1cb027a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -78,7 +78,7 @@ class RosServiceOutput : public RosTopicOutput void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); - + private: const std::string service_name_; const std::string pipeline_name_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index a9eeb215..7b348b79 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -66,6 +66,12 @@ class RosTopicOutput : public BaseOutput * functions with ros topic. */ void handleOutput() override; + /** + * @brief Generate ros topic infomation according to + * the face reidentification result. + * @param[in] results a bundle of face reidentification results. + */ + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the person reidentification result. 
@@ -132,6 +138,8 @@ class RosTopicOutput : public BaseOutput std::shared_ptr person_reid_msg_ptr_; ros::Publisher pub_segmented_object_; std::shared_ptr segmented_object_msg_ptr_; + ros::Publisher pub_face_reid_; + std::shared_ptr face_reid_msg_ptr_; }; } // namespace Outputs diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h index f301a6f3..b8bea819 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h @@ -49,6 +49,12 @@ class RvizOutput : public BaseOutput * functions with rviz. */ void handleOutput() override; + /** + * @brief Generate rviz output content according to + * the face reidentification result. + * @param[in] A face reidentification result objetc. + */ + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the face detection result. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 459762e8..3e9cd9f2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -109,6 +109,8 @@ class PipelineManager { const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createPersonReidentification( const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr createFaceReidentification( + const Params::ParamManager::InferenceRawData & infer); std::map pipelines_; std::map plugins_for_devices_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 18df1019..cfcf2b1d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -57,7 +57,7 @@ const char 
kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; const char kInferTpye_PersonReidentification[] = "PersonReidentification"; const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; const char kInferTpye_ObjectDetectionTypeYolov2voc[] = "yolov2-voc"; - +const char kInferTpye_FaceReidentification[] = "FaceReidentification"; /** * @class PipelineParams * @brief This class is a pipeline parameter management that stores parameters diff --git a/dynamic_vino_lib/src/inferences/base_reidentification.cpp b/dynamic_vino_lib/src/inferences/base_reidentification.cpp new file mode 100644 index 00000000..36ded051 --- /dev/null +++ b/dynamic_vino_lib/src/inferences/base_reidentification.cpp @@ -0,0 +1,181 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of BaseReidentification class + * @file base_reidentification.cpp + */ +#include +#include +#include +#include +#include +#include +#include +#include "dynamic_vino_lib/inferences/base_reidentification.h" +#include "dynamic_vino_lib/slog.h" + +// Tracker +dynamic_vino_lib::Tracker::Tracker( + int max_record_size, double same_track_thresh, double new_track_thresh) +: max_record_size_(max_record_size), + same_track_thresh_(same_track_thresh), + new_track_thresh_(new_track_thresh) {} + +int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & feature) +{ + int most_similar_id; + double similarity = findMostSimilarTrack(feature, most_similar_id); + if (similarity > same_track_thresh_) { + updateMatchTrack(most_similar_id, feature); + } else if (similarity < new_track_thresh_) { + most_similar_id = addNewTrack(feature); + } + return most_similar_id; +} + +double dynamic_vino_lib::Tracker::findMostSimilarTrack( + const std::vector & feature, int & most_similar_id) +{ + double max_similarity = 0; + most_similar_id = -1; + for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) { + double sim = calcSimilarity(feature, iter->second.feature); + if (sim > max_similarity) { + max_similarity = sim; + most_similar_id = iter->first; + } + } + return max_similarity; +} + +double dynamic_vino_lib::Tracker::calcSimilarity( + const std::vector & feature_a, const std::vector & feature_b) +{ + if (feature_a.size() != feature_b.size()) { + slog::err << "cosine similarity can't be called for vectors of different lengths: " << + "feature_a size = " << std::to_string(feature_a.size()) << + "feature_b size = " << std::to_string(feature_b.size()) << slog::endl; + } + float mul_sum, denom_a, denom_b, value_a, value_b; + mul_sum = denom_a = denom_b = value_a = value_b = 0; + for (auto i = 0; i < feature_a.size(); i++) { + value_a = feature_a[i]; + value_b = feature_b[i]; + mul_sum += value_a * value_b; + 
denom_a += value_a * value_a; + denom_b += value_b * value_b; + } + if (denom_a == 0 || denom_b == 0) { + slog::err << "cosine similarity is not defined whenever one or both " + "input vectors are zero-vectors." << slog::endl; + } + return mul_sum / (sqrt(denom_a) * sqrt(denom_b)); +} + +void dynamic_vino_lib::Tracker::updateMatchTrack( + int track_id, const std::vector & feature) +{ + if (recorded_tracks_.find(track_id) != recorded_tracks_.end()) { + recorded_tracks_[track_id].feature.assign(feature.begin(), feature.end()); + recorded_tracks_[track_id].lastest_update_time = getCurrentTime(); + } else { + slog::err << "updating a non-existing track." << slog::endl; + } +} + +void dynamic_vino_lib::Tracker::removeEarlestTrack() +{ + std::lock_guard lk(tracks_mtx_); + int64_t earlest_time = LONG_MAX; + auto remove_iter = recorded_tracks_.begin(); + for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) { + if (iter->second.lastest_update_time < earlest_time) { + earlest_time = iter->second.lastest_update_time; + remove_iter = iter; + } + } + recorded_tracks_.erase(remove_iter); +} + + +int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) +{ + if (recorded_tracks_.size() >= max_record_size_) { + std::thread remove_thread(std::bind(&Tracker::removeEarlestTrack, this)); + remove_thread.detach(); + } + std::lock_guard lk(tracks_mtx_); + Track track; + track.lastest_update_time = getCurrentTime(); + track.feature.assign(feature.begin(), feature.end()); + max_track_id_ += 1; + recorded_tracks_.insert(std::pair(max_track_id_, track)); + return max_track_id_; +} + +int64_t dynamic_vino_lib::Tracker::getCurrentTime() +{ + auto tp = std::chrono::time_point_cast( + std::chrono::system_clock::now()); + return static_cast(tp.time_since_epoch().count()); +} + +bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) +{ + std::ofstream outfile(filepath); + if (!outfile.is_open()) { + slog::err << "file not exists in file 
path: " << filepath << slog::endl; + return false; + } + for (auto record : recorded_tracks_) { + outfile << record.first << " " << + record.second.lastest_update_time << " "; + for (auto elem : record.second.feature) { + outfile << elem << " "; + } + outfile << "\n"; + } + outfile.close(); + slog::info << "sucessfully save tracks into file: " << filepath << slog::endl; + return true; +} + +bool dynamic_vino_lib::Tracker::loadTracksFromFile(std::string filepath) +{ + std::ifstream infile(filepath); + if (!infile.is_open()) { + slog::err << "file not exists in file path: " << filepath << slog::endl; + return false; + } + recorded_tracks_.clear(); + while (!infile.eof()) { + int track_id; + int64_t lastest_update_time; + std::vector feature; + infile >> track_id >> lastest_update_time; + for (int num = 0; num < 256; num++) { + float elem; + infile >> elem; + feature.push_back(elem); + } + Track track; + track.lastest_update_time = lastest_update_time; + track.feature = feature; + recorded_tracks_[track_id] = track; + } + infile.close(); + slog::info << "sucessfully load tracks from file: " << filepath << slog::endl; + return true; +} diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/dynamic_vino_lib/src/inferences/face_reidentification.cpp new file mode 100644 index 00000000..bb942cba --- /dev/null +++ b/dynamic_vino_lib/src/inferences/face_reidentification.cpp @@ -0,0 +1,125 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of FaceReidentification class and + * FaceReidentificationResult class + * @file face_reidentification.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/face_reidentification.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" + +// FaceReidentificationResult +dynamic_vino_lib::FaceReidentificationResult::FaceReidentificationResult( + const cv::Rect & location) +: Result(location) {} + +// FaceReidentification +dynamic_vino_lib::FaceReidentification::FaceReidentification(double match_thresh) +: dynamic_vino_lib::BaseInference() +{ + face_tracker_ = std::make_shared(1000, match_thresh, 0.3); +} + +dynamic_vino_lib::FaceReidentification::~FaceReidentification() = default; +void dynamic_vino_lib::FaceReidentification::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::FaceReidentification::enqueue( + const cv::Mat & frame, const cv::Rect & input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!dynamic_vino_lib::BaseInference::enqueue( + frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::FaceReidentification::submitRequest() +{ + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::FaceReidentification::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float * output_values = request->GetBlob(output)->buffer().as(); + int 
result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; + for (int i = 0; i < getResultsLength(); i++) { + std::vector new_face = std::vector( + output_values + result_length * i, output_values + result_length * (i + 1)); + std::string face_id = "No." + std::to_string(face_tracker_->processNewTrack(new_face)); + results_[i].face_id_ = face_id; + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} + +const int dynamic_vino_lib::FaceReidentification::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result * +dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::FaceReidentification::getName() const +{ + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::FaceReidentification::observeOutput( + const std::shared_ptr & output, + const std::string filter_conditions) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector dynamic_vino_lib::FaceReidentification::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Face reidentification does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/models/face_reidentification_model.cpp b/dynamic_vino_lib/src/models/face_reidentification_model.cpp new file mode 100644 index 00000000..7655737a --- /dev/null +++ b/dynamic_vino_lib/src/models/face_reidentification_model.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of FaceReidentificationModel class + * @file face_reidentification_model.cpp + */ +#include +#include "dynamic_vino_lib/models/face_reidentification_model.h" +#include "dynamic_vino_lib/slog.h" +// Validated Face Reidentification Network +Models::FaceReidentificationModel::FaceReidentificationModel( + const std::string & model_loc, int input_num, int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + +void Models::FaceReidentificationModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) +{ + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); + output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); + // set input and output layer name + input_ = input_info_map.begin()->first; + output_ = output_info_map.begin()->first; +} + +void Models::FaceReidentificationModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr & net_reader) {} + +const std::string 
Models::FaceReidentificationModel::getModelName() const +{ + return "Face Reidentification"; +} diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 472b6ee8..9757f40b 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -312,6 +312,17 @@ void Outputs::ImageWindowOutput::accept( } } +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += "[" + results[i].getFaceID() + "]"; + } +} + void Outputs::ImageWindowOutput::decorateFrame() { if (getPipeline()->getParameters()->isGetFps()) diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index e726a94d..a26fff49 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -43,6 +43,8 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): "/openvino_toolkit/" + pipeline_name_ + "/reidentified_persons", 16); pub_segmented_object_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/segmented_obejcts", 16); + pub_face_reid_ = nh_.advertise( + "/openvino_toolkit/" + pipeline_name_ + "/reidentified_faces", 16); emotions_msg_ptr_ = NULL; faces_msg_ptr_ = NULL; @@ -51,11 +53,29 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): object_msg_ptr_ = NULL; person_reid_msg_ptr_ = NULL; segmented_object_msg_ptr_ = NULL; + face_reid_msg_ptr_ = NULL; } void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) {} +void Outputs::RosTopicOutput::accept( + const std::vector & results) +{ + face_reid_msg_ptr_ = std::make_shared(); + people_msgs::Reidentification face; + for (auto & r : 
results) { + // slog::info << ">"; + auto loc = r.getLocation(); + face.roi.x_offset = loc.x; + face.roi.y_offset = loc.y; + face.roi.width = loc.width; + face.roi.height = loc.height; + face.identity = r.getFaceID(); + face_reid_msg_ptr_->reidentified_vector.push_back(face); + } +} + void Outputs::RosTopicOutput::accept( const std::vector & results) { @@ -275,6 +295,14 @@ void Outputs::RosTopicOutput::handleOutput() pub_object_.publish(object_msg); object_msg_ptr_ = nullptr; } + if (face_reid_msg_ptr_ != nullptr) { + people_msgs::ReidentificationStamped face_reid_msg; + face_reid_msg.header = header; + face_reid_msg.reidentified_vector.swap(face_reid_msg_ptr_->reidentified_vector); + + pub_face_reid_.publish(face_reid_msg); + face_reid_msg_ptr_ = nullptr; + } } std_msgs::Header Outputs::RosTopicOutput::getHeader() diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index 957ed9f0..ba5ea70c 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -36,6 +36,11 @@ void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) { image_window_output_->feedFrame(frame); } +void Outputs::RvizOutput::accept( + const std::vector & results) +{ + image_window_output_->accept(results); +} void Outputs::RvizOutput::accept(const std::vector & results) { diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 28efcb5a..66c43c89 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -29,6 +29,7 @@ #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/inferences/face_detection.h" #include "dynamic_vino_lib/inferences/head_pose_detection.h" +#include "dynamic_vino_lib/inferences/face_reidentification.h" #include "dynamic_vino_lib/inputs/image_input.h" #include "dynamic_vino_lib/inputs/realsense_camera.h" #include 
"dynamic_vino_lib/inputs/realsense_camera_topic.h" @@ -40,6 +41,7 @@ #include "dynamic_vino_lib/models/head_pose_detection_model.h" #include "dynamic_vino_lib/models/object_detection_ssd_model.h" #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" +#include "dynamic_vino_lib/models/face_reidentification_model.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" #include "dynamic_vino_lib/outputs/rviz_output.h" @@ -203,6 +205,9 @@ PipelineManager::parseInference( else if (infer.name == kInferTpye_PersonReidentification) { object = createPersonReidentification(infer); } + else if (infer.name == kInferTpye_FaceReidentification) { + object = createFaceReidentification(infer); + } else { slog::err << "Invalid inference name: " << infer.name << slog::endl; } @@ -345,6 +350,23 @@ PipelineManager::createPersonReidentification( return reidentification_inference_ptr; } +std::shared_ptr +PipelineManager::createFaceReidentification( + const Params::ParamManager::InferenceRawData & infer) +{ + auto face_reidentification_model = + std::make_shared(infer.model, 1, 1, infer.batch); + face_reidentification_model->modelInit(); + auto face_reidentification_engine = std::make_shared( + plugins_for_devices_[infer.engine], face_reidentification_model); + auto face_reid_ptr = + std::make_shared(infer.confidence_threshold); + face_reid_ptr->loadNetwork(face_reidentification_model); + face_reid_ptr->loadEngine(face_reidentification_engine); + + return face_reid_ptr; +} + void PipelineManager::threadPipeline(const char* name) { PipelineData& p = pipelines_[name]; while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { diff --git a/vino_launch/launch/pipeline_face_reidentification.launch b/vino_launch/launch/pipeline_face_reidentification.launch new file mode 100644 index 00000000..5e98c109 --- /dev/null +++ b/vino_launch/launch/pipeline_face_reidentification.launch @@ -0,0 +1,13 
@@ + + + + + + + + + + + + diff --git a/vino_launch/param/pipeline_face_reidentification.yaml b/vino_launch/param/pipeline_face_reidentification.yaml new file mode 100644 index 00000000..8421ac39 --- /dev/null +++ b/vino_launch/param/pipeline_face_reidentification.yaml @@ -0,0 +1,36 @@ +Pipelines: +- name: people + inputs: [RealSenseCamera] + infers: + - name: FaceDetection + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml + engine: CPU + label: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: LandmarksDetection + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Retail/object_attributes/landmarks_regression/0009/dldt/landmarks-regression-retail-0009.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + - name: FaceReidentification + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Retail/object_reidentification/face/mobilenet_based/dldt/face-reidentification-retail-0071.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.9 + outputs: [ImageWindow, RosTopic, RViz] + connects: + - left: RealSenseCamera + right: [FaceDetection] + - left: FaceDetection + right: [LandmarksDetection, FaceReidentification] + - left: FaceDetection + right: [ImageWindow, RosTopic, RViz] + - left: LandmarksDetection + right: [ImageWindow, RosTopic, RViz] + - left: FaceReidentification + right: [ImageWindow, RosTopic, RViz] + +Common: From 30603edc3f6b7428e2bcf74fee38ab92ec7e123a Mon Sep 17 00:00:00 2001 From: luyang00 
<393780933@qq.com> Date: Tue, 23 Jul 2019 13:08:05 +0800 Subject: [PATCH 06/63] support person attribute detection --- dynamic_vino_lib/CMakeLists.txt | 4 +- .../inferences/person_attribs_detection.h | 122 +++++++++++++++++ .../models/person_attribs_detection_model.h | 48 +++++++ .../dynamic_vino_lib/outputs/base_output.h | 13 +- .../outputs/image_window_output.h | 7 + .../outputs/ros_topic_output.h | 10 ++ .../dynamic_vino_lib/outputs/rviz_output.h | 6 + .../dynamic_vino_lib/pipeline_manager.h | 2 + .../dynamic_vino_lib/pipeline_params.h | 1 + .../inferences/person_attribs_detection.cpp | 125 ++++++++++++++++++ .../models/person_attribs_detection_model.cpp | 62 +++++++++ .../src/outputs/image_window_output.cpp | 14 ++ .../src/outputs/ros_topic_output.cpp | 27 ++++ dynamic_vino_lib/src/outputs/rviz_output.cpp | 7 + dynamic_vino_lib/src/pipeline_manager.cpp | 22 +++ people_msgs/CMakeLists.txt | 3 + people_msgs/msg/PersonAttribute.msg | 17 +++ people_msgs/msg/PersonAttributeStamped.msg | 17 +++ .../param/.pipeline_reidentification.yaml.swo | Bin 0 -> 12288 bytes .../param/.pipeline_reidentification.yaml.swp | Bin 0 -> 12288 bytes .../param/pipeline_reidentification.yaml | 16 ++- 21 files changed, 516 insertions(+), 7 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h create mode 100644 dynamic_vino_lib/src/inferences/person_attribs_detection.cpp create mode 100644 dynamic_vino_lib/src/models/person_attribs_detection_model.cpp create mode 100644 people_msgs/msg/PersonAttribute.msg create mode 100644 people_msgs/msg/PersonAttributeStamped.msg create mode 100644 vino_launch/param/.pipeline_reidentification.yaml.swo create mode 100644 vino_launch/param/.pipeline_reidentification.yaml.swp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index dcbc0260..1b7105bf 100644 --- 
a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -154,6 +154,7 @@ add_library(${PROJECT_NAME} SHARED src/inferences/person_reidentification.cpp src/inferences/face_reidentification.cpp src/inferences/base_reidentification.cpp + src/inferences/person_attribs_detection.cpp src/inputs/realsense_camera.cpp src/inputs/realsense_camera_topic.cpp src/inputs/standard_camera.cpp @@ -168,7 +169,8 @@ add_library(${PROJECT_NAME} SHARED src/models/object_detection_yolov2voc_model.cpp src/models/object_segmentation_model.cpp src/models/person_reidentification_model.cpp - src/models/face_reidentification_model.cpp + src/models/face_reidentification_model.cpp + src/models/person_attribs_detection_model.cpp src/outputs/image_window_output.cpp src/outputs/ros_topic_output.cpp src/outputs/rviz_output.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h new file mode 100644 index 00000000..288b8792 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h @@ -0,0 +1,122 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for PersonAttribsDetection Class + * @file person_attribs_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_H_ +#define DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_H_ +#include +#include +#include +#include "dynamic_vino_lib/models/person_attribs_detection_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib +{ +/** + * @class PersonAttribsDetectionResult + * @brief Class for storing and processing person attributes detection result. + */ +class PersonAttribsDetectionResult : public Result +{ +public: + friend class PersonAttribsDetection; + explicit PersonAttribsDetectionResult(const cv::Rect & location); + std::string getAttributes() const + { + return attributes_; + } + bool getMaleProbability() const + { + return male_probability_; + } + +private: + float male_probability_; + std::string attributes_ = ""; +}; +/** + * @class PersonAttribsDetection + * @brief Class to load person attributes detection model and perform person attributes detection. + */ +class PersonAttribsDetection : public BaseInference +{ +public: + using Result = dynamic_vino_lib::PersonAttribsDetectionResult; + explicit PersonAttribsDetection(double); + ~PersonAttribsDetection() override; + /** + * @brief Load the person attributes detection model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + /** + * @brief Start inference for all buffered frames. 
+ * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. 
+ */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + double attribs_confidence_; + const std::vector net_attributes_ = {"is male", "hat", + "longsleeves", "longpants", "longhair", "coatjacket"}; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h new file mode 100644 index 00000000..2105ef81 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for PersonAttribsDetectionModel Class + * @file person_attribs_detection_model.h + */ +#ifndef DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_H_ +#define DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_H_ +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models +{ +/** + * @class PersonAttribsDetectionModel + * @brief This class generates the person attributes detection model. 
+ */ +class PersonAttribsDetectionModel : public BaseModel +{ +public: + PersonAttribsDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() {return input_;} + inline const std::string getOutputName() {return output_;} + /** + * @brief Get the name of this detection model. + * @return Name of the model. + */ + const std::string getModelName() const override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + std::string input_; + std::string output_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 846924ed..7684a8e2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -35,6 +35,7 @@ #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/inferences/person_reidentification.h" #include "dynamic_vino_lib/inferences/face_reidentification.h" +#include "dynamic_vino_lib/inferences/person_attribs_detection.h" #include "dynamic_vino_lib/services/frame_processing_server.h" #include "opencv2/opencv.hpp" @@ -54,9 +55,15 @@ class BaseOutput BaseOutput() { }; - /** - * @brief Generate output content according to the face reidentification result. - */ + /** + * @brief Generate output content according to the person atrribute detection result. + */ + virtual void accept(const std::vector &) + { + } + /** + * @brief Generate output content according to the face reidentification result. 
+ */ virtual void accept(const std::vector &) { } diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index 564cbf9c..8dc6f4d4 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -52,6 +52,13 @@ class ImageWindowOutput : public BaseOutput * functions with image window. */ void handleOutput() override; + /** + * @brief Generate image window output content according to + * the person attributes detection result. + * @param[in] A person attributes detection result objetc. + */ + void accept( + const std::vector &) override; /** * @brief Generate image window output content according to * the landmarks detetection result. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index 7b348b79..a147f647 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include #include @@ -66,6 +68,12 @@ class RosTopicOutput : public BaseOutput * functions with ros topic. */ void handleOutput() override; + /** + * @brief Generate ros topic infomation according to + * the person attributes detection result. + * @param[in] results a bundle of person attributes detection results. + */ + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the face reidentification result. 
@@ -140,6 +148,8 @@ class RosTopicOutput : public BaseOutput std::shared_ptr segmented_object_msg_ptr_; ros::Publisher pub_face_reid_; std::shared_ptr face_reid_msg_ptr_; + ros::Publisher pub_person_attribs_; + std::shared_ptr person_attribs_msg_ptr_; }; } // namespace Outputs diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h index b8bea819..550a4b3b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h @@ -49,6 +49,12 @@ class RvizOutput : public BaseOutput * functions with rviz. */ void handleOutput() override; + /** + * @brief Generate rviz output content according to + * the person attributes detection result. + * @param[in] A person attributes detection result objetc. + */ + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the face reidentification result. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 3e9cd9f2..9eb58082 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -111,6 +111,8 @@ class PipelineManager { const Params::ParamManager::InferenceRawData& infer); std::shared_ptr createFaceReidentification( const Params::ParamManager::InferenceRawData & infer); + std::shared_ptr createPersonAttribsDetection( + const Params::ParamManager::InferenceRawData & infer); std::map pipelines_; std::map plugins_for_devices_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index cfcf2b1d..ccea6fde 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -58,6 +58,7 @@ const char kInferTpye_PersonReidentification[] = "PersonReidentification"; const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; const char kInferTpye_ObjectDetectionTypeYolov2voc[] = "yolov2-voc"; const char kInferTpye_FaceReidentification[] = "FaceReidentification"; +const char kInferTpye_PersonAttribsDetection[] = "PersonAttribsDetection"; /** * @class PipelineParams * @brief This class is a pipeline parameter management that stores parameters diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp new file mode 100644 index 00000000..df64283a --- /dev/null +++ b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp @@ -0,0 +1,125 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of PersonAttribsDetection class and + * PersonAttribsDetectionResult class + * @file person_attribs_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/person_attribs_detection.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" + +// PersonAttribsDetectionResult +dynamic_vino_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult( + const cv::Rect & location) +: Result(location) {} + +// PersonAttribsDetection +dynamic_vino_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) +: attribs_confidence_(attribs_confidence), dynamic_vino_lib::BaseInference() {} + +dynamic_vino_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; +void dynamic_vino_lib::PersonAttribsDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::PersonAttribsDetection::enqueue( + const cv::Mat & frame, const cv::Rect & input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!dynamic_vino_lib::BaseInference::enqueue( + frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::PersonAttribsDetection::submitRequest() +{ + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() +{ + bool 
can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float * output_values = request->GetBlob(output)->buffer().as(); + int net_attrib_length = net_attributes_.size(); + for (int i = 0; i < getResultsLength(); i++) { + results_[i].male_probability_ = output_values[i * net_attrib_length]; + std::string attrib = ""; + for (int j = 1; j < net_attrib_length; j++) { + attrib += (output_values[i * net_attrib_length + j] > attribs_confidence_) ? + net_attributes_[j] + ", " : ""; + } + results_[i].attributes_ = attrib; + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} + +const int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result * +dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const +{ + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::PersonAttribsDetection::observeOutput( + const std::shared_ptr & output, + const std::string filter_conditions) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector dynamic_vino_lib::PersonAttribsDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Person attributes detection does not support filtering now! 
" << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp new file mode 100644 index 00000000..1dc7c599 --- /dev/null +++ b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp @@ -0,0 +1,62 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of PersonAttribsDetectionModel class + * @file person_attribs_detection_model.cpp + */ +#include +#include "dynamic_vino_lib/models/person_attribs_detection_model.h" +#include "dynamic_vino_lib/slog.h" +// Validated Person Attributes Detection Network +Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel( + const std::string & model_loc, int input_num, int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + +void Models::PersonAttribsDetectionModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) +{ + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + // set input and output layer name + input_ = input_info_map.begin()->first; + output_ = output_info_map.begin()->first; +} + +void Models::PersonAttribsDetectionModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + throw std::logic_error("Person Attribs topology should have only one input"); + } + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) { + throw std::logic_error("Person Attribs Network expects networks having one output"); + } +} + +const std::string Models::PersonAttribsDetectionModel::getModelName() const +{ + return "Person Attributes Detection"; +} diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp 
b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 9757f40b..30f6dc1f 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -301,6 +301,20 @@ unsigned Outputs::ImageWindowOutput::findOutput( return outputs_.size() - 1; } +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + if (results[i].getMaleProbability() < 0.5) { + outputs_[target_index].scalar = cv::Scalar(0, 0, 255); + } + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += "[" + results[i].getAttributes() + "]"; + } +} + void Outputs::ImageWindowOutput::accept( const std::vector & results) { diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index a26fff49..379f0821 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -45,6 +45,8 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): "/openvino_toolkit/" + pipeline_name_ + "/segmented_obejcts", 16); pub_face_reid_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/reidentified_faces", 16); + pub_person_attribs_ = nh_.advertise( + "/openvino_toolkit/" + pipeline_name_ + "person_attributes", 16); emotions_msg_ptr_ = NULL; faces_msg_ptr_ = NULL; @@ -54,10 +56,27 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): person_reid_msg_ptr_ = NULL; segmented_object_msg_ptr_ = NULL; face_reid_msg_ptr_ = NULL; + person_attribs_msg_ptr_ = NULL; } void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) {} +void Outputs::RosTopicOutput::accept( + const std::vector & results) +{ + person_attribs_msg_ptr_ = std::make_shared(); + people_msgs::PersonAttribute person_attrib; + for (auto & r : results) { + // slog::info 
<< ">"; + auto loc = r.getLocation(); + person_attrib.roi.x_offset = loc.x; + person_attrib.roi.y_offset = loc.y; + person_attrib.roi.width = loc.width; + person_attrib.roi.height = loc.height; + person_attrib.attribute = r.getAttributes(); + person_attribs_msg_ptr_->attributes.push_back(person_attrib); + } +} void Outputs::RosTopicOutput::accept( const std::vector & results) @@ -226,6 +245,14 @@ void Outputs::RosTopicOutput::accept( void Outputs::RosTopicOutput::handleOutput() { std_msgs::Header header = getHeader(); + + if (person_attribs_msg_ptr_ != nullptr) { + people_msgs::PersonAttributeStamped person_attribute_msg; + person_attribute_msg.header = header; + person_attribute_msg.attributes.swap(person_attribs_msg_ptr_->attributes); + pub_person_attribs_.publish(person_attribute_msg); + person_attribs_msg_ptr_ = nullptr; + } if (person_reid_msg_ptr_ != nullptr) { people_msgs::ReidentificationStamped person_reid_msg; person_reid_msg.header = header; diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index ba5ea70c..e65b0b6b 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -36,6 +36,13 @@ void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) { image_window_output_->feedFrame(frame); } + +void Outputs::RvizOutput::accept( + const std::vector & results) +{ + image_window_output_->accept(results); +} + void Outputs::RvizOutput::accept( const std::vector & results) { diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 66c43c89..6c1dc951 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -30,6 +30,7 @@ #include "dynamic_vino_lib/inferences/face_detection.h" #include "dynamic_vino_lib/inferences/head_pose_detection.h" #include "dynamic_vino_lib/inferences/face_reidentification.h" +#include 
"dynamic_vino_lib/inferences/person_attribs_detection.h" #include "dynamic_vino_lib/inputs/image_input.h" #include "dynamic_vino_lib/inputs/realsense_camera.h" #include "dynamic_vino_lib/inputs/realsense_camera_topic.h" @@ -42,6 +43,7 @@ #include "dynamic_vino_lib/models/object_detection_ssd_model.h" #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/models/face_reidentification_model.h" +#include "dynamic_vino_lib/models/person_attribs_detection_model.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" #include "dynamic_vino_lib/outputs/rviz_output.h" @@ -50,6 +52,7 @@ #include "dynamic_vino_lib/pipeline_manager.h" #include "dynamic_vino_lib/pipeline_params.h" #include "dynamic_vino_lib/services/pipeline_processing_server.h" + std::shared_ptr PipelineManager::createPipeline( const Params::ParamManager::PipelineRawData& params) { if (params.name == "") { @@ -207,6 +210,8 @@ PipelineManager::parseInference( } else if (infer.name == kInferTpye_FaceReidentification) { object = createFaceReidentification(infer); + } else if (infer.name == kInferTpye_PersonAttribsDetection) { + object = createPersonAttribsDetection(infer); } else { slog::err << "Invalid inference name: " << infer.name << slog::endl; @@ -367,6 +372,23 @@ PipelineManager::createFaceReidentification( return face_reid_ptr; } +std::shared_ptr +PipelineManager::createPersonAttribsDetection( + const Params::ParamManager::InferenceRawData & infer) +{ + auto person_attribs_detection_model = + std::make_shared(infer.model, 1, 1, infer.batch); + person_attribs_detection_model->modelInit(); + auto person_attribs_detection_engine = std::make_shared( + plugins_for_devices_[infer.engine], person_attribs_detection_model); + auto attribs_inference_ptr = + std::make_shared(infer.confidence_threshold); + attribs_inference_ptr->loadNetwork(person_attribs_detection_model); + 
attribs_inference_ptr->loadEngine(person_attribs_detection_engine); + + return attribs_inference_ptr; +} + void PipelineManager::threadPipeline(const char* name) { PipelineData& p = pipelines_[name]; while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { diff --git a/people_msgs/CMakeLists.txt b/people_msgs/CMakeLists.txt index fbf650ad..b2e99384 100644 --- a/people_msgs/CMakeLists.txt +++ b/people_msgs/CMakeLists.txt @@ -35,6 +35,9 @@ add_message_files(DIRECTORY msg FILES ObjectsInMasks.msg Reidentification.msg ReidentificationStamped.msg + PersonAttribute.msg + PersonAttributeStamped.msg + ) add_service_files(FILES diff --git a/people_msgs/msg/PersonAttribute.msg b/people_msgs/msg/PersonAttribute.msg new file mode 100644 index 00000000..12dd2793 --- /dev/null +++ b/people_msgs/msg/PersonAttribute.msg @@ -0,0 +1,17 @@ +# Copyright (c) 2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This message can represent a peron's attributes +string attribute # person attributes +sensor_msgs/RegionOfInterest roi # region of interest \ No newline at end of file diff --git a/people_msgs/msg/PersonAttributeStamped.msg b/people_msgs/msg/PersonAttributeStamped.msg new file mode 100644 index 00000000..0fb6dfcd --- /dev/null +++ b/people_msgs/msg/PersonAttributeStamped.msg @@ -0,0 +1,17 @@ +# Copyright (c) 2017 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This message can represent identified persons' attributes +std_msgs/Header header # timestamp in header is the time the sensor captured the raw data +PersonAttribute[] attributes # ObjectInBox array \ No newline at end of file diff --git a/vino_launch/param/.pipeline_reidentification.yaml.swo b/vino_launch/param/.pipeline_reidentification.yaml.swo new file mode 100644 index 0000000000000000000000000000000000000000..07273f68ec32bbcb214e45402d6bf5fb72d632e8 GIT binary patch literal 12288 zcmeI2&u`pB6vrp@QlKpe4i!=lL!%0iy6fG9G|}Ers!BOji4vL~P-M;8&%49gGuF&( zljT6Aa^VlCdh7}Qfc^okI3s}&aOKJg#G&n-@6I}ub$&Eab7?e|zOp@jW54;l_eLA# zbz7S&H~CVt#c;gK*azQS+xq+BCHC7T#-wtTkH=qfc)NxZYS2E^@@sDxCBhA)inncK zO%fSxV_lNEBbAPw*7-oX$lv5*sFHqE3L}cBlqKa-QEW)2q@2to-XpbYjzn=3ClB>3 zJPZU*If0?e8cXNd{9N0Y`t0Qo`TOtPI^~NF%L@bofj}S-2m}IwKp+qZ1OkEo6L94W zyNiK0s=+JOd8T%*Zo?@M2m}IwKp+qZ1OkCTAP@)y0)apv5C{ZbLjodY?7>;aZoCQS z@&EtbzyF`V!`PqDkI)a$chE&>2Kwg$V?RMppoh@&^NjrpEkakIGtjehjQtKhf_{FR 
zF%DgT{(6hCKcL6Zedt>#hi*b`=qdd81^OPk2i=A?A@AR7$Tzqg2m}IwKp+qZ1Olg( zz-raNxU7pps}6qd_`}Fv-*xyGD~0IMZK+bdeU-0k`?)SCxy$Tvt?S{4Nv z0jCnJ6y9b#xYk2g4sm$|bbews?g>pOc6~#(uk*CSTg`=PrCs5YeuuZK+gx-h?{KH1 zE=87Hw6n9*^n?hl(^ucWiA6-nf4MU0UDLIaDBS9mqHCm*NR%^Z;-B_ESJJyWm4%%S3 zKaS*VJlAS3H5ZznE-dWz+rJ@3x~d!=k%=}a85-$Ek@g{vUn~Mnreu+@LLJNBD9uxM ztQq4Ka=*j|SIC-|lne4{-?Uh)D4jrB90~MZx;}wwSy69mMs7GEb0ov!wW(w{%<#V) zmufRN<)S>~~G6s!g-Dxo)xDe3(=( KTdSJsG3#IYrIteg literal 0 HcmV?d00001 diff --git a/vino_launch/param/.pipeline_reidentification.yaml.swp b/vino_launch/param/.pipeline_reidentification.yaml.swp new file mode 100644 index 0000000000000000000000000000000000000000..b0b3c4d1b21fc4a769c74a78041a78573f211c90 GIT binary patch literal 12288 zcmeI2&x;gC6vxZO9E=(zIk*Q8rG}`mGd;7Ts|@iVD}gK~-naTbN@!&46;7fN`zp8%qd9MnF zd7Z}E!q@zEy}{7VGuD4TxBlwFMRsGFF{QoCNBsx1U{~`(_nZ4#eiIC%MEJha@rH|> zO(JVtY)YvgD{W$LOx{;M3JXQm`531ABy5IPVVa& zy(9w1oWQ_mwcBS|YqlA-`jyL<_-CK49`i-h@`wNtAOb{y2oM1xKm>>Y5%?zoU!Gu3 z(J@tZeqLQqOkAr+x)1>(Km>>Y5g-CYfCvx)B0vO)01+SpN05L>8GCk$v6+wIJpTXR z`Tf6jjU!b3$AE0Sy3VQtsW4}T_Lf=7e&ocHqgufTr7tl#)>kMOmKrf(=pf{%( z`wjXT`W_lW1+)ycpbOBS@Zlx&E!2bVKwm<^wf#Se0qc zo1!qf%@_~Os&3D>`NM@mbmar3Q?oI{myNq`N|oSlO+8r;rZt|+43pPV$B*~67_cNaU4;Nf0=PP`o-x`LwERM>Q06w(U#lo+is znQPbf$^ifr*YYKQMZTwg{L@0J?KT2hpn_++iZ;m mc2|pkMRel4fun(QI8l_Y4wUh8XPxcr7ZZ*$wp10 literal 0 HcmV?d00001 diff --git a/vino_launch/param/pipeline_reidentification.yaml b/vino_launch/param/pipeline_reidentification.yaml index bc0db8a0..43f652b5 100644 --- a/vino_launch/param/pipeline_reidentification.yaml +++ b/vino_launch/param/pipeline_reidentification.yaml @@ -1,9 +1,10 @@ Pipelines: - name: object - inputs: [StandardCamera] + inputs: [RealSenseCamera] infers: - name: ObjectDetection model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model_type: SSD engine: CPU label: to/be/set/xxx.labels batch: 1 @@ -15,13 +16,22 @@ Pipelines: label: to/be/set/xxx.labels 
batch: 1 confidence_threshold: 0.7 + - name: PersonAttribsDetection + model: /opt/intel/computer_vision_sdk_2018.5.455/deployment_tools/model_downloader/Security/object_attributes/pedestrian/person-attributes-recognition-crossroad-0031/dldt/person-attributes-recognition-crossroad-0031.xml + #model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-attributes-recognition-crossroad-0031/FP32/person-attributes-recognition-crossroad-0031.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 outputs: [ImageWindow, RosTopic, RViz] connects: - - left: StandardCamera + - left: RealSenseCamera right: [ObjectDetection] - left: ObjectDetection - right: [PersonReidentification] + right: [PersonReidentification, PersonAttribsDetection] - left: PersonReidentification right: [ImageWindow, RosTopic, RViz] + - left: PersonAttribsDetection + right: [ImageWindow, RosTopic, RViz] OpenvinoCommon: From d5ec522ef659d016e9ebed20e15c9e3158bdb2a2 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Wed, 24 Jul 2019 11:03:25 +0800 Subject: [PATCH 07/63] support vehicle & license plate detection --- dynamic_vino_lib/CMakeLists.txt | 4 + .../inferences/license_plate_detection.h | 133 +++++++++++++++++ .../inferences/vehicle_attribs_detection.h | 123 ++++++++++++++++ .../models/license_plate_detection_model.h | 53 +++++++ .../models/vehicle_attribs_detection_model.h | 50 +++++++ .../dynamic_vino_lib/outputs/base_output.h | 14 ++ .../outputs/image_window_output.h | 14 ++ .../outputs/ros_topic_output.h | 22 +++ .../dynamic_vino_lib/outputs/rviz_output.h | 14 ++ .../dynamic_vino_lib/pipeline_manager.h | 5 + .../dynamic_vino_lib/pipeline_params.h | 2 + .../inferences/license_plate_detection.cpp | 139 ++++++++++++++++++ .../inferences/vehicle_attribs_detection.cpp | 125 ++++++++++++++++ .../models/license_plate_detection_model.cpp | 67 +++++++++ .../vehicle_attribs_detection_model.cpp | 64 ++++++++ .../src/outputs/image_window_output.cpp 
| 23 +++ .../src/outputs/ros_topic_output.cpp | 56 ++++++- dynamic_vino_lib/src/outputs/rviz_output.cpp | 11 ++ dynamic_vino_lib/src/pipeline_manager.cpp | 47 +++++- people_msgs/CMakeLists.txt | 5 +- people_msgs/msg/LicensePlate.msg | 16 ++ people_msgs/msg/LicensePlateStamped.msg | 16 ++ people_msgs/msg/VehicleAttribs.msg | 17 +++ people_msgs/msg/VehicleAttribsStamped.msg | 16 ++ .../launch/pipeline_vehicle_detection.launch | 13 ++ .../param/.pipeline_reidentification.yaml.swo | Bin 12288 -> 0 bytes .../param/.pipeline_reidentification.yaml.swp | Bin 12288 -> 0 bytes .../param/pipeline_vehicle_detection.yaml | 35 +++++ 28 files changed, 1081 insertions(+), 3 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h create mode 100644 dynamic_vino_lib/src/inferences/license_plate_detection.cpp create mode 100644 dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp create mode 100644 dynamic_vino_lib/src/models/license_plate_detection_model.cpp create mode 100644 dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp create mode 100644 people_msgs/msg/LicensePlate.msg create mode 100644 people_msgs/msg/LicensePlateStamped.msg create mode 100644 people_msgs/msg/VehicleAttribs.msg create mode 100644 people_msgs/msg/VehicleAttribsStamped.msg create mode 100644 vino_launch/launch/pipeline_vehicle_detection.launch delete mode 100644 vino_launch/param/.pipeline_reidentification.yaml.swo delete mode 100644 vino_launch/param/.pipeline_reidentification.yaml.swp create mode 100644 vino_launch/param/pipeline_vehicle_detection.yaml diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt 
index 1b7105bf..68f6691b 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -155,6 +155,8 @@ add_library(${PROJECT_NAME} SHARED src/inferences/face_reidentification.cpp src/inferences/base_reidentification.cpp src/inferences/person_attribs_detection.cpp + src/inferences/vehicle_attribs_detection.cpp + src/inferences/license_plate_detection.cpp src/inputs/realsense_camera.cpp src/inputs/realsense_camera_topic.cpp src/inputs/standard_camera.cpp @@ -171,6 +173,8 @@ add_library(${PROJECT_NAME} SHARED src/models/person_reidentification_model.cpp src/models/face_reidentification_model.cpp src/models/person_attribs_detection_model.cpp + src/models/vehicle_attribs_detection_model.cpp + src/models/license_plate_detection_model.cpp src/outputs/image_window_output.cpp src/outputs/ros_topic_output.cpp src/outputs/rviz_output.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h new file mode 100644 index 00000000..c35c961d --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h @@ -0,0 +1,133 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for LicensePlateDetection Class + * @file license_plate_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#include +#include +#include +#include "dynamic_vino_lib/models/license_plate_detection_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib +{ +/** + * @class LicensePlateDetectionResult + * @brief Class for storing and processing license plate detection result. + */ +class LicensePlateDetectionResult : public Result +{ +public: + friend class LicensePlateDetection; + explicit LicensePlateDetectionResult(const cv::Rect & location); + std::string getLicense() const + { + return license_; + } + +private: + std::string license_ = ""; +}; +/** + * @class LicensePlateDetection + * @brief Class to load license plate detection model and perform detection. + */ +class LicensePlateDetection : public BaseInference +{ +public: + using Result = dynamic_vino_lib::LicensePlateDetectionResult; + LicensePlateDetection(); + ~LicensePlateDetection() override; + /** + * @brief Load the license plate detection model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + /** + * @brief Set the sequence input blob + */ + void fillSeqBlob(); + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. 
+ */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. 
+ */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + const std::vector licenses_ = { + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", + "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", + "U", "V", "W", "X", "Y", "Z" + }; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h new file mode 100644 index 00000000..36448f06 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h @@ -0,0 +1,123 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for VehicleAttribsDetection Class + * @file vehicle_attribs_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#include +#include +#include +#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib +{ +/** + * @class VehicleAttribsDetectionResult + * @brief Class for storing and processing vehicle attributes detection result. + */ +class VehicleAttribsDetectionResult : public Result +{ +public: + friend class VehicleAttribsDetection; + explicit VehicleAttribsDetectionResult(const cv::Rect & location); + std::string getColor() const + { + return color_; + } + std::string getType() const + { + return type_; + } + +private: + std::string color_ = ""; + std::string type_ = ""; +}; +/** + * @class VehicleAttribsDetection + * @brief Class to load vehicle attributes detection model and perform detection. + */ +class VehicleAttribsDetection : public BaseInference +{ +public: + using Result = dynamic_vino_lib::VehicleAttribsDetectionResult; + VehicleAttribsDetection(); + ~VehicleAttribsDetection() override; + /** + * @brief Load the vehicle attributes detection model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. 
+ */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. 
+ */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + const std::vector types_ = { + "car", "van", "truck", "bus"}; + const std::vector colors_ = { + "white", "gray", "yellow", "red", "green", "blue", "black"}; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h new file mode 100644 index 00000000..371a1294 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h @@ -0,0 +1,53 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for LicensePlateDetectionModel Class + * @file vehicle_attribs_detection_model.h + */ +#ifndef DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#define DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models +{ +/** + * @class LicensePlateDetectionModel + * @brief This class generates the license plate detection model. 
+ */ +class LicensePlateDetectionModel : public BaseModel +{ +public: + LicensePlateDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() {return input_;} + inline const std::string getSeqInputName() {return seq_input_;} + inline const std::string getOutputName() {return output_;} + inline const int getMaxSequenceSize() {return max_sequence_size_;} + /** + * @brief Get the name of this detection model. + * @return Name of the model. + */ + const std::string getModelName() const override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + // up to 88 items per license plate, ended with "-1" + const int max_sequence_size_ = 88; + std::string input_; + std::string seq_input_; + std::string output_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h new file mode 100644 index 00000000..51116e4a --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for VehicleAttribsDetectionModel Class + * @file vehicle_attribs_detection_model.h + */ +#ifndef DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ +#define DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models +{ +/** + * @class VehicleAttribsDetectionModel + * @brief This class generates the vehicle attributes detection model. + */ +class VehicleAttribsDetectionModel : public BaseModel +{ +public: + VehicleAttribsDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() {return input_;} + inline const std::string getColorOutputName() {return color_output_;} + inline const std::string getTypeOutputName() {return type_output_;} + /** + * @brief Get the name of this detection model. + * @return Name of the model. + */ + const std::string getModelName() const override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + std::string input_; + std::string color_output_; + std::string type_output_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 7684a8e2..07dda75f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -36,6 +36,8 @@ #include "dynamic_vino_lib/inferences/person_reidentification.h" #include "dynamic_vino_lib/inferences/face_reidentification.h" #include "dynamic_vino_lib/inferences/person_attribs_detection.h" +#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.h" +#include "dynamic_vino_lib/inferences/license_plate_detection.h" #include 
"dynamic_vino_lib/services/frame_processing_server.h" #include "opencv2/opencv.hpp" @@ -55,6 +57,18 @@ class BaseOutput BaseOutput() { }; + /** + * @brief Generate output content according to the license plate detection result. + */ + virtual void accept(const std::vector &) + { + } + /** + * @brief Generate output content according to the vehicle attributes detection result. + */ + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the person atrribute detection result. */ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index 8dc6f4d4..44b9790a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -52,6 +52,20 @@ class ImageWindowOutput : public BaseOutput * functions with image window. */ void handleOutput() override; + /** + * @brief Generate image window output content according to + * the license plate detection result. + * @param[in] A license plate detection result objetc. + */ + void accept( + const std::vector &) override; + /** + * @brief Generate image window output content according to + * the vehicle attributes detection result. + * @param[in] A vehicle attributes detection result objetc. + */ + void accept( + const std::vector &) override; /** * @brief Generate image window output content according to * the person attributes detection result. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index a147f647..24e7463e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -38,6 +38,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -68,6 +72,20 @@ class RosTopicOutput : public BaseOutput * functions with ros topic. */ void handleOutput() override; + /** + * @brief Generate image window output content according to + * the license plate detection result. + * @param[in] A license plate detection result objetc. + */ + void accept( + const std::vector &) override; + /** + * @brief Generate image window output content according to + * the vehicle attributes detection result. + * @param[in] A vehicle attributes detection result objetc. + */ + void accept( + const std::vector &) override; /** * @brief Generate ros topic infomation according to * the person attributes detection result. @@ -150,6 +168,10 @@ class RosTopicOutput : public BaseOutput std::shared_ptr face_reid_msg_ptr_; ros::Publisher pub_person_attribs_; std::shared_ptr person_attribs_msg_ptr_; + ros::Publisher pub_license_plate_; + std::shared_ptr license_plate_msg_ptr_; + ros::Publisher pub_vehicle_attribs_; + std::shared_ptr vehicle_attribs_msg_ptr_; }; } // namespace Outputs diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h index 550a4b3b..6f944005 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h @@ -49,6 +49,20 @@ class RvizOutput : public BaseOutput * functions with rviz. */ void handleOutput() override; + /** + * @brief Generate image window output content according to + * the license plate detection result. 
+ * @param[in] A license plate detection result objetc. + */ + void accept( + const std::vector &) override; + /** + * @brief Generate image window output content according to + * the vehicle attributes detection result. + * @param[in] A vehicle attributes detection result objetc. + */ + void accept( + const std::vector &) override; /** * @brief Generate rviz output content according to * the person attributes detection result. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 9eb58082..04b7e906 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -113,6 +113,11 @@ class PipelineManager { const Params::ParamManager::InferenceRawData & infer); std::shared_ptr createPersonAttribsDetection( const Params::ParamManager::InferenceRawData & infer); + std::shared_ptr createVehicleAttribsDetection( + const Params::ParamManager::InferenceRawData & infer); + std::shared_ptr createLicensePlateDetection( + const Params::ParamManager::InferenceRawData & infer); + std::map pipelines_; std::map plugins_for_devices_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index ccea6fde..5a12ff38 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -59,6 +59,8 @@ const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; const char kInferTpye_ObjectDetectionTypeYolov2voc[] = "yolov2-voc"; const char kInferTpye_FaceReidentification[] = "FaceReidentification"; const char kInferTpye_PersonAttribsDetection[] = "PersonAttribsDetection"; +const char kInferTpye_VehicleAttribsDetection[] = "VehicleAttribsDetection"; +const char kInferTpye_LicensePlateDetection[] = "LicensePlateDetection"; /** * @class PipelineParams * @brief This class is a 
pipeline parameter management that stores parameters diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp new file mode 100644 index 00000000..f2c26ee9 --- /dev/null +++ b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp @@ -0,0 +1,139 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a realization file with declaration of LicensePlateDetection class and + * LicensePlateDetectionResult class + * @file license_plate_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/license_plate_detection.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" + +// LicensePlateDetectionResult +dynamic_vino_lib::LicensePlateDetectionResult::LicensePlateDetectionResult( + const cv::Rect & location) +: Result(location) {} + +// LicensePlateDetection +dynamic_vino_lib::LicensePlateDetection::LicensePlateDetection() +: dynamic_vino_lib::BaseInference() {} + +dynamic_vino_lib::LicensePlateDetection::~LicensePlateDetection() = default; +void dynamic_vino_lib::LicensePlateDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +void dynamic_vino_lib::LicensePlateDetection::fillSeqBlob() +{ + InferenceEngine::Blob::Ptr seq_blob = getEngine()->getRequest()->GetBlob( + 
valid_model_->getSeqInputName()); + int max_sequence_size = seq_blob->getTensorDesc().getDims()[0]; + // second input is sequence, which is some relic from the training + // it should have the leading 0.0f and rest 1.0f + float * blob_data = seq_blob->buffer().as(); + blob_data[0] = 0.0f; + std::fill(blob_data + 1, blob_data + max_sequence_size, 1.0f); +} + +bool dynamic_vino_lib::LicensePlateDetection::enqueue( + const cv::Mat & frame, const cv::Rect & input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!dynamic_vino_lib::BaseInference::enqueue( + frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + fillSeqBlob(); + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::LicensePlateDetection::submitRequest() +{ + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::LicensePlateDetection::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float * output_values = request->GetBlob(output)->buffer().as(); + for (int i = 0; i < getResultsLength(); i++) { + std::string license = ""; + int max_size = valid_model_->getMaxSequenceSize(); + for (int j = 0; j < max_size; j++) { + if (output_values[i * max_size + j] == -1) { + break; + } + license += licenses_[output_values[i * max_size + j]]; + } + results_[i].license_ = license; + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} + +const int dynamic_vino_lib::LicensePlateDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result * +dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string 
dynamic_vino_lib::LicensePlateDetection::getName() const +{ + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::LicensePlateDetection::observeOutput( + const std::shared_ptr & output, + const std::string filter_conditions) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector dynamic_vino_lib::LicensePlateDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "License plate detection does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp new file mode 100644 index 00000000..df23cf71 --- /dev/null +++ b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp @@ -0,0 +1,125 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a realization file with declaration of VehicleAttribsDetection class and + * VehicleAttribsDetectionResult class + * @file vehicle_attribs_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.h" +#include "dynamic_vino_lib/outputs/base_output.h" +#include "dynamic_vino_lib/slog.h" + +// VehicleAttribsDetectionResult +dynamic_vino_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult( + const cv::Rect & location) +: Result(location) {} + +// VehicleAttribsDetection +dynamic_vino_lib::VehicleAttribsDetection::VehicleAttribsDetection() +: dynamic_vino_lib::BaseInference() {} + +dynamic_vino_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; +void dynamic_vino_lib::VehicleAttribsDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::VehicleAttribsDetection::enqueue( + const cv::Mat & frame, const cv::Rect & input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!dynamic_vino_lib::BaseInference::enqueue( + frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::VehicleAttribsDetection::submitRequest() +{ + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string color_name = valid_model_->getColorOutputName(); + std::string type_name = valid_model_->getTypeOutputName(); + const float * color_values = request->GetBlob(color_name)->buffer().as(); + const float * type_values = request->GetBlob(type_name)->buffer().as(); + 
for (int i = 0; i < getResultsLength(); i++) { + auto color_id = std::max_element(color_values, color_values + 7) - color_values; + auto type_id = std::max_element(type_values, type_values + 4) - type_values; + color_values += 7; + type_values += 4; + results_[i].color_ = colors_[color_id]; + results_[i].type_ = types_[type_id]; + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} + +const int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result * +dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const +{ + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( + const std::shared_ptr & output, + const std::string filter_conditions) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector dynamic_vino_lib::VehicleAttribsDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Vehicle attributes detection does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp new file mode 100644 index 00000000..15e07898 --- /dev/null +++ b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp @@ -0,0 +1,67 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a realization file with declaration of LicensePlateDetectionModel class + * @file license_plate_detection_model.cpp + */ +#include +#include "dynamic_vino_lib/models/license_plate_detection_model.h" +#include "dynamic_vino_lib/slog.h" +// Validated Vehicle Attributes Detection Network +Models::LicensePlateDetectionModel::LicensePlateDetectionModel( + const std::string & model_loc, int input_num, int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + +void Models::LicensePlateDetectionModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) +{ + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + // set input and output layer name + input_ = input_info_map.begin()->first; + seq_input_ = (++input_info_map.begin())->first; + output_ = output_info_map.begin()->first; +} + +void Models::LicensePlateDetectionModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 2) { + throw std::logic_error("Vehicle Attribs topology should 
have only two inputs"); + } + auto sequence_input = (++input_info_map.begin()); + if (sequence_input->second->getTensorDesc().getDims()[0] != getMaxSequenceSize()) { + throw std::logic_error("License plate detection max sequence size dismatch"); + } + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) { + throw std::logic_error("Vehicle Attribs Network expects networks having one output"); + } +} + +const std::string Models::LicensePlateDetectionModel::getModelName() const +{ + return "Vehicle Attributes Detection"; +} diff --git a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp new file mode 100644 index 00000000..4b74cd58 --- /dev/null +++ b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a realization file with declaration of VehicleAttribsDetectionModel class + * @file vehicle_attribs_detection_model.cpp + */ +#include +#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.h" +#include "dynamic_vino_lib/slog.h" +// Validated Vehicle Attributes Detection Network +Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel( + const std::string & model_loc, int input_num, int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + +void Models::VehicleAttribsDetectionModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) +{ + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + // set input and output layer name + input_ = input_info_map.begin()->first; + auto output_iter = output_info_map.begin(); + color_output_ = (output_iter++)->second->name; + type_output_ = (output_iter++)->second->name; +} + +void Models::VehicleAttribsDetectionModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + throw std::logic_error("Vehicle Attribs topology should have only one input"); + } + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 2) { + throw std::logic_error("Vehicle Attribs Network expects networks having two outputs"); + } +} + +const std::string Models::VehicleAttribsDetectionModel::getModelName() const +{ + return "Vehicle 
Attributes Detection"; +} diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 30f6dc1f..7662706c 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -301,6 +301,29 @@ unsigned Outputs::ImageWindowOutput::findOutput( return outputs_.size() - 1; } +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += ("[" + results[i].getLicense() + "]"); + } +} + +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += + ("[" + results[i].getColor() + "," + results[i].getType() + "]"); + } +} + void Outputs::ImageWindowOutput::accept( const std::vector & results) { diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index 379f0821..08037fb2 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -47,6 +47,10 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): "/openvino_toolkit/" + pipeline_name_ + "/reidentified_faces", 16); pub_person_attribs_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "person_attributes", 16); + pub_license_plate_ = nh_.advertise( + "/openvino_toolkit/" + pipeline_name_ + "/detected_license_plates", 16); + pub_vehicle_attribs_ = nh_.advertise( + "/openvino_toolkit/" + pipeline_name_ + "/detected_vehicles_attribs", 16); emotions_msg_ptr_ = NULL; faces_msg_ptr_ = 
NULL; @@ -57,10 +61,47 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): segmented_object_msg_ptr_ = NULL; face_reid_msg_ptr_ = NULL; person_attribs_msg_ptr_ = NULL; + license_plate_msg_ptr_ = NULL; + vehicle_attribs_msg_ptr_ = NULL; } void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) {} +void Outputs::RosTopicOutput::accept( + const std::vector & results) +{ + vehicle_attribs_msg_ptr_ = std::make_shared(); + people_msgs::VehicleAttribs attribs; + for (auto & r : results) { + // slog::info << ">"; + auto loc = r.getLocation(); + attribs.roi.x_offset = loc.x; + attribs.roi.y_offset = loc.y; + attribs.roi.width = loc.width; + attribs.roi.height = loc.height; + attribs.type = r.getType(); + attribs.color = r.getColor(); + vehicle_attribs_msg_ptr_->vehicles.push_back(attribs); + } +} + +void Outputs::RosTopicOutput::accept( + const std::vector & results) +{ + license_plate_msg_ptr_ = std::make_shared(); + people_msgs::LicensePlate plate; + for (auto & r : results) { + // slog::info << ">"; + auto loc = r.getLocation(); + plate.roi.x_offset = loc.x; + plate.roi.y_offset = loc.y; + plate.roi.width = loc.width; + plate.roi.height = loc.height; + plate.license = r.getLicense(); + license_plate_msg_ptr_->licenses.push_back(plate); + } +} + void Outputs::RosTopicOutput::accept( const std::vector & results) { @@ -245,7 +286,20 @@ void Outputs::RosTopicOutput::accept( void Outputs::RosTopicOutput::handleOutput() { std_msgs::Header header = getHeader(); - + if (vehicle_attribs_msg_ptr_ != nullptr) { + people_msgs::VehicleAttribsStamped vehicle_attribs_msg; + vehicle_attribs_msg.header = header; + vehicle_attribs_msg.vehicles.swap(vehicle_attribs_msg_ptr_->vehicles); + pub_vehicle_attribs_.publish(vehicle_attribs_msg); + vehicle_attribs_msg_ptr_ = nullptr; + } + if (license_plate_msg_ptr_ != nullptr) { + people_msgs::LicensePlateStamped license_plate_msg; + license_plate_msg.header = header; + 
license_plate_msg.licenses.swap(license_plate_msg_ptr_->licenses); + pub_license_plate_.publish(license_plate_msg); + license_plate_msg_ptr_ = nullptr; + } if (person_attribs_msg_ptr_ != nullptr) { people_msgs::PersonAttributeStamped person_attribute_msg; person_attribute_msg.header = header; diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index e65b0b6b..bfcfe620 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -37,6 +37,17 @@ void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) image_window_output_->feedFrame(frame); } +void Outputs::RvizOutput::accept( + const std::vector & results) +{ + image_window_output_->accept(results); +} +void Outputs::RvizOutput::accept( + const std::vector & results) +{ + image_window_output_->accept(results); +} + void Outputs::RvizOutput::accept( const std::vector & results) { diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 6c1dc951..f9856d51 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -31,6 +31,8 @@ #include "dynamic_vino_lib/inferences/head_pose_detection.h" #include "dynamic_vino_lib/inferences/face_reidentification.h" #include "dynamic_vino_lib/inferences/person_attribs_detection.h" +#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.h" +#include "dynamic_vino_lib/inferences/license_plate_detection.h" #include "dynamic_vino_lib/inputs/image_input.h" #include "dynamic_vino_lib/inputs/realsense_camera.h" #include "dynamic_vino_lib/inputs/realsense_camera_topic.h" @@ -44,6 +46,8 @@ #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/models/face_reidentification_model.h" #include "dynamic_vino_lib/models/person_attribs_detection_model.h" +#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.h" +#include 
"dynamic_vino_lib/models/license_plate_detection_model.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" #include "dynamic_vino_lib/outputs/rviz_output.h" @@ -210,9 +214,16 @@ PipelineManager::parseInference( } else if (infer.name == kInferTpye_FaceReidentification) { object = createFaceReidentification(infer); - } else if (infer.name == kInferTpye_PersonAttribsDetection) { + } + else if (infer.name == kInferTpye_PersonAttribsDetection) { object = createPersonAttribsDetection(infer); } + else if (infer.name == kInferTpye_VehicleAttribsDetection) { + object = createVehicleAttribsDetection(infer); + } + else if (infer.name == kInferTpye_LicensePlateDetection) { + object = createLicensePlateDetection(infer); + } else { slog::err << "Invalid inference name: " << infer.name << slog::endl; } @@ -389,6 +400,40 @@ PipelineManager::createPersonAttribsDetection( return attribs_inference_ptr; } +std::shared_ptr +PipelineManager::createVehicleAttribsDetection( + const Params::ParamManager::InferenceRawData & infer) +{ + auto vehicle_attribs_model = + std::make_shared(infer.model, 1, 2, infer.batch); + vehicle_attribs_model->modelInit(); + auto vehicle_attribs_engine = std::make_shared( + plugins_for_devices_[infer.engine], vehicle_attribs_model); + auto vehicle_attribs_ptr = + std::make_shared(); + vehicle_attribs_ptr->loadNetwork(vehicle_attribs_model); + vehicle_attribs_ptr->loadEngine(vehicle_attribs_engine); + + return vehicle_attribs_ptr; +} + +std::shared_ptr +PipelineManager::createLicensePlateDetection( + const Params::ParamManager::InferenceRawData & infer) +{ + auto license_plate_model = + std::make_shared(infer.model, 2, 1, infer.batch); + license_plate_model->modelInit(); + auto license_plate_engine = std::make_shared( + plugins_for_devices_[infer.engine], license_plate_model); + auto license_plate_ptr = + std::make_shared(); + license_plate_ptr->loadNetwork(license_plate_model); + 
license_plate_ptr->loadEngine(license_plate_engine); + + return license_plate_ptr; +} + void PipelineManager::threadPipeline(const char* name) { PipelineData& p = pipelines_[name]; while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { diff --git a/people_msgs/CMakeLists.txt b/people_msgs/CMakeLists.txt index b2e99384..ce32bf4b 100644 --- a/people_msgs/CMakeLists.txt +++ b/people_msgs/CMakeLists.txt @@ -37,7 +37,10 @@ add_message_files(DIRECTORY msg FILES ReidentificationStamped.msg PersonAttribute.msg PersonAttributeStamped.msg - + LicensePlate.msg + LicensePlateStamped.msg + VehicleAttribs.msg + VehicleAttribsStamped.msg ) add_service_files(FILES diff --git a/people_msgs/msg/LicensePlate.msg b/people_msgs/msg/LicensePlate.msg new file mode 100644 index 00000000..6ba97f8c --- /dev/null +++ b/people_msgs/msg/LicensePlate.msg @@ -0,0 +1,16 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +string license # The license for the detected vehicle in roi +sensor_msgs/RegionOfInterest roi # region of interest for a vehicle license \ No newline at end of file diff --git a/people_msgs/msg/LicensePlateStamped.msg b/people_msgs/msg/LicensePlateStamped.msg new file mode 100644 index 00000000..fa4fbc75 --- /dev/null +++ b/people_msgs/msg/LicensePlateStamped.msg @@ -0,0 +1,16 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +std_msgs/Header header +people_msgs/LicensePlate[] licenses \ No newline at end of file diff --git a/people_msgs/msg/VehicleAttribs.msg b/people_msgs/msg/VehicleAttribs.msg new file mode 100644 index 00000000..39b49696 --- /dev/null +++ b/people_msgs/msg/VehicleAttribs.msg @@ -0,0 +1,17 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +string type # type: car, van, truck, bus +string color # color: blue, gray, yellow, green, black, red, white +sensor_msgs/RegionOfInterest roi # region of interest for a vehicle diff --git a/people_msgs/msg/VehicleAttribsStamped.msg b/people_msgs/msg/VehicleAttribsStamped.msg new file mode 100644 index 00000000..3cdcd47e --- /dev/null +++ b/people_msgs/msg/VehicleAttribsStamped.msg @@ -0,0 +1,16 @@ +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +std_msgs/Header header +people_msgs/VehicleAttribs[] vehicles diff --git a/vino_launch/launch/pipeline_vehicle_detection.launch b/vino_launch/launch/pipeline_vehicle_detection.launch new file mode 100644 index 00000000..c5127078 --- /dev/null +++ b/vino_launch/launch/pipeline_vehicle_detection.launch @@ -0,0 +1,13 @@ + + + + + + + + + + + + diff --git a/vino_launch/param/.pipeline_reidentification.yaml.swo b/vino_launch/param/.pipeline_reidentification.yaml.swo deleted file mode 100644 index 07273f68ec32bbcb214e45402d6bf5fb72d632e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2&u`pB6vrp@QlKpe4i!=lL!%0iy6fG9G|}Ers!BOji4vL~P-M;8&%49gGuF&( zljT6Aa^VlCdh7}Qfc^okI3s}&aOKJg#G&n-@6I}ub$&Eab7?e|zOp@jW54;l_eLA# zbz7S&H~CVt#c;gK*azQS+xq+BCHC7T#-wtTkH=qfc)NxZYS2E^@@sDxCBhA)inncK zO%fSxV_lNEBbAPw*7-oX$lv5*sFHqE3L}cBlqKa-QEW)2q@2to-XpbYjzn=3ClB>3 zJPZU*If0?e8cXNd{9N0Y`t0Qo`TOtPI^~NF%L@bofj}S-2m}IwKp+qZ1OkEo6L94W 
zyNiK0s=+JOd8T%*Zo?@M2m}IwKp+qZ1OkCTAP@)y0)apv5C{ZbLjodY?7>;aZoCQS z@&EtbzyF`V!`PqDkI)a$chE&>2Kwg$V?RMppoh@&^NjrpEkakIGtjehjQtKhf_{FR zF%DgT{(6hCKcL6Zedt>#hi*b`=qdd81^OPk2i=A?A@AR7$Tzqg2m}IwKp+qZ1Olg( zz-raNxU7pps}6qd_`}Fv-*xyGD~0IMZK+bdeU-0k`?)SCxy$Tvt?S{4Nv z0jCnJ6y9b#xYk2g4sm$|bbews?g>pOc6~#(uk*CSTg`=PrCs5YeuuZK+gx-h?{KH1 zE=87Hw6n9*^n?hl(^ucWiA6-nf4MU0UDLIaDBS9mqHCm*NR%^Z;-B_ESJJyWm4%%S3 zKaS*VJlAS3H5ZznE-dWz+rJ@3x~d!=k%=}a85-$Ek@g{vUn~Mnreu+@LLJNBD9uxM ztQq4Ka=*j|SIC-|lne4{-?Uh)D4jrB90~MZx;}wwSy69mMs7GEb0ov!wW(w{%<#V) zmufRN<)S>~~G6s!g-Dxo)xDe3(=( KTdSJsG3#IYrIteg diff --git a/vino_launch/param/.pipeline_reidentification.yaml.swp b/vino_launch/param/.pipeline_reidentification.yaml.swp deleted file mode 100644 index b0b3c4d1b21fc4a769c74a78041a78573f211c90..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2&x;gC6vxZO9E=(zIk*Q8rG}`mGd;7Ts|@iVD}gK~-naTbN@!&46;7fN`zp8%qd9MnF zd7Z}E!q@zEy}{7VGuD4TxBlwFMRsGFF{QoCNBsx1U{~`(_nZ4#eiIC%MEJha@rH|> zO(JVtY)YvgD{W$LOx{;M3JXQm`531ABy5IPVVa& zy(9w1oWQ_mwcBS|YqlA-`jyL<_-CK49`i-h@`wNtAOb{y2oM1xKm>>Y5%?zoU!Gu3 z(J@tZeqLQqOkAr+x)1>(Km>>Y5g-CYfCvx)B0vO)01+SpN05L>8GCk$v6+wIJpTXR z`Tf6jjU!b3$AE0Sy3VQtsW4}T_Lf=7e&ocHqgufTr7tl#)>kMOmKrf(=pf{%( z`wjXT`W_lW1+)ycpbOBS@Zlx&E!2bVKwm<^wf#Se0qc zo1!qf%@_~Os&3D>`NM@mbmar3Q?oI{myNq`N|oSlO+8r;rZt|+43pPV$B*~67_cNaU4;Nf0=PP`o-x`LwERM>Q06w(U#lo+is znQPbf$^ifr*YYKQMZTwg{L@0J?KT2hpn_++iZ;m mc2|pkMRel4fun(QI8l_Y4wUh8XPxcr7ZZ*$wp10 diff --git a/vino_launch/param/pipeline_vehicle_detection.yaml b/vino_launch/param/pipeline_vehicle_detection.yaml new file mode 100644 index 00000000..0636a75e --- /dev/null +++ b/vino_launch/param/pipeline_vehicle_detection.yaml @@ -0,0 +1,35 @@ +Pipelines: +- name: object + inputs: [RealSenseCamera] + infers: + - name: ObjectDetection + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.xml + engine: CPU + 
model_type: SSD + label: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels + batch: 1 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: VehicleAttribsDetection + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_attributes/vehicle/resnet10_update_1/dldt/vehicle-attributes-recognition-barrier-0039.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + - name: LicensePlateDetection + model: /opt/intel/computer_vision_sdk_2018.5.455/deployment_tools/model_downloader/Security/optical_character_recognition/license_plate/dldt/license-plate-recognition-barrier-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + outputs: [ImageWindow, RViz, RosTopic] + connects: + - left: RealSenseCamera + right: [ObjectDetection] + - left: ObjectDetection + right: [{VehicleAttribsDetection: label == vehicle && confidence >= 0.8}, {LicensePlateDetection: label == license && confidence >= 0.8}] + - left: ObjectDetection + right: [ImageWindow, RosTopic, RViz] + - left: VehicleAttribsDetection + right: [ImageWindow, RosTopic, RViz] + - left: LicensePlateDetection + right: [ImageWindow, RosTopic, RViz] + +OpenvinoCommon: \ No newline at end of file From ec395d1887b881c8cad43530457286d5fe7d48bb Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 13:35:57 +0800 Subject: [PATCH 08/63] add suport landmarks detection & fix vehicle attribute detection --- ...icense-plate-detection-barrier-0106.labels | 2 + .../labels/object_detection/yolov2-voc.labels | 20 +++ dynamic_vino_lib/CMakeLists.txt | 2 + .../inferences/landmarks_detection.h | 114 ++++++++++++++++ .../models/landmarks_detection_model.h | 49 +++++++ .../dynamic_vino_lib/outputs/base_output.h | 7 + 
.../outputs/image_window_output.h | 8 ++ .../dynamic_vino_lib/pipeline_manager.h | 3 + .../dynamic_vino_lib/pipeline_params.h | 1 + .../src/inferences/landmarks_detection.cpp | 127 ++++++++++++++++++ .../src/models/landmarks_detection_model.cpp | 64 +++++++++ .../src/outputs/image_window_output.cpp | 13 ++ .../src/outputs/ros_topic_output.cpp | 2 +- dynamic_vino_lib/src/pipeline_manager.cpp | 25 ++++ .../param/pipeline_face_reidentification.yaml | 8 +- .../param/pipeline_reidentification_oss.yaml | 2 +- .../param/pipeline_vehicle_detection.yaml | 8 +- 17 files changed, 445 insertions(+), 10 deletions(-) create mode 100644 data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels create mode 100644 data/labels/object_detection/yolov2-voc.labels create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h create mode 100644 dynamic_vino_lib/src/inferences/landmarks_detection.cpp create mode 100644 dynamic_vino_lib/src/models/landmarks_detection_model.cpp diff --git a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels new file mode 100644 index 00000000..23d4cd9a --- /dev/null +++ b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels @@ -0,0 +1,2 @@ +vehicle +license diff --git a/data/labels/object_detection/yolov2-voc.labels b/data/labels/object_detection/yolov2-voc.labels new file mode 100644 index 00000000..6d218467 --- /dev/null +++ b/data/labels/object_detection/yolov2-voc.labels @@ -0,0 +1,20 @@ +Aeroplane +Bicycle +Bird +Boat +Bottle +Bus +Car +Cat +Chair +Cow +DiningTable +Dog +Horse +Motorbike +Person +PottedPlant +Sheep +Sofa +Train +TV/Monitor diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 68f6691b..2cbf9408 100644 --- 
a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -155,6 +155,7 @@ add_library(${PROJECT_NAME} SHARED src/inferences/face_reidentification.cpp src/inferences/base_reidentification.cpp src/inferences/person_attribs_detection.cpp + src/inferences/landmarks_detection.cpp src/inferences/vehicle_attribs_detection.cpp src/inferences/license_plate_detection.cpp src/inputs/realsense_camera.cpp @@ -173,6 +174,7 @@ add_library(${PROJECT_NAME} SHARED src/models/person_reidentification_model.cpp src/models/face_reidentification_model.cpp src/models/person_attribs_detection_model.cpp + src/models/landmarks_detection_model.cpp src/models/vehicle_attribs_detection_model.cpp src/models/license_plate_detection_model.cpp src/outputs/image_window_output.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h new file mode 100644 index 00000000..d165482c --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h @@ -0,0 +1,114 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief A header file with declaration for LandmarksDetection Class + * @file landmarks_detection.hpp + */ +#ifndef DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#define DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#include +#include +#include +#include "dynamic_vino_lib/models/landmarks_detection_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/inferences/base_inference.h" +#include "inference_engine.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace dynamic_vino_lib +{ +/** + * @class LandmarksDetectionResult + * @brief Class for storing and processing landmarks detection result. + */ +class LandmarksDetectionResult : public Result +{ +public: + friend class LandmarksDetection; + explicit LandmarksDetectionResult(const cv::Rect & location); + std::vector getLandmarks() const + { + return landmark_points_; + } + +private: + std::vector landmark_points_; +}; +/** + * @class LandmarksDetection + * @brief Class to load landmarks detection model and perform landmarks detection. + */ +class LandmarksDetection : public BaseInference +{ +public: + using Result = dynamic_vino_lib::LandmarksDetectionResult; + LandmarksDetection(); + ~LandmarksDetection() override; + /** + * @brief Load the landmarks detection model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. 
+ */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + const int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + const void observeOutput(const std::shared_ptr & output, + const std::string filter_conditions); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; +}; +} // namespace dynamic_vino_lib +#endif // DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h new file mode 100644 index 00000000..4d9175c0 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h @@ -0,0 +1,49 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for LandmarksDetectionModel Class + * @file landmarks_detection_model.h + */ +#ifndef DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#define DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#include +#include "dynamic_vino_lib/models/base_model.h" +namespace Models +{ +/** + * @class LandmarksDetectionModel + * @brief This class generates the landmarks detection model. + */ +class LandmarksDetectionModel : public BaseModel +{ +public: + + LandmarksDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() {return input_;} + inline const std::string getOutputName() {return output_;} + /** + * @brief Get the name of this detection model. + * @return Name of the model. 
+ */ + const std::string getModelName() const override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + std::string input_; + std::string output_; +}; +} // namespace Models +#endif // DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 07dda75f..afcafb54 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -34,6 +34,7 @@ #include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/inferences/person_reidentification.h" +#include "dynamic_vino_lib/inferences/landmarks_detection.h" #include "dynamic_vino_lib/inferences/face_reidentification.h" #include "dynamic_vino_lib/inferences/person_attribs_detection.h" #include "dynamic_vino_lib/inferences/vehicle_attribs_detection.h" @@ -81,6 +82,12 @@ class BaseOutput virtual void accept(const std::vector &) { } + /** + * @brief Generate output content according to the landmarks detection result. + */ + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the face detection result. 
*/ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index 44b9790a..bf7a5abc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -66,6 +66,13 @@ class ImageWindowOutput : public BaseOutput */ void accept( const std::vector &) override; + /** + * @brief Generate image window output content according to + * the landmarks detection result. + * @param[in] A landmarks detection result objetc. + */ + void accept( + const std::vector &) override; /** * @brief Generate image window output content according to * the person attributes detection result. @@ -158,6 +165,7 @@ class ImageWindowOutput : public BaseOutput cv::Point hp_y; // for headpose, end point of yAxis cv::Point hp_zs; // for headpose, start point of zAxis cv::Point hp_ze; // for headpose, end point of zAxis + std::vector landmarks; }; std::vector outputs_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 04b7e906..2770ef9d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -117,6 +117,9 @@ class PipelineManager { const Params::ParamManager::InferenceRawData & infer); std::shared_ptr createLicensePlateDetection( const Params::ParamManager::InferenceRawData & infer); + std::shared_ptr createLandmarksDetection( + const Params::ParamManager::InferenceRawData & infer); + std::map pipelines_; std::map plugins_for_devices_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 5a12ff38..7173cc21 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -57,6 +57,7 @@ const char kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; const char kInferTpye_PersonReidentification[] = "PersonReidentification"; const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; const char kInferTpye_ObjectDetectionTypeYolov2voc[] = "yolov2-voc"; +const char kInferTpye_LandmarksDetection[] = "LandmarksDetection"; const char kInferTpye_FaceReidentification[] = "FaceReidentification"; const char kInferTpye_PersonAttribsDetection[] = "PersonAttribsDetection"; const char kInferTpye_VehicleAttribsDetection[] = "VehicleAttribsDetection"; diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp new file mode 100644 index 00000000..18004ff3 --- /dev/null +++ b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp @@ -0,0 +1,127 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of LandmarksDetection class and + * LandmarksDetectionResult class + * @file landmarks_detection.cpp + */ +#include +#include +#include +#include "dynamic_vino_lib/inferences/landmarks_detection.h" +#include "dynamic_vino_lib/outputs/base_output.h" + +// LandmarksDetectionResult +dynamic_vino_lib::LandmarksDetectionResult::LandmarksDetectionResult( + const cv::Rect & location) +: Result(location) {} + +// LandmarksDetection +dynamic_vino_lib::LandmarksDetection::LandmarksDetection() +: dynamic_vino_lib::BaseInference() {} + +dynamic_vino_lib::LandmarksDetection::~LandmarksDetection() = default; +void dynamic_vino_lib::LandmarksDetection::loadNetwork( + const std::shared_ptr network) +{ + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +bool dynamic_vino_lib::LandmarksDetection::enqueue( + const cv::Mat & frame, const cv::Rect & input_frame_loc) +{ + if (getEnqueuedNum() == 0) { + results_.clear(); + } + if (!dynamic_vino_lib::BaseInference::enqueue( + frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + { + return false; + } + Result r(input_frame_loc); + results_.emplace_back(r); + return true; +} + +bool dynamic_vino_lib::LandmarksDetection::submitRequest() +{ + return dynamic_vino_lib::BaseInference::submitRequest(); +} + +bool dynamic_vino_lib::LandmarksDetection::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + std::string output = valid_model_->getOutputName(); + const float * output_values = request->GetBlob(output)->buffer().as(); + int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; + for (int i = 0; i < getResultsLength(); i++) { + std::vector coordinates = std::vector( + output_values + result_length * i, output_values + result_length * (i + 1)); + for (int j = 0; j < 
result_length; j += 2) { + cv::Rect rect = results_[i].getLocation(); + int col = static_cast(coordinates[j] * rect.width); + int row = static_cast(coordinates[j + 1] * rect.height); + cv::Point landmark_point(rect.x + col, rect.y + row); + results_[i].landmark_points_.push_back(landmark_point); + } + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} + +const int dynamic_vino_lib::LandmarksDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::Result * +dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::LandmarksDetection::getName() const +{ + return valid_model_->getModelName(); +} + +const void dynamic_vino_lib::LandmarksDetection::observeOutput( + const std::shared_ptr & output, + const std::string filter_conditions) +{ + if (output != nullptr) { + output->accept(results_); + } +} + + +const std::vector dynamic_vino_lib::LandmarksDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) { + slog::err << "Landmarks detection does not support filtering now! " << + "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp new file mode 100644 index 00000000..75a7d7e3 --- /dev/null +++ b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp @@ -0,0 +1,64 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of LandmarksDetectionModel class + * @file landmarks_detection_model.cpp + */ +#include +#include "dynamic_vino_lib/models/landmarks_detection_model.h" +// Validated Landmarks Detection Network +Models::LandmarksDetectionModel::LandmarksDetectionModel( + const std::string & model_loc, int input_num, int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + +void Models::LandmarksDetectionModel::setLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) +{ + // set input property + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + // set output property + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); + output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); + // set input and output layer name + input_ = input_info_map.begin()->first; + output_ = output_info_map.begin()->first; +} + +void Models::LandmarksDetectionModel::checkLayerProperty( + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ + InferenceEngine::InputsDataMap input_info_map( + net_reader->getNetwork().getInputsInfo()); + if 
(input_info_map.size() != 1) { + throw std::logic_error("Landmarks Detection topology should have only one input"); + } + InferenceEngine::OutputsDataMap output_info_map( + net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) { + throw std::logic_error("Landmarks Detection Network expects networks having one output"); + } +} + +const std::string Models::LandmarksDetectionModel::getModelName() const +{ + return "Landmarks Detection"; +} diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 7662706c..3a3ee88f 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -360,6 +360,19 @@ void Outputs::ImageWindowOutput::accept( } } +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + std::vector landmark_points = results[i].getLandmarks(); + for (int j = 0; j < landmark_points.size(); j++) { + outputs_[target_index].landmarks.push_back(landmark_points[j]); + } + } +} + void Outputs::ImageWindowOutput::decorateFrame() { if (getPipeline()->getParameters()->isGetFps()) diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index 08037fb2..7167d26b 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -46,7 +46,7 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): pub_face_reid_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/reidentified_faces", 16); pub_person_attribs_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "person_attributes", 16); + "/openvino_toolkit/" + pipeline_name_ + "/person_attributes", 16); pub_license_plate_ = nh_.advertise( "/openvino_toolkit/" + 
pipeline_name_ + "/detected_license_plates", 16); pub_vehicle_attribs_ = nh_.advertise( diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index f9856d51..eaca54b2 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -33,6 +33,7 @@ #include "dynamic_vino_lib/inferences/person_attribs_detection.h" #include "dynamic_vino_lib/inferences/vehicle_attribs_detection.h" #include "dynamic_vino_lib/inferences/license_plate_detection.h" +#include "dynamic_vino_lib/inferences/landmarks_detection.h" #include "dynamic_vino_lib/inputs/image_input.h" #include "dynamic_vino_lib/inputs/realsense_camera.h" #include "dynamic_vino_lib/inputs/realsense_camera_topic.h" @@ -48,6 +49,7 @@ #include "dynamic_vino_lib/models/person_attribs_detection_model.h" #include "dynamic_vino_lib/models/vehicle_attribs_detection_model.h" #include "dynamic_vino_lib/models/license_plate_detection_model.h" +#include "dynamic_vino_lib/models/landmarks_detection_model.h" #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/outputs/ros_topic_output.h" #include "dynamic_vino_lib/outputs/rviz_output.h" @@ -218,6 +220,9 @@ PipelineManager::parseInference( else if (infer.name == kInferTpye_PersonAttribsDetection) { object = createPersonAttribsDetection(infer); } + else if (infer.name == kInferTpye_LandmarksDetection) { + object = createLandmarksDetection(infer); + } else if (infer.name == kInferTpye_VehicleAttribsDetection) { object = createVehicleAttribsDetection(infer); } @@ -400,6 +405,26 @@ PipelineManager::createPersonAttribsDetection( return attribs_inference_ptr; } +std::shared_ptr +PipelineManager::createLandmarksDetection( + const Params::ParamManager::InferenceRawData & infer) +{ + auto landmarks_detection_model = + std::make_shared(infer.model, 1, 1, infer.batch); + landmarks_detection_model->modelInit(); + auto landmarks_detection_engine = std::make_shared( + 
plugins_for_devices_[infer.engine], landmarks_detection_model); + auto landmarks_inference_ptr = + std::make_shared(); + landmarks_inference_ptr->loadNetwork(landmarks_detection_model); + landmarks_inference_ptr->loadEngine(landmarks_detection_engine); + + return landmarks_inference_ptr; +} + + + + std::shared_ptr PipelineManager::createVehicleAttribsDetection( const Params::ParamManager::InferenceRawData & infer) diff --git a/vino_launch/param/pipeline_face_reidentification.yaml b/vino_launch/param/pipeline_face_reidentification.yaml index 8421ac39..8900e2c7 100644 --- a/vino_launch/param/pipeline_face_reidentification.yaml +++ b/vino_launch/param/pipeline_face_reidentification.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/face_detection/output/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml engine: CPU - label: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/face_detection/output/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Retail/object_attributes/landmarks_regression/0009/dldt/landmarks-regression-retail-0009.xml + model: 
/opt/openvino_toolkit/models/landmarks-regression/output/Retail/object_attributes/landmarks_regression/0009/dldt/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Retail/object_reidentification/face/mobilenet_based/dldt/face-reidentification-retail-0071.xml + model: /opt/openvino_toolkit/models/face-reidentification/output/Retail/object_reidentification/face/mobilenet_based/dldt/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/vino_launch/param/pipeline_reidentification_oss.yaml b/vino_launch/param/pipeline_reidentification_oss.yaml index 98837f9f..84219182 100644 --- a/vino_launch/param/pipeline_reidentification_oss.yaml +++ b/vino_launch/param/pipeline_reidentification_oss.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_detection/pedestrian/rmnet_ssd/0013/dldt/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_detection/pedestrian/rmnet_ssd/0013/dldt/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/vino_launch/param/pipeline_vehicle_detection.yaml b/vino_launch/param/pipeline_vehicle_detection.yaml index 0636a75e..459a63b7 100644 --- a/vino_launch/param/pipeline_vehicle_detection.yaml +++ b/vino_launch/param/pipeline_vehicle_detection.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.xml
engine: CPU model_type: SSD - label: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels + label: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/Security/object_attributes/vehicle/resnet10_update_1/dldt/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/Security/object_attributes/vehicle/resnet10_update_1/dldt/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/intel/computer_vision_sdk_2018.5.455/deployment_tools/model_downloader/Security/optical_character_recognition/license_plate/dldt/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/license-plate-recognition/output/Security/optical_character_recognition/license_plate/dldt/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 From 7ca223aa21487192f39b8ff4a718ef22382465d9 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 16:27:08 +0800 Subject: [PATCH 09/63] fix landmark detection --- dynamic_vino_lib/src/outputs/image_window_output.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 3a3ee88f..c01f4c36 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -394,6 +394,10 
@@ void Outputs::ImageWindowOutput::decorateFrame() cv::line(frame_, o.hp_cp, o.hp_y, cv::Scalar(0, 255, 0), 2); cv::line(frame_, o.hp_zs, o.hp_ze, cv::Scalar(255, 0, 0), 2); cv::circle(frame_, o.hp_ze, 3, cv::Scalar(255, 0, 0), 2); + for (int i = 0; i < o.landmarks.size(); i++) + { + cv::circle(frame_, o.landmarks[i], 3, cv::Scalar(255, 0, 0), 2); + } } outputs_.clear(); From 78d25ecd3f5cc15b65ae7f15c0376c85dffe50a2 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 16:58:56 +0800 Subject: [PATCH 10/63] update readme & remove an useless log --- README.md | 19 +++++++++++++++++++ .../src/outputs/image_window_output.cpp | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e500db2e..365fd260 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,10 @@ Currently, the inference feature list is supported: ```/ros_openvino_toolkit/segmented_obejcts```([people_msgs::ObjectsInMasks](https://github.com/intel/ros_openvino_toolkit/blob/devel/people_msgs/msg/ObjectsInMasks.msg)) - Person Reidentification: ```/ros_openvino_toolkit/reidentified_persons```([people_msgs::ReidentificationStamped](https://github.com/intel/ros_openvino_toolkit/blob/devel/people_msgs/msg/ReidentificationStamped.msg)) +- Vehicle Detection: +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped]https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/VehicleAttribsStamped.msg) +- Vehicle License Detection: +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped]https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/LicensePlateStamped.msg) - Rviz Output: ```/ros_openvino_toolkit/image_rviz```([sensor_msgs::Image](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html)) @@ -201,6 +205,11 @@ One-step installation scripts are provided for the dependencies' installation. 
P python3 downloader.py --name head-pose-estimation-adas-0001 python3 downloader.py --name person-detection-retail-0013 python3 downloader.py --name person-reidentification-retail-0076 + sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output + sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output + sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output + sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output + sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir /opt/openvino_toolkit/models/face-reidentification/output ``` * copy label files (excute _once_)
```bash @@ -209,6 +218,8 @@ One-step installation scripts are provided for the dependencies' installation. P sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32 sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels + ``` * set ENV LD_LIBRARY_PATH
```bash @@ -261,6 +272,14 @@ One-step installation scripts are provided for the dependencies' installation. P ```bash rosrun dynamic_vino_sample image_people_client ~/catkin_ws/src/ros_openvino_toolkit/data/images/team.jpg ``` +* run face re-identification with facial landmarks from realsense camera + ``bash + roslaunch vino_launch pipeline_face_reidentification.launch + ``` +* run vehicle detection sample code input from StandardCamera. + ```bash + roslaunch vino_launch pipeline_vehicle_detection.launch + ``` # TODO Features * Support **result filtering** for inference process, so that the inference results can be filtered to different subsidiary inference. For example, given an image, firstly we do Object Detection on it, secondly we pass cars to vehicle brand recognition and pass license plate to license number recognition. * Design **resource manager** to better use such resources as models, engines, and other external plugins. diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index c01f4c36..f4f0e9cc 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -58,8 +58,8 @@ void Outputs::ImageWindowOutput::accept( if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err << "the size of Face Detection and Output Vector is not equal!" - << slog::endl; + // slog::err << "the size of Face Detection and Output Vector is not equal!" 
+ // << slog::endl; return; } From 78714ff4fff0a86ee936642a07188da373fe8507 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 17:04:01 +0800 Subject: [PATCH 11/63] readme update --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 365fd260..657676aa 100644 --- a/README.md +++ b/README.md @@ -81,9 +81,9 @@ Currently, the inference feature list is supported: - Person Reidentification: ```/ros_openvino_toolkit/reidentified_persons```([people_msgs::ReidentificationStamped](https://github.com/intel/ros_openvino_toolkit/blob/devel/people_msgs/msg/ReidentificationStamped.msg)) - Vehicle Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped]https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/VehicleAttribsStamped.msg) +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/VehicleAttribsStamped.msg)) - Vehicle License Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped]https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/LicensePlateStamped.msg) +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/LicensePlateStamped.msg)) - Rviz Output: ```/ros_openvino_toolkit/image_rviz```([sensor_msgs::Image](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html)) From 1e70f8d8747a608b1d0ab33aac412f6360596583 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 17:06:28 +0800 Subject: [PATCH 12/63] readme update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 657676aa..73df482a 100644 --- a/README.md +++ b/README.md @@ -273,7 +273,7
@@ One-step installation scripts are provided for the dependencies' installation. P rosrun dynamic_vino_sample image_people_client ~/catkin_ws/src/ros_openvino_toolkit/data/images/team.jpg ``` * run face re-identification with facial landmarks from realsense camera - ``bash + ``bash roslaunch vino_launch pipeline_face_reidentification.launch ``` * run vehicle detection sample code input from StandardCamera. From f2aec747f68de305d427e53d207130724d8ffd6b Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 5 Nov 2019 17:08:38 +0800 Subject: [PATCH 13/63] readme update --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 73df482a..ed238952 100644 --- a/README.md +++ b/README.md @@ -273,12 +273,12 @@ One-step installation scripts are provided for the dependencies' installation. P rosrun dynamic_vino_sample image_people_client ~/catkin_ws/src/ros_openvino_toolkit/data/images/team.jpg ``` * run face re-identification with facial landmarks from realsense camera - ``bash + ```bash roslaunch vino_launch pipeline_face_reidentification.launch ``` * run vehicle detection sample code input from StandardCamera. ```bash - roslaunch vino_launch pipeline_vehicle_detection.launch + roslaunch vino_launch pipeline_vehicle_detection.launch ``` # TODO Features * Support **result filtering** for inference process, so that the inference results can be filtered to different subsidiary inference. For example, given an image, firstly we do Object Detection on it, secondly we pass cars to vehicle brand recognition and pass license plate to license number recognition. 
From ca96a5baf1acc84a15565b4ce995f9f64d57d168 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Thu, 7 Nov 2019 14:57:24 +0800 Subject: [PATCH 14/63] enable engine manager --- dynamic_vino_lib/CMakeLists.txt | 3 +- .../include/dynamic_vino_lib/engines/engine.h | 4 + .../dynamic_vino_lib/engines/engine_manager.h | 62 ++++++++ .../dynamic_vino_lib/models/base_model.h | 5 +- .../dynamic_vino_lib/pipeline_manager.h | 3 +- .../dynamic_vino_lib/utils/version_info.hpp | 143 ++++++++++++++++++ dynamic_vino_lib/src/engines/engine.cpp | 8 +- .../src/engines/engine_manager.cpp | 108 +++++++++++++ dynamic_vino_lib/src/pipeline_manager.cpp | 50 +++--- 9 files changed, 354 insertions(+), 32 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp create mode 100644 dynamic_vino_lib/src/engines/engine_manager.cpp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 2cbf9408..7eabe967 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -107,7 +107,7 @@ if(UNIX OR APPLE) # Generic flags. 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") # Add OpenMP support set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp") @@ -141,6 +141,7 @@ add_library(${PROJECT_NAME} SHARED src/pipeline_params.cpp src/pipeline_manager.cpp src/engines/engine.cpp + src/engines/engine_manager.cpp src/inferences/base_filter.cpp src/inferences/base_inference.cpp src/inferences/emotions_detection.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h index 5eb284a2..66a82361 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h @@ -41,6 +41,10 @@ class Engine * from a inference plugin and an inference network. */ Engine(InferenceEngine::InferencePlugin, Models::BaseModel::Ptr); + /** + * @brief Using an Inference Request to initialize the inference Engine. + */ + Engine(InferenceEngine::InferRequest::Ptr &); /** * @brief Get the inference request this instance holds. * @return The inference request this instance holds. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h new file mode 100644 index 00000000..fb923db1 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h @@ -0,0 +1,62 @@ +// Copyright (c) 2018-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for NetworkEngine class + * @file engine.h + */ +#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ +#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ + +#pragma once + +#include "dynamic_vino_lib/models/base_model.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "inference_engine.hpp" + +namespace Engines +{ +/** + * @class EngineManager + * @brief This class is used to create and manage Inference engines. + */ +class EngineManager +{ +public: + /** + * @brief Create InferenceEngine instance by given Engine Name and Network. + * @return The shared pointer of created Engine instance. 
+ */ + std::shared_ptr createEngine( + const std::string &, const std::shared_ptr &); + +private: + std::map plugins_for_devices_; + + std::unique_ptr + makePluginByName( + const std::string & device_name, const std::string & custom_cpu_library_message, + const std::string & custom_cldnn_message, bool performance_message); + + std::shared_ptr createEngine_beforeV2019R2( + const std::string &, const std::shared_ptr &); + +#if(defined(USE_IE_CORE)) + std::shared_ptr createEngine_V2019R2_plus( + const std::string &, const std::shared_ptr &); +#endif +}; +} // namespace Engines + +#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index 6977487a..666c7131 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -83,7 +83,10 @@ class BaseModel * @return The name of the model. 
*/ virtual const std::string getModelName() const = 0; - //InferenceEngine::CNNNetReader::Ptr net_reader_; + inline InferenceEngine::CNNNetReader::Ptr getNetReader() const + { + return net_reader_; + } protected: /** diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 2770ef9d..ef50ddc2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -31,6 +31,7 @@ #include #include "dynamic_vino_lib/pipeline.h" +#include "dynamic_vino_lib/engines/engine_manager.h" /** * @class PipelineManager @@ -122,7 +123,7 @@ class PipelineManager { std::map pipelines_; - std::map plugins_for_devices_; + Engines::EngineManager engine_manager_; }; #endif // DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp new file mode 100644 index 00000000..735f3406 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp @@ -0,0 +1,143 @@ +// Copyright (c) 2018-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// @brief a header file with version information about Inference Engine. 
+// @file version_info.hpp +// + +#ifndef DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ +#define DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef WIN32 +#define UNUSED +#else +#define UNUSED __attribute__((unused)) +#endif + +/** + * @brief Trims from both ends (in place) + * @param s - string to trim + * @return trimmed string + */ +inline std::string & trim(std::string & s) +{ + s.erase(s.begin(), + std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); + s.erase( + std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), + s.end()); + return s; +} + +/** +* @brief Converts string to TargetDevice +* @param deviceName - string value representing device +* @return TargetDevice value that corresponds to input string. +* eDefault in case no corresponding value was found +*/ +static InferenceEngine::TargetDevice getDeviceFromStr(const std::string & deviceName) +{ + return InferenceEngine::TargetDeviceInfo::fromStr(deviceName); +} + +static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) +{ + os << "\n\tAPI version ............ "; + if (nullptr == version) { + os << "UNKNOWN"; + } else { + os << version->apiVersion.major << "." << version->apiVersion.minor; + if (nullptr != version->buildNumber) { + os << "\n\t" << + "Build .................. " << version->buildNumber; + } + if (nullptr != version->description) { + os << "\n\t" << + "Description ............ 
" << version->description; + } + } + return os; +} + +/** + * @class PluginVersion + * @brief A PluginVersion class stores plugin version and initialization status + */ +struct PluginVersion : public InferenceEngine::Version +{ + bool initialized = false; + + explicit PluginVersion(const InferenceEngine::Version * ver) + { + if (nullptr == ver) { + return; + } + InferenceEngine::Version::operator=(*ver); + initialized = true; + } + + operator bool() const noexcept + { + return initialized; + } +}; + +static UNUSED std::ostream & operator<<(std::ostream & os, const PluginVersion & version) +{ + os << "\tPlugin version ......... "; + if (!version) { + os << "UNKNOWN"; + } else { + os << version.apiVersion.major << "." << version.apiVersion.minor; + } + + os << "\n\tPlugin name ............ "; + if (!version || version.description == nullptr) { + os << "UNKNOWN"; + } else { + os << version.description; + } + + os << "\n\tPlugin build ........... "; + if (!version || version.buildNumber == nullptr) { + os << "UNKNOWN"; + } else { + os << version.buildNumber; + } + + return os; +} + +inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream & stream) +{ + const PluginVersion * pluginVersion = nullptr; + ptr->GetVersion((const InferenceEngine::Version * &)pluginVersion); + stream << pluginVersion << std::endl; +} + +#endif // DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/dynamic_vino_lib/src/engines/engine.cpp index db008926..262953ee 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/dynamic_vino_lib/src/engines/engine.cpp @@ -23,6 +23,12 @@ Engines::Engine::Engine(InferenceEngine::InferencePlugin plg, const Models::BaseModel::Ptr base_model) { - request_ = (plg.LoadNetwork(base_model->net_reader_->getNetwork(), {})) + request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})) .CreateInferRequestPtr(); } + +Engines::Engine::Engine( + 
InferenceEngine::InferRequest::Ptr & request) +{ + request_ = request; +} diff --git a/dynamic_vino_lib/src/engines/engine_manager.cpp b/dynamic_vino_lib/src/engines/engine_manager.cpp new file mode 100644 index 00000000..fc97c340 --- /dev/null +++ b/dynamic_vino_lib/src/engines/engine_manager.cpp @@ -0,0 +1,108 @@ +// Copyright (c) 2018-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with definition of Engine class + * @file engine.cpp + */ +#include "dynamic_vino_lib/engines/engine_manager.h" +#include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/slog.h" +#include "dynamic_vino_lib/models/base_model.h" +#include "dynamic_vino_lib/utils/version_info.hpp" +#include +#include +#include + +std::shared_ptr Engines::EngineManager::createEngine( + const std::string & device, const std::shared_ptr & model) +{ +#if(defined(USE_IE_CORE)) + return createEngine_V2019R2_plus(device, model); +#else + return createEngine_beforeV2019R2(device, model); +#endif +} + +std::shared_ptr Engines::EngineManager::createEngine_beforeV2019R2( + const std::string & device, const std::shared_ptr & model) +{ + if(plugins_for_devices_.find(device) == plugins_for_devices_.end()) { + auto pcommon = Params::ParamManager::getInstance().getCommon(); + plugins_for_devices_[device] = *makePluginByName(device, pcommon.custom_cpu_library, + pcommon.custom_cldnn_library, pcommon.enable_performance_count); + slog::info << 
"Created plugin for " << device << slog::endl; + } + + auto executeable_network = + plugins_for_devices_[device].LoadNetwork(model->getNetReader()->getNetwork(), {}); + auto request = executeable_network.CreateInferRequestPtr(); + + return std::make_shared(request); +} + +#if(defined(USE_IE_CORE)) +std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( + const std::string & device, const std::shared_ptr & model) +{ + InferenceEngine::Core core; + auto executable_network = core.LoadNetwork(model->getNetReader()->getNetwork(), device); + auto request = executable_network.CreateInferRequestPtr(); + + return std::make_shared(request); +} +#endif + +std::unique_ptr +Engines::EngineManager::makePluginByName( + const std::string & device_name, const std::string & custom_cpu_library_message, + const std::string & custom_cldnn_message, bool performance_message) +{ + slog::info << "Creating plugin for " << device_name << slog::endl; + + InferenceEngine::InferencePlugin plugin = + InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}) + .getPluginByDevice(device_name); + + /** Printing plugin version **/ + printPluginVersion(plugin, std::cout); + + /** Load extensions for the CPU plugin **/ + if ((device_name.find("CPU") != std::string::npos)) { + plugin.AddExtension(std::make_shared()); + if (!custom_cpu_library_message.empty()) { + slog::info << "custom cpu library is not empty, tyring to use this extension:" + << custom_cpu_library_message << slog::endl; + // CPU(MKLDNN) extensions are loaded as a shared library and passed as a + // pointer to base + // extension + auto extension_ptr = + InferenceEngine::make_so_pointer(custom_cpu_library_message); + plugin.AddExtension(extension_ptr); + } + } else if (!custom_cldnn_message.empty()) { + slog::info << "custom cldnn library is not empty, tyring to use this extension:" + << custom_cldnn_message << slog::endl; + // Load Extensions for other plugins not CPU + plugin.SetConfig( + 
{{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); + } + if (performance_message) { + plugin.SetConfig({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, + InferenceEngine::PluginConfigParams::YES}}); + } + + return std::make_unique( + InferenceEngine::InferenceEnginePluginPtr(plugin)); +} diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index eaca54b2..f3e039d3 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -58,6 +58,7 @@ #include "dynamic_vino_lib/pipeline_manager.h" #include "dynamic_vino_lib/pipeline_params.h" #include "dynamic_vino_lib/services/pipeline_processing_server.h" +#include "dynamic_vino_lib/engines/engine_manager.h" std::shared_ptr PipelineManager::createPipeline( const Params::ParamManager::PipelineRawData& params) { @@ -187,10 +188,6 @@ PipelineManager::parseInference( } slog::info << "Parsing Inference: " << infer.name << slog::endl; std::shared_ptr object = nullptr; - if (plugins_for_devices_.find(infer.engine) == plugins_for_devices_.end()) { - plugins_for_devices_[infer.engine] = - *Factory::makePluginByName(infer.engine, FLAGS_l, FLAGS_c, FLAGS_pc); - } if (infer.name == kInferTpye_FaceDetection) { object = createFaceDetection(infer); @@ -250,8 +247,8 @@ PipelineManager::createFaceDetection( auto face_detection_model = std::make_shared(infer.model, 1, 1, 1); face_detection_model->modelInit(); - auto face_detection_engine = std::make_shared( - plugins_for_devices_[infer.engine], face_detection_model); + auto face_detection_engine = engine_manager_.createEngine( + infer.engine, face_detection_model); auto face_inference_ptr = std::make_shared( 0.5); // TODO: add output_threshold in param_manager face_inference_ptr->loadNetwork(face_detection_model); @@ -266,8 +263,7 @@ PipelineManager::createAgeGenderRecognition( auto model = std::make_shared(param.model, 1, 2, 16); model->modelInit(); - auto engine = 
std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -281,8 +277,7 @@ PipelineManager::createEmotionRecognition( auto model = std::make_shared(param.model, 1, 1, 16); model->modelInit(); - auto engine = std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -296,8 +291,7 @@ PipelineManager::createHeadPoseEstimation( auto model = std::make_shared(param.model, 1, 3, 16); model->modelInit(); - auto engine = std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -331,8 +325,8 @@ const Params::ParamManager::InferenceRawData & infer) } object_detection_model->modelInit(); - auto object_detection_engine = std::make_shared( - plugins_for_devices_[infer.engine], object_detection_model); + auto object_detection_engine = engine_manager_.createEngine( + infer.engine, object_detection_model); object_inference_ptr->loadNetwork(object_detection_model); object_inference_ptr->loadEngine(object_detection_engine); @@ -345,8 +339,8 @@ PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceR auto obejct_segmentation_model = std::make_shared(infer.model, 1, 2, 1); obejct_segmentation_model->modelInit(); - auto obejct_segmentation_engine = std::make_shared( - plugins_for_devices_[infer.engine], obejct_segmentation_model); + auto obejct_segmentation_engine = engine_manager_.createEngine( + infer.engine, obejct_segmentation_model); auto segmentation_inference_ptr = std::make_shared(0.5); segmentation_inference_ptr->loadNetwork(obejct_segmentation_model); 
segmentation_inference_ptr->loadEngine(obejct_segmentation_engine); @@ -361,8 +355,8 @@ PipelineManager::createPersonReidentification( auto person_reidentification_model = std::make_shared(infer.model, 1, 1, infer.batch); person_reidentification_model->modelInit(); - auto person_reidentification_engine = std::make_shared( - plugins_for_devices_[infer.engine], person_reidentification_model); + auto person_reidentification_engine = engine_manager_.createEngine( + infer.engine, person_reidentification_model); auto reidentification_inference_ptr = std::make_shared(infer.confidence_threshold); reidentification_inference_ptr->loadNetwork(person_reidentification_model); @@ -378,8 +372,8 @@ PipelineManager::createFaceReidentification( auto face_reidentification_model = std::make_shared(infer.model, 1, 1, infer.batch); face_reidentification_model->modelInit(); - auto face_reidentification_engine = std::make_shared( - plugins_for_devices_[infer.engine], face_reidentification_model); + auto face_reidentification_engine = engine_manager_.createEngine( + infer.engine, face_reidentification_model); auto face_reid_ptr = std::make_shared(infer.confidence_threshold); face_reid_ptr->loadNetwork(face_reidentification_model); @@ -395,8 +389,8 @@ PipelineManager::createPersonAttribsDetection( auto person_attribs_detection_model = std::make_shared(infer.model, 1, 1, infer.batch); person_attribs_detection_model->modelInit(); - auto person_attribs_detection_engine = std::make_shared( - plugins_for_devices_[infer.engine], person_attribs_detection_model); + auto person_attribs_detection_engine = engine_manager_.createEngine( + infer.engine, person_attribs_detection_model); auto attribs_inference_ptr = std::make_shared(infer.confidence_threshold); attribs_inference_ptr->loadNetwork(person_attribs_detection_model); @@ -412,8 +406,8 @@ PipelineManager::createLandmarksDetection( auto landmarks_detection_model = std::make_shared(infer.model, 1, 1, infer.batch); 
landmarks_detection_model->modelInit(); - auto landmarks_detection_engine = std::make_shared( - plugins_for_devices_[infer.engine], landmarks_detection_model); + auto landmarks_detection_engine = engine_manager_.createEngine( + infer.engine, landmarks_detection_model); auto landmarks_inference_ptr = std::make_shared(); landmarks_inference_ptr->loadNetwork(landmarks_detection_model); @@ -432,8 +426,8 @@ PipelineManager::createVehicleAttribsDetection( auto vehicle_attribs_model = std::make_shared(infer.model, 1, 2, infer.batch); vehicle_attribs_model->modelInit(); - auto vehicle_attribs_engine = std::make_shared( - plugins_for_devices_[infer.engine], vehicle_attribs_model); + auto vehicle_attribs_engine = engine_manager_.createEngine( + infer.engine, vehicle_attribs_model); auto vehicle_attribs_ptr = std::make_shared(); vehicle_attribs_ptr->loadNetwork(vehicle_attribs_model); @@ -449,8 +443,8 @@ PipelineManager::createLicensePlateDetection( auto license_plate_model = std::make_shared(infer.model, 2, 1, infer.batch); license_plate_model->modelInit(); - auto license_plate_engine = std::make_shared( - plugins_for_devices_[infer.engine], license_plate_model); + auto license_plate_engine = engine_manager_.createEngine( + infer.engine, license_plate_model); auto license_plate_ptr = std::make_shared(); license_plate_ptr->loadNetwork(license_plate_model); From 59c0d913938abeadb509fbaa8d833532f3b03ada Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Thu, 7 Nov 2019 15:53:44 +0800 Subject: [PATCH 15/63] code refector for pipeline manager --- dynamic_vino_lib/CMakeLists.txt | 6 ++++ dynamic_vino_lib/src/pipeline_manager.cpp | 34 +++++++++++------------ 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 7eabe967..2d1588d3 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -16,6 +16,12 @@ cmake_minimum_required (VERSION 2.8.3) 
project(dynamic_vino_lib) +#################################### +## to use new InferenceEngine API (InferenceEngine::Core), +## then, uncomment below line +#add_definitions(-DUSE_IE_CORE) +#################################### + message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") find_package(InferenceEngine 1.1) if (NOT InferenceEngine_FOUND) diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index f3e039d3..ff6e8268 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -159,6 +159,8 @@ PipelineManager::parseOutput( object = std::make_shared(params.name); } else if (name == kOutputTpye_RosService) { object = std::make_shared(params.name); + } else { + slog::err << "Invalid output name: " << name << slog::endl; } if (object != nullptr) { outputs.insert({name, object}); @@ -191,19 +193,18 @@ PipelineManager::parseInference( if (infer.name == kInferTpye_FaceDetection) { object = createFaceDetection(infer); - - } else if (infer.name == kInferTpye_AgeGenderRecognition) { + } + else if (infer.name == kInferTpye_AgeGenderRecognition) { object = createAgeGenderRecognition(infer); - - } else if (infer.name == kInferTpye_EmotionRecognition) { + } + else if (infer.name == kInferTpye_EmotionRecognition) { object = createEmotionRecognition(infer); - - } else if (infer.name == kInferTpye_HeadPoseEstimation) { + } + else if (infer.name == kInferTpye_HeadPoseEstimation) { object = createHeadPoseEstimation(infer); - - } else if (infer.name == kInferTpye_ObjectDetection) { + } + else if (infer.name == kInferTpye_ObjectDetection) { object = createObjectDetection(infer); - } else if (infer.name == kInferTpye_ObjectSegmentation) { object = createObjectSegmentation(infer); @@ -230,7 +231,6 @@ PipelineManager::parseInference( slog::err << "Invalid inference name: " << infer.name << slog::endl; } - if (object != nullptr) { 
inferences.insert({infer.name, object}); slog::info << " ... Adding one Inference: " << infer.name << slog::endl; @@ -245,7 +245,7 @@ PipelineManager::createFaceDetection( const Params::ParamManager::InferenceRawData& infer) { // TODO: add batch size in param_manager auto face_detection_model = - std::make_shared(infer.model, 1, 1, 1); + std::make_shared(infer.model, 1, 1, infer.batch); face_detection_model->modelInit(); auto face_detection_engine = engine_manager_.createEngine( infer.engine, face_detection_model); @@ -261,7 +261,7 @@ std::shared_ptr PipelineManager::createAgeGenderRecognition( const Params::ParamManager::InferenceRawData& param) { auto model = - std::make_shared(param.model, 1, 2, 16); + std::make_shared(param.model, 1, 2, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -275,7 +275,7 @@ std::shared_ptr PipelineManager::createEmotionRecognition( const Params::ParamManager::InferenceRawData& param) { auto model = - std::make_shared(param.model, 1, 1, 16); + std::make_shared(param.model, 1, 1, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -289,7 +289,7 @@ std::shared_ptr PipelineManager::createHeadPoseEstimation( const Params::ParamManager::InferenceRawData& param) { auto model = - std::make_shared(param.model, 1, 3, 16); + std::make_shared(param.model, 1, 3, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -309,7 +309,7 @@ const Params::ParamManager::InferenceRawData & infer) if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) { object_detection_model = - std::make_shared(infer.model, 1, 1, 1); + std::make_shared(infer.model, 1, 1, infer.batch); object_inference_ptr = std::make_shared( 0.5); // To-do theshold configuration @@ -318,7 +318,7 @@ const 
Params::ParamManager::InferenceRawData & infer) if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2voc) { object_detection_model = - std::make_shared(infer.model, 1, 1, 1); + std::make_shared(infer.model, 1, 1, infer.batch); object_inference_ptr = std::make_shared( 0.5); // To-do theshold configuration @@ -337,7 +337,7 @@ std::shared_ptr PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) { auto obejct_segmentation_model = - std::make_shared(infer.model, 1, 2, 1); + std::make_shared(infer.model, 1, 2, infer.batch); obejct_segmentation_model->modelInit(); auto obejct_segmentation_engine = engine_manager_.createEngine( infer.engine, obejct_segmentation_model); From 9c1cddddb5e800a6cedb27356df282471e8c4b51 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Fri, 8 Nov 2019 10:03:57 +0800 Subject: [PATCH 16/63] remove factory.cpp --- dynamic_vino_lib/CMakeLists.txt | 1 - .../include/dynamic_vino_lib/factory.h | 70 ------------ dynamic_vino_lib/src/factory.cpp | 102 ------------------ dynamic_vino_lib/src/pipeline_manager.cpp | 2 - sample/src/image_object_server.cpp | 1 - sample/src/image_people_server.cpp | 1 - sample/src/image_reidentification_server.cpp | 1 - sample/src/image_segmentation_server.cpp | 1 - sample/src/pipeline_with_params.cpp | 1 - 9 files changed, 180 deletions(-) delete mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/factory.h delete mode 100644 dynamic_vino_lib/src/factory.cpp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 2d1588d3..26969100 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -142,7 +142,6 @@ add_library(${PROJECT_NAME} SHARED src/services/frame_processing_server.cpp src/services/pipeline_processing_server.cpp src/services/test.cpp - src/factory.cpp src/pipeline.cpp src/pipeline_params.cpp src/pipeline_manager.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/factory.h 
b/dynamic_vino_lib/include/dynamic_vino_lib/factory.h deleted file mode 100644 index 98210efb..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/factory.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @brief a header file with declaration of Factory class - * @file factory.h - */ - -#ifndef DYNAMIC_VINO_LIB_FACTORY_H -#define DYNAMIC_VINO_LIB_FACTORY_H - -#include - -#include -#include - -#include "dynamic_vino_lib/common.h" -#include "dynamic_vino_lib/inputs/base_input.h" -#include "extension/ext_list.hpp" - -/** -* @class Factory -* @brief This class is a factory class that produces the derived input device -* class corresponding to -* the input string -*/ -class Factory -{ - public: - /** - * @brief This function produces the derived input device class corresponding - * to the input string - * @param[in] input device name, can be RealSenseCamera, StandardCamera or - * video directory - * @param[in] input file path, file path for a video or image file - * @return the instance of derived input device referenced by a smart pointer - */ - static std::shared_ptr makeInputDeviceByName( - const std::string& input_device_name, const std::string& input_file_path); - /** - * @brief This function produces the derived inference plugin corresponding to - * the input string - * @param[in] device_name The name of target device (CPU, GPU, FPGA, MYRIAD) - * @param[in] 
custom_cpu_library_message Absolute path to CPU library with user - * layers - * @param[in] custom_cldnn_message clDNN custom kernels path - * @param[in] performance_message Enable per-layer performance report - * @return the instance of derived inference plugin referenced by a smart - * pointer - */ - static std::shared_ptr makePluginByName( - const std::string& device_name, - const std::string& custom_cpu_library_message, - const std::string& custom_cldnn_message, bool performance_message); -}; - -#endif // DYNAMIC_VINO_LIB_FACTORY_H diff --git a/dynamic_vino_lib/src/factory.cpp b/dynamic_vino_lib/src/factory.cpp deleted file mode 100644 index 9560b67d..00000000 --- a/dynamic_vino_lib/src/factory.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @brief a header file with declaration of Factory class - * @file factory.cpp - */ - -#include -#include - -#include "dynamic_vino_lib/factory.h" -#include "dynamic_vino_lib/inputs/image_input.h" -#include "dynamic_vino_lib/inputs/realsense_camera.h" -#include "dynamic_vino_lib/inputs/realsense_camera_topic.h" -#include "dynamic_vino_lib/inputs/standard_camera.h" -#include "dynamic_vino_lib/inputs/video_input.h" - -std::shared_ptr Factory::makeInputDeviceByName( - const std::string& input_device_name, const std::string& input_file_path) -{ - if (input_device_name == "RealSenseCamera") - { - return std::make_shared(); - } - else if (input_device_name == "StandardCamera") - { - return std::make_shared(); - } - else if (input_device_name == "RealSenseCameraTopic") - { - return std::make_shared(); - } - else if (input_device_name == "Video") - { - return std::make_shared(input_file_path); - } - else if (input_device_name == "Image") - { - return std::make_shared(input_file_path); - } - else - { - throw std::logic_error("Unsupported input category"); - } -} - -std::shared_ptr Factory::makePluginByName( - const std::string& device_name, - const std::string& custom_cpu_library_message, // FLAGS_l - const std::string& custom_cldnn_message, // FLAGS_c - bool performance_message) // FLAGS_pc -{ - - InferenceEngine::InferencePlugin plugin = InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}).getPluginByDevice(device_name); - printPluginVersion(plugin, std::cout); - - /** Load extensions for the CPU plugin **/ - if ((device_name.find("CPU") != std::string::npos)) - { - plugin.AddExtension(std::make_shared()); - - if (!custom_cpu_library_message.empty()) - { - // CPU(MKLDNN) extensions are loaded as a shared library and passed as a - // pointer to base - // extension - auto extension_ptr = InferenceEngine::make_so_pointer( - custom_cpu_library_message); - plugin.AddExtension(extension_ptr); - } - - } - else if (!custom_cldnn_message.empty()) - { - 
plugin.SetConfig( - {{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); - } - - if (performance_message) - { - plugin.SetConfig( - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}); - } - - return std::make_shared( - InferenceEngine::InferenceEnginePluginPtr(plugin)); - -} diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index ff6e8268..f9da0b78 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -22,9 +22,7 @@ #include #include #include - #include -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/inferences/face_detection.h" diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index 25905814..f4de452f 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -42,7 +42,6 @@ #include #include "dynamic_vino_lib/common.h" #include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/base_inference.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" diff --git a/sample/src/image_people_server.cpp b/sample/src/image_people_server.cpp index 92520ec5..00a91985 100644 --- a/sample/src/image_people_server.cpp +++ b/sample/src/image_people_server.cpp @@ -42,7 +42,6 @@ #include #include "dynamic_vino_lib/common.h" #include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/base_inference.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" diff --git a/sample/src/image_reidentification_server.cpp 
b/sample/src/image_reidentification_server.cpp index 86293ddb..0300cb9f 100644 --- a/sample/src/image_reidentification_server.cpp +++ b/sample/src/image_reidentification_server.cpp @@ -42,7 +42,6 @@ #include #include "dynamic_vino_lib/common.h" #include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/base_inference.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" diff --git a/sample/src/image_segmentation_server.cpp b/sample/src/image_segmentation_server.cpp index 34288a29..130b2899 100644 --- a/sample/src/image_segmentation_server.cpp +++ b/sample/src/image_segmentation_server.cpp @@ -42,7 +42,6 @@ #include #include "dynamic_vino_lib/common.h" #include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/base_inference.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp index b23e160f..ed0e1dcb 100644 --- a/sample/src/pipeline_with_params.cpp +++ b/sample/src/pipeline_with_params.cpp @@ -42,7 +42,6 @@ #include #include "dynamic_vino_lib/common.h" #include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/factory.h" #include "dynamic_vino_lib/inferences/age_gender_detection.h" #include "dynamic_vino_lib/inferences/base_inference.h" #include "dynamic_vino_lib/inferences/emotions_detection.h" From d138dd92f0a1d0e2acc4a6f99d03b496cbdd1187 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Fri, 8 Nov 2019 16:53:23 +0800 Subject: [PATCH 17/63] bug fix for c++11 --- dynamic_vino_lib/src/engines/engine_manager.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dynamic_vino_lib/src/engines/engine_manager.cpp b/dynamic_vino_lib/src/engines/engine_manager.cpp index 
fc97c340..891bc7af 100644 --- a/dynamic_vino_lib/src/engines/engine_manager.cpp +++ b/dynamic_vino_lib/src/engines/engine_manager.cpp @@ -103,6 +103,5 @@ Engines::EngineManager::makePluginByName( InferenceEngine::PluginConfigParams::YES}}); } - return std::make_unique( - InferenceEngine::InferenceEnginePluginPtr(plugin)); + return std::unique_ptr (new InferenceEngine::InferencePlugin(plugin)); } From c98652da4046bcf57b6b859fd75621cbe3bdaff7 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 12 Nov 2019 13:05:13 +0800 Subject: [PATCH 18/63] update object_segmentation model for 2019_R3 --- .../inferences/object_segmentation.h | 7 +- .../dynamic_vino_lib/models/base_model.h | 63 ++++++--- .../models/object_segmentation_model.h | 24 +++- .../src/inferences/object_segmentation.cpp | 37 +++++- dynamic_vino_lib/src/models/base_model.cpp | 25 ++-- .../src/models/object_segmentation_model.cpp | 121 ++++++++++++++++-- dynamic_vino_lib/src/pipeline.cpp | 3 +- dynamic_vino_lib/src/pipeline_manager.cpp | 19 +-- 8 files changed, 236 insertions(+), 63 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h index 11c5b8fe..44d919c9 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h @@ -86,6 +86,10 @@ class ObjectSegmentation : public BaseInference * @return Whether this operation is successful. */ bool enqueue(const cv::Mat &, const cv::Rect &) override; + + //Deprecated!! + bool enqueue_for_one_input(const cv::Mat &, const cv::Rect &); + /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -114,13 +118,12 @@ class ObjectSegmentation : public BaseInference or ROS topic. 
*/ const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + const std::string filter_conditions) override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index 666c7131..bc85f345 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -22,6 +22,8 @@ #ifndef DYNAMIC_VINO_LIB_MODELS_BASE_MODEL_H #define DYNAMIC_VINO_LIB_MODELS_BASE_MODEL_H +#include + #include #include #include @@ -33,6 +35,11 @@ namespace Engines class Engine; } +namespace dynamic_vino_lib +{ +class ObjectDetectionResult; +} + namespace Models { /** @@ -41,9 +48,8 @@ namespace Models */ class BaseModel { +public: using Ptr = std::shared_ptr; - - public: /** * @brief Initialize the class with given .xml, .bin and .labels file. It will * also check whether the number of input and output are fit. @@ -54,13 +60,12 @@ class BaseModel * @param[in] batch_size The number of batch size the network should have. * @return Whether the input device is successfully turned on. */ - BaseModel(const std::string& model_loc, int input_num, int output_num, - int batch_size); + BaseModel(const std::string & model_loc, int input_num, int output_num, int batch_size); /** * @brief Get the label vector. * @return The label vector. */ - inline std::vector& getLabels() + inline std::vector & getLabels() { return labels_; } @@ -68,10 +73,19 @@ class BaseModel * @brief Get the maximum batch size of the model. * @return The maximum batch size of the model. 
*/ - inline const int getMaxBatchSize() const + inline int getMaxBatchSize() const { return max_batch_size_; } + inline void setMaxBatchSize(int max_batch_size) + { + max_batch_size_ = max_batch_size; + } + + virtual bool enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) {return true;} /** * @brief Initialize the model. During the process the class will check * the network input, output size, check layer property and @@ -83,38 +97,48 @@ class BaseModel * @return The name of the model. */ virtual const std::string getModelName() const = 0; + + virtual inline int getMaxProposalCount() const {return max_proposal_count_;} + inline int getObjectSize() const {return object_size_;} + inline void setObjectSize(int os) {object_size_ = os;} inline InferenceEngine::CNNNetReader::Ptr getNetReader() const { return net_reader_; } - protected: +protected: /** * @brief Check whether the layer property * (output layer name, output layer type, etc.) is right * @param[in] network_reader The reader of the network to be checked. */ - virtual void checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& network_reader) = 0; - + virtual void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr & network_reader) = 0; /** * @brief Set the layer property (layer layout, layer precision, etc.). * @param[in] network_reader The reader of the network to be set. 
*/ - virtual void - setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; - + virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; + virtual void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr); InferenceEngine::CNNNetReader::Ptr net_reader_; + void setFrameSize(const int & w, const int & h) + { + frame_size_.width = w; + frame_size_.height = h; + } + cv::Size getFrameSize() + {return frame_size_;} + +protected: + int max_proposal_count_; + int object_size_; - private: - friend class Engines::Engine; - void checkNetworkSize(unsigned int, unsigned int, - InferenceEngine::CNNNetReader::Ptr); +private: std::vector labels_; int input_num_; int output_num_; - std::string model_loc_; int max_batch_size_; + std::string model_loc_; + cv::Size frame_size_; }; class ObjectDetectionModel : public BaseModel @@ -129,6 +153,7 @@ class ObjectDetectionModel : public BaseModel int object_size_; //virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; }; + } // namespace Models -#endif // DYNAMIC_VINO_LIB_MODELS_BASE_MODEL_H +#endif // DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h index 38418824..5af986d5 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h @@ -29,11 +29,11 @@ class ObjectSegmentationModel : public BaseModel { public: ObjectSegmentationModel(const std::string &, int, int, int); - inline const int getMaxProposalCount() + inline int getMaxProposalCount() const { return max_proposal_count_; } - inline const int getObjectSize() + inline int getObjectSize() const { return object_size_; } @@ -50,6 +50,13 @@ class ObjectSegmentationModel : public BaseModel return mask_output_; } + bool 
enqueue(const std::shared_ptr & ,const cv::Mat &, + const cv::Rect & ) override; + + bool matToBlob( + const cv::Mat & , const cv::Rect &, float , + int , const std::shared_ptr & ); + /** * @brief Get the name of this segmentation model. * @return Name of the model. @@ -59,14 +66,21 @@ class ObjectSegmentationModel : public BaseModel protected: void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - //void checkNetworkSize(int input_size, int output_size, InferenceEngine::CNNNetReader::Ptr net_reader) override; + void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr) override; private: int max_proposal_count_; int object_size_; std::string input_; - std::string mask_output_; - std::string detection_output_; + std::string mask_output_ = "masks"; + std::string detection_output_ = "detection_output"; + + size_t input_channels_; + size_t input_height_; + size_t input_width_; + InferenceEngine::InputsDataMap input_info_; + InferenceEngine::OutputsDataMap output_info_; + }; } // namespace Models #endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index 16146e53..2b9126ea 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -43,11 +43,16 @@ dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; void dynamic_vino_lib::ObjectSegmentation::loadNetwork( const std::shared_ptr network) { + slog::info << "Loading Network: " << network->getModelName() << slog::endl; valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::ObjectSegmentation::enqueue( +/** + * Deprecated! 
+ * This function only support OpenVINO version <=2018R5 + */ +bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( const cv::Mat & frame, const cv::Rect & input_frame_loc) { @@ -66,6 +71,34 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue( return true; } +bool dynamic_vino_lib::ObjectSegmentation::enqueue( + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + if (width_ == 0 && height_ == 0) { + width_ = frame.cols; + height_ = frame.rows; + } + + if (valid_model_ == nullptr || getEngine() == nullptr) { + throw std::logic_error("Model or Engine is not set correctly!"); + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { + slog::warn << "Number of " << getName() << "input more than maximum(" << + getMaxBatchSize()<< ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + return false; + } + + enqueued_frames_ += 1; + return true; +} + bool dynamic_vino_lib::ObjectSegmentation::submitRequest() { return dynamic_vino_lib::BaseInference::submitRequest(); @@ -166,4 +199,4 @@ const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROI filtered_rois.push_back(res.getLocation()); } return filtered_rois; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index 1721f140..274d5c08 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -73,26 +73,25 @@ void Models::BaseModel::modelInit() } void Models::BaseModel::checkNetworkSize( - unsigned int input_size, unsigned int output_size, - InferenceEngine::CNNNetReader::Ptr net_reader) + int input_size, int output_size, + InferenceEngine::CNNNetReader::Ptr net_reader) { + // TODO(Houk): Repeat, better removed! 
// check input size slog::info << "Checking input size" << slog::endl; - InferenceEngine::InputsDataMap input_info( - net_reader->getNetwork().getInputsInfo()); - if (input_info.size() != input_size) - { - throw std::logic_error(getModelName() + " should have only one input"); + InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); + if (input_info.size() != input_size) { + throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" + "t"); } // check output size slog::info << "Checking output size" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != output_size) - { - throw std::logic_error(getModelName() + - "network should have only one output"); + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); + if (output_info.size() != output_size) { + throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" + "t"); } + // InferenceEngine::DataPtr& output_data_ptr = output_info.begin()->second; } Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp index 2feb29c7..a08789af 100644 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ b/dynamic_vino_lib/src/models/object_segmentation_model.cpp @@ -18,6 +18,7 @@ */ #include #include "dynamic_vino_lib/models/object_segmentation_model.h" +#include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network Models::ObjectSegmentationModel::ObjectSegmentationModel( @@ -28,37 +29,74 @@ Models::ObjectSegmentationModel::ObjectSegmentationModel( { } +void Models::ObjectSegmentationModel::checkNetworkSize( + int input_size, int output_size, InferenceEngine::CNNNetReader::Ptr 
net_reader) +{ + slog::info << "Checking input size" << slog::endl; + InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); + if (input_info.size() != input_size) { + throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" + "t, but got " + std::to_string(input_info.size())); + } + + // check output size + slog::info << "Checking output size" << slog::endl; + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); + if (output_info.size() != output_size && output_info.size() != (output_size - 1)) { + throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" + "t, but got " + std::to_string(output_info.size())); + } +} + void Models::ObjectSegmentationModel::setLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { - // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - auto inputInfoItem = *input_info_map.begin(); - inputInfoItem.second->setPrecision(InferenceEngine::Precision::U8); auto network = net_reader->getNetwork(); + + // set input property + input_info_ = InferenceEngine::InputsDataMap(net_reader->getNetwork().getInputsInfo()); + for (const auto & inputInfoItem : input_info_) { + if (inputInfoItem.second->getDims().size() == 4) { // first input contains images + inputInfoItem.second->setPrecision(InferenceEngine::Precision::U8); + input_ = inputInfoItem.first; //input_name + } else if (inputInfoItem.second->getDims().size() == 2) { // second input contains image info + inputInfoItem.second->setPrecision(InferenceEngine::Precision::FP32); + } else { + throw std::logic_error("Unsupported input shape with size = " + std::to_string(inputInfoItem.second->getDims().size())); + } + } + try { - network.addOutput(std::string("detection_output"), 0); + network.addOutput(std::string(getDetectionOutputName().c_str()), 0); } catch (std::exception & error) { 
throw std::logic_error(getModelName() + "is failed when adding detection_output laryer."); } + network.setBatchSize(1); slog::info << "Batch size is " << std::to_string(net_reader->getNetwork().getBatchSize()) << slog::endl; - InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - for (auto & item : output_info_map) { + + input_channels_ = input_info_[input_]->getDims()[2]; + input_height_ = input_info_[input_]->getDims()[1]; + input_width_ = input_info_[input_]->getDims()[0]; + + output_info_ = InferenceEngine::OutputsDataMap(net_reader->getNetwork().getOutputsInfo()); + for (auto & item : output_info_) { item.second->setPrecision(InferenceEngine::Precision::FP32); } - auto output_ptr = output_info_map.begin(); - input_ = input_info_map.begin()->first; - detection_output_ = output_ptr->first; - mask_output_ = (++output_ptr)->first; + + //Deprecated! + //auto output_ptr = output_info_.begin(); + //input_ = input_info_map.begin()->first; + //detection_output_ = output_ptr->first; + //mask_output_ = (++output_ptr)->first; } void Models::ObjectSegmentationModel::checkLayerProperty( const InferenceEngine::CNNNetReader::Ptr & net_reader) { const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName("detection_output"); + net_reader->getNetwork().getLayerByName(getDetectionOutputName().c_str()); const int num_classes = output_layer->GetParamAsInt("num_classes"); slog::info << "Checking Object Segmentation output ... num_classes=" << num_classes << slog::endl; if (getLabels().size() != num_classes) { @@ -70,6 +108,65 @@ void Models::ObjectSegmentationModel::checkLayerProperty( } } +bool Models::ObjectSegmentationModel::enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; + return false; + } + + for (const auto & inputInfoItem : input_info_) { + /** Fill first input tensor with images. First b channel, then g and r channels **/ + if (inputInfoItem.second->getDims().size() == 4) { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + /** Fill second input tensor with image info **/ + if (inputInfoItem.second->getDims().size() == 2) { + InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); + auto data = input->buffer().as::value_type *>(); + data[0] = static_cast(input_height_); // height + data[1] = static_cast(input_width_); // width + data[2] = 1; + } + } +} + +bool Models::ObjectSegmentationModel::matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + std::string input_name = getInputName(); + InferenceEngine::Blob::Ptr input_blob = + engine->getRequest()->GetBlob(input_name); + auto blob_data = input_blob->buffer().as::value_type *>(); + + cv::Mat resized_image(orig_image); + if(orig_image.size().height != input_height_ || orig_image.size().width != input_width_){ + cv::resize(orig_image, resized_image, cv::Size(input_width_, input_height_)); + } + int batchOffset = batch_index * input_width_ * input_height_ * input_channels_; + + for (int c = 0; c < input_channels_; c++) { + for (int h = 0; h < input_height_; h++) { + for (int w = 0; w < input_width_; w++) { + blob_data[batchOffset + c * input_width_ * input_height_ + h * input_width_ + w] = + resized_image.at(h, w)[c] * scale_factor; + } + } + } + + return true; +} + const std::string Models::ObjectSegmentationModel::getModelName() const { return "Object Segmentation"; diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index b57ada09..70985573 100644 --- 
a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -215,7 +215,6 @@ void Pipeline::runOnce() std::unique_lock lock(counter_mutex_); cv_.wait(lock, [this]() { return this->counter_ == 0; }); - for (auto& pair : name_to_output_map_) { pair.second->handleOutput(); @@ -249,7 +248,7 @@ void Pipeline::setCallback() void Pipeline::callback(const std::string & detection_name) { - // slog::info<<"Hello callback ----> " << detection_name < " << detection_name <fetchResults(); // set output diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index f9da0b78..07fc1211 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -334,14 +334,17 @@ const Params::ParamManager::InferenceRawData & infer) std::shared_ptr PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) { - auto obejct_segmentation_model = - std::make_shared(infer.model, 1, 2, infer.batch); - obejct_segmentation_model->modelInit(); - auto obejct_segmentation_engine = engine_manager_.createEngine( - infer.engine, obejct_segmentation_model); - auto segmentation_inference_ptr = std::make_shared(0.5); - segmentation_inference_ptr->loadNetwork(obejct_segmentation_model); - segmentation_inference_ptr->loadEngine(obejct_segmentation_engine); + auto model = + std::make_shared(infer.model, 2, 2, infer.batch); + model->modelInit(); + slog::info << "Segmentation model initialized." << slog::endl; + auto engine = engine_manager_.createEngine(infer.engine, model); + slog::info << "Segmentation Engine initialized." << slog::endl; + auto segmentation_inference_ptr = std::make_shared( + infer.confidence_threshold); + slog::info << "Segmentation Inference instanced." 
<< slog::endl; + segmentation_inference_ptr->loadNetwork(model); + segmentation_inference_ptr->loadEngine(engine); return segmentation_inference_ptr; } From 9848b09d92805f185bb5d0bb22df3680f4a13e2e Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 12 Nov 2019 14:20:53 +0800 Subject: [PATCH 19/63] code refactor --- .../inferences/age_gender_detection.h | 7 +- .../inferences/base_inference.h | 114 +++++++++--------- .../inferences/emotions_detection.h | 7 +- .../inferences/face_detection.h | 5 +- .../inferences/face_reidentification.h | 5 +- .../inferences/head_pose_detection.h | 7 +- .../inferences/landmarks_detection.h | 5 +- .../inferences/license_plate_detection.h | 5 +- .../inferences/object_detection_ssd.h | 5 +- .../inferences/object_detection_yolov2_voc.h | 5 +- .../inferences/object_segmentation.h | 5 +- .../inferences/person_attribs_detection.h | 5 +- .../inferences/person_reidentification.h | 5 +- .../inferences/vehicle_attribs_detection.h | 5 +- .../src/inferences/age_gender_detection.cpp | 7 +- .../src/inferences/base_inference.cpp | 31 ++++- .../src/inferences/emotions_detection.cpp | 7 +- .../src/inferences/face_detection.cpp | 7 +- .../src/inferences/face_reidentification.cpp | 7 +- .../src/inferences/head_pose_detection.cpp | 7 +- .../src/inferences/landmarks_detection.cpp | 7 +- .../inferences/license_plate_detection.cpp | 7 +- .../src/inferences/object_detection_ssd.cpp | 9 +- .../object_detection_yolov2_voc.cpp | 9 +- .../src/inferences/object_segmentation.cpp | 7 +- .../inferences/person_attribs_detection.cpp | 7 +- .../inferences/person_reidentification.cpp | 7 +- .../inferences/vehicle_attribs_detection.cpp | 7 +- dynamic_vino_lib/src/pipeline.cpp | 2 +- 29 files changed, 152 insertions(+), 161 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h index f20f5119..8b991aa2 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h @@ -108,7 +108,7 @@ class AgeGenderDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -124,9 +124,8 @@ class AgeGenderDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic. */ - const void observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) override; + void observeOutput( + const std::shared_ptr& output) override; const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index ef4fc641..d67cfa7b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -43,31 +43,28 @@ class BaseOutput; * @param[in] scale_factor Scale factor for loading. * @param[in] batch_index Indicates the batch index for the frame. 
*/ -template -void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, - float scale_factor = 1.0, int batch_index = 0) +template +void matU8ToBlob( + const cv::Mat & orig_image, InferenceEngine::Blob::Ptr & blob, + float scale_factor = 1.0, int batch_index = 0) { InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - T* blob_data = blob->buffer().as(); + const size_t width = blob_size[3]; + const size_t height = blob_size[2]; + const size_t channels = blob_size[1]; + T * blob_data = blob->buffer().as(); cv::Mat resized_image(orig_image); - if (width != orig_image.size().width || height != orig_image.size().height) - { + if (width != orig_image.size().width || height != orig_image.size().height) { cv::resize(orig_image, resized_image, cv::Size(width, height)); } int batchOffset = batch_index * width * height * channels; - for (int c = 0; c < channels; c++) - { - for (int h = 0; h < height; h++) - { - for (int w = 0; w < width; w++) - { + for (size_t c = 0; c < channels; c++) { + for (size_t h = 0; h < height; h++) { + for (size_t w = 0; w < width; w++) { blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at(h, w)[c] * scale_factor; + resized_image.at(h, w)[c] * scale_factor; } } } @@ -81,12 +78,15 @@ namespace dynamic_vino_lib */ class Result { - public: +public: friend class BaseInference; - explicit Result(const cv::Rect& location); - inline const cv::Rect getLocation() const { return location_; } + explicit Result(const cv::Rect & location); + inline const cv::Rect getLocation() const + { + return location_; + } - private: +private: cv::Rect location_; }; @@ -96,7 +96,7 @@ class Result */ class BaseInference { - public: +public: BaseInference(); virtual ~BaseInference(); /** @@ -116,10 +116,25 @@ class BaseInference * @brief Get the number of enqueued frames to be infered. 
* @return The number of enqueued frames to be infered. */ - inline const int getEnqueuedNum() const + inline int getEnqueuedNum() const { return enqueued_frames_; } + + /** + * @brief Get the size of the maximum of inference batch. + * @return The max batch size. + */ + inline int getMaxBatchSize() const + { + return max_batch_size_; + } + + inline void setMaxBatchSize(int max) + { + max_batch_size_ = max; + } + /** * @brief Enqueue a frame to this class. * The frame will be buffered but not infered yet. @@ -128,17 +143,15 @@ class BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - virtual bool enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) = 0; + virtual bool enqueue(const cv::Mat & frame, const cv::Rect & input_frame_loc) = 0; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. */ virtual bool submitRequest(); + virtual bool SynchronousRequest(); - virtual const void observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) = 0; + virtual void observeOutput(const std::shared_ptr & output) = 0; /** * @brief This function will fetch the results of the previous inference and @@ -150,67 +163,52 @@ class BaseInference /** * @brief Get the length of the buffer result array. */ - virtual const int getResultsLength() const = 0; + virtual int getResultsLength() const = 0; /** * @brief Get the location of result with respect * to the frame generated by the input device. * @param[in] idx The index of the result. */ - virtual const dynamic_vino_lib::Result* getLocationResult(int idx) const = 0; + virtual const dynamic_vino_lib::Result * getLocationResult(int idx) const = 0; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ virtual const std::string getName() const = 0; - /** - * @brief Get the max batch size for one inference. 
- */ - inline int getMaxBatchSize() - { - return max_batch_size_; - } virtual const std::vector getFilteredROIs( const std::string filter_conditions) const = 0; - protected: +protected: /** * @brief Enqueue the fram into the input blob of the target calculation * device. Check OpenVINO document for detailed information. * @return Whether this operation is successful. */ - template - bool enqueue(const cv::Mat& frame, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name) + template + bool enqueue( + const cv::Mat & frame, const cv::Rect &, float scale_factor, int batch_index, + const std::string & input_name) { - if (enqueued_frames_ == max_batch_size_) - { - slog::warn << "Number of " << getName() << "input more than maximum(" - << max_batch_size_ << ") processed by inference" << slog::endl; + if (enqueued_frames_ == max_batch_size_) { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ << + ") processed by inference" << slog::endl; return false; } - - InferenceEngine::Blob::Ptr input_blob = - engine_->getRequest()->GetBlob(input_name); + InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); matU8ToBlob(frame, input_blob, scale_factor, batch_index); enqueued_frames_ += 1; return true; } - /** - * @brief Set the max batch size for one inference. 
- */ - inline void setMaxBatchSize(int max_batch_size) - { - max_batch_size_ = max_batch_size; - } - std::shared_ptr engine_; + std::vector results_; int enqueued_frames_ = 0; - - private: + +protected: + std::shared_ptr engine_ = nullptr; int max_batch_size_ = 1; bool results_fetched_ = false; }; } // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB_INFERENCES_BASE_INFERENCE_H +#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ \ No newline at end of file diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h index e753a7e6..f320d415 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h @@ -99,7 +99,7 @@ class EmotionsDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -115,9 +115,8 @@ class EmotionsDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic. 
*/ - const void observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) override; + void observeOutput( + const std::shared_ptr& output) override; const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h index 83b5ac56..a19a2a6d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h @@ -105,7 +105,7 @@ class FaceDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -116,8 +116,7 @@ class FaceDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h index 2c68a97e..5b8487e4 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h @@ -84,7 +84,7 @@ class FaceReidentification : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. 
*/ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -95,8 +95,7 @@ class FaceReidentification : public BaseInference * @brief Show the observed reidentification result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h index 1fdf4811..00df9a9b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h @@ -112,7 +112,7 @@ class HeadPoseDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -128,9 +128,8 @@ class HeadPoseDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. 
*/ - const void observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) override; + void observeOutput( + const std::shared_ptr& output) override; const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h index d165482c..9e697ece 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h @@ -85,7 +85,7 @@ class LandmarksDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -96,8 +96,7 @@ class LandmarksDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h index c35c961d..9de41af6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h @@ -89,7 +89,7 @@ class LicensePlateDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. 
*/ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -100,8 +100,7 @@ class LicensePlateDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h index 6e910519..9b7cf0d5 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h @@ -73,7 +73,7 @@ class ObjectDetectionSSD : public ObjectDetection { * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -84,8 +84,7 @@ class ObjectDetectionSSD : public ObjectDetection { * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h index f01fc7bf..c64cd5d2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h @@ -73,7 +73,7 @@ class ObjectDetectionYolov2voc : public ObjectDetection { * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -84,8 +84,7 @@ class ObjectDetectionYolov2voc : public ObjectDetection { * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h index 44d919c9..5d7720de 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h @@ -106,7 +106,7 @@ class ObjectSegmentation : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. 
@@ -117,8 +117,7 @@ class ObjectSegmentation : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions) override; + void observeOutput(const std::shared_ptr & output) override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h index 288b8792..0f1fd464 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h @@ -90,7 +90,7 @@ class PersonAttribsDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -101,8 +101,7 @@ class PersonAttribsDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h index 5e9947da..c411a065 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h @@ -82,7 +82,7 @@ class PersonReidentification : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. @@ -93,8 +93,7 @@ class PersonReidentification : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h index 36448f06..ca2cdc4b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h @@ -90,7 +90,7 @@ class VehicleAttribsDetection : public BaseInference * @brief Get the length of the buffer result array. * @return The length of the buffer result array. */ - const int getResultsLength() const override; + int getResultsLength() const override; /** * @brief Get the location of result with respect * to the frame generated by the input device. 
@@ -101,8 +101,7 @@ class VehicleAttribsDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr & output, - const std::string filter_conditions); + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp index 9737a642..679dd397 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp @@ -86,7 +86,7 @@ bool dynamic_vino_lib::AgeGenderDetection::fetchResults() return true; } -const int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const +int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -102,9 +102,8 @@ const std::string dynamic_vino_lib::AgeGenderDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::AgeGenderDetection::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) +void dynamic_vino_lib::AgeGenderDetection::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index 802391f7..6801422b 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -24,7 +24,7 @@ #include "dynamic_vino_lib/inferences/base_inference.h" // Result -dynamic_vino_lib::Result::Result(const cv::Rect& location) +dynamic_vino_lib::Result::Result(const cv::Rect & location) { location_ = location; } @@ -34,25 +34,44 @@ dynamic_vino_lib::BaseInference::BaseInference() = default; dynamic_vino_lib::BaseInference::~BaseInference() = default; 
-void dynamic_vino_lib::BaseInference::loadEngine( - const std::shared_ptr engine) +void dynamic_vino_lib::BaseInference::loadEngine(const std::shared_ptr engine) { engine_ = engine; } bool dynamic_vino_lib::BaseInference::submitRequest() { - if (engine_->getRequest() == nullptr) return false; - if (!enqueued_frames_) return false; + if (engine_->getRequest() == nullptr) { + return false; + } + if (!enqueued_frames_) { + return false; + } enqueued_frames_ = 0; results_fetched_ = false; engine_->getRequest()->StartAsync(); return true; } +bool dynamic_vino_lib::BaseInference::SynchronousRequest() +{ + if (engine_->getRequest() == nullptr) { + return false; + } + if (!enqueued_frames_) { + return false; + } + enqueued_frames_ = 0; + results_fetched_ = false; + engine_->getRequest()->Infer(); + return true; +} + bool dynamic_vino_lib::BaseInference::fetchResults() { - if (results_fetched_) return false; + if (results_fetched_) { + return false; + } results_fetched_ = true; return true; } diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp index ef377d22..19d053ed 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/dynamic_vino_lib/src/inferences/emotions_detection.cpp @@ -114,7 +114,7 @@ bool dynamic_vino_lib::EmotionsDetection::fetchResults() return true; } -const int dynamic_vino_lib::EmotionsDetection::getResultsLength() const +int dynamic_vino_lib::EmotionsDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -130,9 +130,8 @@ const std::string dynamic_vino_lib::EmotionsDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::EmotionsDetection::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) +void dynamic_vino_lib::EmotionsDetection::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp 
b/dynamic_vino_lib/src/inferences/face_detection.cpp index f47b0197..02fef81d 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/dynamic_vino_lib/src/inferences/face_detection.cpp @@ -122,7 +122,7 @@ bool dynamic_vino_lib::FaceDetection::fetchResults() return true; } -const int dynamic_vino_lib::FaceDetection::getResultsLength() const +int dynamic_vino_lib::FaceDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -138,9 +138,8 @@ const std::string dynamic_vino_lib::FaceDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::FaceDetection::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) +void dynamic_vino_lib::FaceDetection::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/dynamic_vino_lib/src/inferences/face_reidentification.cpp index bb942cba..10cac25b 100644 --- a/dynamic_vino_lib/src/inferences/face_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/face_reidentification.cpp @@ -85,7 +85,7 @@ bool dynamic_vino_lib::FaceReidentification::fetchResults() return true; } -const int dynamic_vino_lib::FaceReidentification::getResultsLength() const +int dynamic_vino_lib::FaceReidentification::getResultsLength() const { return static_cast(results_.size()); } @@ -101,9 +101,8 @@ const std::string dynamic_vino_lib::FaceReidentification::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::FaceReidentification::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::FaceReidentification::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp index 920bf83c..541aae58 100644 --- 
a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp @@ -88,7 +88,7 @@ bool dynamic_vino_lib::HeadPoseDetection::fetchResults() return true; } -const int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const +int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -104,9 +104,8 @@ const std::string dynamic_vino_lib::HeadPoseDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::HeadPoseDetection::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) +void dynamic_vino_lib::HeadPoseDetection::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp index 18004ff3..80f37deb 100644 --- a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp +++ b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp @@ -86,7 +86,7 @@ bool dynamic_vino_lib::LandmarksDetection::fetchResults() return true; } -const int dynamic_vino_lib::LandmarksDetection::getResultsLength() const +int dynamic_vino_lib::LandmarksDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -102,9 +102,8 @@ const std::string dynamic_vino_lib::LandmarksDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::LandmarksDetection::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::LandmarksDetection::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp index f2c26ee9..8303fc45 100644 --- a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp +++ 
b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp @@ -99,7 +99,7 @@ bool dynamic_vino_lib::LicensePlateDetection::fetchResults() return true; } -const int dynamic_vino_lib::LicensePlateDetection::getResultsLength() const +int dynamic_vino_lib::LicensePlateDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -115,9 +115,8 @@ const std::string dynamic_vino_lib::LicensePlateDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::LicensePlateDetection::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::LicensePlateDetection::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp index cdded2de..2d982d28 100644 --- a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp @@ -156,7 +156,7 @@ bool dynamic_vino_lib::ObjectDetectionSSD::fetchResults() { return true; } -const int dynamic_vino_lib::ObjectDetectionSSD::getResultsLength() const { +int dynamic_vino_lib::ObjectDetectionSSD::getResultsLength() const { return static_cast(results_.size()); } @@ -169,12 +169,11 @@ const std::string dynamic_vino_lib::ObjectDetectionSSD::getName() const { return valid_model_->getModelName(); } -const void dynamic_vino_lib::ObjectDetectionSSD::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) { +void dynamic_vino_lib::ObjectDetectionSSD::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { result_filter_->acceptResults(results_); - result_filter_->acceptFilterConditions(filter_conditions); + //result_filter_->acceptFilterConditions(filter_conditions); output->accept(result_filter_->getFilteredResults()); } } diff --git 
a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp index 3d9c1810..576a2c4e 100644 --- a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp @@ -308,7 +308,7 @@ int dynamic_vino_lib::ObjectDetectionYolov2voc::getEntryIndex(int side, int lcoo return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; } -const int dynamic_vino_lib::ObjectDetectionYolov2voc::getResultsLength() const { +int dynamic_vino_lib::ObjectDetectionYolov2voc::getResultsLength() const { return static_cast(results_.size()); } @@ -321,12 +321,11 @@ const std::string dynamic_vino_lib::ObjectDetectionYolov2voc::getName() const { return valid_model_->getModelName(); } -const void dynamic_vino_lib::ObjectDetectionYolov2voc::observeOutput( - const std::shared_ptr& output, - const std::string filter_conditions) { +void dynamic_vino_lib::ObjectDetectionYolov2voc::observeOutput( + const std::shared_ptr& output) { if (output != nullptr) { result_filter_->acceptResults(results_); - result_filter_->acceptFilterConditions(filter_conditions); + //result_filter_->acceptFilterConditions(filter_conditions); output->accept(result_filter_->getFilteredResults()); } } diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index 2b9126ea..cfbcf58a 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -162,7 +162,7 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() return true; } -const int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const +int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const { return static_cast(results_.size()); } @@ -178,9 +178,8 @@ const std::string dynamic_vino_lib::ObjectSegmentation::getName() const return 
valid_model_->getModelName(); } -const void dynamic_vino_lib::ObjectSegmentation::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::ObjectSegmentation::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp index df64283a..10517fc2 100644 --- a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp @@ -85,7 +85,7 @@ bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() return true; } -const int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const +int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -101,9 +101,8 @@ const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::PersonAttribsDetection::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::PersonAttribsDetection::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/dynamic_vino_lib/src/inferences/person_reidentification.cpp index a31202e1..fdef4ebe 100644 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/person_reidentification.cpp @@ -128,7 +128,7 @@ std::string dynamic_vino_lib::PersonReidentification::findMatchPerson( } } -const int dynamic_vino_lib::PersonReidentification::getResultsLength() const +int dynamic_vino_lib::PersonReidentification::getResultsLength() const { return static_cast(results_.size()); } @@ -144,9 +144,8 @@ const std::string 
dynamic_vino_lib::PersonReidentification::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::PersonReidentification::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::PersonReidentification::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp index df23cf71..20370e61 100644 --- a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp @@ -85,7 +85,7 @@ bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() return true; } -const int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const +int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const { return static_cast(results_.size()); } @@ -101,9 +101,8 @@ const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const return valid_model_->getModelName(); } -const void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( - const std::shared_ptr & output, - const std::string filter_conditions) +void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( + const std::shared_ptr & output) { if (output != nullptr) { output->accept(results_); diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 70985573..419c9e03 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -258,7 +258,7 @@ void Pipeline::callback(const std::string & detection_name) std::string filter_conditions = findFilterConditions(detection_name, next_name); // if next is output, then print if (output_names_.find(next_name) != output_names_.end()) { - detection_ptr->observeOutput(name_to_output_map_[next_name], filter_conditions); + 
detection_ptr->observeOutput(name_to_output_map_[next_name]); } else { auto detection_ptr_iter = name_to_detection_map_.find(next_name); if (detection_ptr_iter != name_to_detection_map_.end()) { From a9f98fa5c2cc243913d4f9e742f0ba1151493c70 Mon Sep 17 00:00:00 2001 From: RachelRen05 Date: Fri, 15 Nov 2019 17:31:28 +0800 Subject: [PATCH 20/63] delete useless files --- .../launch/image_object_server_oss.launch | 9 ----- .../launch/image_people_server_oss.launch | 9 ----- .../image_reidentification_server_oss.launch | 9 ----- vino_launch/launch/pipeline_object_oss.launch | 14 ------- .../launch/pipeline_object_oss_topic.launch | 23 ----------- .../launch/pipeline_people_myriad.launch | 17 -------- vino_launch/launch/pipeline_people_oss.launch | 17 -------- .../pipeline_reidentification_oss.launch | 14 ------- .../param/image_object_server_oss.yaml | 18 --------- .../param/image_people_server_oss.yaml | 39 ------------------ vino_launch/param/pipeline_image_oss.yaml | 40 ------------------- vino_launch/param/pipeline_object_oss.yaml | 25 ------------ vino_launch/param/pipeline_people_myriad.yaml | 39 ------------------ vino_launch/param/pipeline_people_oss.yaml | 39 ------------------ .../pipeline_reidentification_server_oss.yaml | 28 ------------- 15 files changed, 340 deletions(-) delete mode 100644 vino_launch/launch/image_object_server_oss.launch delete mode 100644 vino_launch/launch/image_people_server_oss.launch delete mode 100644 vino_launch/launch/image_reidentification_server_oss.launch delete mode 100644 vino_launch/launch/pipeline_object_oss.launch delete mode 100644 vino_launch/launch/pipeline_object_oss_topic.launch delete mode 100644 vino_launch/launch/pipeline_people_myriad.launch delete mode 100644 vino_launch/launch/pipeline_people_oss.launch delete mode 100644 vino_launch/launch/pipeline_reidentification_oss.launch delete mode 100644 vino_launch/param/image_object_server_oss.yaml delete mode 100644 vino_launch/param/image_people_server_oss.yaml delete 
mode 100644 vino_launch/param/pipeline_image_oss.yaml delete mode 100644 vino_launch/param/pipeline_object_oss.yaml delete mode 100644 vino_launch/param/pipeline_people_myriad.yaml delete mode 100644 vino_launch/param/pipeline_people_oss.yaml delete mode 100644 vino_launch/param/pipeline_reidentification_server_oss.yaml diff --git a/vino_launch/launch/image_object_server_oss.launch b/vino_launch/launch/image_object_server_oss.launch deleted file mode 100644 index a2698e2b..00000000 --- a/vino_launch/launch/image_object_server_oss.launch +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - diff --git a/vino_launch/launch/image_people_server_oss.launch b/vino_launch/launch/image_people_server_oss.launch deleted file mode 100644 index 0d9bcf8b..00000000 --- a/vino_launch/launch/image_people_server_oss.launch +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - diff --git a/vino_launch/launch/image_reidentification_server_oss.launch b/vino_launch/launch/image_reidentification_server_oss.launch deleted file mode 100644 index 3ea12149..00000000 --- a/vino_launch/launch/image_reidentification_server_oss.launch +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - diff --git a/vino_launch/launch/pipeline_object_oss.launch b/vino_launch/launch/pipeline_object_oss.launch deleted file mode 100644 index 8f450044..00000000 --- a/vino_launch/launch/pipeline_object_oss.launch +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - diff --git a/vino_launch/launch/pipeline_object_oss_topic.launch b/vino_launch/launch/pipeline_object_oss_topic.launch deleted file mode 100644 index 5624dfea..00000000 --- a/vino_launch/launch/pipeline_object_oss_topic.launch +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - diff --git a/vino_launch/launch/pipeline_people_myriad.launch b/vino_launch/launch/pipeline_people_myriad.launch deleted file mode 100644 index 2dbc86ca..00000000 --- a/vino_launch/launch/pipeline_people_myriad.launch +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - - - - - 
- diff --git a/vino_launch/launch/pipeline_people_oss.launch b/vino_launch/launch/pipeline_people_oss.launch deleted file mode 100644 index 0d7a2b13..00000000 --- a/vino_launch/launch/pipeline_people_oss.launch +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/vino_launch/launch/pipeline_reidentification_oss.launch b/vino_launch/launch/pipeline_reidentification_oss.launch deleted file mode 100644 index eaa393e3..00000000 --- a/vino_launch/launch/pipeline_reidentification_oss.launch +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - diff --git a/vino_launch/param/image_object_server_oss.yaml b/vino_launch/param/image_object_server_oss.yaml deleted file mode 100644 index 935ea613..00000000 --- a/vino_launch/param/image_object_server_oss.yaml +++ /dev/null @@ -1,18 +0,0 @@ -Pipelines: -- name: object - inputs: [Image] - infers: - - name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [RosService] - confidence_threshold: 0.2 - connects: - - left: Image - right: [ObjectDetection] - - left: ObjectDetection - right: [RosService] - input_path: "/opt/openvino_toolkit/ros_openvino_toolkit/data/images/car.png" -OpenvinoCommon: diff --git a/vino_launch/param/image_people_server_oss.yaml b/vino_launch/param/image_people_server_oss.yaml deleted file mode 100644 index 3e0a12ca..00000000 --- a/vino_launch/param/image_people_server_oss.yaml +++ /dev/null @@ -1,39 +0,0 @@ -Pipelines: -- name: people - inputs: [Image] - infers: - - name: FaceDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - - name: AgeGenderRecognition - model: 
/opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: EmotionRecognition - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: HeadPoseEstimation - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [RosService] - confidence_threshold: 0.2 - connects: - - left: Image - right: [FaceDetection] - - left: FaceDetection - right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosService] - - left: AgeGenderRecognition - right: [RosService] - - left: EmotionRecognition - right: [RosService] - - left: HeadPoseEstimation - right: [RosService] - input_path: "/opt/openvino_toolkit/ros_openvino_toolkit/data/images/team.jpg" -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_image_oss.yaml b/vino_launch/param/pipeline_image_oss.yaml deleted file mode 100644 index 5c87fcb8..00000000 --- a/vino_launch/param/pipeline_image_oss.yaml +++ /dev/null @@ -1,40 +0,0 @@ -Pipelines: -- name: people - inputs: [Image] - input_path: /opt/openvino_toolkit/ros_openvino_toolkit/data/images/team.jpg - infers: - - name: FaceDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - - name: AgeGenderRecognition - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: 
EmotionRecognition - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: HeadPoseEstimation - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [ImageWindow, RosTopic,RViz] - confidence_threshold: 0.2 - connects: - - left: Image - right: [FaceDetection] - - left: FaceDetection - right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, Rviz] - - left: AgeGenderRecognition - right: [ImageWindow, RosTopic,RViz] - - left: EmotionRecognition - right: [ImageWindow, RosTopic,RViz] - - left: HeadPoseEstimation - right: [ImageWindow, RosTopic,RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_object_oss.yaml b/vino_launch/param/pipeline_object_oss.yaml deleted file mode 100644 index 1a4c9a4a..00000000 --- a/vino_launch/param/pipeline_object_oss.yaml +++ /dev/null @@ -1,25 +0,0 @@ -Pipelines: -- name: object - inputs: [RealSenseCamera] - infers: - - name: ObjectDetection - #model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - model: /home/intel/Downloads/yolov2-voc.xml - #model_type: SSD - model_type: yolov2-voc - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.5 - connects: - - left: RealSenseCamera - right: [ObjectDetection] - - left: ObjectDetection - right: [{ImageWindow: label == Person && confidence >= 0.8}] - - left: ObjectDetection - right: [RosTopic] - # - left: ObjectDetection - # right: [RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_people_myriad.yaml b/vino_launch/param/pipeline_people_myriad.yaml deleted 
file mode 100644 index 30dc280a..00000000 --- a/vino_launch/param/pipeline_people_myriad.yaml +++ /dev/null @@ -1,39 +0,0 @@ -Pipelines: -- name: people - inputs: [StandardCamera] - infers: - - name: FaceDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP16/face-detection-adas-0001.xml - engine: MYRIAD - label: to/be/set/xxx.labels - batch: 1 - - name: AgeGenderRecognition - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013.xml - engine: MYRIAD - label: to/be/set/xxx.labels - batch: 16 - - name: EmotionRecognition - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP16/emotions-recognition-retail-0003.xml - engine: MYRIAD - label: to/be/set/xxx.labels - batch: 16 - - name: HeadPoseEstimation - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001.xml - engine: MYRIAD - label: to/be/set/xxx.labels - batch: 16 - outputs: [ImageWindow, RosTopic,RViz] - confidence_threshold: 0.2 - connects: - - left: StandardCamera - right: [FaceDetection] - - left: FaceDetection - right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz] - - left: AgeGenderRecognition - right: [ImageWindow, RosTopic,RViz] - - left: EmotionRecognition - right: [ImageWindow, RosTopic,RViz] - - left: HeadPoseEstimation - right: [ImageWindow, RosTopic,RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_people_oss.yaml b/vino_launch/param/pipeline_people_oss.yaml deleted file mode 100644 index dd000e34..00000000 --- a/vino_launch/param/pipeline_people_oss.yaml +++ /dev/null @@ -1,39 +0,0 @@ -Pipelines: -- name: people - inputs: [StandardCamera] - infers: - - name: FaceDetection - model: 
/opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - - name: AgeGenderRecognition - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: EmotionRecognition - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - - name: HeadPoseEstimation - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [ImageWindow, RosTopic,RViz] - confidence_threshold: 0.2 - connects: - - left: StandardCamera - right: [FaceDetection] - - left: FaceDetection - right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, Rviz] - - left: AgeGenderRecognition - right: [ImageWindow, RosTopic,RViz] - - left: EmotionRecognition - right: [ImageWindow, RosTopic,RViz] - - left: HeadPoseEstimation - right: [ImageWindow, RosTopic,RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_reidentification_server_oss.yaml b/vino_launch/param/pipeline_reidentification_server_oss.yaml deleted file mode 100644 index 43f00d13..00000000 --- a/vino_launch/param/pipeline_reidentification_server_oss.yaml +++ /dev/null @@ -1,28 +0,0 @@ -Pipelines: -- name: object - inputs: [Image] - infers: - - name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_detection/pedestrian/rmnet_ssd/0013/dldt/person-detection-retail-0013.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - 
confidence_threshold: 0.5 - enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - - name: PersonReidentification - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_reidentification/pedestrian/rmnet_based/0076/dldt/person-reidentification-retail-0076.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - confidence_threshold: 0.7 - outputs: [RosService] - connects: - - left: Image - right: [ObjectDetection] - - left: ObjectDetection - right: [PersonReidentification] - - left: PersonReidentification - right: [RosService] - input_path: "/opt/openvino_toolkit/ros_openvino_toolkit/data/images/team.jpg" - -OpenvinoCommon: \ No newline at end of file From 60dd3ed65d9655da44639be6ae4abc1a7eabb256 Mon Sep 17 00:00:00 2001 From: RachelRen05 Date: Fri, 15 Nov 2019 17:31:57 +0800 Subject: [PATCH 21/63] update readme and script --- README.md | 165 ++++++++++--------------- doc/BINARY_VERSION_README.md | 133 ++++++++++++-------- doc/OBJECT_DETECTION.md | 190 +++++++++++++++++++++++++++++ doc/OPEN_SOURCE_CODE_README.md | 10 +- script/environment_setup.sh | 62 +++++----- script/environment_setup_binary.sh | 60 +++++---- 6 files changed, 395 insertions(+), 225 deletions(-) create mode 100644 doc/OBJECT_DETECTION.md mode change 100644 => 100755 script/environment_setup.sh mode change 100644 => 100755 script/environment_setup_binary.sh diff --git a/README.md b/README.md index ed238952..e0056607 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ The OpenVINO™ (Open visual inference and neural network optimization) toolkit ## Design Architecture From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. 
-![OpenVINO_Architecture](https://github.com/intel/ros_openvino_toolkit/blob/master/data/images/design_arch.PNG "OpenVINO RunTime Architecture") +![OpenVINO_Architecture](https://github.com/intel/ros_openvino_toolkit/blob/devel/data/images/design_arch.PNG "OpenVINO RunTime Architecture") - **Intel® OpenVINO™ toolkit** is leveraged to provide deep learning basic implementation for data inference. is free software that helps developers and data scientists speed up computer vision workloads, streamline deep learning inference and deployments, and enable easy, heterogeneous execution across Intel® platforms from edge to cloud. It helps to: @@ -24,11 +24,11 @@ and enable easy, heterogeneous execution across Intel® platforms from edge to c ## Logic Flow From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. -![Logic_Flow](https://github.com/intel/ros_openvino_toolkit/blob/master/data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") +![Logic_Flow](https://github.com/intel/ros_openvino_toolkit/blob/devel/data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") Once a corresponding program is launched with a specified .yaml config file passed in the .launch file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. -The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. 
Please see [the configuration guidance](https://github.com/intel/ros_openvino_toolkit/blob/master/doc/YAML_CONFIGURATION_GUIDE.md) for how to create or edit the config files. +The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [the configuration guidance](https://github.com/intel/ros_openvino_toolkit/blob/devel/doc/YAML_CONFIGURATION_GUIDE.md) for how to create or edit the config files. **Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc. @@ -81,9 +81,9 @@ Currently, the inference feature list is supported: - Person Reidentification: ```/ros_openvino_toolkit/reidentified_persons```([people_msgs::ReidentificationStamped](https://github.com/intel/ros_openvino_toolkit/blob/devel/people_msgs/msg/ReidentificationStamped.msg)) - Vehicle Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/VehicleAttribsStamped.msg) +```/ros_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/VehicleAttribsStamped.msg) - Vehicle License Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/LicensePlateStamped.msg) +```/ros_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/devel/people_msgs/msg/LicensePlateStamped.msg) - Rviz Output: 
```/ros_openvino_toolkit/image_rviz```([sensor_msgs::Image](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html)) @@ -107,75 +107,45 @@ To show in RViz tool, add an image marker with the composited topic: ### Image Window OpenCV based image window is natively supported by the package. -To enable window, Image Window output should be added into the output choices in .yaml config file. see [the config file guidance](https://github.com/intel/ros_openvino_toolkit/blob/master/doc/YAML_CONFIGURATION_GUIDE.md) for checking/adding this feature in your launching. +To enable window, Image Window output should be added into the output choices in .yaml config file. see [the config file guidance](https://github.com/intel/ros_openvino_toolkit/blob/devel/doc/YAML_CONFIGURATION_GUIDE.md) for checking/adding this feature in your launching. ## Demo Result Snapshots See below pictures for the demo result snapshots. * face detection input from standard camera -![face_detection_demo_image](https://github.com/intel/ros_openvino_toolkit/blob/master/data/images/face_detection.png "face detection demo image") +![face_detection_demo_image](https://github.com/intel/ros_openvino_toolkit/blob/devel/data/images/face_detection.png "face detection demo image") * object detection input from realsense camera -![object_detection_demo_realsense](https://github.com/intel/ros_openvino_toolkit/blob/master/data/images/object_detection.gif "object detection demo realsense") +![object_detection_demo_realsense](https://github.com/intel/ros_openvino_toolkit/blob/devel/data/images/object_detection.gif "object detection demo realsense") * object segmentation input from video -![object_segmentation_demo_video](https://github.com/intel/ros_openvino_toolkit/blob/master/data/images/object_segmentation.gif "object segmentation demo video") +![object_segmentation_demo_video](https://github.com/intel/ros_openvino_toolkit/blob/devel/data/images/object_segmentation.gif "object segmentation demo video") * 
Person Reidentification input from standard camera -![person_reidentification_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/master/data/images/person-reidentification.gif "person reidentification demo video") +![person_reidentification_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/devel/data/images/person-reidentification.gif "person reidentification demo video") # Installation & Launching -**NOTE:** Intel releases 2 different series of OpenVINO Toolkit, we call them as [OpenSource Version](https://github.com/opencv/dldt/) and [Tarball Version](https://software.intel.com/en-us/openvino-toolkit). This guidelie uses OpenSource Version as the installation and launching example. **If you want to use Tarball version, please follow [the guide for Tarball Version](https://github.com/intel/ros_openvino_toolkit/blob/master/doc/BINARY_VERSION_README.md).** - -## Enable Intel® Neural Compute Stick 2 (Intel® NCS 2) under the OpenVINO Open Source version (Optional)
-1. Intel Distribution of OpenVINO toolkit
- * Download OpenVINO toolkit by following the [guide](https://software.intel.com/en-us/openvino-toolkit/choose-download)
- ```bash - cd ~/Downloads - wget -c http://registrationcenter-download.intel.com/akdlm/irc_nas/15078/l_openvino_toolkit_p_2018.5.455.tgz - ``` - * Install OpenVINO toolkit by following the [guide](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux)
- ```bash - cd ~/Downloads - tar -xvf l_openvino_toolkit_p_2018.5.455.tgz - cd l_openvino_toolkit_p_2018.5.455 - # root is required instead of sudo - sudo -E ./install_cv_sdk_dependencies.sh - sudo ./install_GUI.sh - # build sample code under OpenVINO toolkit - source /opt/intel/computer_vision_sdk/bin/setupvars.sh - cd /opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/ - mkdir build - cd build - cmake .. - make - ``` - * Configure the Neural Compute Stick USB Driver - ```bash - cd ~/Downloads - cat < 97-usbboot.rules - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` - -2. Configure the environment (you can write the configuration to your ~/.basrch file)
- **Note**: If you used root privileges to install the OpenVINO binary package, it installs the Intel Distribution of OpenVINO toolkit in this directory: */opt/intel/openvino_/* - ```bash - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/build/intel64/Release/lib - source /opt/intel/computer_vision_sdk/bin/setupvars.sh - ``` +**NOTE:** Intel releases 2 different series of OpenVINO Toolkit, we call them as [OpenSource Version](https://github.com/opencv/dldt/) and [Tarball Version](https://software.intel.com/en-us/openvino-toolkit). This guidelie uses OpenSource Version as the installation and launching example. **If you want to use Tarball version, please follow [the guide for Tarball Version](https://github.com/intel/ros_openvino_toolkit/blob/devel/doc/BINARY_VERSION_README.md).** ## Dependencies Installation -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros_openvino_toolkit/blob/master/doc/OPEN_SOURCE_CODE_README.md) for details. +One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros_openvino_toolkit/blob/devel/doc/OPEN_SOURCE_CODE_README.md) for details. 
## Launching * Preparation + * Configure the Neural Compute Stick USB Driver + ```bash + cd ~/Downloads + cat < 97-usbboot.rules + SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + EOF + sudo cp 97-usbboot.rules /etc/udev/rules.d/ + sudo udevadm control --reload-rules + sudo udevadm trigger + sudo ldconfig + rm 97-usbboot.rules + ``` + * download [Object Detection model](https://github.com/intel/ros_openvino_toolkit/tree/devel/doc/OBJECT_DETECTION.md) * download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model ```bash #object segmentation model @@ -186,25 +156,20 @@ One-step installation scripts are provided for the dependencies' installation. 
P wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz cd mask_rcnn_inception_v2_coco_2018_01_28 - python3 /opt/openvino_toolkit/dldt/model-optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/openvino_toolkit/dldt/model-optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir ./output/ - sudo mkdir -p /opt/models - sudo ln -s ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28 /opt/models/ - #object detection model - cd /opt/openvino_toolkit/open_model_zoo/model_downloader - python3 ./downloader.py --name mobilenet-ssd - #FP32 precision model - sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo.py --input_model /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - #FP16 precision model - sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo.py --input_model /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] + #FP32 + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/openvino_toolkit/dldt/model-optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir 
/opt/openvino_toolkit/models/segmentation/output/FP32 + #FP16 + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/openvino_toolkit/dldt/model-optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16 + ``` * download the optimized Intermediate Representation (IR) of model (excute _once_)
```bash - cd /opt/openvino_toolkit/open_model_zoo/model_downloader - python3 downloader.py --name face-detection-adas-0001 - python3 downloader.py --name age-gender-recognition-retail-0013 - python3 downloader.py --name emotions-recognition-retail-0003 - python3 downloader.py --name head-pose-estimation-adas-0001 - python3 downloader.py --name person-detection-retail-0013 - python3 downloader.py --name person-reidentification-retail-0076 + cd /opt/openvino_toolkit/open_model_zoo/tools/downloader + sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output + sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output + sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output + sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output + sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output + sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output @@ -213,13 +178,15 @@ One-step installation scripts are provided for the dependencies' installation. P ``` * copy label files (excute _once_)
```bash - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels - + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels 
/opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ + ``` * set ENV LD_LIBRARY_PATH
```bash @@ -227,20 +194,11 @@ One-step installation scripts are provided for the dependencies' installation. P ``` * run face detection sample code input from StandardCamera. ```bash - roslaunch vino_launch pipeline_people_oss.launch + roslaunch vino_launch pipeline_people.launch ``` - * run face detection sample code input from Image. ```bash - roslaunch vino_launch pipeline_image_oss.launch - ``` -* run object detection sample code input from RealsensCamera. - ```bash - roslaunch vino_launch pipeline_object_oss.launch - ``` -* run object detection sample code input from RealsensCameraTopic. - ```bash - roslaunch vino_launch pipeline_object_oss_topic.launch + roslaunch vino_launch pipeline_image.launch ``` * run object segmentation sample code input from RealSenseCameraTopic. ```bash @@ -252,12 +210,20 @@ One-step installation scripts are provided for the dependencies' installation. P ``` * run person reidentification sample code input from StandardCamera. ```bash - roslaunch vino_launch pipeline_reidentification_oss.launch + roslaunch vino_launch pipeline_reidentification.launch + ``` +* run face re-identification with facial landmarks from realsense camera + ```bash + roslaunch vino_launch pipeline_face_reidentification.launch + ``` +* run vehicle detection sample code input from StandardCamera. + ```bash + roslaunch vino_launch pipeline_vehicle_detection.launch ``` * run object detection service sample code input from Image Run image processing service: ```bash - roslaunch vino_launch image_object_server_oss.launch + roslaunch vino_launch image_object_server.launch ``` Run example application with an absolute path of an image on another console: ```bash @@ -266,20 +232,12 @@ One-step installation scripts are provided for the dependencies' installation. 
P * run face detection service sample code input from Image Run image processing service: ```bash - roslaunch vino_launch image_people_server_oss.launch + roslaunch vino_launch image_people_server.launch ``` Run example application with an absolute path of an image on another console: ```bash rosrun dynamic_vino_sample image_people_client ~/catkin_ws/src/ros_openvino_toolkit/data/images/team.jpg ``` -* run face re-identification with facial landmarks from realsense camera - ```bash - roslaunch vino_launch pipeline_face_reidentification.launch - ``` -* run vehicle detection sample code input from StandardCamera. - ```bash - roslaunch vino_launch pipeline_vehicle_detection.launch - ``` # TODO Features * Support **result filtering** for inference process, so that the inference results can be filtered to different subsidiary inference. For example, given an image, firstly we do Object Detection on it, secondly we pass cars to vehicle brand recognition and pass license plate to license number recognition. * Design **resource manager** to better use such resources as models, engines, and other external plugins. @@ -288,3 +246,4 @@ One-step installation scripts are provided for the dependencies' installation. P # More Information * ros OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw + diff --git a/doc/BINARY_VERSION_README.md b/doc/BINARY_VERSION_README.md index 63b11f76..a30ba6b6 100644 --- a/doc/BINARY_VERSION_README.md +++ b/doc/BINARY_VERSION_README.md @@ -13,11 +13,11 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel * Demo application to show above detection and recognitions ## 2. Prerequisite -- An x86_64 computer running Ubuntu 16.04. Below processors are supported: +- An x86_64 computer running Ubuntu 18.04. 
Below processors are supported: * 6th-8th Generation Intel® Core™ * Intel® Xeon® v5 family * Intel® Xeon® v6 family -- ROS Kinetic +- ROS [Dashing](https://github.com/ros2/ros2) - [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) - RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File @@ -38,9 +38,9 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel ``` **Note**:You can also choose to follow the steps below to build the environment step by step. -- Install ROS Kinetic Desktop-Full ([guide](http://wiki.ros.org/kinetic/Installation/Ubuntu)) +- Install ROS Dashing Desktop-Full ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/)) -- Install [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) ([guide](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux)). Choose "2018 R4" when download tarball. +- Install [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) ([guide](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux)). **Note**: Please use *root privileges* to run the installer when installing the core components. 
- Install OpenCL Driver for GPU @@ -70,9 +70,8 @@ sudo ./install_NEO_OCL_driver.sh sudo apt update sudo apt install libjasper1 libjasper-dev ``` -- Install Intel® RealSense™ SDK 2.0 [(tag v2.17.1)](https://github.com/IntelRealSense/librealsense/tree/v2.17.1) - * [Install from source code](https://github.com/IntelRealSense/librealsense/blob/v2.17.1/doc/installation.md)(Recommended) - * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.17.1/doc/distribution_linux.md) +- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0) + * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md) - Other Dependencies ```bash @@ -89,8 +88,8 @@ sudo ln -sf libboost_python-py35.so libboost_python3.so - Build sample code under openvino toolkit ```bash # root is required instead of sudo -source /opt/intel/computer_vision_sdk/bin/setupvars.sh -cd /opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/ +source /opt/intel/openvino/bin/setupvars.sh +cd /opt/intel/openvino/deployment_tools/inference_engine/samples/ mkdir build cd build cmake .. 
@@ -99,8 +98,8 @@ make - Set Environment CPU_EXTENSION_LIB and GFLAGS_LIB ```bash -export CPU_EXTENSION_LIB=/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so -export GFLAGS_LIB=/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a +export CPU_EXTENSION_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so +export GFLAGS_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a ``` - Install ROS_OpenVINO packages @@ -122,7 +121,7 @@ source /opt/ros/kinetic/setup.bash # Ubuntu 18.04 source /opt/ros/melodic/setup.bash -source /opt/intel/computer_vision_sdk/bin/setupvars.sh +source /opt/intel/openvino/bin/setupvars.sh export OpenCV_DIR=$HOME/code/opencv/build cd ~/catkin_ws catkin_make @@ -133,57 +132,74 @@ sudo ln -s ~/catkin_ws/src/ros_openvino_toolkit /opt/openvino_toolkit/ros_openvi ## 5. 
Running the Demo * Preparation + * Configure the Neural Compute Stick USB Driver + ```bash + cd ~/Downloads + cat <<EOF > 97-usbboot.rules + SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" + EOF + sudo cp 97-usbboot.rules /etc/udev/rules.d/ + sudo udevadm control --reload-rules + sudo udevadm trigger + sudo ldconfig + rm 97-usbboot.rules + ``` + + * download [Object Detection model](https://github.com/intel/ros_openvino_toolkit/tree/devel/doc/OBJECT_DETECTION.md) + * download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - #object segmentation model - cd /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/install_prerequisites - sudo ./install_prerequisites.sh - mkdir -p ~/Downloads/models - cd ~/Downloads/models - wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - cd mask_rcnn_inception_v2_coco_2018_01_28 - python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir ./output/ - sudo mkdir -p /opt/models - sudo ln -sf ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28 /opt/models/ - #object detection model - cd /opt/intel/computer_vision_sdk/deployment_tools/model_downloader - sudo python3 ./downloader.py --name mobilenet-ssd - #FP32 precision 
model - sudo python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo.py --input_model /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/mobilenet-ssd.caffemodel --output_dir /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - #FP16 precision model - sudo python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo.py --input_model /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/mobilenet-ssd.caffemodel --output_dir /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - ``` + ```bash + #object segmentation model + cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites + sudo ./install_prerequisites.sh + mkdir -p ~/Downloads/models + cd ~/Downloads/models + wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz + tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz + cd mask_rcnn_inception_v2_coco_2018_01_28 + #FP32 + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir /opt/openvino_toolkit/models/segmentation/output/FP32 + #FP16 + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config 
/opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16 + ``` + * download the optimized Intermediate Representation (IR) of model (execute once) + ```bash + cd /opt/intel/openvino/deployment_tools/tools/model_downloader + sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output + sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output + sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output + sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output + sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output + sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output + sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output + sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output + sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output + sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output + sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir 
/opt/openvino_toolkit/models/face-reidentification/output + ``` * copy label files (excute _once_)
- ```bash - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP32 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP16 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP32 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP16 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28/output - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32 - sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16 - ``` - * set ENV LD_LIBRARY_PATH and environment - ```bash - source /opt/intel/computer_vision_sdk/bin/setupvars.sh - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/samples/build/intel64/Release/lib - ``` -* run face detection sample code input from StandardCamera.(connect Intel® Neural Compute Stick 2) ```bash - roslaunch vino_launch pipeline_people_myriad.launch + sudo cp 
/opt/openvino_toolkit/ros_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ ``` -* run face detection sample code input from Image. 
+ * set ENV LD_LIBRARY_PATH and environment ```bash - roslaunch vino_launch pipeline_image.launch + source /opt/intel/openvino/bin/setupvars.sh + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib ``` -* run object detection sample code input from RealSenseCamera. +* run face detection sample code input from StandardCamera.(connect Intel® Neural Compute Stick 2) ```bash - roslaunch vino_launch pipeline_object.launch + roslaunch vino_launch pipeline_people.launch ``` -* run object detection sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) +* run face detection sample code input from Image. ```bash - roslaunch vino_launch pipeline_object_topic.launch + roslaunch vino_launch pipeline_image.launch ``` * run object segmentation sample code input from RealSenseCameraTopic. ```bash @@ -197,6 +213,14 @@ sudo ln -s ~/catkin_ws/src/ros_openvino_toolkit /opt/openvino_toolkit/ros_openvi ```bash roslaunch vino_launch pipeline_reidentification.launch ``` +* run face re-identification with facial landmarks from realsense camera + ```bash + roslaunch vino_launch pipeline_face_reidentification.launch + ``` +* run vehicle detection sample code input from StandardCamera. 
+ ```bash + roslaunch vino_launch pipeline_vehicle_detection.launch + ``` * run object detection service sample code input from Image Run image processing service: ```bash @@ -228,3 +252,4 @@ sudo ln -s ~/catkin_ws/src/ros_openvino_toolkit /opt/openvino_toolkit/ros_openvi ###### *Any security issue should be reported using process at https://01.org/security* + diff --git a/doc/OBJECT_DETECTION.md b/doc/OBJECT_DETECTION.md new file mode 100644 index 00000000..5e23b1ae --- /dev/null +++ b/doc/OBJECT_DETECTION.md @@ -0,0 +1,190 @@ +# Object Detection +## Launching +### OpenSource Version +#### mobilenet-ssd +* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model + ```bash + cd /opt/openvino_toolkit/open_model_zoo/tools/downloader + python3 ./downloader.py --name mobilenet-ssd + #FP32 precision model + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo.py --input_model /opt/openvino_toolkit/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] + #FP16 precision model + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo.py --input_model /opt/openvino_toolkit/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] + ``` +* copy label files (execute _once_)
+ ```bash + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 + ``` +* run object detection sample code input from RealSenseCamera.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object.launch + ``` +* run object detection sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_topic.launch + ``` +#### YOLOv2-voc +* Darkflow to protobuf(.pb) + - install [darkflow](https://github.com/thtrieu/darkflow) + - install prerequisites + ```bash + pip3 install tensorflow opencv-python numpy networkx cython + ``` + - Get darkflow and YOLO-OpenVINO + ```bash + mkdir -p ~/code && cd ~/code + git clone https://github.com/thtrieu/darkflow + git clone https://github.com/chaoli2/YOLO-OpenVINO + sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/ + ``` + - modify the line self.offset = 16 in the ./darkflow/utils/loader.py file and replace with self.offset = 20 + - Install darkflow + ```bash + cd ~/code/darkflow + pip3 install . + ``` + - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow. 
+ ```bash + cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt + ``` + - Get yolov2 weights and cfg + ```bash + cd ~/code/darkflow + mkdir -p models + cd models + wget -c https://pjreddie.com/media/files/yolov2-voc.weights + wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg + ``` + - Run convert script + ```bash + cd ~/code/darkflow + flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb + ``` +* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model + ```bash + cd ~/code/darkflow + # FP32 precision model + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo_tf.py \ + --input_model built_graph/yolov2-voc.pb \ + --batch 1 \ + --tensorflow_use_custom_operations_config /opt/openvino_toolkit/dldt/model-optimizer/extensions/front/tf/yolo_v2_voc.json \ + --data_type FP32 \ + --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 + # FP16 precision model + sudo python3 /opt/openvino_toolkit/dldt/model-optimizer/mo_tf.py \ + --input_model built_graph/yolov2-voc.pb \ + --batch 1 \ + --tensorflow_use_custom_operations_config /opt/openvino_toolkit/dldt/model-optimizer/extensions/front/tf/yolo_v2_voc.json \ + --data_type FP16 \ + --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 + ``` +* copy label files (execute _once_)
+ ```bash + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 + ``` +* run object detection sample code input from RealSenseCamera.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_yolo.launch + ``` +* run object detection sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_yolo_topic.launch + ``` +### Binary Version +#### mobilenet-ssd +* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model + ```bash + cd /opt/intel/openvino/deployment_tools/tools/model_downloader + sudo python3 ./downloader.py --name mobilenet-ssd + #FP32 precision model + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] + #FP16 precision model + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] + ``` +* copy label files (execute _once_)
+ ```bash + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 + ``` +* run object detection sample code input from RealSenseCamera.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object.launch + ``` +* run object detection sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_topic.launch + ``` + #### YOLOv2-voc +* Darkflow to protobuf(.pb) + - install [darkflow](https://github.com/thtrieu/darkflow) + - install prerequisites + ```bash + pip3 install tensorflow opencv-python numpy networkx cython + ``` + - Get darkflow and YOLO-OpenVINO + ```bash + mkdir -p ~/code && cd ~/code + git clone https://github.com/thtrieu/darkflow + git clone https://github.com/chaoli2/YOLO-OpenVINO + sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/ + ``` + - modify the line self.offset = 16 in the ./darkflow/utils/loader.py file and replace with self.offset = 20 + - Install darkflow + ```bash + cd ~/code/darkflow + pip3 install . + ``` + - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow. 
+ ```bash + cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt + ``` + - Get yolov2 weights and cfg + ```bash + cd ~/code/darkflow + mkdir -p models + cd models + wget -c https://pjreddie.com/media/files/yolov2-voc.weights + wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg + ``` + - Run convert script + ```bash + cd ~/code/darkflow + flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb + ``` +* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model + ```bash + cd ~/code/darkflow + # FP32 precision model + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py \ + --input_model built_graph/yolov2-voc.pb \ + --batch 1 \ + --tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/yolo_v2_voc.json \ + --data_type FP32 \ + --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 + # FP16 precision model + sudo python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py \ + --input_model built_graph/yolov2-voc.pb \ + --batch 1 \ + --tensorflow_use_custom_operations_config /opt/intel/openvino/deployment_tools/model_optimizer/extensions/front/tf/yolo_v2_voc.json \ + --data_type FP16 \ + --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 + ``` +* copy label files (execute _once_)
+ ```bash + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 + sudo cp /opt/openvino_toolkit/ros_openvino_toolkit/data/labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 + ``` +* run object detection sample code input from RealSenseCamera.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_yolo.launch + ``` +* run object detection sample code input from RealSenseCameraTopic.(connect Intel® Neural Compute Stick 2) + ```bash + roslaunch vino_launch pipeline_object_yolo_topic.launch + ``` + + + + diff --git a/doc/OPEN_SOURCE_CODE_README.md b/doc/OPEN_SOURCE_CODE_README.md index c03a66a8..48e811ce 100644 --- a/doc/OPEN_SOURCE_CODE_README.md +++ b/doc/OPEN_SOURCE_CODE_README.md @@ -79,7 +79,7 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel mkdir ~/code && cd ~/code git clone https://github.com/opencv/dldt.git cd dldt/inference-engine/ - git checkout 2018_R4 + git checkout 2019_R3 ./install_dependencies.sh mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release .. @@ -92,7 +92,7 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel cd ~/code git clone https://github.com/opencv/open_model_zoo.git cd open_model_zoo/demos/ - git checkout 2018_R4 + git checkout 2019_R3 mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release /opt/openvino_toolkit/dldt/inference-engine make -j8 @@ -100,9 +100,8 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel sudo ln -s ~/code/open_model_zoo /opt/openvino_toolkit/open_model_zoo ``` -- Install Intel® RealSense™ SDK 2.0 [(tag v2.17.1)](https://github.com/IntelRealSense/librealsense/tree/v2.17.1)
- * [Install from source code](https://github.com/IntelRealSense/librealsense/blob/v2.17.1/doc/installation.md)(Recommended)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.17.1/doc/distribution_linux.md)
+- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0) + * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md) - Other Dependencies ```bash @@ -150,3 +149,4 @@ This project is a ROS wrapper for CV API of [OpenVINO™](https://software.intel ``` ###### *Any security issue should be reported using process at https://01.org/security* + diff --git a/script/environment_setup.sh b/script/environment_setup.sh old mode 100644 new mode 100755 index 903e7fb0..6d9a8049 --- a/script/environment_setup.sh +++ b/script/environment_setup.sh @@ -68,8 +68,8 @@ if [ "$CLEAN" == "1" ]; then case $answer in Y|y) echo echo "===================Cleaning...====================================" - echo $ROOT_PASSWD | sudo -S rm -rf ~/code - echo $ROOT_PASSWD | sudo -S rm -rf /opt/intel + #echo $ROOT_PASSWD | sudo -S rm -rf ~/code + #echo $ROOT_PASSWD | sudo -S rm -rf /opt/intel echo $ROOT_PASSWD | sudo -S rm -rf /opt/openvino_toolkit if [[ $system_ver = "16.04" && -L "/usr/lib/x86_64-linux-gnu/libboost_python3.so" ]]; then echo $ROOT_PASSWD | sudo -S rm /usr/lib/x86_64-linux-gnu/libboost_python3.so @@ -83,12 +83,14 @@ fi # Setup ROS from Debian if [ "$ROS_DEBIAN" == "1" ]; then echo "===================Installing ROS from Debian Package...=======================" + echo $ROOT_PASSWD | sudo -S apt-get install -y curl + curl http://repo.ros2.org/repos.key | sudo apt-key add - echo $ROOT_PASSWD | sudo -S sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' - #echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116 - #For those who cannot access hkp protocal - echo $ROOT_PASSWD | curl http://repo.ros2.org/repos.key | sudo apt-key add - + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 
--keyserver-options http-proxy="$http_proxy" --recv-key F42ED6FBAB17C654 + #echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver 'hkp://pgp.mit.edu:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 + #For those who cannot access hkp protocal echo $ROOT_PASSWD | sudo -S apt-get update - echo $ROOT_PASSWD | sudo -S apt-get install -y ros-$ros_ver-desktop-full #For ubuntu16.04 Ros-melodic + echo $ROOT_PASSWD | sudo -S apt-get install -y ros-$ros_ver-desktop-full if [ ! -f "/etc/ros/rosdep/sources.list.d/20-default.list" ]; then echo $ROOT_PASSWD | sudo -S rosdep init @@ -96,19 +98,19 @@ if [ "$ROS_DEBIAN" == "1" ]; then echo "file already exists, skip..." fi - set +o errexit + set +o errexit rosdep update until [ $? == 0 ] do rosdep update done tail ~/.bashrc | grep "/opt/ros/$ros_ver/setup.bash" - set -o errexit + set -o errexit if [ "$?" == "1" ]; then echo "source /opt/ros/$ros_ver/setup.bash" >> ~/.bashrc else - echo "ros melodic already set, skip..." + echo "ros $ros_ver already set, skip..." 
fi source ~/.bashrc echo $ROOT_PASSWD | sudo -S apt-get install -y python-rosinstall python-rosinstall-generator python-wstool build-essential @@ -210,7 +212,7 @@ if [ "$DLDT" == "1" ]; then mkdir -p ~/code && cd ~/code git clone https://github.com/opencv/dldt.git cd dldt/inference-engine/ - git checkout 2018_R5 + git checkout 2019_R3.1 git submodule init git submodule update --recursive mkdir build && cd build @@ -227,7 +229,7 @@ if [ "$MODEL_ZOO" == "1" ]; then mkdir -p ~/code && cd ~/code git clone https://github.com/opencv/open_model_zoo.git cd open_model_zoo/demos/ - git checkout 2018_R5 + git checkout 2019_R3.1 mkdir build && cd build cmake -DCMAKE_BUILD_TYPE=Release /opt/openvino_toolkit/dldt/inference-engine make -j8 @@ -239,39 +241,35 @@ fi # Setup LIBREALSENSE if [ "$LIBREALSENSE" == "1" ]; then echo "===================Setting Up LibRealSense...=======================" - echo $ROOT_PASSWD | sudo -S apt-get install -y libssl-dev libusb-1.0-0-dev pkg-config libgtk-3-dev - echo $ROOT_PASSWD | sudo -S apt-get install -y libglfw3-dev libgl1-mesa-dev libglu1-mesa-dev - mkdir -p ~/code && cd ~/code - git clone https://github.com/IntelRealSense/librealsense - cd ~/code/librealsense - git checkout v2.17.1 - mkdir build && cd build - cmake ../ -DBUILD_EXAMPLES=true - echo $ROOT_PASSWD | sudo -S make uninstall - make clean - make - echo $ROOT_PASSWD | sudo -S make install - cd .. - echo $ROOT_PASSWD | sudo -S cp config/99-realsense-libusb.rules /etc/udev/rules.d/ - echo $ROOT_PASSWD | sudo -S udevadm control --reload-rules - udevadm trigger + echo "Install server public key for librealsense" + if [ -n "$http_proxy" ]; then + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy=$http_proxy --recv-key C8B3A55A6F3EFCDE + else + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE + fi + if ! 
test "$(grep "http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo" /etc/apt/sources.list)" + then + echo $ROOT_PASSWD | sudo -S add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo bionic main" -u + fi + + echo $ROOT_PASSWD | sudo -S apt update && sudo -S apt-get install -y librealsense2-dkms librealsense2-utils librealsense2-dev echo "==== END install librealsense ====" fi # Setup other dependencies if [ "$OTHER_DEPENDENCY" == "1" ]; then echo "===================Setting UP OTHER_DEPENDENCY DEPENDENCY...=======================" - pip3 install numpy - pip3 install networkx - echo $ROOT_PASSWD | sudo -S apt-get install python3-yaml + echo $ROOT_PASSWD | sudo -S apt-get install -y python3-pip + sudo pip3 install numpy + sudo pip3 install networkx if [ $system_ver = "16.04" ]; then echo $ROOT_PASSWD | sudo -S apt-get install -y --no-install-recommends libboost-all-dev cd /usr/lib/x86_64-linux-gnu - sudo ln -s libboost_python-py35.so libboost_python3.so + echo $ROOT_PASSWD | sudo -S ln -sf libboost_python-py35.so libboost_python3.so elif [ $system_ver = "18.04" ]; then echo $ROOT_PASSWD | sudo -S apt-get install -y --no-install-recommends libboost-all-dev - sudo apt install libboost-python1.62.0 + echo $ROOT_PASSWD | sudo -S apt install -y --no-install-recommends libboost-python1.62.0 fi echo "==== END install other dependencies ====" fi diff --git a/script/environment_setup_binary.sh b/script/environment_setup_binary.sh old mode 100644 new mode 100755 index 32d54e48..afaeedb6 --- a/script/environment_setup_binary.sh +++ b/script/environment_setup_binary.sh @@ -79,10 +79,12 @@ fi # Setup ROS from debian if [ "$ROS_DEBIAN" == "1" ]; then echo "===================Installing ROS from Debian Package...=======================" + echo $ROOT_PASSWD | sudo -S apt-get install -y curl + curl http://repo.ros2.org/repos.key | sudo apt-key add - echo $ROOT_PASSWD | sudo -S sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) 
main" > /etc/apt/sources.list.d/ros-latest.list' - #echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116 - #For those who cannot access hkp protocal - echo $ROOT_PASSWD | curl http://repo.ros2.org/repos.key | sudo apt-key add - + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="$http_proxy" --recv-key F42ED6FBAB17C654 + #echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver 'hkp://pgp.mit.edu:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 + #For those who cannot access hkp protocal echo $ROOT_PASSWD | sudo -S apt-get update echo $ROOT_PASSWD | sudo -S apt-get install -y ros-$ros_ver-desktop-full @@ -148,19 +150,19 @@ fi #setup OPENVINO if [ "$OPENVINO" == "1" ]; then cd ~/Downloads - wget -c http://registrationcenter-download.intel.com/akdlm/irc_nas/15078/l_openvino_toolkit_p_2018.5.455.tgz - tar -xvf l_openvino_toolkit_p_2018.5.455.tgz - cd l_openvino_toolkit_p_2018.5.455 - echo $ROOT_PASSWD | sudo -S ./install_cv_sdk_dependencies.sh + wget -c http://registrationcenter-download.intel.com/akdlm/irc_nas/16057/l_openvino_toolkit_p_2019.3.376.tgz + tar -xvf l_openvino_toolkit_p_2019.3.376.tgz + cd l_openvino_toolkit_p_2019.3.376 + echo $ROOT_PASSWD | sudo -S ./install_openvino_dependencies.sh cp $basedir/openvino_silent.cfg . echo $ROOT_PASSWD | sudo -S ./install.sh --silent openvino_silent.cfg set +o errexit - tail ~/.bashrc | grep "computer_vision_sdk/bin/setupvars.sh" + tail ~/.bashrc | grep "openvino/bin/setupvars.sh" set -o errexit if [ "$?" == "1" ]; then - echo "source /opt/intel/computer_vision_sdk/bin/setupvars.sh" >> ~/.bashrc + echo "source /opt/intel/openvino/bin/setupvars.sh" >> ~/.bashrc else echo "openvino already set, skip..." 
fi @@ -168,7 +170,7 @@ fi #install OPENCL Driver for GPU if [ "$OPENCL" == "1" ]; then - cd /opt/intel/computer_vision_sdk/install_dependencies + cd /opt/intel/openvino/install_dependencies echo $ROOT_PASSWD | sudo -S ./install_NEO_OCL_driver.sh echo "install OPENCL Driver for GPU" fi @@ -176,39 +178,35 @@ fi # Setup LIBREALSENSE if [ "$LIBREALSENSE" == "1" ]; then echo "===================Setting Up LibRealSense...=======================" - echo $ROOT_PASSWD | sudo -S apt-get install -y libssl-dev libusb-1.0-0-dev pkg-config libgtk-3-dev - echo $ROOT_PASSWD | sudo -S apt-get install -y libglfw3-dev libgl1-mesa-dev libglu1-mesa-dev - mkdir -p ~/code && cd ~/code - git clone https://github.com/IntelRealSense/librealsense - cd ~/code/librealsense - git checkout v2.17.1 - mkdir build && cd build - cmake ../ -DBUILD_EXAMPLES=true - echo $ROOT_PASSWD | sudo -S make uninstall - make clean - make - echo $ROOT_PASSWD | sudo -S make install - cd .. - echo $ROOT_PASSWD | sudo -S cp config/99-realsense-libusb.rules /etc/udev/rules.d/ - echo $ROOT_PASSWD | sudo -S udevadm control --reload-rules - udevadm trigger + echo "Install server public key for librealsense" + if [ -n "$http_proxy" ]; then + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy=$http_proxy --recv-key C8B3A55A6F3EFCDE + else + echo $ROOT_PASSWD | sudo -S apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C8B3A55A6F3EFCDE + fi + if ! 
test "$(grep "http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo" /etc/apt/sources.list)" + then + echo $ROOT_PASSWD | sudo -S add-apt-repository "deb http://realsense-hw-public.s3.amazonaws.com/Debian/apt-repo bionic main" -u + fi + + echo $ROOT_PASSWD | sudo -S apt update && sudo -S apt-get install -y librealsense2-dkms librealsense2-utils librealsense2-dev echo "==== END install librealsense ====" fi # Setup other dependencies if [ "$OTHER_DEPENDENCY" == "1" ]; then echo "===================Setting UP OTHER_DEPENDENCY DEPENDENCY...=======================" - echo $ROOT_PASSWD | sudo -S apt-get install python3-pip - pip3 install numpy - pip3 install networkx + echo $ROOT_PASSWD | sudo -S apt-get install -y python3-pip + sudo pip3 install numpy + sudo pip3 install networkx if [ $system_ver = "16.04" ]; then echo $ROOT_PASSWD | sudo -S apt-get install -y --no-install-recommends libboost-all-dev cd /usr/lib/x86_64-linux-gnu - sudo ln -s libboost_python-py35.so libboost_python3.so + echo $ROOT_PASSWD | sudo -S ln -sf libboost_python-py35.so libboost_python3.so elif [ $system_ver = "18.04" ]; then echo $ROOT_PASSWD | sudo -S apt-get install -y --no-install-recommends libboost-all-dev - sudo apt install libboost-python1.62.0 + echo $ROOT_PASSWD | sudo -S apt install -y --no-install-recommends libboost-python1.62.0 fi echo "==== END install other dependencies ====" fi From 015dfc658e5aa5338d1c8580619936ff1410d4d7 Mon Sep 17 00:00:00 2001 From: RachelRen05 Date: Fri, 15 Nov 2019 17:32:19 +0800 Subject: [PATCH 22/63] update config files --- ...mage_oss.launch => pipeline_people.launch} | 2 +- vino_launch/param/image_object_server.yaml | 12 +++--- vino_launch/param/image_people_server.yaml | 28 +++++++------ .../param/image_segmentation_server.yaml | 2 +- .../param/pipeline_face_reidentification.yaml | 8 ++-- vino_launch/param/pipeline_image.yaml | 15 +++---- vino_launch/param/pipeline_object.yaml | 12 +++--- .../param/pipeline_object_oss_topic.yaml | 22 ---------- 
vino_launch/param/pipeline_object_topic.yaml | 7 ++-- vino_launch/param/pipeline_people.yaml | 40 +++++++++++++++++++ .../param/pipeline_reidentification.yaml | 24 ++++------- .../param/pipeline_reidentification_oss.yaml | 27 ------------- vino_launch/param/pipeline_segmentation.yaml | 7 ++-- .../param/pipeline_vehicle_detection.yaml | 15 ++++--- vino_launch/param/pipeline_video.yaml | 16 +++++--- 15 files changed, 113 insertions(+), 124 deletions(-) rename vino_launch/launch/{pipeline_image_oss.launch => pipeline_people.launch} (97%) delete mode 100644 vino_launch/param/pipeline_object_oss_topic.yaml create mode 100644 vino_launch/param/pipeline_people.yaml delete mode 100644 vino_launch/param/pipeline_reidentification_oss.yaml diff --git a/vino_launch/launch/pipeline_image_oss.launch b/vino_launch/launch/pipeline_people.launch similarity index 97% rename from vino_launch/launch/pipeline_image_oss.launch rename to vino_launch/launch/pipeline_people.launch index 04d43e8f..7f7c016e 100644 --- a/vino_launch/launch/pipeline_image_oss.launch +++ b/vino_launch/launch/pipeline_people.launch @@ -1,5 +1,5 @@ - + - model_type: SSD - #model_type: yolov2-voc - engine: GPU + model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + engine: MYRIAD label: to/be/set/xxx.labels - batch: 16 + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.5 connects: - left: RealSenseCamera right: [ObjectDetection] diff --git a/vino_launch/param/pipeline_object_oss_topic.yaml b/vino_launch/param/pipeline_object_oss_topic.yaml deleted file mode 100644 index a29b85d1..00000000 --- a/vino_launch/param/pipeline_object_oss_topic.yaml +++ /dev/null @@ -1,22 +0,0 @@ -Pipelines: -- name: object - inputs: [RealSenseCameraTopic] - infers: - - 
name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 16 - outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.5 - connects: - - left: RealSenseCameraTopic - right: [ObjectDetection] - - left: ObjectDetection - right: [ImageWindow] - - left: ObjectDetection - right: [RosTopic] - - left: ObjectDetection - right: [RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_object_topic.yaml b/vino_launch/param/pipeline_object_topic.yaml index 8fb74e10..67ec2a90 100644 --- a/vino_launch/param/pipeline_object_topic.yaml +++ b/vino_launch/param/pipeline_object_topic.yaml @@ -3,12 +3,13 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml engine: MYRIAD label: to/be/set/xxx.labels - batch: 16 + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.5 connects: - left: RealSenseCameraTopic right: [ObjectDetection] diff --git a/vino_launch/param/pipeline_people.yaml b/vino_launch/param/pipeline_people.yaml new file mode 100644 index 00000000..21d09b54 --- /dev/null +++ b/vino_launch/param/pipeline_people.yaml @@ -0,0 +1,40 @@ +Pipelines: +- name: people + inputs: [RealSenseCamera] + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: MYRIAD + label: /to/be/set/xxx.labels + batch: 1 + confidence_threshold: 
0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/emotions-recognition/output/FP32/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [ImageWindow, RosTopic, RViz] + connects: + - left: RealSenseCamera + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz] + - left: AgeGenderRecognition + right: [ImageWindow, RosTopic, RViz] + - left: EmotionRecognition + right: [ImageWindow, RosTopic, RViz] + - left: HeadPoseEstimation + right: [ImageWindow, RosTopic, RViz] + +Common: diff --git a/vino_launch/param/pipeline_reidentification.yaml b/vino_launch/param/pipeline_reidentification.yaml index 43f652b5..7f3cbe46 100644 --- a/vino_launch/param/pipeline_reidentification.yaml +++ b/vino_launch/param/pipeline_reidentification.yaml @@ -1,37 +1,27 @@ Pipelines: - name: object - inputs: [RealSenseCamera] + inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-detection-retail-0013/FP32/person-detection-retail-0013.xml - model_type: SSD + 
model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml + model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.7 - - name: PersonAttribsDetection - model: /opt/intel/computer_vision_sdk_2018.5.455/deployment_tools/model_downloader/Security/object_attributes/pedestrian/person-attributes-recognition-crossroad-0031/dldt/person-attributes-recognition-crossroad-0031.xml - #model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-attributes-recognition-crossroad-0031/FP32/person-attributes-recognition-crossroad-0031.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - confidence_threshold: 0.5 - outputs: [ImageWindow, RosTopic, RViz] + outputs: [ImageWindow, RViz, RosTopic] connects: - - left: RealSenseCamera + - left: StandardCamera right: [ObjectDetection] - left: ObjectDetection - right: [PersonReidentification, PersonAttribsDetection] + right: [PersonReidentification] - left: PersonReidentification - right: [ImageWindow, RosTopic, RViz] - - left: PersonAttribsDetection - right: [ImageWindow, RosTopic, RViz] + right: [ImageWindow, RViz, RosTopic] OpenvinoCommon: diff --git a/vino_launch/param/pipeline_reidentification_oss.yaml b/vino_launch/param/pipeline_reidentification_oss.yaml deleted file mode 100644 index 84219182..00000000 --- 
a/vino_launch/param/pipeline_reidentification_oss.yaml +++ /dev/null @@ -1,27 +0,0 @@ -Pipelines: -- name: object - inputs: [StandardCamera] - infers: - - name: ObjectDetection - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_detection/pedestrian/rmnet_ssd/0013cd /dldt/person-detection-retail-0013.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - confidence_threshold: 0.5 - enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - - name: PersonReidentification - model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_reidentification/pedestrian/rmnet_based/0076/dldt/person-reidentification-retail-0076.xml - engine: CPU - label: to/be/set/xxx.labels - batch: 1 - confidence_threshold: 0.7 - outputs: [ImageWindow, RosTopic, RViz] - connects: - - left: StandardCamera - right: [ObjectDetection] - - left: ObjectDetection - right: [PersonReidentification] - - left: PersonReidentification - right: [ImageWindow, RosTopic, RViz] - -OpenvinoCommon: diff --git a/vino_launch/param/pipeline_segmentation.yaml b/vino_launch/param/pipeline_segmentation.yaml index e954d8e3..fd4a7851 100644 --- a/vino_launch/param/pipeline_segmentation.yaml +++ b/vino_launch/param/pipeline_segmentation.yaml @@ -3,12 +3,12 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectSegmentation - model: /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output/frozen_inference_graph.xml - engine: CPU + model: /opt/openvino_toolkit/models/segmentation/output/FP16/frozen_inference_graph.xml + engine: "HETERO:CPU,GPU,MYRIAD" label: to/be/set/xxx.labels batch: 1 + confidence_threshold: 0.5 outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.2 connects: - left: RealSenseCameraTopic right: [ObjectSegmentation] @@ -20,4 +20,3 @@ Pipelines: right: [RViz] OpenvinoCommon: - diff --git 
a/vino_launch/param/pipeline_vehicle_detection.yaml b/vino_launch/param/pipeline_vehicle_detection.yaml index 459a63b7..28732fb4 100644 --- a/vino_launch/param/pipeline_vehicle_detection.yaml +++ b/vino_launch/param/pipeline_vehicle_detection.yaml @@ -1,27 +1,26 @@ Pipelines: - name: object - inputs: [RealSenseCamera] + inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - model_type: SSD - label: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels + label: /opt/intel/openvino/deployment_tools/tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/Security/object_attributes/vehicle/resnet10_update_1/dldt/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/Security/optical_character_recognition/license_plate/dldt/license-plate-recognition-barrier-0001.xml + model: 
/opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 outputs: [ImageWindow, RViz, RosTopic] connects: - - left: RealSenseCamera + - left: StandardCamera right: [ObjectDetection] - left: ObjectDetection right: [{VehicleAttribsDetection: label == vehicle && confidence >= 0.8}, {LicensePlateDetection: label == license && confidence >= 0.8}] @@ -32,4 +31,4 @@ Pipelines: - left: LicensePlateDetection right: [ImageWindow, RosTopic, RViz] -OpenvinoCommon: \ No newline at end of file +OpenvinoCommon: diff --git a/vino_launch/param/pipeline_video.yaml b/vino_launch/param/pipeline_video.yaml index bac3f74d..fa632bbb 100644 --- a/vino_launch/param/pipeline_video.yaml +++ b/vino_launch/param/pipeline_video.yaml @@ -1,20 +1,26 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /home/intel/Videos/2018-09-26-101439.webm + input_path: /home/iei/Videos/object_segmentation.MP4 infers: - name: ObjectSegmentation - model: /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output/frozen_inference_graph.xml + model: /opt/openvino_toolkit/models/segmentation/output/FP32/frozen_inference_graph.xml + #model: /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output/frozen_inference_graph.xml engine: CPU label: to/be/set/xxx.labels batch: 1 + confidence_threshold: 0.5 outputs: [ImageWindow, RosTopic, RViz] - confidence_threshold: 0.2 connects: - left: Video right: [ObjectSegmentation] - left: ObjectSegmentation - right: [ImageWindow, RosTopic, RViz] + right: [ImageWindow] + - left: ObjectSegmentation + right: [RosTopic] + - left: ObjectSegmentation + right: [RViz] -OpenvinoCommon: + +OpenvinoCommon: From 3ce01e99348ac26d1930b3e2daabe91a34a9203e Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 19 Nov 2019 11:53:56 +0800 Subject: [PATCH 23/63] add return value for object segmentation & slove warning for 
segmentation --- .../dynamic_vino_lib/inferences/base_inference.h | 2 +- dynamic_vino_lib/src/models/base_model.cpp | 4 ++-- .../src/models/face_detection_model.cpp | 4 ++-- .../src/models/object_segmentation_model.cpp | 13 +++++++------ sample/src/image_object_client.cpp | 4 ++-- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index d67cfa7b..f194aa89 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -55,7 +55,7 @@ void matU8ToBlob( T * blob_data = blob->buffer().as(); cv::Mat resized_image(orig_image); - if (width != orig_image.size().width || height != orig_image.size().height) { + if (width != (size_t)orig_image.size().width || height != (size_t)orig_image.size().height) { cv::resize(orig_image, resized_image, cv::Size(width, height)); } int batchOffset = batch_index * width * height * channels; diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index 274d5c08..e63ec037 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -80,14 +80,14 @@ void Models::BaseModel::checkNetworkSize( // check input size slog::info << "Checking input size" << slog::endl; InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); - if (input_info.size() != input_size) { + if (input_info.size() != (size_t)input_size) { throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" "t"); } // check output size slog::info << "Checking output size" << slog::endl; InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != output_size) { + if (output_info.size() != (size_t)output_size) { throw 
std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" "t"); } diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/dynamic_vino_lib/src/models/face_detection_model.cpp index caadb0e2..cc219312 100644 --- a/dynamic_vino_lib/src/models/face_detection_model.cpp +++ b/dynamic_vino_lib/src/models/face_detection_model.cpp @@ -79,9 +79,9 @@ void Models::FaceDetectionModel::checkLayerProperty( // class number should be equal to size of label vector // if network has default "background" class, fake is used const size_t num_classes = output_layer->GetParamAsInt("num_classes"); - if (getLabels().size() != num_classes) + if ((size_t)getLabels().size() != num_classes) { - if (getLabels().size() == (num_classes - 1)) + if ((size_t)getLabels().size() == (num_classes - 1)) { getLabels().insert(getLabels().begin(), "fake"); } diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp index a08789af..582e7de0 100644 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ b/dynamic_vino_lib/src/models/object_segmentation_model.cpp @@ -34,7 +34,7 @@ void Models::ObjectSegmentationModel::checkNetworkSize( { slog::info << "Checking input size" << slog::endl; InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); - if (input_info.size() != input_size) { + if (input_info.size() != (size_t)input_size) { throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" "t, but got " + std::to_string(input_info.size())); } @@ -42,7 +42,7 @@ void Models::ObjectSegmentationModel::checkNetworkSize( // check output size slog::info << "Checking output size" << slog::endl; InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != output_size && output_info.size() != (output_size - 1)) { + if (output_info.size() != (size_t)output_size && 
output_info.size() != (size_t)(output_size - 1)) { throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" "t, but got " + std::to_string(output_info.size())); } @@ -133,6 +133,7 @@ bool Models::ObjectSegmentationModel::enqueue( data[2] = 1; } } + return true; } bool Models::ObjectSegmentationModel::matToBlob( @@ -150,14 +151,14 @@ bool Models::ObjectSegmentationModel::matToBlob( auto blob_data = input_blob->buffer().as::value_type *>(); cv::Mat resized_image(orig_image); - if(orig_image.size().height != input_height_ || orig_image.size().width != input_width_){ + if((size_t)orig_image.size().height != input_height_ || (size_t)orig_image.size().width != input_width_){ cv::resize(orig_image, resized_image, cv::Size(input_width_, input_height_)); } int batchOffset = batch_index * input_width_ * input_height_ * input_channels_; - for (int c = 0; c < input_channels_; c++) { - for (int h = 0; h < input_height_; h++) { - for (int w = 0; w < input_width_; w++) { + for (unsigned int c = 0; c < input_channels_; c++) { + for (unsigned int h = 0; h < input_height_; h++) { + for (unsigned int w = 0; w < input_width_; w++) { blob_data[batchOffset + c * input_width_ * input_height_ + h * input_width_ + w] = resized_image.at(h, w)[c] * scale_factor; } diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 1cdddb9a..46229110 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -79,9 +79,9 @@ int main(int argc, char ** argv) cv::Point left_top = cv::Point(xmin, ymin); cv::Point right_bottom = cv::Point(xmax, ymax); cv::rectangle(image, left_top, right_bottom, cv::Scalar(0, 255, 0), 1, 8, 0); - cv::rectangle(image, cvPoint(xmin, ymin), cvPoint(xmax, ymin + 20), cv::Scalar(0, 255, 0), + cv::rectangle(image, cv::Point(xmin, ymin), cv::Point(xmax, ymin + 20), cv::Scalar(0, 255, 0), -1); - cv::putText(image, ss.str(), cvPoint(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 
1, + cv::putText(image, ss.str(), cv::Point(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 0, 255), 1); } cv::imshow("image_detection", image); From e7dfdd9c1c311968c28842e475aa7cdf6d62a1ac Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 19 Nov 2019 14:35:29 +0800 Subject: [PATCH 24/63] fix ImageWindowOuput --- .../src/outputs/image_window_output.cpp | 15 +++++++++++---- dynamic_vino_lib/src/pipeline.cpp | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index f4f0e9cc..49304d1a 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -390,10 +390,17 @@ void Outputs::ImageWindowOutput::decorateFrame() cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, o.scalar); cv::rectangle(frame_, o.rect, o.scalar, 1); - cv::line(frame_, o.hp_cp, o.hp_x, cv::Scalar(0, 0, 255), 2); - cv::line(frame_, o.hp_cp, o.hp_y, cv::Scalar(0, 255, 0), 2); - cv::line(frame_, o.hp_zs, o.hp_ze, cv::Scalar(255, 0, 0), 2); - cv::circle(frame_, o.hp_ze, 3, cv::Scalar(255, 0, 0), 2); + if (o.hp_cp != o.hp_x){ + cv::line(frame_, o.hp_cp, o.hp_x, cv::Scalar(0, 0, 255), 2); + } + if (o.hp_cp != o.hp_y) { + cv::line(frame_, o.hp_cp, o.hp_y, cv::Scalar(0, 255, 0), 2); + } + if (o.hp_zs != o.hp_ze) { + cv::line(frame_, o.hp_zs, o.hp_ze, cv::Scalar(255, 0, 0), 2); + cv::circle(frame_, o.hp_ze, 3, cv::Scalar(255, 0, 0), 2); + } + for (int i = 0; i < o.landmarks.size(); i++) { cv::circle(frame_, o.landmarks[i], 3, cv::Scalar(255, 0, 0), 2); diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 419c9e03..92afb4bc 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -248,7 +248,7 @@ void Pipeline::setCallback() void Pipeline::callback(const std::string & 
detection_name) { - slog::info<<"Hello callback ----> " << detection_name < " << detection_name <fetchResults(); // set output From 3b57afff4ae50482bd0b45f689c0fe50ae3ddf86 Mon Sep 17 00:00:00 2001 From: luyang00 <393780933@qq.com> Date: Tue, 19 Nov 2019 16:59:39 +0800 Subject: [PATCH 25/63] fix service & vehicle detection --- README.md | 4 ++-- .../include/dynamic_vino_lib/inputs/base_input.h | 7 ++++++- .../include/dynamic_vino_lib/inputs/image_input.h | 3 ++- .../dynamic_vino_lib/inputs/realsense_camera.h | 1 - .../dynamic_vino_lib/inputs/realsense_camera_topic.h | 1 - .../include/dynamic_vino_lib/inputs/standard_camera.h | 1 - .../include/dynamic_vino_lib/inputs/video_input.h | 3 ++- .../include/dynamic_vino_lib/outputs/base_output.h | 4 ++-- .../dynamic_vino_lib/outputs/ros_service_output.h | 6 +++--- .../services/frame_processing_server.h | 2 +- .../services/pipeline_processing_server.h | 2 +- dynamic_vino_lib/src/inputs/image_input.cpp | 10 ++++++++-- dynamic_vino_lib/src/inputs/realsense_camera.cpp | 6 +----- .../src/inputs/realsense_camera_topic.cpp | 5 ----- dynamic_vino_lib/src/inputs/standard_camera.cpp | 5 ----- dynamic_vino_lib/src/inputs/video_input.cpp | 10 ++++++++-- dynamic_vino_lib/src/outputs/ros_service_output.cpp | 4 ++-- .../src/services/frame_processing_server.cpp | 11 +++++++---- .../src/services/pipeline_processing_server.cpp | 2 +- sample/src/image_object_client.cpp | 10 +++++----- sample/src/image_object_server.cpp | 2 +- sample/src/image_people_client.cpp | 3 +++ vino_launch/launch/pipeline_vehicle_detection.launch | 4 ++-- vino_launch/param/pipeline_vehicle_detection.yaml | 9 +++++---- 24 files changed, 62 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index e0056607..e6a40aba 100644 --- a/README.md +++ b/README.md @@ -89,9 +89,9 @@ Currently, the inference feature list is supported: ### Service - Object Detection Service: -```/detect_object``` 
([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) +```/detect_object``` ([object_msgs::DetectObjectSrv](https://github.com/intel/object_msgs/blob/master/srv/DetectObjectSrv.srv)) - Face Detection Service: -```/detect_face``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) +```/detect_face``` ([object_msgs::DetectObjectSrv](https://github.com/intel/object_msgs/blob/master/srv/DetectObjectSrv.srv)) - Age & Gender Detection Service: ```/detect_age_gender``` ([people_msgs::AgeGender](https://github.com/intel/ros_openvino_toolkit/blob/master/people_msgs/srv/AgeGenderSrv.srv)) - Headpose Detection Service: diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h index e300fdee..e390bb0b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h @@ -31,6 +31,11 @@ */ namespace Input { +struct Config +{ + std::string path; +}; + class BaseInputDevice : public Ros2Handler { public: @@ -57,7 +62,7 @@ class BaseInputDevice : public Ros2Handler * @return Whether the next frame is successfully read. */ virtual bool read(cv::Mat* frame) = 0; - virtual void config() = 0; //< TODO + virtual void config(const Config &) {} virtual ~BaseInputDevice() = default; /** * @brief Get the width of the frame read from input device. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h index afdbd619..a3ba36e6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h @@ -64,7 +64,8 @@ class Image : public BaseInputDevice * @return Whether the next frame is successfully read. 
*/ bool read(cv::Mat* frame) override; - void config() override; + + void config(const Config &) override; private: cv::Mat image_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h index 4ec9456d..b013693b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h @@ -60,7 +60,6 @@ class RealSenseCamera : public BaseInputDevice * @return Whether the next frame is successfully read. */ bool read(cv::Mat* frame) override; - void config() override; private: rs2::config cfg_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h index 8d5fbf9d..77af4dd6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h @@ -52,7 +52,6 @@ class RealSenseCameraTopic : public BaseInputDevice return true; }; bool read(cv::Mat* frame) override; - void config() override; private: ros::NodeHandle nh_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h index 9a43d6b1..7ba12482 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h @@ -57,7 +57,6 @@ class StandardCamera : public BaseInputDevice * @return Whether the next frame is successfully read. 
*/ bool read(cv::Mat* frame) override; - void config() override; private: cv::VideoCapture cap; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h index d1ae6f47..242b1ea7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h @@ -61,7 +61,8 @@ class Video : public BaseInputDevice * @return Whether the next frame is successfully read. */ bool read(cv::Mat* frame) override; - void config() override; + + void config(const Config &) override; private: cv::VideoCapture cap; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index afcafb54..65bd492b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -147,9 +147,9 @@ class BaseOutput void setPipeline(Pipeline* const pipeline); virtual void setServiceResponse( - boost::shared_ptr response) {} + boost::shared_ptr response) {} virtual void setServiceResponseForFace( - boost::shared_ptr response) {} + boost::shared_ptr response) {} virtual void setServiceResponse( boost::shared_ptr response) {} virtual void setServiceResponse( diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index e1cb027a..4433ce3c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include @@ -70,8 +70,8 @@ class RosServiceOutput : public RosTopicOutput void handleOutput() override {} void clearData(); - void setServiceResponse(boost::shared_ptr response); - void setResponseForFace(boost::shared_ptr 
response); + void setServiceResponse(boost::shared_ptr response); + void setResponseForFace(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h index fbc4ca64..53e30f1d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h index e79eb63a..bd638aa2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/dynamic_vino_lib/src/inputs/image_input.cpp index c2790f0e..10a69b0b 100644 --- a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/dynamic_vino_lib/src/inputs/image_input.cpp @@ -20,6 +20,7 @@ */ #include "dynamic_vino_lib/inputs/image_input.h" +#include "dynamic_vino_lib/slog.h" #include // Image @@ -55,7 +56,12 @@ bool Input::Image::read(cv::Mat* frame) return true; } -void Input::Image::config() +void Input::Image::config(const Input::Config & config) { - // TODO(weizhi): config + if (config.path != "") { + file_.assign(config.path); + initialize(); + slog::info << "Image Input device was reinitialized with new file:" << + config.path.c_str() << slog::endl; + } } diff --git 
a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/dynamic_vino_lib/src/inputs/realsense_camera.cpp index 1a0cc197..13cd7782 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera.cpp @@ -126,8 +126,4 @@ bool Input::RealSenseCamera::read(cv::Mat* frame) CV_8UC3, (void*)color_frame.get_data(), cv::Mat::AUTO_STEP) .copyTo(*frame); return true; -} -void Input::RealSenseCamera::config() -{ - // TODO(weizhi): config -} +} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp b/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp index 900ffa07..5d366882 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp @@ -66,8 +66,3 @@ bool Input::RealSenseCameraTopic::read(cv::Mat* frame) } return true; } - -void Input::RealSenseCameraTopic::config() -{ - // TODO(weizhi): config -} diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index 2ed1b864..23a8e38b 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -68,8 +68,3 @@ bool Input::StandardCamera::read(cv::Mat* frame) return cap.retrieve(*frame); } -void Input::StandardCamera::config() -{ - // TODO(weizhi): config -} - diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/dynamic_vino_lib/src/inputs/video_input.cpp index b0f6eac3..8a5fdf96 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/dynamic_vino_lib/src/inputs/video_input.cpp @@ -22,6 +22,7 @@ #include #include "dynamic_vino_lib/inputs/video_input.h" +#include "dynamic_vino_lib/slog.h" // Video Input::Video::Video(const std::string& video) @@ -60,7 +61,12 @@ bool Input::Video::read(cv::Mat* frame) return cap.retrieve(*frame); } -void Input::Video::config() +void Input::Video::config(const Input::Config & config) { - // TODO(weizhi): config + if 
(config.path != "") { + video_.assign(config.path); + initialize(); + slog::info << "Image Input device was reinitialized with new file:" << + config.path.c_str() << slog::endl; + } } diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index a47ad751..a0f9aad4 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -25,7 +25,7 @@ #include void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) + boost::shared_ptr response) { if (object_msg_ptr_ != nullptr && object_msg_ptr_->objects_vector.size() > 0) { object_msgs::ObjectsInBoxes objs; @@ -39,7 +39,7 @@ void Outputs::RosServiceOutput::setServiceResponse( } void Outputs::RosServiceOutput::setResponseForFace( - boost::shared_ptr response) + boost::shared_ptr response) { if (faces_msg_ptr_ != nullptr && faces_msg_ptr_->objects_vector.size() > 0) { object_msgs::ObjectsInBoxes objs; diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 9434a979..d55405db 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -42,12 +42,13 @@ FrameProcessingServer::FrameProcessingServer( config_path_(config_path) { nh_=std::make_shared(service_name_); - + initService(); } template void FrameProcessingServer::initService() { + std::cout << "!!!!" 
<< config_path_ << std::endl; Params::ParamManager::getInstance().parse(config_path_); Params::ParamManager::getInstance().print(); @@ -79,7 +80,9 @@ bool FrameProcessingServer::cbService( for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; auto input = p.pipeline->getInputDevice(); - + Input::Config config; + config.path = event.getRequest().image_path; + input->config(config); p.pipeline->runOnce(); auto output_handle = p.pipeline->getOutputHandle(); @@ -98,7 +101,7 @@ bool FrameProcessingServer::cbService( -template class FrameProcessingServer; +template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp index 80c378ca..1745e479 100644 --- a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp +++ b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 46229110..5245f3e1 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -24,7 +24,7 @@ #include #include "opencv2/opencv.hpp" -#include +#include @@ -42,10 +42,10 @@ int main(int argc, char ** argv) } std::string image_path = argv[1]; - ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); - - - object_msgs::DetectObject srv; + ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); + + object_msgs::DetectObjectSrv srv; + srv.request.image_path = image_path; if (client.call(srv)) { diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index f4de452f..2469eb1a 100644 --- 
a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -87,7 +87,7 @@ int main(int argc, char** argv) auto node = std::make_shared>(service_name, FLAGS_config); + >(service_name, FLAGS_config); slog::info << "Waiting for service request..." << slog::endl; ros::spin(); diff --git a/sample/src/image_people_client.cpp b/sample/src/image_people_client.cpp index 5dfb1b60..ecb25636 100644 --- a/sample/src/image_people_client.cpp +++ b/sample/src/image_people_client.cpp @@ -41,7 +41,10 @@ int main(int argc, char ** argv) ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); + std::string image_path = argv[1]; + people_msgs::PeopleSrv srv; + srv.request.image_path = image_path; if (client.call(srv)) { diff --git a/vino_launch/launch/pipeline_vehicle_detection.launch b/vino_launch/launch/pipeline_vehicle_detection.launch index c5127078..7011a7db 100644 --- a/vino_launch/launch/pipeline_vehicle_detection.launch +++ b/vino_launch/launch/pipeline_vehicle_detection.launch @@ -7,7 +7,7 @@ - + diff --git a/vino_launch/param/pipeline_vehicle_detection.yaml b/vino_launch/param/pipeline_vehicle_detection.yaml index 28732fb4..81c80193 100644 --- a/vino_launch/param/pipeline_vehicle_detection.yaml +++ b/vino_launch/param/pipeline_vehicle_detection.yaml @@ -1,11 +1,12 @@ Pipelines: - name: object - inputs: [StandardCamera] + inputs: [RealSenseCamera] infers: - name: ObjectDetection model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: /opt/intel/openvino/deployment_tools/tools/model_downloader/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels + model_type: SSD + label: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/Security/object_detection/barrier/0106/dldt/vehicle-license-plate-detection-barrier-0106.labels batch: 1 
enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection @@ -20,7 +21,7 @@ Pipelines: batch: 1 outputs: [ImageWindow, RViz, RosTopic] connects: - - left: StandardCamera + - left: RealSenseCamera right: [ObjectDetection] - left: ObjectDetection right: [{VehicleAttribsDetection: label == vehicle && confidence >= 0.8}, {LicensePlateDetection: label == license && confidence >= 0.8}] @@ -31,4 +32,4 @@ Pipelines: - left: LicensePlateDetection right: [ImageWindow, RosTopic, RViz] -OpenvinoCommon: +OpenvinoCommon: \ No newline at end of file From ba16b57e751e0ed6ebfa9b5c24864c5a0f2101cd Mon Sep 17 00:00:00 2001 From: congliu0913 Date: Wed, 30 Dec 2020 14:41:58 +0800 Subject: [PATCH 26/63] enable github action --- .github/workflows/dev-ov_2020-3.yml | 72 +++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/workflows/dev-ov_2020-3.yml diff --git a/.github/workflows/dev-ov_2020-3.yml b/.github/workflows/dev-ov_2020-3.yml new file mode 100644 index 00000000..a8fbd5f6 --- /dev/null +++ b/.github/workflows/dev-ov_2020-3.yml @@ -0,0 +1,72 @@ +name: dev-ov.2020.3-CI + +# Controls when the action will run. 
Triggers the workflow on push or pull request +# events but only for the dev-ov2020.3 branch +on: + push: + branches: [ dev-ov2020.3 ] + pull_request: + branches: [ dev-ov2020.3 ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-18.04 + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + # install ros melodic + - uses: ros-tooling/setup-ros@0.1.1 + with: + required-ros-distributions: melodic + - run: "source /opt/ros/melodic/setup.bash" + + # install openvino 2020.3 + - name: install openvino 2020.3 + run: | + sudo apt update && sudo apt install curl gnupg2 lsb-release + curl -s https://apt.repos.intel.com/openvino/2020/GPG-PUB-KEY-INTEL-OPENVINO-2020 |sudo apt-key add - + echo "deb https://apt.repos.intel.com/openvino/2020 all main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2020.list + sudo apt update + sudo apt-cache search openvino + sudo apt-get install -y \ + intel-openvino-runtime-ubuntu18-2020.3.341 \ + intel-openvino-ie-samples-2020.3.341 \ + intel-openvino-omz-dev-2020.3.341 \ + intel-openvino-omz-tools-2020.3.341 \ + intel-openvino-gstreamer-rt-ubuntu-bionic-2020.3.341 \ + intel-openvino-gva-dev-ubuntu-bionic-2020.3.341 \ + intel-openvino-gva-rt-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-bin-python-tools-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-core-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-cpu-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-gna-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-gpu-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-hddl-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-rt-vpu-ubuntu-bionic-2020.3.341 \ + intel-openvino-ie-sdk-ubuntu-bionic-2020.3.341 \ + 
intel-openvino-opencv-lib-ubuntu-bionic-2020.3.341 + sudo apt-get install -y libgflags-dev + ls -lh /opt/intel/openvino + source /opt/intel/openvino/bin/setupvars.sh + + # build ros openvino toolkit + - name: build ros openvino toolkit + run: | + mkdir -p ~/catkin_ws/src + cp -rf ${GITHUB_WORKSPACE} ~/catkin_ws/src + cd ~/catkin_ws/src + git clone https://github.com/intel/object_msgs.git + cd ~/catkin_ws/ + source /opt/ros/melodic/setup.bash + source /opt/intel/openvino/bin/setupvars.sh + export CPU_EXTENSION_LIB+=/opt/intel/openvino_2020.3.341/deployment_tools/inference_engine/lib/intel64/libinference_engine.so + export GFLAGS_LIB+=/usr/lib/x86_64-linux-gnu/libgflags_nothreads.a + env + catkin_make + From adf10adbf3c3fca49244c766bc270a5a601a1f9a Mon Sep 17 00:00:00 2001 From: lewisliu Date: Wed, 30 Dec 2020 15:06:28 +0800 Subject: [PATCH 27/63] update inputs headers to align with ROS2 code and openvino.2020.3 Signed-off-by: lewisliu --- .../dynamic_vino_lib/inputs/base_input.h | 33 ++++--------------- .../dynamic_vino_lib/inputs/image_input.h | 11 +------ .../inputs/realsense_camera.h | 12 ++----- .../inputs/realsense_camera_topic.h | 25 ++------------ .../dynamic_vino_lib/inputs/standard_camera.h | 17 +++++----- .../dynamic_vino_lib/inputs/video_input.h | 11 ------- 6 files changed, 23 insertions(+), 86 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h index e390bb0b..527d3ad2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h @@ -46,12 +46,6 @@ class BaseInputDevice : public Ros2Handler * @return Whether the input device is successfully turned on. */ virtual bool initialize() = 0; - /** - * @brief (Only work for standard camera) - * Initialize camera by its index when multiple standard camera is connected. - * @return Whether the input device is successfully turned on. 
- */ - virtual bool initialize(int) = 0; /** * @brief Initialize the input device with given width and height. * @return Whether the input device is successfully turned on. @@ -61,7 +55,11 @@ class BaseInputDevice : public Ros2Handler * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - virtual bool read(cv::Mat* frame) = 0; + virtual bool read(cv::Mat * frame) = 0; + virtual bool readService(cv::Mat * frame, std::string config_path) + { + return true; + } virtual void config(const Config &) {} virtual ~BaseInputDevice() = default; /** @@ -112,28 +110,11 @@ class BaseInputDevice : public Ros2Handler { is_init_ = is_init; } - /** - * @brief Set the frame_id of input device for ROSTopic outputs. - * @param[in] frame_id The frame_id of input device. - */ - inline void setFrameID(std::string frame_id) - { - frame_id_ = frame_id; - } - /** - * @brief Get the frame_id of input device. - * @return Frame_id of input device. - */ - inline std::string getFrameID() - { - return frame_id_; - } private: - size_t width_ = 0; - size_t height_ = 0; + size_t width_ = 0; // 0 means using the original size + size_t height_ = 0; // 0 means using the original size bool is_init_ = false; - std::string frame_id_; }; } // namespace Input #endif // DYNAMIC_VINO_LIB_INPUTS_BASE_INPUT_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h index a3ba36e6..03558a9e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h @@ -41,15 +41,6 @@ class Image : public BaseInputDevice * @return Whether the input device is successfully turned on. */ bool initialize() override; - /** - * @brief (Only work for standard camera) - * No implementation for Image class. - * @return Whether the input device is successfully turned on. 
- */ - bool initialize(int t) override - { - return initialize(); - }; /** * @brief Initialize the input device with given width and height. * No implementation for Image class. @@ -58,7 +49,7 @@ class Image : public BaseInputDevice bool initialize(size_t width, size_t height) override { return initialize(); - }; + } /** * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h index b013693b..b2391543 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h @@ -41,15 +41,6 @@ class RealSenseCamera : public BaseInputDevice * @return Whether the input device is successfully turned on. */ bool initialize() override; - /** - * @brief (Only work for standard camera) - * Initialize camera by its index when multiple standard camera is connected. - * @return Whether the input device is successfully turned on. - */ - bool initialize(int t) override - { - return true; - }; /** * @brief Initialize the input device with given width and height. * @return Whether the input device is successfully turned on. @@ -62,6 +53,9 @@ class RealSenseCamera : public BaseInputDevice bool read(cv::Mat* frame) override; private: + void bypassFewFramesOnceInited(); + std::string getCameraSN(); + rs2::config cfg_; rs2::pipeline pipe_; bool first_read_ = true; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h index 77af4dd6..e5d8ad0d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h @@ -35,32 +35,13 @@ namespace Input { /** + * DEPRECATED! 
+ * Using the new class ImageTopic to handle all image topics. * @class RealSenseCameraTopic * @brief Class for recieving a realsense camera topic as input. */ -class RealSenseCameraTopic : public BaseInputDevice -{ - public: - RealSenseCameraTopic(); - bool initialize() override; - bool initialize(int t) override - { - return true; - }; - bool initialize(size_t width, size_t height) override - { - return true; - }; - bool read(cv::Mat* frame) override; - - private: - ros::NodeHandle nh_; - image_transport::Subscriber sub_; - cv::Mat image; - cv::Mat last_image; +typedef ImageTopic RealSenseCameraTopic; - void cb(const sensor_msgs::ImageConstPtr& image_msg); -}; } // namespace Input #endif // DYNAMIC_VINO_LIB_INPUTS_REALSENSE_CAMERA_TOPIC_H_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h index 7ba12482..5c69eb5e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h @@ -23,7 +23,13 @@ #define DYNAMIC_VINO_LIB_INPUTS_STANDARD_CAMERA_H #include -#include "dynamic_vino_lib/inputs/base_input.h" + +#include "dynamic_vino_lib/inputs/base_input.hpp" +#include +#include +#include +#include +#include namespace Input { @@ -41,12 +47,6 @@ class StandardCamera : public BaseInputDevice * @return Whether the input device is successfully turned on. */ bool initialize() override; - /** - * @brief (Only work for standard camera) - * Initialize camera by its index when multiple standard camera is connected. - * @return Whether the input device is successfully turned on. - */ - bool initialize(int t) override; /** * @brief Initialize the input device with given width and height. * @return Whether the input device is successfully turned on. 
@@ -59,8 +59,9 @@ class StandardCamera : public BaseInputDevice bool read(cv::Mat* frame) override; private: + int getCameraId(); cv::VideoCapture cap; - static int camera_count_; + int camera_id_ = -1; }; } // namespace Input #endif // DYNAMIC_VINO_LIB_INPUTS_STANDARD_CAMERA_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h index 242b1ea7..e70337a3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h @@ -41,15 +41,6 @@ class Video : public BaseInputDevice * @return Whether the input device is successfully turned on. */ bool initialize() override; - /** - * @brief (Only work for standard camera) - * No implementation for Video class. - * @return Whether the input device is successfully turned on. - */ - bool initialize(int t) override - { - return initialize(); - }; /** * @brief Initialize the input device with given width and height. * No implementation for Video class. @@ -61,8 +52,6 @@ class Video : public BaseInputDevice * @return Whether the next frame is successfully read. 
*/ bool read(cv::Mat* frame) override; - - void config(const Config &) override; private: cv::VideoCapture cap; From ea0ccec5cfd42f52ace3070a20a7735e5486618f Mon Sep 17 00:00:00 2001 From: lewisliu Date: Wed, 30 Dec 2020 15:07:48 +0800 Subject: [PATCH 28/63] update enginers headers to align with ROS2 and ov.2020.3 Signed-off-by: lewisliu --- .../include/dynamic_vino_lib/engines/engine.h | 8 ++++++-- .../include/dynamic_vino_lib/engines/engine_manager.h | 7 ++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h index 66a82361..0faff891 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h @@ -35,12 +35,16 @@ namespace Engines { class Engine { - public: +public: +#if(defined(USE_OLD_E_PLUGIN_API)) /** + * DEPRECATED! instead of using Engine(InferenceEngine::InferRequest::Ptr &) * @brief Create an NetworkEngine instance * from a inference plugin and an inference network. */ Engine(InferenceEngine::InferencePlugin, Models::BaseModel::Ptr); +#endif + /** * @brief Using an Inference Request to initialize the inference Engine. 
*/ @@ -65,7 +69,7 @@ class Engine } private: - InferenceEngine::InferRequest::Ptr request_; + InferenceEngine::InferRequest::Ptr request_ = nullptr; }; } // namespace Engines diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h index fb923db1..6d1cb9b7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h @@ -14,7 +14,7 @@ /** * @brief A header file with declaration for NetworkEngine class - * @file engine.h + * @file engine_manager.h */ #ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ #define DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ @@ -42,6 +42,7 @@ class EngineManager const std::string &, const std::shared_ptr &); private: +#if(defined(USE_OLD_E_PLUGIN_API)) std::map plugins_for_devices_; std::unique_ptr @@ -51,11 +52,11 @@ class EngineManager std::shared_ptr createEngine_beforeV2019R2( const std::string &, const std::shared_ptr &); +#endif -#if(defined(USE_IE_CORE)) std::shared_ptr createEngine_V2019R2_plus( const std::string &, const std::shared_ptr &); -#endif + }; } // namespace Engines From 9e9434703708132481b6bda906e1994ce9bf0a4b Mon Sep 17 00:00:00 2001 From: lewisliu Date: Wed, 30 Dec 2020 15:16:08 +0800 Subject: [PATCH 29/63] update engine code to align with ROS2 and ov2020.3 Signed-off-by: lewisliu --- dynamic_vino_lib/src/engines/engine.cpp | 7 ++-- .../src/engines/engine_manager.cpp | 35 ++++++++++--------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/dynamic_vino_lib/src/engines/engine.cpp index 262953ee..2f1eaf29 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/dynamic_vino_lib/src/engines/engine.cpp @@ -20,12 +20,15 @@ */ #include "dynamic_vino_lib/engines/engine.h" -Engines::Engine::Engine(InferenceEngine::InferencePlugin plg, - const Models::BaseModel::Ptr base_model) 
+#if(defined(USE_OLD_E_PLUGIN_API)) +Engines::Engine::Engine( + InferenceEngine::InferencePlugin plg, + const Models::BaseModel::Ptr base_model) { request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})) .CreateInferRequestPtr(); } +#endif Engines::Engine::Engine( InferenceEngine::InferRequest::Ptr & request) diff --git a/dynamic_vino_lib/src/engines/engine_manager.cpp b/dynamic_vino_lib/src/engines/engine_manager.cpp index 891bc7af..27b6ce9c 100644 --- a/dynamic_vino_lib/src/engines/engine_manager.cpp +++ b/dynamic_vino_lib/src/engines/engine_manager.cpp @@ -23,18 +23,31 @@ #include "dynamic_vino_lib/utils/version_info.hpp" #include #include +#if(defined(USE_OLD_E_PLUGIN_API)) #include +#endif std::shared_ptr Engines::EngineManager::createEngine( const std::string & device, const std::shared_ptr & model) { -#if(defined(USE_IE_CORE)) - return createEngine_V2019R2_plus(device, model); -#else +#if(defined(USE_OLD_E_PLUGIN_API)) return createEngine_beforeV2019R2(device, model); +#else + return createEngine_V2019R2_plus(device, model); #endif } +std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( + const std::string & device, const std::shared_ptr & model) +{ + InferenceEngine::Core core; + auto executable_network = core.LoadNetwork(model->getNetReader()->getNetwork(), device); + auto request = executable_network.CreateInferRequestPtr(); + + return std::make_shared(request); +} + +#if(defined(USE_OLD_E_PLUGIN_API)) std::shared_ptr Engines::EngineManager::createEngine_beforeV2019R2( const std::string & device, const std::shared_ptr & model) { @@ -52,18 +65,6 @@ std::shared_ptr Engines::EngineManager::createEngine_beforeV201 return std::make_shared(request); } -#if(defined(USE_IE_CORE)) -std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( - const std::string & device, const std::shared_ptr & model) -{ - InferenceEngine::Core core; - auto executable_network = core.LoadNetwork(model->getNetReader()->getNetwork(), device); - 
auto request = executable_network.CreateInferRequestPtr(); - - return std::make_shared(request); -} -#endif - std::unique_ptr Engines::EngineManager::makePluginByName( const std::string & device_name, const std::string & custom_cpu_library_message, @@ -103,5 +104,7 @@ Engines::EngineManager::makePluginByName( InferenceEngine::PluginConfigParams::YES}}); } - return std::unique_ptr (new InferenceEngine::InferencePlugin(plugin)); + return std::make_unique( + InferenceEngine::InferenceEnginePluginPtr(plugin)); } +#endif From fcfc641781119989cee197f3cd18f15d7976ddf4 Mon Sep 17 00:00:00 2001 From: lewisliu Date: Thu, 31 Dec 2020 08:22:53 +0800 Subject: [PATCH 30/63] update inputs code to align with ROS2 and ov2020.3 Signed-off-by: lewisliu --- dynamic_vino_lib/CMakeLists.txt | 2 +- dynamic_vino_lib/src/inputs/image_input.cpp | 9 +- dynamic_vino_lib/src/inputs/image_topic.cpp | 83 +++++++++++ .../src/inputs/realsense_camera.cpp | 139 +++++++----------- .../src/inputs/realsense_camera_topic.cpp | 68 --------- .../src/inputs/standard_camera.cpp | 61 ++++---- dynamic_vino_lib/src/inputs/video_input.cpp | 17 +-- 7 files changed, 179 insertions(+), 200 deletions(-) create mode 100644 dynamic_vino_lib/src/inputs/image_topic.cpp delete mode 100644 dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 26969100..18324837 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -165,7 +165,7 @@ add_library(${PROJECT_NAME} SHARED src/inferences/vehicle_attribs_detection.cpp src/inferences/license_plate_detection.cpp src/inputs/realsense_camera.cpp - src/inputs/realsense_camera_topic.cpp + src/inputs/image_topic.cpp src/inputs/standard_camera.cpp src/inputs/video_input.cpp src/inputs/image_input.cpp diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/dynamic_vino_lib/src/inputs/image_input.cpp index 10a69b0b..72bc8c38 100644 --- 
a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/dynamic_vino_lib/src/inputs/image_input.cpp @@ -31,16 +31,12 @@ Input::Image::Image(const std::string& file) bool Input::Image::initialize() { - setFrameID("image_frame"); image_ = cv::imread(file_); - if (image_.data != NULL) - { + if (image_.data != NULL) { setInitStatus(true); setWidth((size_t)image_.cols); setHeight((size_t)image_.rows); - } - else - { + } else { setInitStatus(false); } return isInit(); @@ -53,6 +49,7 @@ bool Input::Image::read(cv::Mat* frame) return false; } *frame = image_; + setHeader("image_frame"); return true; } diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/dynamic_vino_lib/src/inputs/image_topic.cpp new file mode 100644 index 00000000..f1fddf68 --- /dev/null +++ b/dynamic_vino_lib/src/inputs/image_topic.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2018 Intel Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @brief a header file with declaration of ImageTopic class + * @file image_topic.cpp + */ + +#include "dynamic_vino_lib/inputs/realsense_camera_topic.h" +//#include +#include "dynamic_vino_lib/slog.h" +#include +#include + +#define INPUT_TOPIC "/camera/color/image_raw" + + +Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node) +: node_(node) +{ +} + +bool Input::ImageTopic::initialize() +{ + slog::debug << "before Image Topic init" << slog::endl; + + if(node_ == nullptr){ + throw std::runtime_error("Image Topic is not instancialized because of no parent node."); + return false; + } + auto qos = rclcpp::QoS(rclcpp::KeepLast(1)).best_effort(); + sub_ = node_->create_subscription( + INPUT_TOPIC, qos, + std::bind(&ImageTopic::cb, this, std::placeholders::_1)); + + return true; +} + + bool Input::ImageTopic::initialize(size_t width, size_t height) + { + slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)" + << " for Image Topic" << slog::endl; + return initialize(); + } + +void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) +{ + slog::debug << "Receiving a new image from Camera topic." << slog::endl; + setHeader(image_msg->header); + + image_ = cv_bridge::toCvCopy(image_msg, "bgr8")->image; + //Suppose Image Topic is sent within BGR order, so the below line would work. 
+ //image_ = cv::Mat(image_msg->height, image_msg->width, CV_8UC3, + // const_cast(&image_msg->data[0]), image_msg->step); + + image_count_.increaseCounter(); +} + +bool Input::ImageTopic::read(cv::Mat * frame) +{ + if (image_count_.get() < 0 || image_.empty()) { + slog::debug << "No data received in CameraTopic instance" << slog::endl; + return false; + } + + *frame = image_; + lockHeader(); + image_count_.decreaseCounter(); + return true; +} diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/dynamic_vino_lib/src/inputs/realsense_camera.cpp index 13cd7782..1685da67 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera.cpp @@ -24,106 +24,79 @@ // RealSenseCamera bool Input::RealSenseCamera::initialize() { - static int rscamera_count = 0; - // Get all devices connected - rs2::context cxt; - auto device = cxt.query_devices(); - size_t device_count = device.size(); - slog::info << "Find RealSense num:"<< device_count << slog::endl; - auto hardware = device[rscamera_count]; - auto devSerialNumber = hardware.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER); - //std::cout << "Camera " << rscamera_count << ": " << hardware.get_info(RS2_CAMERA_INFO_NAME) << std::endl; - slog::info << "RealSense Serial number : " << devSerialNumber << slog::endl; - cfg_.enable_device(devSerialNumber); - cfg_.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8, 30); - setInitStatus(pipe_.start(cfg_)); - setWidth(640); - setHeight(480); - if (!isInit()) - { - return false; - } - if (first_read_) - { - rs2::frameset frames; - for (int i = 0; i < 30; i++) - { - // Wait for all configured streams to produce a frame - try - { - frames = pipe_.wait_for_frames(); - } - catch (...) 
- { - return false; - } - } - first_read_ = false; - } - rscamera_count ++; - - return true; + return initialize(640, 480); } bool Input::RealSenseCamera::initialize(size_t width, size_t height) { - static int rscamera_count = 0; - if (3 * width != 4 * height) - { - slog::err << "The aspect ratio must be 4:3 when using RealSense camera" - << slog::endl; + if (3 * width != 4 * height) { + slog::err << "The aspect ratio must be 4:3 when using RealSense camera" << slog::endl; + throw std::runtime_error("The aspect ratio must be 4:3 when using RealSense camera!"); return false; } - cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), - static_cast(height), RS2_FORMAT_BGR8, 30); + + auto devSerialNumber = getCameraSN(); + slog::info << "RealSense Serial number : " << devSerialNumber << slog::endl; + + cfg_.enable_device(devSerialNumber); + cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), static_cast(height), + RS2_FORMAT_BGR8, 30); + setInitStatus(pipe_.start(cfg_)); setWidth(width); setHeight(height); - if (!isInit()) - { - return false; - } - if (first_read_) - { - rs2::frameset frames; - for (int i = 0; i < 30; i++) - { - // Wait for all configured streams to produce a frame - try - { - frames = pipe_.wait_for_frames(); - } - catch (...) - { - return false; - } - } - first_read_ = false; - } - rscamera_count ++; - return true; + //bypass RealSense's bug: several captured frames after HW is inited are with wrong data. + bypassFewFramesOnceInited(); + + return isInit(); } bool Input::RealSenseCamera::read(cv::Mat* frame) { - if (!isInit()) - { + if (!isInit()) { return false; } - rs2::frameset data = - pipe_.wait_for_frames(); // Wait for next set of frames from the camera - rs2::frame color_frame; - try - { + + try { + rs2::frameset data = pipe_.wait_for_frames(); // Wait for next set of frames from the camera + rs2::frame color_frame; color_frame = data.get_color_frame(); - } - catch (...) 
- { + + cv::Mat(cv::Size(static_cast(getWidth()), static_cast(getHeight())), CV_8UC3, + const_cast(color_frame.get_data()), cv::Mat::AUTO_STEP) + .copyTo(*frame); + } catch (...) { return false; } - cv::Mat(cv::Size(static_cast(getWidth()), static_cast(getHeight())), - CV_8UC3, (void*)color_frame.get_data(), cv::Mat::AUTO_STEP) - .copyTo(*frame); + + setHeader("realsense_camera_frame"); return true; -} \ No newline at end of file +} + +std::string Input::RealSenseCamera::getCameraSN() +{ + static int rscamera_count = 0; + // Get all devices connected + rs2::context cxt; + auto device = cxt.query_devices(); + size_t device_count = device.size(); + slog::info << "Find RealSense num:" << device_count << slog::endl; + auto hardware = device[rscamera_count]; + auto devSerialNumber = hardware.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER); + rscamera_count++; + return devSerialNumber; +} + +void Input::RealSenseCamera::bypassFewFramesOnceInited() +{ + if(!isInit() || !first_read_){ + return; + } + + rs2::frameset frames; + for (int i = 0; i < 30; i++) { + frames = pipe_.wait_for_frames(); + } + first_read_ = false; +} diff --git a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp b/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp deleted file mode 100644 index 5d366882..00000000 --- a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @brief a header file with declaration of RealSenseCamera class - * @file realsense_camera_topic.cpp - */ - -#include "dynamic_vino_lib/inputs/realsense_camera_topic.h" -#include -#include "dynamic_vino_lib/slog.h" - -#include - -#define INPUT_TOPIC "/camera/color/image_raw" - -Input::RealSenseCameraTopic::RealSenseCameraTopic() -{ -} - -bool Input::RealSenseCameraTopic::initialize() -{ - slog::info << "before cameraTOpic init" << slog::endl; - - std::shared_ptr it = - std::make_shared(nh_); - sub_ = it->subscribe("/camera/color/image_raw", 1, &RealSenseCameraTopic::cb, - this); - return true; -} - -void Input::RealSenseCameraTopic::cb( - const sensor_msgs::ImageConstPtr& image_msg) -{ - image = cv_bridge::toCvCopy(image_msg, "bgr8")->image; -} - -bool Input::RealSenseCameraTopic::read(cv::Mat* frame) -{ - ros::spinOnce(); - //nothing in topics from begining - if (image.empty() && last_image.empty()) - { - slog::warn << "No data received in CameraTopic instance" << slog::endl; - return false; - } - if(image.empty()) { - *frame = last_image; - } - else - { - *frame = image; - } - return true; -} diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index 23a8e38b..3bee9c8a 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -23,48 +23,53 @@ // StandardCamera bool Input::StandardCamera::initialize() { - static int camera_count_ = 0; - setInitStatus(cap.open(camera_count_)); - setWidth((size_t)cap.get(cv::CAP_PROP_FRAME_WIDTH)); - setHeight((size_t)cap.get(cv::CAP_PROP_FRAME_HEIGHT)); - camera_count_ ++; - return isInit(); -} - -bool Input::StandardCamera::initialize(int camera_num) -{ - static int camera_count_ = 0; - setInitStatus(cap.open(camera_num)); - setWidth((size_t)cap.get(cv::CAP_PROP_FRAME_WIDTH)); - 
setHeight((size_t)cap.get(cv::CAP_PROP_FRAME_HEIGHT)); - - camera_count_ ++; - return isInit(); + return initialize(640, 480); } bool Input::StandardCamera::initialize(size_t width, size_t height) { - static int camera_count_ = 0; + auto id = getCameraId(); + setInitStatus(cap.open(id)); + cap.set(cv::CAP_PROP_FRAME_WIDTH, width); + cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); setWidth(width); setHeight(height); - setInitStatus(cap.open(camera_count_)); - if (isInit()) - { - cap.set(cv::CAP_PROP_FRAME_WIDTH, width); - cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); - } - - camera_count_ ++; + return isInit(); } bool Input::StandardCamera::read(cv::Mat* frame) { - if (!isInit()) - { + if (!isInit()) { return false; } cap.grab(); + setHeader("standard_camera_frame"); return cap.retrieve(*frame); } +int Input::StandardCamera::getCameraId() +{ + // In case this function is invoked more than once. + if (camera_id_ >= 0){ + return camera_id_; + } + + static int STANDARD_CAMERA_COUNT = -1; + int fd; // A file descriptor to the video device + struct v4l2_capability cap; + char file[20]; + //if it is a realsense camera then skip it until we meet a standard camera + do + { + STANDARD_CAMERA_COUNT ++; + sprintf(file,"/dev/video%d",STANDARD_CAMERA_COUNT);//format filename + fd = open(file,O_RDWR); + ioctl(fd, VIDIOC_QUERYCAP, &cap); + close(fd); + std::cout << "!!camera: "<< cap.card << std::endl; + }while(!strcmp((char*)cap.card,"Intel(R) RealSense(TM) Depth Ca")); + + camera_id_ = STANDARD_CAMERA_COUNT; + return STANDARD_CAMERA_COUNT; +} diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/dynamic_vino_lib/src/inputs/video_input.cpp index 8a5fdf96..392f752f 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/dynamic_vino_lib/src/inputs/video_input.cpp @@ -43,8 +43,7 @@ bool Input::Video::initialize(size_t width, size_t height) setWidth(width); setHeight(height); setInitStatus(cap.open(video_)); - if (isInit()) - { + if (isInit()) { 
cap.set(cv::CAP_PROP_FRAME_WIDTH, width); cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); } @@ -53,20 +52,10 @@ bool Input::Video::initialize(size_t width, size_t height) bool Input::Video::read(cv::Mat* frame) { - if (!isInit()) - { + if (!isInit()) { return false; } cap.grab(); + setHeader("video_frame"); return cap.retrieve(*frame); } - -void Input::Video::config(const Input::Config & config) -{ - if (config.path != "") { - video_.assign(config.path); - initialize(); - slog::info << "Image Input device was reinitialized with new file:" << - config.path.c_str() << slog::endl; - } -} From 59229e28173e526f37d1667aff813241a056cbb7 Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Sat, 2 Jan 2021 21:02:30 +0800 Subject: [PATCH 31/63] update cmake minimum version to v3.0.2 Signed-off-by: Lewis Liu --- dynamic_vino_lib/CMakeLists.txt | 2 +- people_msgs/CMakeLists.txt | 2 +- sample/CMakeLists.txt | 2 +- vino_param_lib/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 18324837..2983827d 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cmake_minimum_required (VERSION 2.8.3) +cmake_minimum_required (VERSION 3.0.2) project(dynamic_vino_lib) diff --git a/people_msgs/CMakeLists.txt b/people_msgs/CMakeLists.txt index ce32bf4b..d56c92d7 100644 --- a/people_msgs/CMakeLists.txt +++ b/people_msgs/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.0.2) project(people_msgs) diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index cabe72b0..e3c74d82 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.0.2) project(dynamic_vino_sample) diff --git a/vino_param_lib/CMakeLists.txt b/vino_param_lib/CMakeLists.txt index a0c5c2d8..bcda4545 100644 --- a/vino_param_lib/CMakeLists.txt +++ b/vino_param_lib/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.0.2) project(vino_param_lib) find_package(catkin REQUIRED COMPONENTS From f5583830accb6c126a1e28083d3706195c34a359 Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Mon, 4 Jan 2021 10:28:46 +0800 Subject: [PATCH 32/63] remove the dependency to ENV MACROs for sameple package. 
Signed-off-by: Lewis Liu --- sample/CMakeLists.txt | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index e3c74d82..05a80a00 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -79,21 +79,21 @@ else() return() endif() -if (NOT DEFINED ENV{CPU_EXTENSION_LIB}) - message(FATAL_ERROR "Please set ENV CPU_EXTENSION_LIB with 'export CPU_EXTENSION_LIB='") -endif() -set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) -add_library(cpu_extension SHARED IMPORTED) -set_target_properties(cpu_extension PROPERTIES - IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) - -if (NOT DEFINED ENV{GFLAGS_LIB}) - message(FATAL_ERROR "Please set ENV GFLAGS_LIB with 'export GFLAGS_LIB='") -endif() -set (Gflags_lib $ENV{GFLAGS_LIB}) -add_library(gflags STATIC IMPORTED) -set_target_properties(gflags PROPERTIES - IMPORTED_LOCATION $ENV{GFLAGS_LIB}) +#if (NOT DEFINED ENV{CPU_EXTENSION_LIB}) +# message(FATAL_ERROR "Please set ENV CPU_EXTENSION_LIB with 'export CPU_EXTENSION_LIB='") +#endif() +#set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) +#add_library(cpu_extension SHARED IMPORTED) +#set_target_properties(cpu_extension PROPERTIES +# IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) + +#if (NOT DEFINED ENV{GFLAGS_LIB}) +# message(FATAL_ERROR "Please set ENV GFLAGS_LIB with 'export GFLAGS_LIB='") +#endif() +#set (Gflags_lib $ENV{GFLAGS_LIB}) +#add_library(gflags STATIC IMPORTED) +#set_target_properties(gflags PROPERTIES +# IMPORTED_LOCATION $ENV{GFLAGS_LIB}) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") From c49c150de0308504a76d486917941255c3d3aaac Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Mon, 4 Jan 2021 10:29:31 +0800 Subject: [PATCH 33/63] aligin inference code structure with ROS2 and OV2020.3 Signed-off-by: Lewis Liu --- dynamic_vino_lib/CMakeLists.txt | 16 +- .../dynamic_vino_lib/inferences/base_filter.h | 15 +- .../inferences/base_inference.h | 14 +- .../inferences/emotions_detection.h | 4 + 
.../inferences/face_detection.h | 89 +---- .../inferences/head_pose_detection.h | 4 + .../inferences/inference_manager.h | 4 +- .../inferences/object_detection.h | 118 +++++- .../inferences/object_detection_ssd.h | 113 ------ .../inferences/object_detection_yolov2_voc.h | 117 ------ .../inferences/object_segmentation.h | 10 +- .../inferences/person_attribs_detection.h | 15 +- .../inferences/person_reidentification.h | 16 +- .../src/inferences/age_gender_detection.cpp | 4 +- .../src/inferences/base_filter.cpp | 8 +- .../src/inferences/base_inference.cpp | 9 + .../src/inferences/emotions_detection.cpp | 4 +- .../src/inferences/face_detection.cpp | 130 +------ .../src/inferences/face_reidentification.cpp | 2 +- .../src/inferences/head_pose_detection.cpp | 4 +- .../src/inferences/landmarks_detection.cpp | 2 +- .../inferences/license_plate_detection.cpp | 2 +- .../src/inferences/object_detection.cpp | 138 ++++++- .../src/inferences/object_detection_ssd.cpp | 192 ---------- .../object_detection_yolov2_voc.cpp | 344 ------------------ .../src/inferences/object_segmentation.cpp | 110 +++--- .../inferences/person_attribs_detection.cpp | 46 ++- .../inferences/person_reidentification.cpp | 60 +-- .../inferences/vehicle_attribs_detection.cpp | 8 +- 29 files changed, 445 insertions(+), 1153 deletions(-) delete mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h delete mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h delete mode 100644 dynamic_vino_lib/src/inferences/object_detection_ssd.cpp delete mode 100644 dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 2983827d..f5b428ec 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -72,13 +72,13 @@ include_directories ( ${CMAKE_CURRENT_SOURCE_DIR}/../vino_param_lib/include ) -if (NOT DEFINED ENV{CPU_EXTENSION_LIB}) - 
message(FATAL_ERROR "Please set ENV CPU_EXTENSION_LIB with 'export CPU_EXTENSION_LIB='") -endif() -set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) -add_library(cpu_extension SHARED IMPORTED) -set_target_properties(cpu_extension PROPERTIES - IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) +#if (NOT DEFINED ENV{CPU_EXTENSION_LIB}) +# message(FATAL_ERROR "Please set ENV CPU_EXTENSION_LIB with 'export CPU_EXTENSION_LIB='") +#endif() +#set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) +#add_library(cpu_extension SHARED IMPORTED) +#set_target_properties(cpu_extension PROPERTIES +# IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) # Flags if(UNIX OR APPLE) @@ -154,8 +154,6 @@ add_library(${PROJECT_NAME} SHARED src/inferences/face_detection.cpp src/inferences/head_pose_detection.cpp src/inferences/object_detection.cpp - src/inferences/object_detection_ssd.cpp - src/inferences/object_detection_yolov2_voc.cpp src/inferences/object_segmentation.cpp src/inferences/person_reidentification.cpp src/inferences/face_reidentification.cpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h index 4c597c62..1695b9f7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h @@ -41,6 +41,19 @@ class BaseFilter */ virtual void init() = 0; + /** + * @brief Get the filtered results' ROIs. + * @return The filtered ROIs. + */ + virtual std::vector getFilteredLocations() = 0; + + /** + * @brief Check if the filter conditions is valid for filtering. + * @param[in] Filter conditions. + * @return true if some of the conditions are valid, otherwise false. + */ + bool isValidFilterConditions(const std::string &); + /** * @brief Accept the filter conditions for filtering. * @param[in] Filter conditions. 
@@ -181,4 +194,4 @@ class BaseFilter }; } // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index f194aa89..2fcbd3bb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -23,14 +23,13 @@ #include #include +#include #include "dynamic_vino_lib/engines/engine.h" +#include "dynamic_vino_lib/models/base_model.h" #include "dynamic_vino_lib/slog.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" -#include "dynamic_vino_lib/models/object_detection_ssd_model.h" -#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" - namespace Outputs { @@ -179,6 +178,8 @@ class BaseInference virtual const std::vector getFilteredROIs( const std::string filter_conditions) const = 0; + void addCandidatedModel(std::shared_ptr model); + protected: /** * @brief Enqueue the fram into the input blob of the target calculation @@ -202,13 +203,14 @@ class BaseInference } std::vector results_; - int enqueued_frames_ = 0; - + protected: std::shared_ptr engine_ = nullptr; + std::vector > candidated_models_; int max_batch_size_ = 1; + int enqueued_frames_ = 0; bool results_fetched_ = false; }; } // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h index f320d415..dbaf592f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h @@ -118,6 +118,10 @@ class EmotionsDetection : public BaseInference void observeOutput( const std::shared_ptr& output) override; + std::vector getResults() + { + return results_; + } const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h index a19a2a6d..f1e484ea 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h @@ -43,97 +43,20 @@ namespace dynamic_vino_lib * @class FaceDetectionResult * @brief Class for storing and processing face detection result. */ -class FaceDetectionResult : public Result +class FaceDetectionResult : public ObjectDetectionResult { - public: - friend class FaceDetection; - explicit FaceDetectionResult(const cv::Rect& location); - std::string getLabel() const - { - return label_; - } - /** - * @brief Get the confidence that the detected area is a face. - * @return The confidence value. - */ - float getConfidence() const - { - return confidence_; - } - - private: - std::string label_ = ""; - float confidence_ = -1; +public: + explicit FaceDetectionResult(const cv::Rect & location); }; /** * @class FaceDetection * @brief Class to load face detection model and perform face detection. */ -class FaceDetection : public BaseInference +class FaceDetection : public ObjectDetection { - public: - using Result = dynamic_vino_lib::FaceDetectionResult; - explicit FaceDetection(double); - ~FaceDetection() override; - /** - * @brief Load the face detection model. - */ - void loadNetwork(std::shared_ptr); - /** - * @brief Enqueue a frame to this class. - * The frame will be buffered but not infered yet. - * @param[in] frame The frame to be enqueued. 
- * @param[in] input_frame_loc The location of the enqueued frame with respect - * to the frame generated by the input device. - * @return Whether this operation is successful. - */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; - /** - * @brief Start inference for all buffered frames. - * @return Whether this operation is successful. - */ - bool submitRequest() override; - /** - * @brief This function will fetch the results of the previous inference and - * stores the results in a result buffer array. All buffered frames will be - * cleared. - * @return Whether the Inference object fetches a result this time - */ - bool fetchResults() override; - /** - * @brief Get the length of the buffer result array. - * @return The length of the buffer result array. - */ - int getResultsLength() const override; - /** - * @brief Get the location of result with respect - * to the frame generated by the input device. - * @param[in] idx The index of the result. - */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; - /** - * @brief Show the observed detection result either through image window - or ROS topic. - */ - void observeOutput(const std::shared_ptr& output); - /** - * @brief Get the name of the Inference instance. - * @return The name of the Inference instance. 
- */ - const std::string getName() const override; - - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; - - private: - std::shared_ptr valid_model_; - std::vector results_; - int width_ = 0; - int height_ = 0; - int max_proposal_count_; - int object_size_; - double show_output_thresh_ = 0; +public: + explicit FaceDetection(bool, double); }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB_INFERENCES_FACE_DETECTION_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h index 00df9a9b..ef16d415 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h @@ -131,6 +131,10 @@ class HeadPoseDetection : public BaseInference void observeOutput( const std::shared_ptr& output) override; + std::vector getResults() + { + return results_; + } const std::vector getFilteredROIs( const std::string filter_conditions) const override; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h index 28991f90..8151f052 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h @@ -98,7 +98,7 @@ class InferenceManager const Params::ParamManager::InferenceParams & infer); std::map pipelines_; - std::map plugins_for_devices_; + // std::map plugins_for_devices_; }; -#endif // DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h index 362f1001..75c5b85f 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h @@ -32,23 +32,46 @@ #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ +/** + * @class ObjectDetectionResult + * @brief Class for storing and processing face detection result. + */ +class ObjectDetectionResult : public Result +{ +public: + friend class ObjectDetection; + explicit ObjectDetectionResult(const cv::Rect & location); + std::string getLabel() const + { + return label_; + } -class ObjectDetectionResult : public Result { - public: - ObjectDetectionResult(); - explicit ObjectDetectionResult(const cv::Rect& location); - std::string getLabel() const { return label_; } + void setLabel(const std::string & label) + { + label_ = label; + } /** * @brief Get the confidence that the detected area is a face. - * @return The confidence value. + * @return The confidence value. */ - float getConfidence() const { return confidence_; } - bool operator<(const ObjectDetectionResult &s2) const + float getConfidence() const + { + return confidence_; + } + + void setConfidence(const float & con) + { + confidence_ = con; + } + + bool operator<(const ObjectDetectionResult & s2) const { return this->confidence_ > s2.confidence_; } +private: std::string label_ = ""; float confidence_ = -1; }; @@ -77,7 +100,8 @@ class ObjectDetectionResultFilter : public BaseFilter * @brief Get the filtered results' ROIs. * @return The filtered ROIs. */ - std::vector getFilteredResults(); + std::vector getFilteredLocations() override; + private: /** * @brief Decide whether a result is valid for label filter condition. @@ -108,12 +132,78 @@ class ObjectDetectionResultFilter : public BaseFilter std::vector results_; }; - +/** + * @class ObjectDetection + * @brief Class to load face detection model and perform face detection. 
+ */ class ObjectDetection : public BaseInference { - public: - ObjectDetection() {}; - virtual void loadNetwork(std::shared_ptr) = 0; +public: + using Result = dynamic_vino_lib::ObjectDetectionResult; + using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; + explicit ObjectDetection(bool, double); + ~ObjectDetection() override; + /** + * @brief Load the face detection model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not infered yet. + * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + void observeOutput(const std::shared_ptr & output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. + */ + const std::string getName() const override; + + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + /** + * @brief Calculate the IoU ratio for the given rectangles. + * @return IoU Ratio of the given rectangles. 
+ */ + static double calcIoU(const cv::Rect & box_1, const cv::Rect & box_2); + +private: + std::shared_ptr valid_model_; + std::shared_ptr result_filter_; + std::vector results_; + int width_ = 0; + int height_ = 0; + int max_proposal_count_; + int object_size_; + double show_output_thresh_ = 0; + bool enable_roi_constraint_ = false; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h deleted file mode 100644 index 9b7cf0d5..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_ssd.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * @brief A header file with declaration for ObjectDetection Class - * @file object_detection.hpp - */ -#ifndef DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H -#define DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H -#include -#include -#include -#include -#include -#include -#include "dynamic_vino_lib/models/object_detection_ssd_model.h" -#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" -#include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/inferences/base_inference.h" -#include "dynamic_vino_lib/inferences/object_detection.h" -#include "inference_engine.hpp" -#include "opencv2/opencv.hpp" -// namespace -namespace dynamic_vino_lib { -/** - * @class ObjectDetection - * @brief Class to load face detection model and perform face detection. - */ -class ObjectDetectionSSD : public ObjectDetection { - public: - using Result = dynamic_vino_lib::ObjectDetectionResult; - using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; - explicit ObjectDetectionSSD(double); - ~ObjectDetectionSSD() override; - /** - * @brief Load the face detection model. - */ - void loadNetwork(std::shared_ptr) override; - /** - * @brief Enqueue a frame to this class. - * The frame will be buffered but not infered yet. - * @param[in] frame The frame to be enqueued. - * @param[in] input_frame_loc The location of the enqueued frame with respect - * to the frame generated by the input device. - * @return Whether this operation is successful. - */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; - /** - * @brief Start inference for all buffered frames. - * @return Whether this operation is successful. - */ - bool submitRequest() override; - /** - * @brief This function will fetch the results of the previous inference and - * stores the results in a result buffer array. All buffered frames will be - * cleared. 
- * @return Whether the Inference object fetches a result this time - */ - bool fetchResults() override; - /** - * @brief Get the length of the buffer result array. - * @return The length of the buffer result array. - */ - int getResultsLength() const override; - /** - * @brief Get the location of result with respect - * to the frame generated by the input device. - * @param[in] idx The index of the result. - */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; - /** - * @brief Show the observed detection result either through image window - or ROS topic. - */ - void observeOutput(const std::shared_ptr& output); - /** - * @brief Get the name of the Inference instance. - * @return The name of the Inference instance. - */ - const std::string getName() const override; - - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; - - private: - std::shared_ptr valid_model_; - std::vector results_; - std::shared_ptr result_filter_; - int width_ = 0; - int height_ = 0; - int max_proposal_count_; - int object_size_; - double show_output_thresh_ = 0; - //int enqueued_frames_ = 0; - int max_batch_size_ = 1; - - bool matToBlob(const cv::Mat& frame, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name); -}; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_SSD_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h deleted file mode 100644 index c64cd5d2..00000000 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection_yolov2_voc.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * @brief A header file with declaration for ObjectDetection Class - * @file object_detection.hpp - */ -#ifndef DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H -#define DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H -#include -#include -#include -#include -#include -#include -#include "dynamic_vino_lib/models/object_detection_ssd_model.h" -#include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" -#include "dynamic_vino_lib/engines/engine.h" -#include "dynamic_vino_lib/inferences/base_inference.h" -#include "dynamic_vino_lib/inferences/object_detection.h" -#include "inference_engine.hpp" -#include "opencv2/opencv.hpp" -// namespace -namespace dynamic_vino_lib { -/** - * @class ObjectDetectionYolov2voc - * @brief Class to load face detection model and perform face detection. - */ -class ObjectDetectionYolov2voc : public ObjectDetection { - public: - using Result = dynamic_vino_lib::ObjectDetectionResult; - using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; - explicit ObjectDetectionYolov2voc(double); - ~ObjectDetectionYolov2voc() override; - /** - * @brief Load the face detection model. - */ - void loadNetwork(std::shared_ptr) override; - /** - * @brief Enqueue a frame to this class. - * The frame will be buffered but not infered yet. - * @param[in] frame The frame to be enqueued. - * @param[in] input_frame_loc The location of the enqueued frame with respect - * to the frame generated by the input device. - * @return Whether this operation is successful. 
- */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; - /** - * @brief Start inference for all buffered frames. - * @return Whether this operation is successful. - */ - bool submitRequest() override; - /** - * @brief This function will fetch the results of the previous inference and - * stores the results in a result buffer array. All buffered frames will be - * cleared. - * @return Whether the Inference object fetches a result this time - */ - bool fetchResults() override; - /** - * @brief Get the length of the buffer result array. - * @return The length of the buffer result array. - */ - int getResultsLength() const override; - /** - * @brief Get the location of result with respect - * to the frame generated by the input device. - * @param[in] idx The index of the result. - */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; - /** - * @brief Show the observed detection result either through image window - or ROS topic. - */ - void observeOutput(const std::shared_ptr& output); - /** - * @brief Get the name of the Inference instance. - * @return The name of the Inference instance. 
- */ - const std::string getName() const override; - - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; - - private: - std::shared_ptr valid_model_; - std::vector raw_results_; - std::vector results_; - std::shared_ptr result_filter_; - int width_ = 0; - int height_ = 0; - int max_proposal_count_; - int object_size_; - double show_output_thresh_ = 0; - - bool matToBlob(const cv::Mat& frame, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name); - int getEntryIndex(int side, int lcoords, int lclasses, int location, int entry); - double IntersectionOverUnion(const Result &box_1, const Result &box_2); - //int enqueued_frames_ = 0; - int max_batch_size_ = 1; - cv::Rect ROI_; -}; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB_INFERENCES_OBJECT_DETECTION_YOLOV2_VOC_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h index 5d7720de..41b548df 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h @@ -117,7 +117,7 @@ class ObjectSegmentation : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output) override; + void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
@@ -132,6 +132,14 @@ class ObjectSegmentation : public BaseInference int width_ = 0; int height_ = 0; double show_output_thresh_ = 0; + + std::vector colors_ = { + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81} + }; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h index 0f1fd464..b4d8c2be 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h @@ -46,9 +46,20 @@ class PersonAttribsDetectionResult : public Result { return male_probability_; } + cv::Point2f getTopLocation() const + { + return top_point_; + } + cv::Point2f getBottomLocation() const + { + return bottom_point_; + } private: float male_probability_; + float attributes_probability_[8]; + cv::Point2f top_point_; + cv::Point2f bottom_point_; std::string attributes_ = ""; }; /** @@ -114,8 +125,8 @@ class PersonAttribsDetection : public BaseInference std::shared_ptr valid_model_; std::vector results_; double attribs_confidence_; - const std::vector net_attributes_ = {"is male", "hat", - "longsleeves", "longpants", "longhair", "coatjacket"}; + const std::vector net_attributes_ = {"is male", "has_bag", "has_backpack" , "has hat", + "has longsleeves", "has longpants", "has longhair", "has coat_jacket"}; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h 
b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h index c411a065..8a728ec1 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h @@ -99,27 +99,13 @@ class PersonReidentification : public BaseInference * @return The name of the Inference instance. */ const std::string getName() const override; - /** - * @brief Calculate the similarity between a new detected person with - * the already recorded persons. - * @return The similarity value. - */ - float calcSimilarity(const std::vector &, const std::vector &); - /** - * @brief Try to find the matched person from the recorded persons, if there are not, - * record it in the recorded persons. - * @return The id of the matched person (or the new person). - */ - std::string findMatchPerson(const std::vector &); - const std::vector getFilteredROIs( const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; - std::vector> recorded_persons_; - double match_thresh_ = 0; + std::shared_ptr person_tracker_; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp index 679dd397..b8393b91 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp @@ -99,7 +99,7 @@ dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::AgeGenderDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::AgeGenderDetection::observeOutput( @@ -123,4 +123,4 @@ const std::vector dynamic_vino_lib::AgeGenderDetection::getFilteredROI filtered_rois.push_back(res.getLocation()); } 
return filtered_rois; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/inferences/base_filter.cpp b/dynamic_vino_lib/src/inferences/base_filter.cpp index 1bb83542..cf308114 100644 --- a/dynamic_vino_lib/src/inferences/base_filter.cpp +++ b/dynamic_vino_lib/src/inferences/base_filter.cpp @@ -24,6 +24,12 @@ dynamic_vino_lib::BaseFilter::BaseFilter() {} +bool dynamic_vino_lib::BaseFilter::isValidFilterConditions( + const std::string & filter_conditions) +{ + return strip(filter_conditions) != ""; +} + void dynamic_vino_lib::BaseFilter::acceptFilterConditions( const std::string & filter_conditions) { @@ -211,4 +217,4 @@ std::string dynamic_vino_lib::BaseFilter::strip(const std::string & str) } } return stripped_string; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index 6801422b..33e4ac01 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -75,3 +75,12 @@ bool dynamic_vino_lib::BaseInference::fetchResults() results_fetched_ = true; return true; } + +void dynamic_vino_lib::BaseInference::addCandidatedModel(std::shared_ptr model) +{ + slog::info << "TESTING in addCandidatedModel()" << slog::endl; + if (model != nullptr) { + slog::info << "adding new Model Candidate..." 
<< slog::endl; + candidated_models_.push_back(model); + } +} diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp index 19d053ed..3a870cdb 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/dynamic_vino_lib/src/inferences/emotions_detection.cpp @@ -127,7 +127,7 @@ dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::EmotionsDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::EmotionsDetection::observeOutput( @@ -151,4 +151,4 @@ const std::vector dynamic_vino_lib::EmotionsDetection::getFilteredROIs filtered_rois.push_back(res.getLocation()); } return filtered_rois; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/dynamic_vino_lib/src/inferences/face_detection.cpp index 02fef81d..60cad5a6 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/dynamic_vino_lib/src/inferences/face_detection.cpp @@ -29,134 +29,16 @@ #include "dynamic_vino_lib/slog.h" // FaceDetectionResult -dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult( - const cv::Rect& location) - : Result(location) +dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) +: ObjectDetectionResult(location) { } // FaceDetection -dynamic_vino_lib::FaceDetection::FaceDetection(double show_output_thresh) - : dynamic_vino_lib::BaseInference(), - show_output_thresh_(show_output_thresh) +dynamic_vino_lib::FaceDetection::FaceDetection( + bool enable_roi_constraint, + double show_output_thresh) +: ObjectDetection(enable_roi_constraint, show_output_thresh) { } -dynamic_vino_lib::FaceDetection::~FaceDetection() = default; - -void dynamic_vino_lib::FaceDetection::loadNetwork( - const std::shared_ptr network) -{ - valid_model_ = network; - max_proposal_count_ = network->getMaxProposalCount(); - 
object_size_ = network->getObjectSize(); - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::FaceDetection::enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) -{ - // slog::info << "Face-enqueue" << slog::endl; - if (width_ == 0 && height_ == 0) - { - width_ = frame.cols; - height_ = frame.rows; - } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - Result r(input_frame_loc); - results_.clear(); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::FaceDetection::submitRequest() -{ - // slog::info << "Face-submitRequest" << slog::endl; - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::FaceDetection::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; - bool found_result = false; - results_.clear(); - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string output = valid_model_->getOutputName(); - const float* detections = request->GetBlob(output)->buffer().as(); - for (int i = 0; i < max_proposal_count_; i++) - { - float image_id = detections[i * object_size_ + 0]; - cv::Rect r; - auto label_num = static_cast(detections[i * object_size_ + 1]); - std::vector& labels = valid_model_->getLabels(); - - r.x = static_cast(detections[i * object_size_ + 3] * width_); - r.y = static_cast(detections[i * object_size_ + 4] * height_); - r.width = static_cast(detections[i * object_size_ + 5] * width_ - r.x); - r.height = - static_cast(detections[i * object_size_ + 6] * height_ - r.y); - Result result(r); - result.label_ = label_num < labels.size() - ? 
labels[label_num] - : std::string("label #") + std::to_string(label_num); - result.confidence_ = detections[i * object_size_ + 2]; - if (result.confidence_ <= show_output_thresh_) - { - continue; - } - - if (image_id < 0) - { - break; - } - found_result = true; - results_.emplace_back(result); - } - if (!found_result) results_.clear(); - - return true; -} - -int dynamic_vino_lib::FaceDetection::getResultsLength() const -{ - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result* -dynamic_vino_lib::FaceDetection::getLocationResult(int idx) const -{ - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::FaceDetection::getName() const -{ - return valid_model_->getModelName(); -} - -void dynamic_vino_lib::FaceDetection::observeOutput( - const std::shared_ptr& output) -{ - if (output != nullptr) - { - output->accept(results_); - } -} - -const std::vector dynamic_vino_lib::FaceDetection::getFilteredROIs( - const std::string filter_conditions) const -{ - if (!filter_conditions.empty()) { - slog::err << "Face detection does not support filtering now! 
" << - "Filter conditions: " << filter_conditions << slog::endl; - } - std::vector filtered_rois; - for (auto res : results_) { - filtered_rois.push_back(res.getLocation()); - } - return filtered_rois; -} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/dynamic_vino_lib/src/inferences/face_reidentification.cpp index 10cac25b..b3e18f88 100644 --- a/dynamic_vino_lib/src/inferences/face_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/face_reidentification.cpp @@ -98,7 +98,7 @@ dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const const std::string dynamic_vino_lib::FaceReidentification::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::FaceReidentification::observeOutput( diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp index 541aae58..ee647674 100644 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp @@ -101,7 +101,7 @@ dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::HeadPoseDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::HeadPoseDetection::observeOutput( @@ -125,4 +125,4 @@ const std::vector dynamic_vino_lib::HeadPoseDetection::getFilteredROIs filtered_rois.push_back(res.getLocation()); } return filtered_rois; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp index 80f37deb..4f86e78d 100644 --- a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp +++ b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp @@ -99,7 +99,7 @@ dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const const 
std::string dynamic_vino_lib::LandmarksDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::LandmarksDetection::observeOutput( diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp index 8303fc45..4528a182 100644 --- a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp +++ b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp @@ -112,7 +112,7 @@ dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::LicensePlateDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::LicensePlateDetection::observeOutput( diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/dynamic_vino_lib/src/inferences/object_detection.cpp index 2486f233..b45c65bd 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection.cpp @@ -27,8 +27,108 @@ #include "dynamic_vino_lib/outputs/base_output.h" #include "dynamic_vino_lib/slog.h" // ObjectDetectionResult -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) : Result(location){} +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) +: Result(location) +{ +} + +// ObjectDetection +dynamic_vino_lib::ObjectDetection::ObjectDetection( + bool enable_roi_constraint, + double show_output_thresh) +: show_output_thresh_(show_output_thresh), + enable_roi_constraint_(enable_roi_constraint), dynamic_vino_lib::BaseInference() +{ + result_filter_ = std::make_shared(); + result_filter_->init(); +} + +dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; + +void dynamic_vino_lib::ObjectDetection::loadNetwork( + std::shared_ptr network) +{ + valid_model_ = network; + + 
setMaxBatchSize(network->getMaxBatchSize()); +} +bool dynamic_vino_lib::ObjectDetection::enqueue( + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + if (valid_model_ == nullptr || getEngine() == nullptr) { + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { + slog::warn << "Number of " << getName() << "input more than maximum(" << + max_batch_size_ << ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + return false; + } + + // nonsense!! + // Result r(input_frame_loc); + // results_.clear(); + // results_.emplace_back(r); + enqueued_frames_ += 1; + return true; +} + +bool dynamic_vino_lib::ObjectDetection::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) { + return false; + } + + results_.clear(); + + return (valid_model_ != nullptr) && valid_model_->fetchResults( + getEngine(), results_, show_output_thresh_, enable_roi_constraint_); +} + +int dynamic_vino_lib::ObjectDetection::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const dynamic_vino_lib::ObjectDetection::Result * +dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string dynamic_vino_lib::ObjectDetection::getName() const +{ + return valid_model_->getModelCategory(); +} + +void dynamic_vino_lib::ObjectDetection::observeOutput( + const std::shared_ptr & output) +{ + if (output != nullptr) { + output->accept(results_); + } +} + +const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!result_filter_->isValidFilterConditions(filter_conditions)) { + std::vector filtered_rois; + for (auto result : results_) { + filtered_rois.push_back(result.getLocation()); + } + return filtered_rois; + } + result_filter_->acceptResults(results_); + 
result_filter_->acceptFilterConditions(filter_conditions); + return result_filter_->getFilteredLocations(); +} // ObjectDetectionResultFilter dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} @@ -45,30 +145,18 @@ void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults( results_ = results; } -std::vector -dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredResults() +std::vector +dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() { - std::vector results; + std::vector locations; for (auto result : results_) { if (isValidResult(result)) { - results.push_back(result); + locations.push_back(result.getLocation()); } } - return results; + return locations; } -// std::vector -// dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() -// { -// std::vector locations; -// for (auto result : results_) { -// if (isValidResult(result)) { -// locations.push_back(result.getLocation()); -// } -// } -// return locations; -// } - bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel( const Result & result, const std::string & op, const std::string & target) { @@ -85,4 +173,14 @@ bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult( const Result & result) { ISVALIDRESULT(key_to_function_, result); -} \ No newline at end of file +} + +double dynamic_vino_lib::ObjectDetection::calcIoU( + const cv::Rect & box_1, + const cv::Rect & box_2) +{ + cv::Rect i = box_1 & box_2; + cv::Rect u = box_1 | box_2; + + return static_cast(i.area()) / static_cast(u.area()); +} diff --git a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp b/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp deleted file mode 100644 index 2d982d28..00000000 --- a/dynamic_vino_lib/src/inferences/object_detection_ssd.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * @brief a header file with declaration of ObjectDetection class and - * ObjectDetectionResult class - * @file object_detection.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/object_detection_ssd.h" -#include "dynamic_vino_lib/outputs/base_output.h" -#include "dynamic_vino_lib/slog.h" -// ObjectDetectionResult -/* -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) - : Result(location){} -*/ - -dynamic_vino_lib::ObjectDetectionSSD::ObjectDetectionSSD(double show_output_thresh) - : dynamic_vino_lib::ObjectDetection(), - show_output_thresh_(show_output_thresh) -{ - result_filter_ = std::make_shared(); - result_filter_->init(); -} - -dynamic_vino_lib::ObjectDetectionSSD::~ObjectDetectionSSD() = default; - -void dynamic_vino_lib::ObjectDetectionSSD::loadNetwork( - std::shared_ptr network) { - valid_model_ = std::dynamic_pointer_cast(network); - - max_proposal_count_ = network->getMaxProposalCount(); - object_size_ = network->getObjectSize(); - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::ObjectDetectionSSD::enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) { - if (width_ == 0 && height_ == 0) { - width_ = frame.cols; - height_ = frame.rows; - } - - if (!matToBlob(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - - Result r(input_frame_loc); - results_.clear(); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::ObjectDetectionSSD::matToBlob( 
- const cv::Mat& orig_image, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name) -{ - if (enqueued_frames_ == max_batch_size_) - { - slog::warn << "Number of " << getName() << "input more than maximum(" - << max_batch_size_ << ") processed by inference" << slog::endl; - return false; - } - - InferenceEngine::Blob::Ptr input_blob = - engine_->getRequest()->GetBlob(input_name); - - InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - u_int8_t * blob_data = input_blob->buffer().as(); - - cv::Mat resized_image(orig_image); - if (width != orig_image.size().width || height != orig_image.size().height) - { - cv::resize(orig_image, resized_image, cv::Size(width, height)); - } - int batchOffset = batch_index * width * height * channels; - - for (int c = 0; c < channels; c++) - { - for (int h = 0; h < height; h++) - { - for (int w = 0; w < width; w++) - { - blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at(h, w)[c] * scale_factor; - } - } - } - - enqueued_frames_ += 1; - return true; -} - -bool dynamic_vino_lib::ObjectDetectionSSD::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::ObjectDetectionSSD::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - - if (!can_fetch) return false; - bool found_result = false; - results_.clear(); - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - - std::string output = valid_model_->getOutputName(); - const float* detections = request->GetBlob(output)->buffer().as(); - - for (int i = 0; i < max_proposal_count_; i++) { - - float image_id = detections[i * object_size_ + 0]; - cv::Rect r; - auto label_num = static_cast(detections[i * object_size_ + 1]); - std::vector& labels = valid_model_->getLabels(); - r.x = 
static_cast(detections[i * object_size_ + 3] * width_); - r.y = static_cast(detections[i * object_size_ + 4] * height_); - r.width = static_cast(detections[i * object_size_ + 5] * width_ - r.x); - r.height = - static_cast(detections[i * object_size_ + 6] * height_ - r.y); - Result result(r); - result.label_ = label_num < labels.size() - ? labels[label_num] - : std::string("label #") + std::to_string(label_num); - result.confidence_ = detections[i * object_size_ + 2]; - if (result.confidence_ <= show_output_thresh_) { - continue; - } - if (image_id < 0) { - break; - } - found_result = true; - results_.emplace_back(result); - } - - if (!found_result) results_.clear(); - return true; -} - -int dynamic_vino_lib::ObjectDetectionSSD::getResultsLength() const { - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result* -dynamic_vino_lib::ObjectDetectionSSD::getLocationResult(int idx) const { - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::ObjectDetectionSSD::getName() const { - return valid_model_->getModelName(); -} - -void dynamic_vino_lib::ObjectDetectionSSD::observeOutput( - const std::shared_ptr& output) { - if (output != nullptr) { - result_filter_->acceptResults(results_); - //result_filter_->acceptFilterConditions(filter_conditions); - output->accept(result_filter_->getFilteredResults()); - } -} - -const std::vector dynamic_vino_lib::ObjectDetectionSSD::getFilteredROIs( - const std::string filter_conditions) const -{ - result_filter_->acceptResults(results_); - result_filter_->acceptFilterConditions(filter_conditions); - std::vector results = result_filter_->getFilteredResults(); - std::vector locations; - for (auto result : results) { - locations.push_back(result.getLocation()); - } - return locations; -} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp b/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp deleted file mode 100644 index 576a2c4e..00000000 
--- a/dynamic_vino_lib/src/inferences/object_detection_yolov2_voc.cpp +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * @brief a header file with declaration of ObjectDetection class and - * ObjectDetectionResult class - * @file object_detection.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" -#include "dynamic_vino_lib/outputs/base_output.h" -#include "dynamic_vino_lib/slog.h" -// ObjectDetectionResult -/* -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) - : Result(location){} -*/ - -// ObjectDetection -dynamic_vino_lib::ObjectDetectionYolov2voc::ObjectDetectionYolov2voc(double show_output_thresh) - : dynamic_vino_lib::ObjectDetection(), - show_output_thresh_(show_output_thresh) { - result_filter_ = std::make_shared(); - result_filter_->init(); -} - -dynamic_vino_lib::ObjectDetectionYolov2voc::~ObjectDetectionYolov2voc() = default; - -void dynamic_vino_lib::ObjectDetectionYolov2voc::loadNetwork( - std::shared_ptr network) { - valid_model_ = std::dynamic_pointer_cast(network); - if (valid_model_ == NULL) - { - std::cout << "ERROR" << std::endl; - } - max_proposal_count_ = network->getMaxProposalCount(); - object_size_ = network->getObjectSize(); - setMaxBatchSize(network->getMaxBatchSize()); -} - -bool dynamic_vino_lib::ObjectDetectionYolov2voc::enqueue(const 
cv::Mat& frame, const cv::Rect& input_frame_loc) -{ - if (width_ == 0 && height_ == 0) { - width_ = frame.cols; - height_ = frame.rows; - } - - if (!matToBlob(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) - { - return false; - } - - Result r(input_frame_loc); - results_.clear(); - results_.emplace_back(r); - return true; -} - -bool dynamic_vino_lib::ObjectDetectionYolov2voc::matToBlob( - const cv::Mat& orig_image, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name) -{ - if (enqueued_frames_ == max_batch_size_) - { - slog::warn << "Number of " << getName() << "input more than maximum(" - << max_batch_size_ << ") processed by inference" << slog::endl; - return false; - } - InferenceEngine::Blob::Ptr input_blob = - engine_->getRequest()->GetBlob(input_name); - - InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - float* blob_data = input_blob->buffer().as(); - - int dx = 0; - int dy = 0; - int srcw = 0; - int srch = 0; - - int IH = height; - int IW = width; - - cv::Mat image = orig_image.clone(); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - - image.convertTo(image, CV_32F, 1.0/255.0, 0); - srcw = image.size().width; - srch = image.size().height; - - cv::Mat resizedImg (IH, IW, CV_32FC3); - resizedImg = cv::Scalar(0.5, 0.5, 0.5); - int imw = image.size().width; - int imh = image.size().height; - float resize_ratio = (float)IH / (float)std::max(imw, imh); - cv::resize(image, image, cv::Size(imw*resize_ratio, imh*resize_ratio)); - - int new_w = imw; - int new_h = imh; - if (((float)IW/imw) < ((float)IH/imh)) { - new_w = IW; - new_h = (imh * IW)/imw; - } else { - new_h = IH; - new_w = (imw * IW)/imh; - } - dx = (IW-new_w)/2; - dy = (IH-new_h)/2; - - imh = image.size().height; - imw = image.size().width; - - for(int row = 0; row < imh; row ++) - { - for(int col = 0; col < imw; col ++) 
- { - for(int ch = 0; ch < 3; ch ++) - { - resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; - } - } - } - - for (int c = 0; c < channels; c++) - { - for (int h = 0; h < height; h++) - { - for (int w = 0; w < width; w++) - { - blob_data[c * width * height + h*width + w] = resizedImg.at(h, w)[c]; - } - } - } - - ROI_.x = dx; - ROI_.y = dy; - ROI_.width = srcw; - ROI_.height = srch; - - enqueued_frames_ += 1; - return true; -} - -bool dynamic_vino_lib::ObjectDetectionYolov2voc::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); -} - -bool dynamic_vino_lib::ObjectDetectionYolov2voc::fetchResults() -{ - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - - if (!can_fetch) return false; - bool found_result = false; - results_.clear(); - - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - - std::string output = valid_model_->getOutputName(); - std::vector& labels = valid_model_->getLabels(); - - const float* detections = request->GetBlob(output)->buffer().as::value_type *>(); - InferenceEngine::CNNLayerPtr layer = valid_model_->getLayer(); - InferenceEngine::InputInfo::Ptr input_info = valid_model_->getInputInfo(); - - int input_height = input_info->getTensorDesc().getDims()[2]; - int input_width = input_info->getTensorDesc().getDims()[3]; - - // --------------------------- Validating output parameters ------------------------------------- - if (layer->type != "RegionYolo") - throw std::runtime_error("Invalid output type: " + layer->type + ". 
RegionYolo expected"); - - // --------------------------- Extracting layer parameters ------------------------------------- - const int num = layer->GetParamAsInt("num"); - const int coords = layer->GetParamAsInt("coords"); - const int classes = layer->GetParamAsInt("classes"); - - const int out_blob_h = layer->input()->dims[0]; - - std::vector anchors = { - 0.572730, 0.677385, - 1.874460, 2.062530, - 3.338430, 5.474340, - 7.882820, 3.527780, - 9.770520, 9.168280 - }; - float threshold = 0.5; - auto side = out_blob_h; - - auto side_square = side * side; - - // --------------------------- Parsing YOLO Region output ------------------------------------- - for (int i = 0; i < side_square; ++i) { - int row = i / side; - int col = i % side; - - for (int n = 0; n < num; ++n) { - int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); - int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); - - float scale = detections[obj_index]; - - if (scale < threshold) - continue; - - float x = (col + detections[box_index + 0 * side_square]) / side * input_width; - float y = (row + detections[box_index + 1 * side_square]) / side * input_height; - float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / side * input_height; - float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * input_width; - - for (int j = 0; j < classes; ++j) { - - int class_index = getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); - - float prob = scale * detections[class_index]; - if (prob < threshold) - continue; - - float x_min = x - width/2; - float y_min = y - height/2; - - float x_min_resized = x_min / input_width * ROI_.width; - float y_min_resized = y_min / input_height * ROI_.height; - float width_resized = width / input_width * ROI_.width; - float height_resized = height / input_height * ROI_.height; - - cv::Rect r(x_min_resized, y_min_resized, width_resized, 
height_resized); - Result result(r); - result.label_ = j; - result.label_ = labels[j]; - - result.confidence_ = prob; - found_result = true; - raw_results_.emplace_back(result); - } - } - } - - std::sort(raw_results_.begin(), raw_results_.end()); - for (unsigned int i = 0; i < raw_results_.size(); ++i) { - if (raw_results_[i].confidence_ == 0) - continue; - for (unsigned int j = i + 1; j < raw_results_.size(); ++j) - if (IntersectionOverUnion(raw_results_[i], raw_results_[j]) >= 0.45) - raw_results_[j].confidence_ = 0; - } - - for (auto &raw_result : raw_results_) { - if (raw_result.getConfidence() < 0.5) - continue; - - results_.push_back(raw_result); - } - - if (!found_result) { - results_.clear(); - } - raw_results_.clear(); - - return true; -} - -double dynamic_vino_lib::ObjectDetectionYolov2voc::IntersectionOverUnion(const Result &box_1, const Result &box_2) { - int xmax_1 = box_1.getLocation().x + box_1.getLocation().width; - int xmin_1 = box_1.getLocation().x; - int xmax_2 = box_2.getLocation().x + box_2.getLocation().width; - int xmin_2 = box_2.getLocation().x; - - int ymax_1 = box_1.getLocation().y + box_1.getLocation().height; - int ymin_1 = box_1.getLocation().y; - int ymax_2 = box_2.getLocation().y + box_2.getLocation().height; - int ymin_2 = box_2.getLocation().y; - - double width_of_overlap_area = fmin(xmax_1 , xmax_2) - fmax(xmin_1, xmin_2); - double height_of_overlap_area = fmin(ymax_1, ymax_2) - fmax(ymin_1, ymin_2); - double area_of_overlap; - if (width_of_overlap_area < 0 || height_of_overlap_area < 0) - area_of_overlap = 0; - else - area_of_overlap = width_of_overlap_area * height_of_overlap_area; - - double box_1_area = (ymax_1 - ymin_1) * (xmax_1 - xmin_1); - double box_2_area = (ymax_2 - ymin_2) * (xmax_2 - xmin_2); - double area_of_union = box_1_area + box_2_area - area_of_overlap; - - return area_of_overlap / area_of_union; -} - -int dynamic_vino_lib::ObjectDetectionYolov2voc::getEntryIndex(int side, int lcoords, int lclasses, int location, 
int entry) { - int n = location / (side * side); - int loc = location % (side * side); - return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; -} - -int dynamic_vino_lib::ObjectDetectionYolov2voc::getResultsLength() const { - return static_cast(results_.size()); -} - -const dynamic_vino_lib::Result* -dynamic_vino_lib::ObjectDetectionYolov2voc::getLocationResult(int idx) const { - return &(results_[idx]); -} - -const std::string dynamic_vino_lib::ObjectDetectionYolov2voc::getName() const { - return valid_model_->getModelName(); -} - -void dynamic_vino_lib::ObjectDetectionYolov2voc::observeOutput( - const std::shared_ptr& output) { - if (output != nullptr) { - result_filter_->acceptResults(results_); - //result_filter_->acceptFilterConditions(filter_conditions); - output->accept(result_filter_->getFilteredResults()); - } -} - -const std::vector dynamic_vino_lib::ObjectDetectionYolov2voc::getFilteredROIs( - const std::string filter_conditions) const -{ - result_filter_->acceptResults(results_); - result_filter_->acceptFilterConditions(filter_conditions); - std::vector results = result_filter_->getFilteredResults(); - std::vector locations; - for (auto result : results) { - locations.push_back(result.getLocation()); - } - return locations; -} \ No newline at end of file diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index cfbcf58a..f28b7e2f 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/outputs/base_output.h" @@ -43,7 +44,7 @@ dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; void dynamic_vino_lib::ObjectSegmentation::loadNetwork( const std::shared_ptr network) { - slog::info << "Loading Network: " << 
network->getModelName() << slog::endl; + slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } @@ -113,52 +114,75 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() bool found_result = false; results_.clear(); InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string detection_output = valid_model_->getDetectionOutputName(); - std::string mask_output = valid_model_->getMaskOutputName(); - const auto do_blob = request->GetBlob(detection_output.c_str()); + slog::debug << "Analyzing Detection results..." << slog::endl; + std::string detection_output = valid_model_->getOutputName("detection"); + std::string mask_output = valid_model_->getOutputName("masks"); + + const InferenceEngine::Blob::Ptr do_blob = request->GetBlob(detection_output.c_str()); const auto do_data = do_blob->buffer().as(); const auto masks_blob = request->GetBlob(mask_output.c_str()); const auto masks_data = masks_blob->buffer().as(); - // amount of elements in each detected box description (batch, label, prob, x1, y1, x2, y2) - size_t box_num = masks_blob->dims().at(3); - size_t label_num = masks_blob->dims().at(2); - size_t box_description_size = do_blob->dims().at(0); - size_t H = masks_blob->dims().at(1); - size_t W = masks_blob->dims().at(0); - size_t box_stride = W * H * label_num; - for (size_t box = 0; box < box_num; ++box) { - float * box_info = do_data + box * box_description_size; - float batch = box_info[0]; - if (batch < 0) { - break; - } - float prob = box_info[2]; - if (prob > show_output_thresh_) { - float x1 = std::min(std::max(0.0f, box_info[3] * width_), static_cast(width_)); - float y1 = std::min(std::max(0.0f, box_info[4] * height_), static_cast(height_)); - float x2 = std::min(std::max(0.0f, box_info[5] * width_), static_cast(width_)); - float y2 = std::min(std::max(0.0f, box_info[6] * height_), static_cast(height_)); - int box_width = 
std::min(static_cast(std::max(0.0f, x2 - x1)), width_); - int box_height = std::min(static_cast(std::max(0.0f, y2 - y1)), height_); - int class_id = static_cast(box_info[1] + 1e-6f); - float * mask_arr = masks_data + box_stride * box + H * W * (class_id - 1); - cv::Mat mask_mat(H, W, CV_32FC1, mask_arr); - cv::Rect roi = cv::Rect(static_cast(x1), static_cast(y1), box_width, box_height); - cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1); - cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height)); - Result result(roi); - result.confidence_ = prob; - std::vector & labels = valid_model_->getLabels(); - result.label_ = class_id < labels.size() ? labels[class_id] : - std::string("label #") + std::to_string(class_id); - result.mask_ = resized_mask_mat; - found_result = true; - results_.emplace_back(result); + const size_t output_w = masks_blob->getTensorDesc().getDims().at(3); + const size_t output_h = masks_blob->getTensorDesc().getDims().at(2); + const size_t output_des = masks_blob-> getTensorDesc().getDims().at(1); + const size_t output_extra = masks_blob-> getTensorDesc().getDims().at(0); + + slog::debug << "output w " << output_w<< slog::endl; + slog::debug << "output h " << output_h << slog::endl; + slog::debug << "output description " << output_des << slog::endl; + slog::debug << "output extra " << output_extra << slog::endl; + + const float * detections = request->GetBlob(detection_output)->buffer().as(); + std::vector &labels = valid_model_->getLabels(); + slog::debug << "label size " <(detections[rowId * output_w + colId]); + for (int ch = 0; ch < colored_mask.channels();++ch){ + colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + } + //classId = static_cast(predictions[rowId * output_w + colId]); + } else { + for (int chId = 0; chId < output_des; ++chId) + { + float prob = detections[chId * output_h * output_w + rowId * output_w+ colId]; + //float prob = predictions[chId * output_h * output_w + rowId * output_w+ colId]; + if 
(prob > maxProb) + { + classId = chId; + maxProb = prob; + } + } + while (classId >= colors_.size()) + { + static std::mt19937 rng(classId); + std::uniform_int_distribution distr(0, 255); + cv::Vec3b color(distr(rng), distr(rng), distr(rng)); + colors_.push_back(color); + } + if(maxProb > 0.5){ + for (int ch = 0; ch < colored_mask.channels();++ch){ + colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + } + } + } } } - if (!found_result) { - results_.clear(); - } + const float alpha = 0.7f; + Result result(roi); + result.mask_ = colored_mask; + found_result = true; + results_.emplace_back(result); return true; } @@ -175,7 +199,7 @@ dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const const std::string dynamic_vino_lib::ObjectSegmentation::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::ObjectSegmentation::observeOutput( diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp index 10517fc2..9db8a3c8 100644 --- a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp @@ -61,7 +61,7 @@ bool dynamic_vino_lib::PersonAttribsDetection::submitRequest() { return dynamic_vino_lib::BaseInference::submitRequest(); } - +/* bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); @@ -84,6 +84,48 @@ bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() if (!found_result) {results_.clear();} return true; } +*/ +bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() +{ + bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + if (!can_fetch) {return false;} + bool found_result = false; + InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + slog::debug << "Analyzing Attributes Detection results..." 
<< slog::endl; + std::string attribute_output = valid_model_->getOutputName("attributes_output_"); + std::string top_output = valid_model_->getOutputName("top_output_"); + std::string bottom_output = valid_model_->getOutputName("bottom_output_"); + + /*auto attri_values = request->GetBlob(attribute_output)->buffer().as(); + auto top_values = request->GetBlob(top_output)->buffer().as(); + auto bottom_values = request->GetBlob(bottom_output)->buffer().as();*/ + InferenceEngine::Blob::Ptr attribBlob = request->GetBlob(attribute_output); + InferenceEngine::Blob::Ptr topBlob = request->GetBlob(top_output); + InferenceEngine::Blob::Ptr bottomBlob = request->GetBlob(bottom_output); + + auto attri_values = attribBlob->buffer().as(); + auto top_values = topBlob->buffer().as(); + auto bottom_values = bottomBlob->buffer().as(); + + int net_attrib_length = net_attributes_.size(); + for (int i = 0; i < getResultsLength(); i++) { + results_[i].male_probability_ = attri_values[i * net_attrib_length]; + results_[i].top_point_.x = top_values[i]; + results_[i].top_point_.y = top_values[i+1]; + results_[i].bottom_point_.x = bottom_values[i]; + results_[i].bottom_point_.y = bottom_values[i+1]; + std::string attrib = ""; + for (int j = 1; j < net_attrib_length; j++) { + attrib += (attri_values[i * net_attrib_length + j] > attribs_confidence_) ? 
+ net_attributes_[j] + ", " : ""; + } + results_[i].attributes_ = attrib; + + found_result = true; + } + if (!found_result) {results_.clear();} + return true; +} int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const { @@ -98,7 +140,7 @@ dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::PersonAttribsDetection::observeOutput( diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/dynamic_vino_lib/src/inferences/person_reidentification.cpp index fdef4ebe..9d96105d 100644 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/person_reidentification.cpp @@ -31,7 +31,10 @@ dynamic_vino_lib::PersonReidentificationResult::PersonReidentificationResult( // PersonReidentification dynamic_vino_lib::PersonReidentification::PersonReidentification(double match_thresh) -: match_thresh_(match_thresh), dynamic_vino_lib::BaseInference() {} +: dynamic_vino_lib::BaseInference() +{ + person_tracker_ = std::make_shared(1000, match_thresh, 0.3); +} dynamic_vino_lib::PersonReidentification::~PersonReidentification() = default; void dynamic_vino_lib::PersonReidentification::loadNetwork( @@ -73,7 +76,8 @@ bool dynamic_vino_lib::PersonReidentification::fetchResults() for (int i = 0; i < getResultsLength(); i++) { std::vector new_person = std::vector( output_values + 256 * i, output_values + 256 * i + 256); - std::string person_id = findMatchPerson(new_person); + std::string person_id = "No." 
+ std::to_string( + person_tracker_->processNewTrack(new_person)); results_[i].person_id_ = person_id; found_result = true; } @@ -81,53 +85,6 @@ bool dynamic_vino_lib::PersonReidentification::fetchResults() return true; } -float dynamic_vino_lib::PersonReidentification::calcSimilarity( - const std::vector & person_a, const std::vector & person_b) -{ - if (person_a.size() != person_b.size()) { - throw std::logic_error("cosine similarity can't be called for vectors of different lengths: " - "person_a size = " + std::to_string(person_a.size()) + - "person_b size = " + std::to_string(person_b.size())); - } - float mul_sum, denom_a, denom_b, value_a, value_b; - mul_sum = denom_a = denom_b = value_a = value_b = 0; - for (auto i = 0; i < person_a.size(); i++) { - value_a = person_a[i]; - value_b = person_b[i]; - mul_sum += value_a * value_b; - denom_a += value_a * value_a; - denom_b += value_b * value_b; - } - if (denom_a == 0 || denom_b == 0) { - throw std::logic_error("cosine similarity is not defined whenever one or both " - "input vectors are zero-vectors."); - } - return mul_sum / (sqrt(denom_a) * sqrt(denom_b)); -} - -std::string dynamic_vino_lib::PersonReidentification::findMatchPerson( - const std::vector & new_person) -{ - auto size = recorded_persons_.size(); - std::string id = "No."; - float best_match_sim = 0; - int best_match_ind = -1; - for (auto i = 0; i < size; ++i) { - float cos_sim = calcSimilarity(new_person, recorded_persons_[i]); - if (cos_sim > best_match_sim) { - best_match_sim = cos_sim; - best_match_ind = i; - } - } - if (best_match_sim > match_thresh_) { - recorded_persons_[best_match_ind] = new_person; - return id + std::to_string(best_match_ind); - } else { - recorded_persons_.push_back(new_person); - return id + std::to_string(size); - } -} - int dynamic_vino_lib::PersonReidentification::getResultsLength() const { return static_cast(results_.size()); @@ -141,7 +98,7 @@ dynamic_vino_lib::PersonReidentification::getLocationResult(int idx) const 
const std::string dynamic_vino_lib::PersonReidentification::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void dynamic_vino_lib::PersonReidentification::observeOutput( @@ -152,7 +109,6 @@ void dynamic_vino_lib::PersonReidentification::observeOutput( } } - const std::vector dynamic_vino_lib::PersonReidentification::getFilteredROIs( const std::string filter_conditions) const { @@ -165,4 +121,4 @@ const std::vector dynamic_vino_lib::PersonReidentification::getFiltere filtered_rois.push_back(res.getLocation()); } return filtered_rois; -} \ No newline at end of file +} diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp index 20370e61..0c55aa80 100644 --- a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp @@ -68,8 +68,10 @@ bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() if (!can_fetch) {return false;} bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - std::string color_name = valid_model_->getColorOutputName(); - std::string type_name = valid_model_->getTypeOutputName(); + //std::string color_name = valid_model_->getColorOutputName(); + //std::string type_name = valid_model_->getTypeOutputName(); + std::string color_name = valid_model_->getOutputName("color_output_"); + std::string type_name = valid_model_->getOutputName("type_output_"); const float * color_values = request->GetBlob(color_name)->buffer().as(); const float * type_values = request->GetBlob(type_name)->buffer().as(); for (int i = 0; i < getResultsLength(); i++) { @@ -98,7 +100,7 @@ dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const { - return valid_model_->getModelName(); + return valid_model_->getModelCategory(); } void 
dynamic_vino_lib::VehicleAttribsDetection::observeOutput( From 8cde81ab555d6d518893fb479c2faba87fe8ccbb Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Mon, 4 Jan 2021 11:15:32 +0800 Subject: [PATCH 34/63] align models code structure with ROS2 and OV2020.3 Signed-off-by: Lewis Liu --- .../models/age_gender_detection_model.h | 24 +- .../dynamic_vino_lib/models/base_model.h | 164 ++++----- .../models/emotion_detection_model.h | 24 +- .../models/face_detection_model.h | 35 +- .../models/face_reidentification_model.h | 6 +- .../models/head_pose_detection_model.h | 17 +- .../models/landmarks_detection_model.h | 7 +- .../models/license_plate_detection_model.h | 9 +- .../models/object_detection_ssd_model.h | 40 +- .../models/object_detection_yolov2voc_model.h | 44 ++- .../models/object_segmentation_model.h | 30 +- .../models/person_attribs_detection_model.h | 13 +- .../models/person_reidentification_model.h | 9 +- .../models/vehicle_attribs_detection_model.h | 9 +- .../src/models/age_gender_detection_model.cpp | 95 ++--- dynamic_vino_lib/src/models/base_model.cpp | 74 ++-- .../src/models/emotion_detection_model.cpp | 64 ++-- .../src/models/face_detection_model.cpp | 108 +++--- .../models/face_reidentification_model.cpp | 6 +- .../src/models/head_pose_detection_model.cpp | 82 ++--- .../src/models/landmarks_detection_model.cpp | 6 +- .../models/license_plate_detection_model.cpp | 40 +- .../src/models/object_detection_ssd_model.cpp | 227 +++++++++--- .../object_detection_yolov2voc_model.cpp | 347 +++++++++++++++--- .../src/models/object_segmentation_model.cpp | 274 ++++++++------ .../models/person_attribs_detection_model.cpp | 46 +-- .../models/person_reidentification_model.cpp | 26 +- .../vehicle_attribs_detection_model.cpp | 49 +-- 28 files changed, 1086 insertions(+), 789 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h index 
586b0e86..fcaeae74 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h @@ -33,23 +33,20 @@ namespace Models */ class AgeGenderDetectionModel : public BaseModel { - public: - AgeGenderDetectionModel(const std::string&, int, int, int); +public: + AgeGenderDetectionModel(const std::string & model_loc, int batch_size = 1); /** * @brief Get the input name. * @return Input name. */ - inline const std::string getInputName() const - { - return input_; - } + /** * @brief Get the age from the detection reuslt. * @return Detected age. */ inline const std::string getOutputAgeName() const { - return output_age_; + return getOutputName("age"); } /** * @brief Get the gender from the detection reuslt. @@ -57,22 +54,17 @@ class AgeGenderDetectionModel : public BaseModel */ inline const std::string getOutputGenderName() const { - return output_gender_; + return getOutputName("gender"); } /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; +protected: + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - private: - std::string input_; - std::string output_age_; - std::string output_gender_; }; } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index bc85f345..c1f7d645 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -26,18 +26,20 @@ #include #include -#include +#include +#include +#include #include "inference_engine.hpp" namespace Engines { -class Engine; + class Engine; } namespace dynamic_vino_lib { -class ObjectDetectionResult; + class ObjectDetectionResult; } namespace Models @@ -46,47 +48,40 @@ namespace Models * @class BaseModel * @brief This class represents the network given by .xml and .bin file */ -class BaseModel -{ -public: - using Ptr = std::shared_ptr; - /** + class BaseModel : public ModelAttribute + { + public: + using Ptr = std::shared_ptr; + /** * @brief Initialize the class with given .xml, .bin and .labels file. It will * also check whether the number of input and output are fit. * @param[in] model_loc The location of model' s .xml file * (model' s bin file should be the same as .xml file except for extension) * @param[in] input_num The number of input the network should have. * @param[in] output_num The number of output the network should have. - * @param[in] batch_size The number of batch size the network should have. + * @param[in] batch_size The number of batch size (default: 1) the network should have. * @return Whether the input device is successfully turned on. 
*/ - BaseModel(const std::string & model_loc, int input_num, int output_num, int batch_size); - /** - * @brief Get the label vector. - * @return The label vector. - */ - inline std::vector & getLabels() - { - return labels_; - } - /** + BaseModel(const std::string &model_loc, int batch_size = 1); + + /** * @brief Get the maximum batch size of the model. * @return The maximum batch size of the model. */ - inline int getMaxBatchSize() const - { - return max_batch_size_; - } - inline void setMaxBatchSize(int max_batch_size) - { - max_batch_size_ = max_batch_size; - } - - virtual bool enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) {return true;} - /** + inline int getMaxBatchSize() const + { + return max_batch_size_; + } + inline void setMaxBatchSize(int max_batch_size) + { + max_batch_size_ = max_batch_size; + } + + virtual bool enqueue( + const std::shared_ptr &engine, + const cv::Mat &frame, + const cv::Rect &input_frame_loc) { return true; } + /** * @brief Initialize the model. During the process the class will check * the network input, output size, check layer property and * set layer property. @@ -96,63 +91,54 @@ class BaseModel * @brief Get the name of the model. * @return The name of the model. */ - virtual const std::string getModelName() const = 0; - - virtual inline int getMaxProposalCount() const {return max_proposal_count_;} - inline int getObjectSize() const {return object_size_;} - inline void setObjectSize(int os) {object_size_ = os;} - inline InferenceEngine::CNNNetReader::Ptr getNetReader() const - { - return net_reader_; - } - -protected: - /** - * @brief Check whether the layer property - * (output layer name, output layer type, etc.) is right - * @param[in] network_reader The reader of the network to be checked. - */ - virtual void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr & network_reader) = 0; - /** - * @brief Set the layer property (layer layout, layer precision, etc.). 
- * @param[in] network_reader The reader of the network to be set.
- */
- virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0;
- virtual void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr);
- InferenceEngine::CNNNetReader::Ptr net_reader_;
- void setFrameSize(const int & w, const int & h)
+ virtual const std::string getModelCategory() const = 0;
+ inline ModelAttr getAttribute() { return attr_; }
+
+ inline InferenceEngine::CNNNetReader::Ptr getNetReader() const
+ {
+ return net_reader_;
+ }
+
+ protected:
+ /**
+ * New interface to check and update Layer Property
+ * @brief Set the layer property (layer layout, layer precision, etc.).
+ * @param[in] network_reader The reader of the network to be set.
+ */
+ virtual bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0;
+
+ InferenceEngine::CNNNetReader::Ptr net_reader_;
+ void setFrameSize(const int &w, const int &h)
+ {
+ frame_size_.width = w;
+ frame_size_.height = h;
+ }
+ cv::Size getFrameSize()
+ {
+ return frame_size_;
+ }
+
+ private:
+ int max_batch_size_;
+ std::string model_loc_;
+ cv::Size frame_size_;
+ };
+
+ class ObjectDetectionModel : public BaseModel
 {
- frame_size_.width = w;
- frame_size_.height = h;
- }
- cv::Size getFrameSize()
- {return frame_size_;}
-
-protected:
- int max_proposal_count_;
- int object_size_;
-
-private:
- std::vector labels_;
- int input_num_;
- int output_num_;
- int max_batch_size_;
- std::string model_loc_;
- cv::Size frame_size_;
-};
-
-class ObjectDetectionModel : public BaseModel
-{
- public:
- ObjectDetectionModel(const std::string& a, int b, int c, int d);
- virtual inline const int getMaxProposalCount() { return max_proposal_count_; }
- virtual inline const int getObjectSize() { return object_size_; }
-
- protected:
- int max_proposal_count_;
- int object_size_;
- //virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0;
-};
+ public:
+ 
ObjectDetectionModel(const std::string &model_loc, int batch_size = 1); + virtual bool fetchResults( + const std::shared_ptr &engine, + std::vector &result, + const float &confidence_thresh = 0.3, + const bool &enable_roi_constraint = false) = 0; + virtual bool matToBlob( + const cv::Mat &orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr &engine) = 0; + }; + +} // namespace Models } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h index 65a1e490..1f749e83 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h @@ -33,29 +33,19 @@ namespace Models */ class EmotionDetectionModel : public BaseModel { - public: - EmotionDetectionModel(const std::string&, int, int, int); - inline const std::string getInputName() - { - return input_; - } - inline const std::string getOutputName() - { - return output_; - } +public: + EmotionDetectionModel(const std::string & model_loc, int batch_size = 1); + /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; +private: + bool verifyOutputLayer(const InferenceEngine::DataPtr & ptr); - private: - std::string input_; - std::string output_; }; } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h index 778c1db3..59086c20 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h @@ -31,41 +31,16 @@ namespace Models * @class FaceDetectionModel * @brief This class generates the face detection model. */ -class FaceDetectionModel : public BaseModel +class FaceDetectionModel : public ObjectDetectionModel { - public: - FaceDetectionModel(const std::string&, int, int, int); - inline const int getMaxProposalCount() - { - return max_proposal_count_; - } - inline const int getObjectSize() - { - return object_size_; - } - inline const std::string getInputName() - { - return input_; - } - inline const std::string getOutputName() - { - return output_; - } +public: + FaceDetectionModel(const std::string & model_loc, int batch_size = 1); + //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; - - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - - private: - int max_proposal_count_; - int object_size_; - std::string input_; - std::string output_; + const std::string getModelCategory() const override; }; } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h index a2efc962..31f03010 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h @@ -29,18 +29,16 @@ namespace Models class FaceReidentificationModel : public BaseModel { public: - FaceReidentificationModel(const std::string &, int, int, int); + FaceReidentificationModel(const std::string & model_loc, int batch_size = 1); inline const std::string getInputName() {return input_;} inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h index db3eb498..fab5b6b0 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h @@ -33,12 +33,9 @@ namespace Models */ class HeadPoseDetectionModel : public BaseModel { - public: - HeadPoseDetectionModel(const std::string&, int, int, int); - inline const std::string getInputName() const - { - return input_; - } +public: + HeadPoseDetectionModel(const std::string & model_loc, int batch_size = 1); + /** * @brief Get the output angle roll. * @return Roll value. @@ -67,14 +64,10 @@ class HeadPoseDetectionModel : public BaseModel * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; - - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + const std::string getModelCategory() const override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; private: - std::string input_; std::string output_angle_r_ = "angle_r_fc"; std::string output_angle_p_ = "angle_p_fc"; std::string output_angle_y_ = "angle_y_fc"; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h index 4d9175c0..49149c67 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h @@ -29,19 +29,16 @@ namespace Models class LandmarksDetectionModel : public BaseModel { public: - - LandmarksDetectionModel(const std::string &, int, int, int); + LandmarksDetectionModel(const std::string & model_loc, int batch_size = 1); inline const std::string getInputName() {return input_;} inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h index 371a1294..17f90dd3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h @@ -29,20 +29,19 @@ namespace Models class LicensePlateDetectionModel : public BaseModel { public: - LicensePlateDetectionModel(const std::string &, int, int, int); + LicensePlateDetectionModel(const std::string & model_loc, int batch_size = 1); inline const std::string getInputName() {return input_;} inline const std::string getSeqInputName() {return seq_input_;} inline const std::string getOutputName() {return output_;} - inline const int getMaxSequenceSize() {return max_sequence_size_;} + inline int getMaxSequenceSize() const {return max_sequence_size_;} /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; // up to 88 items per license plate, ended with "-1" const int max_sequence_size_ = 88; std::string input_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h index 76e257df..bf023cea 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h @@ -26,26 +26,36 @@ namespace Models { * @class ObjectDetectionModel * @brief This class generates the face detection model. */ -class ObjectDetectionSSDModel : public ObjectDetectionModel { - public: - ObjectDetectionSSDModel(const std::string&, int, int, int); - //inline const int getMaxProposalCount() { return max_proposal_count_; } - //inline const int getObjectSize() { return object_size_; } - inline const std::string getInputName() { return input_; } - inline const std::string getOutputName() { return output_; } +class ObjectDetectionSSDModel : public ObjectDetectionModel +{ + using Result = dynamic_vino_lib::ObjectDetectionResult; + +public: + ObjectDetectionSSDModel(const std::string & model_loc, int batch_size = 1); + + bool fetchResults( + const std::shared_ptr & engine, + std::vector & results, + const float & confidence_thresh = 0.3, + const bool & enable_roi_constraint = false) override; + + bool enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) override; + + bool matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) 
override; + /** * @brief Get the name of this detection model. * @return Name of the model. */ - const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + const std::string getModelCategory() const override; + + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - //int max_proposal_count_; - //int object_size_; - std::string input_; - std::string output_; }; } // namespace Models #endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h index c383db53..0a9bd45d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h @@ -26,28 +26,38 @@ namespace Models { * @class ObjectDetectionModel * @brief This class generates the face detection model. 
*/ -class ObjectDetectionYolov2vocModel : public ObjectDetectionModel { - public: - ObjectDetectionYolov2vocModel(const std::string&, int, int, int); - inline const std::string getInputName() { return input_; } - inline const std::string getOutputName() { return output_; } - InferenceEngine::CNNLayerPtr getLayer() { return output_layer_; } - InferenceEngine::InputInfo::Ptr getInputInfo() { return input_info_; } +class ObjectDetectionYolov2Model : public ObjectDetectionModel +{ + using Result = dynamic_vino_lib::ObjectDetectionResult; + +public: + ObjectDetectionYolov2Model(const std::string & model_loc, int batch_size = 1); + + bool fetchResults( + const std::shared_ptr & engine, + std::vector & results, + const float & confidence_thresh = 0.3, + const bool & enable_roi_constraint = false) override; + + bool enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) override; + + bool matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) override; + /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; - - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - - std::string input_; - std::string output_; + const std::string getModelCategory() const override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - InferenceEngine::CNNLayerPtr output_layer_; - InferenceEngine::InputInfo::Ptr input_info_; +protected: + int getEntryIndex(int side, int lcoords, int lclasses, int location, int entry); + InferenceEngine::InputInfo::Ptr input_info_ = nullptr; }; } // namespace Models #endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_YOLOV2VOC_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h index 5af986d5..6859ecbe 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h @@ -28,7 +28,7 @@ namespace Models class ObjectSegmentationModel : public BaseModel { public: - ObjectSegmentationModel(const std::string &, int, int, int); + ObjectSegmentationModel(const std::string & model_loc, int batch_size = 1); inline int getMaxProposalCount() const { return max_proposal_count_; @@ -37,18 +37,6 @@ class ObjectSegmentationModel : public BaseModel { return object_size_; } - inline const std::string getInputName() - { - return input_; - } - inline const std::string getDetectionOutputName() - { - return detection_output_; - } - inline const std::string getMaskOutputName() - { - return mask_output_; - } bool enqueue(const std::shared_ptr & ,const cv::Mat &, const cv::Rect & ) override; @@ -61,26 +49,14 @@ class ObjectSegmentationModel : public BaseModel * @brief Get the name of this segmentation model. * @return Name of the model. 
*/ - const std::string getModelName() const override; - -protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr) override; + const std::string getModelCategory() const override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; private: int max_proposal_count_; int object_size_; - std::string input_; - std::string mask_output_ = "masks"; - std::string detection_output_ = "detection_output"; - size_t input_channels_; - size_t input_height_; - size_t input_width_; InferenceEngine::InputsDataMap input_info_; - InferenceEngine::OutputsDataMap output_info_; - }; } // namespace Models #endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h index 2105ef81..11d8c384 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h @@ -29,18 +29,19 @@ namespace Models class PersonAttribsDetectionModel : public BaseModel { public: - PersonAttribsDetectionModel(const std::string &, int, int, int); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + PersonAttribsDetectionModel(const std::string & model_loc, int batch_size = 1); + //inline const std::string getInputName() {return input_;} + //inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h index a9cff01f..450c0caa 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h @@ -29,18 +29,19 @@ namespace Models class PersonReidentificationModel : public BaseModel { public: - PersonReidentificationModel(const std::string &, int, int, int); + PersonReidentificationModel(const std::string & model_loc, int batch_size = 1); inline const std::string getInputName() {return input_;} inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h index 51116e4a..884f0a66 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h @@ -29,7 +29,7 @@ namespace Models class VehicleAttribsDetectionModel : public BaseModel { public: - VehicleAttribsDetectionModel(const std::string &, int, int, int); + VehicleAttribsDetectionModel(const std::string & model_loc, int batch_size = 1); inline const std::string getInputName() {return input_;} inline const std::string getColorOutputName() {return color_output_;} inline const std::string getTypeOutputName() {return type_output_;} @@ -37,11 +37,12 @@ class VehicleAttribsDetectionModel : public BaseModel * @brief Get the name of this detection model. * @return Name of the model. 
*/ - const std::string getModelName() const override; + const std::string getModelCategory() const override; protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string color_output_; std::string type_output_; diff --git a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp index 2a1122ad..04638728 100644 --- a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp +++ b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp @@ -32,67 +32,72 @@ Models::AgeGenderDetectionModel::AgeGenderDetectionModel( : BaseModel(model_loc, input_num, output_num, max_batch_size) { } - -void Models::AgeGenderDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::AgeGenderDetectionModel::updateLayerProperty( + InferenceEngine::CNNNetReader::Ptr net_reader) { + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + slog::warn << "This model seems not Age-Gender-like, which should have only one input," + <<" but we got " << std::to_string(input_info_map.size()) << "inputs" + << slog::endl; + return false; + } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::FP32); input_info->setLayout(InferenceEngine::Layout::NCHW); + addInputInfo("input", input_info_map.begin()->first); // 
set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 2) { + // throw std::logic_error("Age/Gender Recognition network should have two output layers"); + slog::warn << "This model seems not Age-gender like, which should have and only have 2" + " outputs, but we got " << std::to_string(output_info_map.size()) << "outputs" + << slog::endl; + return false; + } auto it = output_info_map.begin(); InferenceEngine::DataPtr age_output_ptr = (it++)->second; InferenceEngine::DataPtr gender_output_ptr = (it++)->second; - age_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - age_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - gender_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - gender_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_age_ = age_output_ptr->name; - output_gender_ = gender_output_ptr->name; -} -void Models::AgeGenderDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) -{ - slog::info << "Checking Age Gender Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); - auto it = output_info.begin(); - InferenceEngine::DataPtr age_output_ptr = (it++)->second; - InferenceEngine::DataPtr gender_output_ptr = (it++)->second; - // output layer of age should be Convolution type - if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") - { + //Check More Configuration: + if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") { std::swap(age_output_ptr, gender_output_ptr); } - if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") - { - throw std::logic_error("In Age Gender network, age layer (" + - 
age_output_ptr->getCreatorLayer().lock()->name + - ") should be a Convolution, but was: " + - age_output_ptr->getCreatorLayer().lock()->type); + if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") { + slog::err << "In Age Gender network, age layer (" + << age_output_ptr->getCreatorLayer().lock()->name + << ") should be a Convolution, but was: " + << age_output_ptr->getCreatorLayer().lock()->type << slog::endl; + return false; } - if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") - { - throw std::logic_error("In Age Gender network, gender layer (" + - gender_output_ptr->getCreatorLayer().lock()->name + - ") should be a SoftMax, but was: " + - gender_output_ptr->getCreatorLayer().lock()->type); + if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") { + slog::err <<"In Age Gender network, gender layer (" + << gender_output_ptr->getCreatorLayer().lock()->name + << ") should be a SoftMax, but was: " + << gender_output_ptr->getCreatorLayer().lock()->type + << slog::endl; + return false; } - slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name - << slog::endl; - slog::info << "Gender layer: " - << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; + slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name << slog::endl; + slog::info << "Gender layer: " << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; + + age_output_ptr->setPrecision(InferenceEngine::Precision::FP32); + age_output_ptr->setLayout(InferenceEngine::Layout::NCHW); + gender_output_ptr->setPrecision(InferenceEngine::Precision::FP32); + gender_output_ptr->setLayout(InferenceEngine::Layout::NCHW); + + //output_age_ = age_output_ptr->name; + addOutputInfo("age", age_output_ptr->getName()); + //output_gender_ = gender_output_ptr->name; + addOutputInfo("gender", gender_output_ptr->getName()); + + printAttribute(); + return true; } -const std::string 
Models::AgeGenderDetectionModel::getModelName() const +const std::string Models::AgeGenderDetectionModel::getModelCategory() const { return "Age Gender Detection"; } diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index e63ec037..de805a3b 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -28,74 +28,66 @@ #include "dynamic_vino_lib/slog.h" // Validated Base Network -Models::BaseModel::BaseModel(const std::string& model_loc, int input_num, - int output_num, int max_batch_size) - : input_num_(input_num), - output_num_(output_num), - model_loc_(model_loc), - max_batch_size_(max_batch_size) +Models::BaseModel::BaseModel( + const std::string & model_loc, int max_batch_size) +: model_loc_(model_loc), + max_batch_size_(max_batch_size), + ModelAttribute(model_loc) { if (model_loc.empty()) { throw std::logic_error("model file name is empty!"); } - + net_reader_ = std::make_shared(); } void Models::BaseModel::modelInit() { - slog::info << "model location is " << model_loc_ << slog::endl; - slog::info << "Loading network file" << slog::endl; + slog::info << "Loading network files" << slog::endl; // Read network model net_reader_->ReadNetwork(model_loc_); - - // Set batch size to given max_batch_size_ - slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; - net_reader_->getNetwork().setBatchSize(max_batch_size_); - // Extract model name and load it's weights // remove extension size_t last_index = model_loc_.find_last_of("."); std::string raw_name = model_loc_.substr(0, last_index); std::string bin_file_name = raw_name + ".bin"; net_reader_->ReadWeights(bin_file_name); - // Read labels (if any) std::string label_file_name = raw_name + ".labels"; - std::ifstream input_file(label_file_name); - std::copy(std::istream_iterator(input_file), - std::istream_iterator(), std::back_inserter(labels_)); - checkNetworkSize(input_num_, output_num_, net_reader_); + 
loadLabelsFromFile(label_file_name); + // Set batch size to given max_batch_size_ + slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; + net_reader_->getNetwork().setBatchSize(max_batch_size_); + /** DEPRECATED! checkLayerProperty(net_reader_); - setLayerProperty(net_reader_); + setLayerProperty(net_reader_); */ + updateLayerProperty(net_reader_); } -void Models::BaseModel::checkNetworkSize( - int input_size, int output_size, +#if 0 +bool Models::BaseModel::updateLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { - // TODO(Houk): Repeat, better removed! - // check input size - slog::info << "Checking input size" << slog::endl; - InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); - if (input_info.size() != (size_t)input_size) { - throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" - "t"); +#if 0 + if (!updateLayerProperty(net_reader)){ + slog::warn << "The model(name: " << getModelName() << ") failed to update Layer Property!" + << slog::endl; + return false; } - // check output size - slog::info << "Checking output size" << slog::endl; - InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != (size_t)output_size) { - throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" - "t"); +#endif + if(!isVerified()){ + slog::warn << "The model(name: " << getModelName() << ") does NOT pass Attribute Check!" 
+ << slog::endl; + return false; } - // InferenceEngine::DataPtr& output_data_ptr = output_info.begin()->second; -} -Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} + return true; +} +#endif +Models::ObjectDetectionModel::ObjectDetectionModel( + const std::string & model_loc, + int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} diff --git a/dynamic_vino_lib/src/models/emotion_detection_model.cpp b/dynamic_vino_lib/src/models/emotion_detection_model.cpp index d1e6150d..47dbbd53 100644 --- a/dynamic_vino_lib/src/models/emotion_detection_model.cpp +++ b/dynamic_vino_lib/src/models/emotion_detection_model.cpp @@ -31,47 +31,57 @@ Models::EmotionDetectionModel::EmotionDetectionModel( { } -void Models::EmotionDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::EmotionDetectionModel::updateLayerProperty +(InferenceEngine::CNNNetReader::Ptr net_reader) { + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + slog::warn << "This model seems not Age-Gender-like, which should have only one input," + <<" but we got " << std::to_string(input_info_map.size()) << "inputs" + << slog::endl; + return false; + } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::FP32); input_info->setLayout(InferenceEngine::Layout::NCHW); + addInputInfo("input", input_info_map.begin()->first); + // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr& 
output_data_ptr = output_info_map.begin()->second; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) { + // throw std::logic_error("Age/Gender Recognition network should have two output layers"); + slog::warn << "This model should have and only have 1 output, but we got " + << std::to_string(output_info_map.size()) << "outputs" << slog::endl; + return false; + } + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + slog::info << "Emotions layer: " << output_data_ptr->getCreatorLayer().lock()->name << + slog::endl; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; + addOutputInfo("output", output_info_map.begin()->first); + + printAttribute(); + return verifyOutputLayer(output_data_ptr); } -void Models::EmotionDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) +bool Models::EmotionDetectionModel::verifyOutputLayer(const InferenceEngine::DataPtr & ptr) { - slog::info << "Checking Emotions Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr emotions_output_ptr = output_info.begin()->second; - // output layer should be SoftMax type - if (emotions_output_ptr->getCreatorLayer().lock()->type != "SoftMax") - { - throw std::logic_error("In Emotions Recognition network, Emotion layer (" + - emotions_output_ptr->getCreatorLayer().lock()->name + - ") should be a SoftMax, but was: " + - emotions_output_ptr->getCreatorLayer().lock()->type); + if (ptr->getCreatorLayer().lock()->type != "SoftMax") { + slog::err <<"In Emotion network, gender layer (" + << ptr->getCreatorLayer().lock()->name + << ") should be a SoftMax, but was: " + << 
ptr->getCreatorLayer().lock()->type + << slog::endl; + return false; } - slog::info << "Emotions layer: " - << emotions_output_ptr->getCreatorLayer().lock()->name - << slog::endl; + + return true; } -const std::string Models::EmotionDetectionModel::getModelName() const +const std::string Models::EmotionDetectionModel::getModelCategory() const { return "Emotions Detection"; } diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/dynamic_vino_lib/src/models/face_detection_model.cpp index cc219312..4b6d3f5e 100644 --- a/dynamic_vino_lib/src/models/face_detection_model.cpp +++ b/dynamic_vino_lib/src/models/face_detection_model.cpp @@ -26,92 +26,76 @@ #include "dynamic_vino_lib/slog.h" // Validated Face Detection Network -Models::FaceDetectionModel::FaceDetectionModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size) +Models::FaceDetectionModel::FaceDetectionModel( + const std::string & model_loc, int max_batch_size) +: ObjectDetectionModel(model_loc, max_batch_size) { } -void Models::FaceDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) -{ - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; -} - +#if 0 void 
Models::FaceDetectionModel::checkLayerProperty( const InferenceEngine::CNNNetReader::Ptr& net_reader) { + slog::info << "Checking Face Detection inputs" << slog::endl; + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + slog::err << "Face Detection network should have only one input, but we got " + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + throw std::logic_error("Face Detection network should have only one input"); + } + slog::info << "Checking Face Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + slog::info << "Checking Face Detection outputs ..." << slog::endl; + if (output_info_map.size() != 1) { + throw std::logic_error("This sample accepts networks having only one output"); + } + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_ = output_info_map.begin()->first; + slog::info << "Checking Object Detection output ... 
Name=" << output_ << slog::endl; + const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_.c_str()); - // output layer should be DetectionOutput type - if (output_layer->type != "DetectionOutput") - { - throw std::logic_error( - "Face Detection network output layer(" + output_layer->name + - ") should be DetectionOutput, but was " + output_layer->type); - } + net_reader->getNetwork().getLayerByName(output_.c_str()); // output layer should have attribute called num_classes - if (output_layer->params.find("num_classes") == output_layer->params.end()) - { - throw std::logic_error("Face Detection network output layer (" + output_ + - ") should have num_classes integer attribute"); + slog::info << "Checking Object Detection num_classes" << slog::endl; + if (output_layer->params.find("num_classes") == output_layer->params.end()) { + throw std::logic_error("Object Detection network output layer (" + output_ + + ") should have num_classes integer attribute"); } // class number should be equal to size of label vector // if network has default "background" class, fake is used - const size_t num_classes = output_layer->GetParamAsInt("num_classes"); - if ((size_t)getLabels().size() != num_classes) - { - if ((size_t)getLabels().size() == (num_classes - 1)) - { + const int num_classes = output_layer->GetParamAsInt("num_classes"); + + slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; + if (getLabels().size() != num_classes) { + if (getLabels().size() == (num_classes - 1)) { getLabels().insert(getLabels().begin(), "fake"); - } - else - { + } else { getLabels().clear(); } } // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = - output_data_ptr->getTensorDesc().getDims(); + const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); max_proposal_count_ = static_cast(output_dims[2]); slog::info << "max proposal count is: " << max_proposal_count_ << slog::endl; - object_size_ = static_cast(output_dims[3]); - if (object_size_ != 7) - { - throw std::logic_error( - "Face Detection network output layer should have 7 as a last " - "dimension"); + + auto object_size = static_cast(output_dims[3]); + if (object_size != 7) { + throw std::logic_error("Object Detection network output layer should have 7 as a last " + "dimension"); } - if (output_dims.size() != 4) - { - throw std::logic_error( - "Face Detection network output dimensions not compatible shoulld be 4, " - "but was " + - std::to_string(output_dims.size())); + setObjectSize(object_size); + + if (output_dims.size() != 4) { + throw std::logic_error("Object Detection network output dimensions not compatible shoulld be " + "4, " + "but was " + + std::to_string(output_dims.size())); } } +#endif -const std::string Models::FaceDetectionModel::getModelName() const +const std::string Models::FaceDetectionModel::getModelCategory() const { return "Face Detection"; } diff --git a/dynamic_vino_lib/src/models/face_reidentification_model.cpp b/dynamic_vino_lib/src/models/face_reidentification_model.cpp index 7655737a..d131e517 100644 --- a/dynamic_vino_lib/src/models/face_reidentification_model.cpp +++ b/dynamic_vino_lib/src/models/face_reidentification_model.cpp @@ -21,8 +21,8 @@ #include "dynamic_vino_lib/slog.h" // Validated Face Reidentification Network 
Models::FaceReidentificationModel::FaceReidentificationModel( - const std::string & model_loc, int input_num, int output_num, int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} void Models::FaceReidentificationModel::setLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) @@ -47,7 +47,7 @@ void Models::FaceReidentificationModel::setLayerProperty( void Models::FaceReidentificationModel::checkLayerProperty( const InferenceEngine::CNNNetReader::Ptr & net_reader) {} -const std::string Models::FaceReidentificationModel::getModelName() const +const std::string Models::FaceReidentificationModel::getModelCategory() const { return "Face Reidentification"; } diff --git a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp index 2012deb2..2ae13e10 100644 --- a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp +++ b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp @@ -28,76 +28,48 @@ // Validated Head Pose Network Models::HeadPoseDetectionModel::HeadPoseDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size) + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) { } -void Models::HeadPoseDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) -{ - slog::info << "Checking Head Pose network outputs 1" << slog::endl; - slog::info << "Checked Head Pose network outputs" << slog::endl; - - InferenceEngine::OutputsDataMap outputInfo( - net_reader->getNetwork().getOutputsInfo()); - std::map layerNames = {{output_angle_r_, false}, - {output_angle_p_, false}, - {output_angle_y_, false}}; - - for (auto&& output : outputInfo) - { - InferenceEngine::CNNLayerPtr layer = - 
output.second->getCreatorLayer().lock(); - if (layerNames.find(layer->name) == layerNames.end()) - { - throw std::logic_error("Head Pose network output layer unknown: " + - layer->name + ", should be " + output_angle_r_ + - " or " + output_angle_p_ + " or " + - output_angle_y_); - } - if (layer->type != "FullyConnected") - { - throw std::logic_error("Head Pose network output layer (" + layer->name + - ") has invalid type: " + layer->type + - ", should be FullyConnected"); - } - auto fc = dynamic_cast(layer.get()); - if (fc->_out_num != 1) - { - throw std::logic_error("Head Pose network output layer (" + layer->name + - ") has invalid out-size=" + - std::to_string(fc->_out_num) + ", should be 1"); - } - layerNames[layer->name] = true; - } -} - -void Models::HeadPoseDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::HeadPoseDetectionModel::updateLayerProperty +(InferenceEngine::CNNNetReader::Ptr net_reader) { + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + slog::warn << "This model should have only one input, but we got" + << std::to_string(input_info_map.size()) << "inputs" + << slog::endl; + return false; + } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - input_ = input_info_map.begin()->first; + addInputInfo("input", input_info_map.begin()->first); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - - for (auto& output : output_info_map) - { + InferenceEngine::OutputsDataMap 
output_info_map(net_reader->getNetwork().getOutputsInfo()); + for (auto & output : output_info_map) { output.second->setPrecision(InferenceEngine::Precision::FP32); output.second->setLayout(InferenceEngine::Layout::NC); } + + for (const std::string& outName : {output_angle_r_, output_angle_p_, output_angle_y_}) { + if (output_info_map.find(outName) == output_info_map.end()) { + throw std::logic_error("There is no " + outName + " output in Head Pose Estimation network"); + } else { + addOutputInfo(outName, outName); + } + } + + printAttribute(); + return true; } -const std::string Models::HeadPoseDetectionModel::getModelName() const +const std::string Models::HeadPoseDetectionModel::getModelCategory() const { return "Head Pose Network"; } diff --git a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp index 75a7d7e3..94a3b594 100644 --- a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp +++ b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp @@ -20,8 +20,8 @@ #include "dynamic_vino_lib/models/landmarks_detection_model.h" // Validated Landmarks Detection Network Models::LandmarksDetectionModel::LandmarksDetectionModel( - const std::string & model_loc, int input_num, int output_num, int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} void Models::LandmarksDetectionModel::setLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) @@ -58,7 +58,7 @@ void Models::LandmarksDetectionModel::checkLayerProperty( } } -const std::string Models::LandmarksDetectionModel::getModelName() const +const std::string Models::LandmarksDetectionModel::getModelCategory() const { return "Landmarks Detection"; } diff --git a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp index 15e07898..eb0afee9 
100644 --- a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp +++ b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp @@ -21,30 +21,13 @@ #include "dynamic_vino_lib/slog.h" // Validated Vehicle Attributes Detection Network Models::LicensePlateDetectionModel::LicensePlateDetectionModel( - const std::string & model_loc, int input_num, int output_num, int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} -void Models::LicensePlateDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) -{ - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - seq_input_ = (++input_info_map.begin())->first; - output_ = output_info_map.begin()->first; -} - -void Models::LicensePlateDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) +bool Models::LicensePlateDetectionModel::updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr net_reader) { + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; InferenceEngine::InputsDataMap input_info_map( net_reader->getNetwork().getInputsInfo()); if (input_info_map.size() != 2) { @@ -59,9 +42,20 @@ void Models::LicensePlateDetectionModel::checkLayerProperty( if (output_info_map.size() != 1) { throw std::logic_error("Vehicle Attribs Network expects networks having one output"); } + + InferenceEngine::InputInfo::Ptr input_info = 
input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + + // set input and output layer name + input_ = input_info_map.begin()->first; + seq_input_ = (++input_info_map.begin())->first; + output_ = output_info_map.begin()->first; + + return true; } -const std::string Models::LicensePlateDetectionModel::getModelName() const +const std::string Models::LicensePlateDetectionModel::getModelCategory() const { return "Vehicle Attributes Detection"; } diff --git a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp index e0df0cdc..0c281c3b 100644 --- a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp @@ -21,63 +21,175 @@ #include "dynamic_vino_lib/models/object_detection_ssd_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size){} +Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel( + const std::string & model_loc, int max_batch_size) +: ObjectDetectionModel(model_loc, max_batch_size) +{ + slog::debug << "TESTING: in ObjectDetectionSSDModel" << slog::endl; + //addCandidatedAttr(std::make_shared()); +} + +const std::string Models::ObjectDetectionSSDModel::getModelCategory() const +{ + return "Object Detection SSD"; +} -void Models::ObjectDetectionSSDModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { +bool Models::ObjectDetectionSSDModel::enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + if (!this->matToBlob(frame, input_frame_loc, 1, 0, engine)) { + return false; + } + + setFrameSize(frame.cols, 
frame.rows); + return true; +} - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); +bool Models::ObjectDetectionSSDModel::matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + std::string input_name = getInputName(); + slog::debug << "add input image to blob: " << input_name << slog::endl; + InferenceEngine::Blob::Ptr input_blob = + engine->getRequest()->GetBlob(input_name); + + InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); + const int width = blob_size[3]; + const int height = blob_size[2]; + const int channels = blob_size[1]; + u_int8_t * blob_data = input_blob->buffer().as(); + + cv::Mat resized_image(orig_image); + if (width != orig_image.size().width || height != orig_image.size().height) { + cv::resize(orig_image, resized_image, cv::Size(width, height)); + } + int batchOffset = batch_index * width * height * channels; + + for (int c = 0; c < channels; c++) { + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + blob_data[batchOffset + c * width * height + h * width + w] = + resized_image.at(h, w)[c] * scale_factor; + } + } + } + + slog::debug << "Convert input image to blob: DONE!" << slog::endl; + return true; +} + +bool Models::ObjectDetectionSSDModel::fetchResults( + const std::shared_ptr & engine, + std::vector & results, + const float & confidence_thresh, + const bool & enable_roi_constraint) +{ + slog::debug << "fetching Infer Resulsts from the given SSD model" << slog::endl; + if (engine == nullptr) { + slog::err << "Trying to fetch results from Engines." << slog::endl; + return false; + } + + slog::debug << "Fetching Detection Results ..." 
<< slog::endl; + InferenceEngine::InferRequest::Ptr request = engine->getRequest(); + std::string output = getOutputName(); + const float * detections = request->GetBlob(output)->buffer().as(); + + slog::debug << "Analyzing Detection results..." << slog::endl; + auto max_proposal_count = getMaxProposalCount(); + auto object_size = getObjectSize(); + slog::debug << "MaxProprosalCount=" << max_proposal_count + << ", ObjectSize=" << object_size << slog::endl; + for (int i = 0; i < max_proposal_count; i++) { + float image_id = detections[i * object_size + 0]; + if (image_id < 0) { + //slog::info << "Found objects: " << i << "|" << results.size() << slog::endl; + break; + } + + cv::Rect r; + auto label_num = static_cast(detections[i * object_size + 1]); + std::vector & labels = getLabels(); + auto frame_size = getFrameSize(); + r.x = static_cast(detections[i * object_size + 3] * frame_size.width); + r.y = static_cast(detections[i * object_size + 4] * frame_size.height); + r.width = static_cast(detections[i * object_size + 5] * frame_size.width - r.x); + r.height = static_cast(detections[i * object_size + 6] * frame_size.height - r.y); + + if (enable_roi_constraint) {r &= cv::Rect(0, 0, frame_size.width, frame_size.height);} + + dynamic_vino_lib::ObjectDetectionResult result(r); + std::string label = label_num < labels.size() ? labels[label_num] : + std::string("label #") + std::to_string(label_num); + result.setLabel(label); + float confidence = detections[i * object_size + 2]; + if (confidence <= confidence_thresh /* || r.x == 0 */) { // why r.x needs to be checked? 
+ continue; + } + result.setConfidence(confidence); + + results.emplace_back(result); + } + + return true; +} + +bool Models::ObjectDetectionSSDModel::updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr net_reader) +{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); if (input_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one input"); + slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got " + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + return false; } + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one output"); - } - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; -} + addInputInfo("input", input_info_map.begin()->first); -void Models::ObjectDetectionSSDModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::SizeVector input_dims = input_info->getTensorDesc().getDims(); + setInputHeight(input_dims[2]); + setInputWidth(input_dims[3]); - slog::info << "Checking Object Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - slog::info << "Checking 
Object Detection outputs ..." << slog::endl; + slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); if (output_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one output"); + slog::warn << "This model seems not SSDNet-like! We got " + << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." + << slog::endl; + return false; } - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; - output_ = output_info_map.begin()->first; - slog::info << "Checking Object Detection output ... Name=" << output_ << slog::endl; + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + addOutputInfo("output", output_info_map.begin()->first); + slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first + << slog::endl; + output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); +///TODO: double check this part: BEGIN const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_.c_str()); + net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; if (output_layer->params.find("num_classes") == output_layer->params.end()) { - throw std::logic_error("Object Detection network output layer (" + output_ + - ") should have num_classes integer attribute"); + slog::warn << "This model's output layer (" << output_info_map.begin()->first + << ") should have num_classes integer attribute" << slog::endl; + return false; } // class number should be equal to size of label vector // if network has default "background" class, fake is used - const unsigned int num_classes = output_layer->GetParamAsInt("num_classes"); - + const int num_classes = 
output_layer->GetParamAsInt("num_classes"); +#if 0 slog::info << "Checking Object Detection output ... num_classes=" << num_classes << slog::endl; if (getLabels().size() != num_classes) { if (getLabels().size() == (num_classes - 1)) { @@ -86,25 +198,30 @@ void Models::ObjectDetectionSSDModel::checkLayerProperty( getLabels().clear(); } } +#endif + ///TODO: double check this part: END + // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = - output_data_ptr->getTensorDesc().getDims(); - max_proposal_count_ = static_cast(output_dims[2]); - slog::info << "max proposal count is: " << max_proposal_count_ << slog::endl; - object_size_ = static_cast(output_dims[3]); - if (object_size_ != 7) { - throw std::logic_error( - "Object Detection network output layer should have 7 as a last " - "dimension"); + const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); + setMaxProposalCount(static_cast(output_dims[2])); + slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; + + auto object_size = static_cast(output_dims[3]); + if (object_size != 7) { + slog::warn << "This model is NOT SSDNet-like, whose output data for each detected object" + << "should have 7 dimensions, but was " << std::to_string(object_size) + << slog::endl; + return false; } + setObjectSize(object_size); + if (output_dims.size() != 4) { - throw std::logic_error( - "Object Detection network output dimensions not compatible shoulld be 4, " - "but was " + - std::to_string(output_dims.size())); + slog::warn << "This model is not SSDNet-like, output dimensions shoulld be 4, but was" + << std::to_string(output_dims.size()) << slog::endl; + return false; } -} -const std::string Models::ObjectDetectionSSDModel::getModelName() const { - return "Object Detection"; + printAttribute(); + slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; + return true; } diff --git a/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp index ce8c682f..d24bb946 100644 --- a/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp @@ -21,62 +21,57 @@ #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectDetectionYolov2vocModel::ObjectDetectionYolov2vocModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size){ +Models::ObjectDetectionYolov2Model::ObjectDetectionYolov2Model( + const std::string & model_loc, int max_batch_size) +: ObjectDetectionModel(model_loc, max_batch_size) +{ } -void Models::ObjectDetectionYolov2vocModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); +bool Models::ObjectDetectionYolov2Model::updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr net_reader) +{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); if (input_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one input"); + slog::warn << "This model seems not Yolo-like, which has only one input, but we got " + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + return false; } - input_info_ = input_info_map.begin()->second; - input_info_->setPrecision(InferenceEngine::Precision::FP32); - input_info_->setLayout(InferenceEngine::Layout::NCHW); + + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + 
input_info->setPrecision(InferenceEngine::Precision::FP32); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + input_info_ = input_info; + addInputInfo("input", input_info_map.begin()->first); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); if (output_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one output"); + slog::warn << "This model seems not Yolo-like! We got " + << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." + << slog::endl; + return false; } - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - //output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); + addOutputInfo("output", output_info_map.begin()->first); + slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first + << slog::endl; - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; -} - -void Models::ObjectDetectionYolov2vocModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { - slog::info << "Checking Object Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - slog::info << "Checking Object Detection outputs ..." << slog::endl; - if (output_info_map.size() != 1) { - throw std::logic_error("This sample accepts networks having only one output"); - } - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; - output_ = output_info_map.begin()->first; - slog::info << "Checking Object Detection output ... 
Name=" << output_ << slog::endl; - - output_layer_ = net_reader->getNetwork().getLayerByName(output_.c_str()); + const InferenceEngine::CNNLayerPtr output_layer = + net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; - if (output_layer_->params.find("classes") == output_layer_->params.end()) { - throw std::logic_error("Object Detection network output layer (" + output_ + - ") should have num_classes integer attribute"); + if (output_layer == nullptr || + output_layer->params.find("classes") == output_layer->params.end()) { + slog::warn << "This model's output layer (" << output_info_map.begin()->first + << ") should have num_classes integer attribute" << slog::endl; + return false; } // class number should be equal to size of label vector // if network has default "background" class, fake is used - const unsigned int num_classes = output_layer_->GetParamAsInt("classes"); - + const int num_classes = output_layer->GetParamAsInt("classes"); slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; if (getLabels().size() != num_classes) { @@ -87,24 +82,264 @@ void Models::ObjectDetectionYolov2vocModel::checkLayerProperty( } } - const InferenceEngine::SizeVector output_dims = - output_data_ptr->getTensorDesc().getDims(); - max_proposal_count_ = static_cast(output_dims[2]); - object_size_ = static_cast(output_dims[3]); + // last dimension of output layer should be 7 + const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); + setMaxProposalCount(static_cast(output_dims[2])); + slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - if (object_size_ != 33) { - throw std::logic_error( - "Object Detection network output layer should have 33 as a last aaa" - "dimension"); + auto object_size = static_cast(output_dims[3]); + if (object_size != 33) { + slog::warn << "This model is NOT Yolo-like, whose output data for each detected object" + << "should have 7 dimensions, but was " << std::to_string(object_size) + << slog::endl; + return false; } + setObjectSize(object_size); + if (output_dims.size() != 2) { - throw std::logic_error( - "Object Detection network output dimensions not compatible shoulld be 2, " - "but was " + - std::to_string(output_dims.size())); + slog::warn << "This model is not Yolo-like, output dimensions shoulld be 2, but was" + << std::to_string(output_dims.size()) << slog::endl; + return false; + } + + printAttribute(); + slog::info << "This model is Yolo-like, Layer Property updated!" 
<< slog::endl; + return true; +} + +const std::string Models::ObjectDetectionYolov2Model::getModelCategory() const +{ + return "Object Detection Yolo v2"; +} + +bool Models::ObjectDetectionYolov2Model::enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + setFrameSize(frame.cols, frame.rows); + + if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) { + return false; + } + return true; +} + +bool Models::ObjectDetectionYolov2Model::matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) +{ + if (engine == nullptr) { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + std::string input_name = getInputName(); + InferenceEngine::Blob::Ptr input_blob = + engine->getRequest()->GetBlob(input_name); + + InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); + const int width = blob_size[3]; + const int height = blob_size[2]; + const int channels = blob_size[1]; + float * blob_data = input_blob->buffer().as(); + + + int dx = 0; + int dy = 0; + int srcw = 0; + int srch = 0; + + int IH = height; + int IW = width; + + cv::Mat image = orig_image.clone(); + cv::cvtColor(image, image, cv::COLOR_BGR2RGB); + + image.convertTo(image, CV_32F, 1.0 / 255.0, 0); + srcw = image.size().width; + srch = image.size().height; + + cv::Mat resizedImg(IH, IW, CV_32FC3); + resizedImg = cv::Scalar(0.5, 0.5, 0.5); + int imw = image.size().width; + int imh = image.size().height; + float resize_ratio = static_cast(IH) / static_cast(std::max(imw, imh)); + cv::resize(image, image, cv::Size(imw * resize_ratio, imh * resize_ratio)); + + int new_w = imw; + int new_h = imh; + if ((static_cast(IW) / imw) < (static_cast(IH) / imh)) { + new_w = IW; + new_h = (imh * IW) / imw; + } else { + new_h = IH; + new_w = (imw * IW) / imh; + } + dx = (IW - new_w) / 2; + dy = (IH - new_h) / 2; + + imh = 
image.size().height; + imw = image.size().width; + + for (int row = 0; row < imh; row++) { + for (int col = 0; col < imw; col++) { + for (int ch = 0; ch < 3; ch++) { + resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; + } + } + } + + for (int c = 0; c < channels; c++) { + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + blob_data[c * width * height + h * width + w] = resizedImg.at(h, w)[c]; + } + } + } + + setFrameSize(srcw, srch); + return true; +} + +bool Models::ObjectDetectionYolov2Model::fetchResults( + const std::shared_ptr & engine, + std::vector & results, + const float & confidence_thresh, + const bool & enable_roi_constraint) +{ + try { + if (engine == nullptr) { + slog::err << "Trying to fetch results from Engines." << slog::endl; + return false; + } + + InferenceEngine::InferRequest::Ptr request = engine->getRequest(); + + std::string output = getOutputName(); + std::vector & labels = getLabels(); + const float * detections = + request->GetBlob(output)->buffer().as::value_type *>(); + InferenceEngine::CNNLayerPtr layer = + getNetReader()->getNetwork().getLayerByName(output.c_str()); + int input_height = input_info_->getTensorDesc().getDims()[2]; + int input_width = input_info_->getTensorDesc().getDims()[3]; + + // --------------------------- Validating output parameters -------------------------------- + if (layer != nullptr && layer->type != "RegionYolo") { + throw std::runtime_error("Invalid output type: " + layer->type + ". 
RegionYolo expected"); + } + // --------------------------- Extracting layer parameters -------------------------------- + const int num = layer->GetParamAsInt("num"); + const int coords = layer->GetParamAsInt("coords"); + const int classes = layer->GetParamAsInt("classes"); + auto blob = request->GetBlob(output); + const int out_blob_h = static_cast(blob->getTensorDesc().getDims()[2]);; + + std::vector anchors = { + 0.572730, 0.677385, + 1.874460, 2.062530, + 3.338430, 5.474340, + 7.882820, 3.527780, + 9.770520, 9.168280 + }; + auto side = out_blob_h; + + auto side_square = side * side; + // --------------------------- Parsing YOLO Region output ------------------------------------- + std::vector raw_results; + for (int i = 0; i < side_square; ++i) { + int row = i / side; + int col = i % side; + + for (int n = 0; n < num; ++n) { + int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); + int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); + + float scale = detections[obj_index]; + + if (scale < confidence_thresh) { + continue; + } + + float x = (col + detections[box_index + 0 * side_square]) / side * input_width; + float y = (row + detections[box_index + 1 * side_square]) / side * input_height; + float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / + side * input_height; + float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * + input_width; + + for (int j = 0; j < classes; ++j) { + int class_index = + getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); + + float prob = scale * detections[class_index]; + if (prob < confidence_thresh) { + continue; + } + + float x_min = x - width / 2; + float y_min = y - height / 2; + + auto frame_size = getFrameSize(); + float x_min_resized = x_min / input_width * frame_size.width; + float y_min_resized = y_min / input_height * frame_size.height; + float width_resized = width / 
input_width * frame_size.width; + float height_resized = height / input_height * frame_size.height; + + cv::Rect r(x_min_resized, y_min_resized, width_resized, height_resized); + Result result(r); + // result.label_ = j; + std::string label = j < + labels.size() ? labels[j] : std::string("label #") + std::to_string(j); + result.setLabel(label); + + result.setConfidence(prob); + raw_results.emplace_back(result); + } + } + } + + std::sort(raw_results.begin(), raw_results.end()); + for (unsigned int i = 0; i < raw_results.size(); ++i) { + if (raw_results[i].getConfidence() == 0) { + continue; + } + for (unsigned int j = i + 1; j < raw_results.size(); ++j) { + auto iou = dynamic_vino_lib::ObjectDetection::calcIoU( + raw_results[i].getLocation(), raw_results[j].getLocation()); + if (iou >= 0.45) { + raw_results[j].setConfidence(0); + } + } + } + + for (auto & raw_result : raw_results) { + if (raw_result.getConfidence() < confidence_thresh) { + continue; + } + + results.push_back(raw_result); + } + + raw_results.clear(); + + return true; + } catch (const std::exception & error) { + slog::err << error.what() << slog::endl; + return false; + } catch (...) { + slog::err << "Unknown/internal exception happened." 
<< slog::endl; + return false; } } -const std::string Models::ObjectDetectionYolov2vocModel::getModelName() const { - return "Object Detection"; +int Models::ObjectDetectionYolov2Model::getEntryIndex( + int side, int lcoords, int lclasses, + int location, int entry) +{ + int n = location / (side * side); + int loc = location % (side * side); + return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; } diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp index 582e7de0..800fdd8b 100644 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ b/dynamic_vino_lib/src/models/object_segmentation_model.cpp @@ -22,153 +22,203 @@ #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network Models::ObjectSegmentationModel::ObjectSegmentationModel( - const std::string & model_loc, - int input_num, int output_num, - int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) + const std::string &model_loc, + int max_batch_size) + : BaseModel(model_loc, max_batch_size) { } -void Models::ObjectSegmentationModel::checkNetworkSize( - int input_size, int output_size, InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::ObjectSegmentationModel::enqueue( + const std::shared_ptr &engine, + const cv::Mat &frame, + const cv::Rect &input_frame_loc) { - slog::info << "Checking input size" << slog::endl; - InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); - if (input_info.size() != (size_t)input_size) { - throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" - "t, but got " + std::to_string(input_info.size())); + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; + return false; } - // check output size - slog::info << "Checking output size" << slog::endl; - InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != (size_t)output_size && output_info.size() != (size_t)(output_size - 1)) { - throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" - "t, but got " + std::to_string(output_info.size())); + for (const auto &inputInfoItem : input_info_) + { + // Fill first input tensor with images. First b channel, then g and r channels + slog::debug<<"first tensor"<getTensorDesc().getDims().size()<getTensorDesc().getDims().size()==4) + { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + // Fill second input tensor with image info + if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) + { + InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); + auto data = input->buffer().as::value_type *>(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } } + return true; + } -void Models::ObjectSegmentationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::ObjectSegmentationModel::matToBlob( + const cv::Mat &orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr &engine) { - auto network = net_reader->getNetwork(); + (void)scale_factor; + (void)batch_index; - // set input property - input_info_ = InferenceEngine::InputsDataMap(net_reader->getNetwork().getInputsInfo()); - for (const auto & inputInfoItem : input_info_) { - if (inputInfoItem.second->getDims().size() == 4) { // first input contains images - inputInfoItem.second->setPrecision(InferenceEngine::Precision::U8); - input_ = inputInfoItem.first; //input_name - } else if (inputInfoItem.second->getDims().size() == 2) { // second input contains image info - 
inputInfoItem.second->setPrecision(InferenceEngine::Precision::FP32); - } else { - throw std::logic_error("Unsupported input shape with size = " + std::to_string(inputInfoItem.second->getDims().size())); - } + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; } - try { - network.addOutput(std::string(getDetectionOutputName().c_str()), 0); - } catch (std::exception & error) { - throw std::logic_error(getModelName() + "is failed when adding detection_output laryer."); - } + size_t channels = orig_image.channels(); + size_t height = orig_image.size().height; + size_t width = orig_image.size().width; - network.setBatchSize(1); - slog::info << "Batch size is " << std::to_string(net_reader->getNetwork().getBatchSize()) << - slog::endl; + size_t strideH = orig_image.step.buf[0]; + size_t strideW = orig_image.step.buf[1]; - input_channels_ = input_info_[input_]->getDims()[2]; - input_height_ = input_info_[input_]->getDims()[1]; - input_width_ = input_info_[input_]->getDims()[0]; + bool is_dense = + strideW == channels && + strideH == channels * width; - output_info_ = InferenceEngine::OutputsDataMap(net_reader->getNetwork().getOutputsInfo()); - for (auto & item : output_info_) { - item.second->setPrecision(InferenceEngine::Precision::FP32); + if (!is_dense){ + slog::err << "Doesn't support conversion from not dense cv::Mat." << slog::endl; + return false; } - //Deprecated! 
- //auto output_ptr = output_info_.begin(); - //input_ = input_info_map.begin()->first; - //detection_output_ = output_ptr->first; - //mask_output_ = (++output_ptr)->first; + InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, + {1, channels, height, width}, + InferenceEngine::Layout::NHWC); + + auto shared_blob = InferenceEngine::make_shared_blob(tDesc, orig_image.data); + engine->getRequest()->SetBlob(getInputName(), shared_blob); + + return true; } -void Models::ObjectSegmentationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) +const std::string Models::ObjectSegmentationModel::getModelCategory() const { - const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(getDetectionOutputName().c_str()); - const int num_classes = output_layer->GetParamAsInt("num_classes"); - slog::info << "Checking Object Segmentation output ... num_classes=" << num_classes << slog::endl; - if (getLabels().size() != num_classes) { - if (getLabels().size() == (num_classes - 1)) { - getLabels().insert(getLabels().begin(), "fake"); - } else { - getLabels().clear(); - } - } + return "Object Segmentation"; } -bool Models::ObjectSegmentationModel::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool Models::ObjectSegmentationModel::updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr net_reader) { - if (engine == nullptr) { - slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; + slog::info<< "Checking INPUTS for Model" <getNetwork(); + input_info_ = InferenceEngine::InputsDataMap(network.getInputsInfo()); + + InferenceEngine::ICNNNetwork:: InputShapes inputShapes = network.getInputShapes(); + slog::debug<<"input size"<getDims().size() == 4) { - matToBlob(frame, input_frame_loc, 1.0, 0, engine); - } - - /** Fill second input tensor with image info **/ - if (inputInfoItem.second->getDims().size() == 2) { - InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); - auto data = input->buffer().as::value_type *>(); - data[0] = static_cast(input_height_); // height - data[1] = static_cast(input_width_); // width - data[2] = 1; - } + InferenceEngine::SizeVector &in_size_vector = inputShapes.begin()->second; + slog::debug<<"channel size"< & engine) -{ - if (engine == nullptr) { - slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + in_size_vector[0] = 1; + network.reshape(inputShapes); + + InferenceEngine:: InputInfo &inputInfo = *network.getInputsInfo().begin()->second; + inputInfo.getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); + inputInfo.setLayout(InferenceEngine::Layout::NHWC); + inputInfo.setPrecision(InferenceEngine::Precision::U8); + + //InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + //addInputInfo("input", input_info_map.begin()->first.c_str()); + addInputInfo("input", inputShapes.begin()->first); + + InferenceEngine::OutputsDataMap outputsDataMap = network.getOutputsInfo(); + if (outputsDataMap.size() != 1) { + //throw std::runtime_error("Demo supports topologies only with 1 output"); + slog::warn << "This inference sample should have only one output, but we got" + << std::to_string(outputsDataMap.size()) << "outputs" + << slog::endl; return false; } - std::string input_name = getInputName(); - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); - auto 
blob_data = input_blob->buffer().as::value_type *>(); + InferenceEngine::Data & data = *outputsDataMap.begin()->second; + data.setPrecision(InferenceEngine::Precision::FP32); + + const InferenceEngine::SizeVector& outSizeVector = data.getTensorDesc().getDims(); + int outChannels, outHeight, outWidth; + slog::debug << "output size vector " << outSizeVector.size() << slog::endl; + switch(outSizeVector.size()){ + case 3: + outChannels = 0; + outHeight = outSizeVector[1]; + outWidth = outSizeVector[2]; + break; + case 4: + outChannels = outSizeVector[1]; + outHeight = outSizeVector[2]; + outWidth = outSizeVector[3]; + break; + default: + throw std::runtime_error("Unexpected output blob shape. Only 4D and 3D output blobs are" + "supported."); - cv::Mat resized_image(orig_image); - if((size_t)orig_image.size().height != input_height_ || (size_t)orig_image.size().width != input_width_){ - cv::resize(orig_image, resized_image, cv::Size(input_width_, input_height_)); } - int batchOffset = batch_index * input_width_ * input_height_ * input_channels_; - - for (unsigned int c = 0; c < input_channels_; c++) { - for (unsigned int h = 0; h < input_height_; h++) { - for (unsigned int w = 0; w < input_width_; w++) { - blob_data[batchOffset + c * input_width_ * input_height_ + h * input_width_ + w] = - resized_image.at(h, w)[c] * scale_factor; - } - } + if(outHeight == 0 || outWidth == 0){ + slog::err << "output_height or output_width is not set, please check the MaskOutput Info " + << "is set correctly." 
<< slog::endl; + //throw std::runtime_error("output_height or output_width is not set, please check the MaskOutputInfo"); + return false; } + slog::debug << "output width " << outWidth<< slog::endl; + slog::debug << "output hEIGHT " << outHeight<< slog::endl; + slog::debug << "output CHANNALS " << outChannels<< slog::endl; + addOutputInfo("masks", (outputsDataMap.begin()++)->first); + addOutputInfo("detection", outputsDataMap.begin()->first); + + //const InferenceEngine::CNNLayerPtr output_layer = + //network.getLayerByName(outputsDataMap.begin()->first.c_str()); + const InferenceEngine::CNNLayerPtr output_layer = + network.getLayerByName(getOutputName("detection").c_str()); + //const int num_classes = output_layer->GetParamAsInt("num_classes"); + //slog::info << "Checking Object Segmentation output ... num_classes=" << num_classes << slog::endl; + +#if 0 + if (getLabels().size() != num_classes) + { + if (getLabels().size() == (num_classes - 1)) + { + getLabels().insert(getLabels().begin(), "fake"); + } + else + { + getLabels().clear(); + } + } +#endif +/* + const InferenceEngine::SizeVector output_dims = data.getTensorDesc().getDims(); + setMaxProposalCount(static_cast(output_dims[2])); + slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; + auto object_size = static_cast(output_dims[3]); + setObjectSize(object_size); + + slog::debug << "model size" << output_dims.size() << slog::endl;*/ + printAttribute(); + slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; return true; } -const std::string Models::ObjectSegmentationModel::getModelName() const -{ - return "Object Segmentation"; -} diff --git a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp index 1dc7c599..31505820 100644 --- a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp +++ b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp @@ -21,42 +21,46 @@ #include "dynamic_vino_lib/slog.h" // Validated Person Attributes Detection Network Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel( - const std::string & model_loc, int input_num, int output_num, int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} -void Models::PersonAttribsDetectionModel::setLayerProperty( +bool Models::PersonAttribsDetectionModel::updateLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { - // set input property + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; InferenceEngine::InputsDataMap input_info_map( net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) { + throw std::logic_error("Person Attribs topology should have only one input"); + } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property + addInputInfo("input", input_info_map.begin()->first); + + slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; InferenceEngine::OutputsDataMap output_info_map( net_reader->getNetwork().getOutputsInfo()); - // set input and output layer name + if (output_info_map.size() != 3) { + throw std::logic_error("Person Attribs Network expects networks having 3 output"); + } input_ = 
input_info_map.begin()->first; output_ = output_info_map.begin()->first; -} -void Models::PersonAttribsDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) -{ - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { - throw std::logic_error("Person Attribs topology should have only one input"); - } - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { - throw std::logic_error("Person Attribs Network expects networks having one output"); - } + auto output_iter = output_info_map.begin(); + InferenceEngine::DataPtr attribute_output_ptr = (output_iter++)->second; + InferenceEngine::DataPtr top_output_ptr = (output_iter++)->second; + InferenceEngine::DataPtr bottom_output_ptr = (output_iter++)->second; + + addOutputInfo("attributes_output_", attribute_output_ptr->getName()); + //output_gender_ = gender_output_ptr->name; + addOutputInfo("top_output_", top_output_ptr->getName()); + addOutputInfo("bottom_output_", bottom_output_ptr->getName()); + printAttribute(); + return true; } -const std::string Models::PersonAttribsDetectionModel::getModelName() const +const std::string Models::PersonAttribsDetectionModel::getModelCategory() const { return "Person Attributes Detection"; } diff --git a/dynamic_vino_lib/src/models/person_reidentification_model.cpp b/dynamic_vino_lib/src/models/person_reidentification_model.cpp index 7367e212..2a1beb92 100644 --- a/dynamic_vino_lib/src/models/person_reidentification_model.cpp +++ b/dynamic_vino_lib/src/models/person_reidentification_model.cpp @@ -21,30 +21,32 @@ #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network Models::PersonReidentificationModel::PersonReidentificationModel( - const std::string & model_loc, int input_num, int output_num, int max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + 
const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} -void Models::PersonReidentificationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::PersonReidentificationModel::updateLayerProperty( + InferenceEngine::CNNNetReader::Ptr netreader) { - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + slog::info << "Checking Inputs for Model" << getModelName() << slog::endl; + + auto network = netreader->getNetwork(); + + InferenceEngine::InputsDataMap input_info_map(network.getInputsInfo()); + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + network.getOutputsInfo()); // set input and output layer name input_ = input_info_map.begin()->first; output_ = output_info_map.begin()->first; + + return true; } -void Models::PersonReidentificationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) {} - -const std::string Models::PersonReidentificationModel::getModelName() const +const std::string Models::PersonReidentificationModel::getModelCategory() const { return "Person Reidentification"; } diff --git a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp index 4b74cd58..3328eca4 100644 --- a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp +++ b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp @@ -21,31 +21,14 @@ #include "dynamic_vino_lib/slog.h" // Validated Vehicle Attributes Detection Network Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel( - const std::string & model_loc, int input_num, int output_num, int 
max_batch_size) -: BaseModel(model_loc, input_num, output_num, max_batch_size) {} + const std::string & model_loc, int max_batch_size) +: BaseModel(model_loc, max_batch_size) {} -void Models::VehicleAttribsDetectionModel::setLayerProperty( +bool Models::VehicleAttribsDetectionModel::updateLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - auto output_iter = output_info_map.begin(); - color_output_ = (output_iter++)->second->name; - type_output_ = (output_iter++)->second->name; -} - -void Models::VehicleAttribsDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) -{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + // set input property InferenceEngine::InputsDataMap input_info_map( net_reader->getNetwork().getInputsInfo()); if (input_info_map.size() != 1) { @@ -56,9 +39,29 @@ void Models::VehicleAttribsDetectionModel::checkLayerProperty( if (output_info_map.size() != 2) { throw std::logic_error("Vehicle Attribs Network expects networks having two outputs"); } + + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + input_info->setPrecision(InferenceEngine::Precision::U8); + input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + + // set input and output layer name + input_ = input_info_map.begin()->first; + auto output_iter = output_info_map.begin(); + // color_output_ = (output_iter++)->second->name; + // type_output_ = 
(output_iter++)->second->name; + InferenceEngine::DataPtr color_output_ptr = (output_iter++)->second; + InferenceEngine::DataPtr type_output_ptr = (output_iter++)->second; + + addOutputInfo("color_output_", color_output_ptr->getName()); + //output_gender_ = gender_output_ptr->name; + addOutputInfo("type_output_", type_output_ptr->getName()); + + printAttribute(); + return true; } -const std::string Models::VehicleAttribsDetectionModel::getModelName() const +const std::string Models::VehicleAttribsDetectionModel::getModelCategory() const { return "Vehicle Attributes Detection"; } + From 6756092eed234a82ea14521a451ca247fc733140 Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Mon, 4 Jan 2021 16:31:17 +0800 Subject: [PATCH 35/63] align outputs code structure with ROS2 and OV2020.3 Signed-off-by: Lewis Liu --- .../dynamic_vino_lib/outputs/base_output.h | 11 +- .../outputs/image_window_output.h | 6 +- .../outputs/ros_topic_output.h | 20 +- .../src/outputs/image_window_output.cpp | 251 +++++++----------- .../src/outputs/ros_service_output.cpp | 66 ++--- .../src/outputs/ros_topic_output.cpp | 163 +++++++----- dynamic_vino_lib/src/outputs/rviz_output.cpp | 14 +- 7 files changed, 254 insertions(+), 277 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 65bd492b..caaf7e53 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -53,11 +53,9 @@ namespace Outputs */ class BaseOutput { - public: - // BaseOutput() = default; - BaseOutput() - { - }; +public: + explicit BaseOutput(std::string output_name) + : output_name_(output_name) {} /** * @brief Generate output content according to the license plate detection result. 
*/ @@ -168,7 +166,8 @@ class BaseOutput protected: cv::Mat frame_; - Pipeline* pipeline_; + Pipeline * pipeline_; + std::string output_name_; }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB_OUTPUTS_BASE_OUTPUT_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index bf7a5abc..a162772e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -141,7 +141,7 @@ class ImageWindowOutput : public BaseOutput private: unsigned findOutput(const cv::Rect &); - void initOutputs(unsigned size); + //void initOutputs(unsigned size); /** * @brief Calculate the axises of the coordinates for showing * the image window. @@ -155,6 +155,8 @@ class ImageWindowOutput : public BaseOutput */ cv::Mat getRotationTransform(double yaw, double pitch, double roll); + void mergeMask(const std::vector &); + struct OutputData { std::string desc; @@ -165,6 +167,8 @@ class ImageWindowOutput : public BaseOutput cv::Point hp_y; // for headpose, end point of yAxis cv::Point hp_zs; // for headpose, start point of zAxis cv::Point hp_ze; // for headpose, end point of zAxis + cv::Point pa_top; // for person attributes, top position + cv::Point pa_bottom; //for person attributes, bottom position std::vector landmarks; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index 24e7463e..bfaa8e4b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -151,27 +151,27 @@ class RosTopicOutput : public BaseOutput ros::NodeHandle nh_; protected: ros::Publisher pub_face_; - std::shared_ptr faces_msg_ptr_; + std::shared_ptr faces_topic_; ros::Publisher pub_emotion_; - std::shared_ptr 
emotions_msg_ptr_; + std::shared_ptr emotions_topic_; ros::Publisher pub_age_gender_; - std::shared_ptr age_gender_msg_ptr_; + std::shared_ptr age_gender_topic_; ros::Publisher pub_headpose_; - std::shared_ptr headpose_msg_ptr_; + std::shared_ptr headpose_topic_; ros::Publisher pub_object_; - std::shared_ptr object_msg_ptr_; + std::shared_ptr detected_objects_topic_; ros::Publisher pub_person_reid_; std::shared_ptr person_reid_msg_ptr_; ros::Publisher pub_segmented_object_; - std::shared_ptr segmented_object_msg_ptr_; + std::shared_ptr segmented_objects_topic_; ros::Publisher pub_face_reid_; - std::shared_ptr face_reid_msg_ptr_; + std::shared_ptr face_reid_topic_; ros::Publisher pub_person_attribs_; - std::shared_ptr person_attribs_msg_ptr_; + std::shared_ptr person_attribs_topic_; ros::Publisher pub_license_plate_; - std::shared_ptr license_plate_msg_ptr_; + std::shared_ptr license_plate_topic_; ros::Publisher pub_vehicle_attribs_; - std::shared_ptr vehicle_attribs_msg_ptr_; + std::shared_ptr vehicle_attribs_topic_; }; } // namespace Outputs diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 49304d1a..397f582c 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -23,15 +23,19 @@ #include #include #include +#include +#include + +#include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/pipeline.h" -Outputs::ImageWindowOutput::ImageWindowOutput(const std::string& window_name, - int focal_length) - : window_name_(window_name), focal_length_(focal_length) +Outputs::ImageWindowOutput::ImageWindowOutput(const std::string & output_name, int focal_length) +: BaseOutput(output_name), focal_length_(focal_length) { + cv::namedWindow(output_name_, cv::WINDOW_AUTOSIZE); } -void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) +void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) 
{ // frame_ = frame; frame_ = frame.clone(); @@ -48,32 +52,56 @@ void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) } } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +unsigned Outputs::ImageWindowOutput::findOutput( + const cv::Rect & result_rect) { - if (outputs_.size() == 0) - { - initOutputs(results.size()); + for (unsigned i = 0; i < outputs_.size(); i++) { + if (outputs_[i].rect == result_rect) { + return i; + } } - if (outputs_.size() != results.size()) - { - // throw std::logic_error("size is not equal!"); - // slog::err << "the size of Face Detection and Output Vector is not equal!" - // << slog::endl; - return; + OutputData output; + output.desc = ""; + output.scalar = cv::Scalar(255, 0, 0); + outputs_.push_back(output); + return outputs_.size() - 1; +} + +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += ("[" + results[i].getLicense() + "]"); } +} - for (unsigned i = 0; i < results.size(); i++) - { - // outputs_[i].desc.str(""); - outputs_[i].rect = results[i].getLocation(); +void Outputs::ImageWindowOutput::accept( + const std::vector & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; + outputs_[target_index].desc += + ("[" + results[i].getColor() + "," + results[i].getType() + "]"); + } +} +void Outputs::ImageWindowOutput::accept( + const std::vector& results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; auto fd_conf = results[i].getConfidence(); - if 
(fd_conf >= 0) - { + if (fd_conf >= 0) { std::ostringstream ostream; ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; - outputs_[i].desc += ostream.str(); + outputs_[target_index].desc += ostream.str(); } } } @@ -81,52 +109,30 @@ void Outputs::ImageWindowOutput::accept( void Outputs::ImageWindowOutput::accept( const std::vector& results) { - if (outputs_.size() == 0) - { - initOutputs(results.size()); - } - if (outputs_.size() != results.size()) - { - // throw std::logic_error("size is not equal!"); - slog::err << "the size of Emotion Detection and Output Vector is not equal!" - << slog::endl; - return; - } - for (unsigned i = 0; i < results.size(); i++) - { + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; std::ostringstream ostream; ostream << "[" << results[i].getLabel() << "]"; - outputs_[i].desc += ostream.str(); + outputs_[target_index].desc += ostream.str(); } } void Outputs::ImageWindowOutput::accept( const std::vector& results) { - if (outputs_.size() == 0) - { - initOutputs(results.size()); - } - if (outputs_.size() != results.size()) - { - // throw std::logic_error("size is not equal!"); - slog::err - << "the size of AgeGender Detection and Output Vector is not equal!" 
- << slog::endl; - return; - } - for (unsigned i = 0; i < results.size(); i++) - { + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; std::ostringstream ostream; - // auto age = results[i].getAge(); - ostream << "[Y" << std::fixed << std::setprecision(0) << results[i].getAge() - << "]"; - outputs_[i].desc += ostream.str(); + ostream << "[Y" << std::fixed << std::setprecision(0) << results[i].getAge() << "]"; + outputs_[target_index].desc += ostream.str(); auto male_prob = results[i].getMaleProbability(); - if (male_prob < 0.5) - { - outputs_[i].scalar = cv::Scalar(0, 0, 255); + if (male_prob < 0.5) { + outputs_[target_index].scalar = cv::Scalar(0, 0, 255); } } } @@ -169,21 +175,11 @@ cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, void Outputs::ImageWindowOutput::accept( const std::vector& results) { - if (outputs_.size() == 0) - { - initOutputs(results.size()); - } - if (outputs_.size() != results.size()) - { - // throw std::logic_error("size is not equal!"); - slog::err - << "the size of HeadPose Detection and Output Vector is not equal!" 
- << slog::endl; - return; - } - for (unsigned i = 0; i < results.size(); i++) - { + for (unsigned i = 0; i < results.size(); i++) { auto result = results[i]; + cv::Rect result_rect = result.getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; double yaw = result.getAngleY(); double pitch = result.getAngleP(); double roll = result.getAngleR(); @@ -191,49 +187,37 @@ void Outputs::ImageWindowOutput::accept( feedFrame(frame_); cv::Mat r = getRotationTransform(yaw, pitch, roll); cv::Rect location = result.getLocation(); - auto cp = cv::Point(location.x + location.width / 2, - location.y + location.height / 2); - outputs_[i].hp_cp = cp; - outputs_[i].hp_x = calcAxis(r, scale, 0, 0, cp); - outputs_[i].hp_y = calcAxis(r, 0, -scale, 0, cp); - outputs_[i].hp_ze = calcAxis(r, 0, 0, -scale, cp); - outputs_[i].hp_zs = calcAxis(r, 0, 0, scale, cp); + auto cp = cv::Point(location.x + location.width / 2, location.y + location.height / 2); + outputs_[target_index].hp_cp = cp; + outputs_[target_index].hp_x = calcAxis(r, scale, 0, 0, cp); + outputs_[target_index].hp_y = calcAxis(r, 0, -scale, 0, cp); + outputs_[target_index].hp_ze = calcAxis(r, 0, 0, -scale, cp); + outputs_[target_index].hp_zs = calcAxis(r, 0, 0, scale, cp); } } void Outputs::ImageWindowOutput::accept( const std::vector& results) { - if (outputs_.size() == 0) - { - initOutputs(results.size()); - } - if (outputs_.size() != results.size()) - { - // throw std::logic_error("size is not equal!"); - slog::err << "the size of Face Detection and Output Vector is not equal!" 
- << slog::endl; - return; - } - for (unsigned i = 0; i < results.size(); i++) - { - // outputs_[i].desc.str(""); - outputs_[i].rect = results[i].getLocation(); + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + outputs_[target_index].rect = result_rect; auto fd_conf = results[i].getConfidence(); - if (fd_conf >= 0) - { + if (fd_conf >= 0) { std::ostringstream ostream; ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; - outputs_[i].desc += ostream.str(); + outputs_[target_index].desc += ostream.str(); } auto label = results[i].getLabel(); - outputs_[i].desc += "[" + label + "]"; + outputs_[target_index].desc += "[" + label + "]"; } } void Outputs::ImageWindowOutput::mergeMask( const std::vector & results) { + /* std::map class_color; for (unsigned i = 0; i < results.size(); i++) { std::string class_label = results[i].getLabel(); @@ -260,6 +244,12 @@ void Outputs::ImageWindowOutput::mergeMask( } cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } + */ + const float alpha = 0.5f; + cv::Mat roi_img = frame_; + cv::Mat colored_mask = results[0].getMask(); + cv::resize(colored_mask,colored_mask,cv::Size(frame_.size().width,frame_.size().height)); + cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } void Outputs::ImageWindowOutput::accept( @@ -286,53 +276,25 @@ void Outputs::ImageWindowOutput::accept( mergeMask(results); } -unsigned Outputs::ImageWindowOutput::findOutput( - const cv::Rect & result_rect) -{ - for (unsigned i = 0; i < outputs_.size(); i++) { - if (outputs_[i].rect == result_rect) { - return i; - } - } - OutputData output; - output.desc = ""; - output.scalar = cv::Scalar(255, 0, 0); - outputs_.push_back(output); - return outputs_.size() - 1; -} -void Outputs::ImageWindowOutput::accept( - const std::vector & results) -{ - for (unsigned i = 0; i < results.size(); i++) { - cv::Rect 
result_rect = results[i].getLocation(); - unsigned target_index = findOutput(result_rect); - outputs_[target_index].rect = result_rect; - outputs_[target_index].desc += ("[" + results[i].getLicense() + "]"); - } -} - -void Outputs::ImageWindowOutput::accept( - const std::vector & results) -{ - for (unsigned i = 0; i < results.size(); i++) { - cv::Rect result_rect = results[i].getLocation(); - unsigned target_index = findOutput(result_rect); - outputs_[target_index].rect = result_rect; - outputs_[target_index].desc += - ("[" + results[i].getColor() + "," + results[i].getType() + "]"); - } -} void Outputs::ImageWindowOutput::accept( const std::vector & results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); if (results[i].getMaleProbability() < 0.5) { outputs_[target_index].scalar = cv::Scalar(0, 0, 255); } + else{ + outputs_[target_index].scalar = cv::Scalar(0, 255, 0); + } + outputs_[target_index].pa_top.x = results[i].getTopLocation().x*result_rect.width + result_rect.x; + outputs_[target_index].pa_top.y = results[i].getTopLocation().y*result_rect.height + result_rect.y; + outputs_[target_index].pa_bottom.x = results[i].getBottomLocation().x*result_rect.width + result_rect.x; + outputs_[target_index].pa_bottom.y = results[i].getBottomLocation().y*result_rect.height + result_rect.y; + outputs_[target_index].rect = result_rect; outputs_[target_index].desc += "[" + results[i].getAttributes() + "]"; } @@ -411,21 +373,10 @@ void Outputs::ImageWindowOutput::decorateFrame() } void Outputs::ImageWindowOutput::handleOutput() { - cv::namedWindow(window_name_, cv::WINDOW_AUTOSIZE); - decorateFrame(); - cv::imshow(window_name_, frame_); - if( cv::waitKey(1) > 0) - { - ros::shutdown(); - } -} - -void Outputs::ImageWindowOutput::initOutputs(unsigned size) -{ - outputs_.resize(size); - for (unsigned i = 0; i < size; i++) - { - 
outputs_[i].desc = ""; - outputs_[i].scalar = cv::Scalar(255, 0, 0); + if(frame_.cols == 0 || frame_.rows == 0){ + return; } + decorateFrame(); + cv::imshow(output_name_, frame_); + cv::waitKey(1); } diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index a0f9aad4..8302dfb5 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -27,13 +27,13 @@ void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { - if (object_msg_ptr_ != nullptr && object_msg_ptr_->objects_vector.size() > 0) { + if (detected_objects_topic_ != nullptr && detected_objects_topic_->objects_vector.size() > 0) { object_msgs::ObjectsInBoxes objs; - objs.objects_vector = object_msg_ptr_->objects_vector; + objs.objects_vector = detected_objects_topic_->objects_vector; response->objects.push_back(objs); - } else if (faces_msg_ptr_ != nullptr && faces_msg_ptr_ ->objects_vector.size() > 0) { + } else if (faces_topic_ != nullptr && faces_topic_ ->objects_vector.size() > 0) { object_msgs::ObjectsInBoxes objs; - objs.objects_vector = faces_msg_ptr_->objects_vector; + objs.objects_vector = faces_topic_->objects_vector; response->objects.push_back(objs); } } @@ -41,9 +41,9 @@ void Outputs::RosServiceOutput::setServiceResponse( void Outputs::RosServiceOutput::setResponseForFace( boost::shared_ptr response) { - if (faces_msg_ptr_ != nullptr && faces_msg_ptr_->objects_vector.size() > 0) { + if (faces_topic_ != nullptr && faces_topic_->objects_vector.size() > 0) { object_msgs::ObjectsInBoxes objs; - objs.objects_vector = faces_msg_ptr_->objects_vector; + objs.objects_vector = faces_topic_->objects_vector; response->objects.push_back(objs); } } @@ -51,24 +51,24 @@ void Outputs::RosServiceOutput::setResponseForFace( void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { - if (age_gender_msg_ptr_ != nullptr) { - 
response->age_gender.objects = age_gender_msg_ptr_->objects; + if (age_gender_topic_ != nullptr) { + response->age_gender.objects = age_gender_topic_->objects; } } void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { - if (emotions_msg_ptr_ != nullptr) { - response->emotion.emotions = emotions_msg_ptr_->emotions; + if (emotions_topic_ != nullptr) { + response->emotion.emotions = emotions_topic_->emotions; } } void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { - if (headpose_msg_ptr_ != nullptr) { - response->headpose.headposes = headpose_msg_ptr_->headposes; + if (headpose_topic_ != nullptr) { + response->headpose.headposes = headpose_topic_->headposes; } } @@ -76,15 +76,15 @@ void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { slog::info << "in ObjectsInMasks service::Response ..."; - if (segmented_object_msg_ptr_ != nullptr) { - response->segmentation.objects_vector = segmented_object_msg_ptr_->objects_vector; + if (segmented_objects_topic_ != nullptr) { + response->segmentation.objects_vector = segmented_objects_topic_->objects_vector; } } void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { slog::info << "in Reidentification service::Response ..."; - if (person_reid_msg_ptr_ != nullptr) { + if (person_reid_topic_ != nullptr) { response->reidentification.reidentified_vector = person_reid_msg_ptr_->reidentified_vector; } } @@ -92,36 +92,36 @@ void Outputs::RosServiceOutput::setServiceResponse( void Outputs::RosServiceOutput::setServiceResponse( boost::shared_ptr response) { - slog::info << "in People::Response ..."; - if (faces_msg_ptr_ != nullptr) { + slog::info << "in People::Response ..."; + if (faces_topic_ != nullptr) { slog::info << "[FACES],"; - response->persons.faces = faces_msg_ptr_->objects_vector; - } else if (object_msg_ptr_ != nullptr) { + response->persons.faces = faces_topic_->objects_vector; + } else if (detected_objects_topic_ 
!= nullptr) { slog::info << "[FACES(objects)],"; - response->persons.faces = object_msg_ptr_->objects_vector; + response->persons.faces = detected_objects_topic_->objects_vector; } - if (age_gender_msg_ptr_ != nullptr) { + if (age_gender_topic_ != nullptr) { slog::info << "[AGE_GENDER],"; - response->persons.agegenders = age_gender_msg_ptr_->objects; + response->persons.agegenders = age_gender_topic_->objects; } - if (emotions_msg_ptr_ != nullptr) { + if (emotions_topic_ != nullptr) { slog::info << "[EMOTION],"; - response->persons.emotions = emotions_msg_ptr_->emotions; + response->persons.emotions = emotions_topic_->emotions; } - if (headpose_msg_ptr_ != nullptr) { + if (headpose_topic_ != nullptr) { slog::info << "[HEADPOSE],"; - response->persons.headposes = headpose_msg_ptr_->headposes; + response->persons.headposes = headpose_topic_->headposes; } slog::info << "DONE!" << slog::endl; } void Outputs::RosServiceOutput::clearData() { - faces_msg_ptr_ = nullptr; - object_msg_ptr_ = nullptr; - age_gender_msg_ptr_ = nullptr; - emotions_msg_ptr_ = nullptr; - headpose_msg_ptr_ = nullptr; - segmented_object_msg_ptr_ = nullptr; - person_reid_msg_ptr_ = nullptr; + faces_topic_ = nullptr; + detected_objects_topic_ = nullptr; + age_gender_topic_ = nullptr; + emotions_topic_ = nullptr; + headpose_topic_ = nullptr; + segmented_object_topic_ = nullptr; + person_reid_topic_ = nullptr; } diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index 7167d26b..4cd96895 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -52,25 +52,28 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): pub_vehicle_attribs_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/detected_vehicles_attribs", 16); - emotions_msg_ptr_ = NULL; - faces_msg_ptr_ = NULL; - age_gender_msg_ptr_ = NULL; - headpose_msg_ptr_ = NULL; - object_msg_ptr_ = NULL; + 
emotions_topic_ = NULL; + faces_topic_ = NULL; + age_gender_topic_ = NULL; + headpose_topic_ = NULL; + detected_objects_topic_ = NULL; person_reid_msg_ptr_ = NULL; - segmented_object_msg_ptr_ = NULL; - face_reid_msg_ptr_ = NULL; - person_attribs_msg_ptr_ = NULL; - license_plate_msg_ptr_ = NULL; - vehicle_attribs_msg_ptr_ = NULL; + segmented_objects_topic_ = NULL; + face_reid_topic_ = NULL; + person_attribs_topic_ = NULL; + license_plate_topic_ = NULL; + vehicle_attribs_topic_ = NULL; } -void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) {} +void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) +{ + frame_ = frame.clone(); +} void Outputs::RosTopicOutput::accept( const std::vector & results) { - vehicle_attribs_msg_ptr_ = std::make_shared(); + vehicle_attribs_topic_ = std::make_shared(); people_msgs::VehicleAttribs attribs; for (auto & r : results) { // slog::info << ">"; @@ -81,14 +84,14 @@ void Outputs::RosTopicOutput::accept( attribs.roi.height = loc.height; attribs.type = r.getType(); attribs.color = r.getColor(); - vehicle_attribs_msg_ptr_->vehicles.push_back(attribs); + vehicle_attribs_topic_->vehicles.push_back(attribs); } } void Outputs::RosTopicOutput::accept( const std::vector & results) { - license_plate_msg_ptr_ = std::make_shared(); + license_plate_topic_ = std::make_shared(); people_msgs::LicensePlate plate; for (auto & r : results) { // slog::info << ">"; @@ -98,14 +101,14 @@ void Outputs::RosTopicOutput::accept( plate.roi.width = loc.width; plate.roi.height = loc.height; plate.license = r.getLicense(); - license_plate_msg_ptr_->licenses.push_back(plate); + license_plate_topic_->licenses.push_back(plate); } } void Outputs::RosTopicOutput::accept( const std::vector & results) { - person_attribs_msg_ptr_ = std::make_shared(); + person_attribs_topic_ = std::make_shared(); people_msgs::PersonAttribute person_attrib; for (auto & r : results) { // slog::info << ">"; @@ -115,14 +118,14 @@ void Outputs::RosTopicOutput::accept( 
person_attrib.roi.width = loc.width; person_attrib.roi.height = loc.height; person_attrib.attribute = r.getAttributes(); - person_attribs_msg_ptr_->attributes.push_back(person_attrib); + person_attribs_topic_->attributes.push_back(person_attrib); } } void Outputs::RosTopicOutput::accept( const std::vector & results) { - face_reid_msg_ptr_ = std::make_shared(); + face_reid_topic_ = std::make_shared(); people_msgs::Reidentification face; for (auto & r : results) { // slog::info << ">"; @@ -132,7 +135,7 @@ void Outputs::RosTopicOutput::accept( face.roi.width = loc.width; face.roi.height = loc.height; face.identity = r.getFaceID(); - face_reid_msg_ptr_->reidentified_vector.push_back(face); + face_reid_topic_->reidentified_vector.push_back(face); } } @@ -156,7 +159,7 @@ void Outputs::RosTopicOutput::accept( void Outputs::RosTopicOutput::accept( const std::vector & results) { - segmented_object_msg_ptr_ = std::make_shared(); + segmented_objects_topic_ = std::make_shared(); people_msgs::ObjectInMask object; for (auto & r : results) { // slog::info << ">"; @@ -173,7 +176,7 @@ void Outputs::RosTopicOutput::accept( object.mask_array.push_back(mask.at(h, w)); } } - segmented_object_msg_ptr_->objects_vector.push_back(object); + segmented_objects_topic_->objects_vector.push_back(object); } } @@ -181,7 +184,7 @@ void Outputs::RosTopicOutput::accept( void Outputs::RosTopicOutput::accept( const std::vector& results) { - faces_msg_ptr_ = std::make_shared(); + faces_topic_ = std::make_shared(); object_msgs::ObjectInBox face; for (auto r : results) @@ -193,14 +196,14 @@ void Outputs::RosTopicOutput::accept( face.roi.height = loc.height; face.object.object_name = r.getLabel(); face.object.probability = r.getConfidence(); - faces_msg_ptr_->objects_vector.push_back(face); + faces_topic_->objects_vector.push_back(face); } } void Outputs::RosTopicOutput::accept( const std::vector& results) { - emotions_msg_ptr_ = std::make_shared(); + emotions_topic_ = std::make_shared(); 
people_msgs::Emotion emotion; for (auto r : results) @@ -211,14 +214,14 @@ void Outputs::RosTopicOutput::accept( emotion.roi.width = loc.width; emotion.roi.height = loc.height; emotion.emotion = r.getLabel(); - emotions_msg_ptr_->emotions.push_back(emotion); + emotions_topic_->emotions.push_back(emotion); } } void Outputs::RosTopicOutput::accept( const std::vector& results) { - age_gender_msg_ptr_ = std::make_shared(); + age_gender_topic_ = std::make_shared(); people_msgs::AgeGender ag; for (auto r : results) @@ -240,14 +243,14 @@ void Outputs::RosTopicOutput::accept( ag.gender = "Female"; ag.gender_confidence = 1.0 - male_prob; } - age_gender_msg_ptr_->objects.push_back(ag); + age_gender_topic_->objects.push_back(ag); } } void Outputs::RosTopicOutput::accept( const std::vector& results) { - headpose_msg_ptr_ = std::make_shared(); + headpose_topic_ = std::make_shared(); people_msgs::HeadPose hp; for (auto r : results) @@ -260,14 +263,14 @@ void Outputs::RosTopicOutput::accept( hp.yaw = r.getAngleY(); hp.pitch = r.getAngleP(); hp.roll = r.getAngleR(); - headpose_msg_ptr_->headposes.push_back(hp); + headpose_topic_->headposes.push_back(hp); } } void Outputs::RosTopicOutput::accept( const std::vector& results) { - object_msg_ptr_ = std::make_shared(); + detected_objects_topic_ = std::make_shared(); object_msgs::ObjectInBox hp; for (auto r : results) @@ -279,33 +282,57 @@ void Outputs::RosTopicOutput::accept( hp.roi.height = loc.height; hp.object.object_name = r.getLabel(); hp.object.probability = r.getConfidence(); - object_msg_ptr_->objects_vector.push_back(hp); + detected_objects_topic_->objects_vector.push_back(hp); + } +} + +void Outputs::RosTopicOutput::accept( + const std::vector & results) +{ + landmarks_topic_ = std::make_shared(); + people_msgs::msg::Landmark landmark; + for (auto & r : results) { + // slog::info << ">"; + auto loc = r.getLocation(); + landmark.roi.x_offset = loc.x; + landmark.roi.y_offset = loc.y; + landmark.roi.width = loc.width; + 
landmark.roi.height = loc.height; + std::vector landmark_points = r.getLandmarks(); + for (auto pt : landmark_points) { + geometry_msgs::msg::Point point; + point.x = pt.x; + point.y = pt.y; + landmark.landmark_points.push_back(point); + } + landmarks_topic_->landmarks.push_back(landmark); } } void Outputs::RosTopicOutput::handleOutput() { - std_msgs::Header header = getHeader(); - if (vehicle_attribs_msg_ptr_ != nullptr) { + //std_msgs::Header header = getHeader(); + auto header = getPipeline()->getInputDevice()->getLockedHeader(); + if (vehicle_attribs_topic_ != nullptr) { people_msgs::VehicleAttribsStamped vehicle_attribs_msg; vehicle_attribs_msg.header = header; - vehicle_attribs_msg.vehicles.swap(vehicle_attribs_msg_ptr_->vehicles); + vehicle_attribs_msg.vehicles.swap(vehicle_attribs_topic_->vehicles); pub_vehicle_attribs_.publish(vehicle_attribs_msg); - vehicle_attribs_msg_ptr_ = nullptr; + vehicle_attribs_topic_ = nullptr; } - if (license_plate_msg_ptr_ != nullptr) { + if (license_plate_topic_ != nullptr) { people_msgs::LicensePlateStamped license_plate_msg; license_plate_msg.header = header; - license_plate_msg.licenses.swap(license_plate_msg_ptr_->licenses); + license_plate_msg.licenses.swap(license_plate_topic_->licenses); pub_license_plate_.publish(license_plate_msg); - license_plate_msg_ptr_ = nullptr; + license_plate_topic_ = nullptr; } - if (person_attribs_msg_ptr_ != nullptr) { + if (person_attribs_topic_ != nullptr) { people_msgs::PersonAttributeStamped person_attribute_msg; person_attribute_msg.header = header; - person_attribute_msg.attributes.swap(person_attribs_msg_ptr_->attributes); + person_attribute_msg.attributes.swap(person_attribs_topic_->attributes); pub_person_attribs_.publish(person_attribute_msg); - person_attribs_msg_ptr_ = nullptr; + person_attribs_topic_ = nullptr; } if (person_reid_msg_ptr_ != nullptr) { people_msgs::ReidentificationStamped person_reid_msg; @@ -314,78 +341,83 @@ void Outputs::RosTopicOutput::handleOutput() 
pub_person_reid_.publish(person_reid_msg); person_reid_msg_ptr_ = nullptr; } - if (segmented_object_msg_ptr_ != nullptr) { + if (segmented_objects_topic_ != nullptr) { // slog::info << "publishing faces outputs." << slog::endl; people_msgs::ObjectsInMasks segmented_msg; segmented_msg.header = header; - segmented_msg.objects_vector.swap(segmented_object_msg_ptr_->objects_vector); + segmented_msg.objects_vector.swap(segmented_objects_topic_->objects_vector); pub_segmented_object_.publish(segmented_msg); - segmented_object_msg_ptr_ = nullptr; + segmented_objects_topic_ = nullptr; } - if (faces_msg_ptr_ != NULL) + if (faces_topic_ != NULL) { object_msgs::ObjectsInBoxes faces_msg; faces_msg.header = header; - faces_msg.objects_vector.swap(faces_msg_ptr_->objects_vector); + faces_msg.objects_vector.swap(faces_topic_->objects_vector); pub_face_.publish(faces_msg); - faces_msg_ptr_ = nullptr; + faces_topic_ = nullptr; } - if (emotions_msg_ptr_ != nullptr) + if (emotions_topic_ != nullptr) { people_msgs::EmotionsStamped emotions_msg; emotions_msg.header = header; - emotions_msg.emotions.swap(emotions_msg_ptr_->emotions); + emotions_msg.emotions.swap(emotions_topic_->emotions); pub_emotion_.publish(emotions_msg); - emotions_msg_ptr_ = nullptr; + emotions_topic_ = nullptr; } - if (age_gender_msg_ptr_ != nullptr) + if (age_gender_topic_ != nullptr) { people_msgs::AgeGenderStamped age_gender_msg; age_gender_msg.header = header; - age_gender_msg.objects.swap(age_gender_msg_ptr_->objects); + age_gender_msg.objects.swap(age_gender_topic_->objects); pub_age_gender_.publish(age_gender_msg); - age_gender_msg_ptr_ = nullptr; + age_gender_topic_ = nullptr; } - if (headpose_msg_ptr_ != nullptr) + if (headpose_topic_ != nullptr) { people_msgs::HeadPoseStamped headpose_msg; headpose_msg.header = header; - headpose_msg.headposes.swap(headpose_msg_ptr_->headposes); + headpose_msg.headposes.swap(headpose_topic_->headposes); pub_headpose_.publish(headpose_msg); - headpose_msg_ptr_ = nullptr; 
+ headpose_topic_ = nullptr; } - if (object_msg_ptr_ != nullptr) + if (detected_objects_topic_ != nullptr) { object_msgs::ObjectsInBoxes object_msg; object_msg.header = header; - object_msg.objects_vector.swap(object_msg_ptr_->objects_vector); + object_msg.objects_vector.swap(detected_objects_topic_->objects_vector); pub_object_.publish(object_msg); - object_msg_ptr_ = nullptr; + detected_objects_topic_ = nullptr; } - if (object_msg_ptr_ != nullptr) + if (detected_objects_topic_ != nullptr) { object_msgs::ObjectsInBoxes object_msg; object_msg.header = header; - object_msg.objects_vector.swap(object_msg_ptr_->objects_vector); + object_msg.objects_vector.swap(detected_objects_topic_->objects_vector); pub_object_.publish(object_msg); - object_msg_ptr_ = nullptr; + detected_objects_topic_ = nullptr; } - if (face_reid_msg_ptr_ != nullptr) { + if (face_reid_topic_ != nullptr) { people_msgs::ReidentificationStamped face_reid_msg; face_reid_msg.header = header; - face_reid_msg.reidentified_vector.swap(face_reid_msg_ptr_->reidentified_vector); + face_reid_msg.reidentified_vector.swap(face_reid_topic_->reidentified_vector); pub_face_reid_.publish(face_reid_msg); - face_reid_msg_ptr_ = nullptr; + face_reid_topic_ = nullptr; } } +#if 0 //deprecated +/** + * Don't use this inferface to create new time stamp, it'd better use camera/topic + * time stamp. 
+ */ std_msgs::Header Outputs::RosTopicOutput::getHeader() { std_msgs::Header header; @@ -398,3 +430,4 @@ std_msgs::Header Outputs::RosTopicOutput::getHeader() header.stamp.nsec = ns % 1000000000; return header; } +#endif //depreated diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index bfcfe620..1c9e8670 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -26,6 +26,7 @@ #include "dynamic_vino_lib/outputs/rviz_output.h" Outputs::RvizOutput::RvizOutput(std::string pipeline_name) +: BaseOutput(pipeline_name) { image_topic_ = nullptr; pub_image_ = nh_.advertise("/openvino_toolkit/"+pipeline_name+"/images", 16); @@ -101,22 +102,11 @@ void Outputs::RvizOutput::handleOutput() image_window_output_->setPipeline(getPipeline()); image_window_output_->decorateFrame(); cv::Mat frame = image_window_output_->getFrame(); - std_msgs::Header header = getHeader(); + std_msgs::Header header = getPipeline()->getInputDevice()->getLockedHeader(); std::shared_ptr cv_ptr = std::make_shared(header, "bgr8", frame); sensor_msgs::Image image_msg; image_topic_ = cv_ptr->toImageMsg(); pub_image_.publish(image_topic_); } - std::shared_ptr image_topic_; -std_msgs::Header Outputs::RvizOutput::getHeader() -{ - std_msgs::Header header; - header.frame_id = getPipeline()->getInputDevice()->getFrameID(); - std::chrono::high_resolution_clock::time_point tp = std::chrono::high_resolution_clock::now(); - int64 ns = tp.time_since_epoch().count(); - header.stamp.sec = ns / 1000000000; - header.stamp.nsec = ns % 1000000000; - return header; -} From 403668d428855258246469fec69ef78a061b2721 Mon Sep 17 00:00:00 2001 From: lewisliu Date: Tue, 5 Jan 2021 15:11:31 +0800 Subject: [PATCH 36/63] align pipeline code structure with ROS2 and OV2020.3 Signed-off-by: lewisliu --- .../include/dynamic_vino_lib/pipeline.h | 16 +- .../dynamic_vino_lib/pipeline_manager.h | 8 +- 
.../dynamic_vino_lib/pipeline_params.h | 2 + dynamic_vino_lib/src/pipeline.cpp | 190 +++++++------- dynamic_vino_lib/src/pipeline_manager.cpp | 246 +++++++----------- dynamic_vino_lib/src/pipeline_params.cpp | 9 +- 6 files changed, 208 insertions(+), 263 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h index 94e0a3a8..e8a06b6b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h @@ -118,6 +118,14 @@ class Pipeline void setCallback(); void printPipeline(); + std::map> getOutputHandle() + { + return name_to_output_map_; + } + void setParams(PipelineParams pipeline_params) + { + params_ = std::make_shared(pipeline_params); + } const std::shared_ptr getParameters() { return params_; @@ -137,10 +145,6 @@ class Pipeline { return fps_; } - std::map> getOutputHandle() - { - return name_to_output_map_; - } std::string findFilterConditions(const std::string & input, const std::string & output) { @@ -174,7 +178,7 @@ class Pipeline name_to_detection_map_; std::map> name_to_output_map_; - + int total_inference_ = 0; std::set output_names_; int width_ = 0; int height_ = 0; @@ -184,6 +188,8 @@ class Pipeline std::mutex counter_mutex_; std::condition_variable cv_; int fps_ = 0; + int frame_cnt_ = 0; + std::chrono::time_point t_start_; }; #endif // DYNAMIC_VINO_LIB_PIPELINE_H_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index ef50ddc2..27c44fe8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -91,9 +91,9 @@ class PipelineManager { void operator=(PipelineManager const&); void threadPipeline(const char* name); std::map> - parseInputDevice(const Params::ParamManager::PipelineRawData& params); - std::map> parseOutput( - const 
Params::ParamManager::PipelineRawData& params); + parseInputDevice(const PipelineData & params); + std::map> + parseOutput(const PipelineData & pdata); std::map> parseInference(const Params::ParamManager::PipelineRawData& params); std::shared_ptr createFaceDetection( @@ -126,4 +126,4 @@ class PipelineManager { Engines::EngineManager engine_manager_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 7173cc21..7718c273 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -39,7 +39,9 @@ const char kInputType_Image[] = "Image"; const char kInputType_Video[] = "Video"; const char kInputType_StandardCamera[] = "StandardCamera"; +const char kInputType_IpCamera[] = "IpCamera"; const char kInputType_CameraTopic[] = "RealSenseCameraTopic"; +const char kInputType_ImageTopic[] = "ImageTopic"; const char kInputType_RealSenseCamera[] = "RealSenseCamera"; const char kInputType_ServiceImage[] = "ServiceImage"; diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 92afb4bc..16b3e505 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -29,46 +29,42 @@ Pipeline::Pipeline(const std::string& name) { - params_ = std::make_shared(name); + if (!name.empty()) { + params_ = std::make_shared(name); + } counter_ = 0; } bool Pipeline::add(const std::string& name, std::shared_ptr input_device) { + if (name.empty()) { + slog::err << "Item name can't be empty!" 
<< slog::endl; + return false; + } + slog::info << "Adding Input Device into Pipeline: " << name << slog::endl; input_device_name_ = name; input_device_ = input_device; - next_.insert({"", name}); + + addConnect("", name); return true; } bool Pipeline::add(const std::string& parent, const std::string& name, std::shared_ptr output) { - if (parent.empty()) - { - slog::err << "output device have no parent!" << slog::endl; + if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || output == nullptr) { + slog::err << "ARGuments ERROR when adding output instance!" << slog::endl; return false; } - if (name_to_detection_map_.find(parent) == name_to_detection_map_.end()) - { - slog::err << "parent detection does not exists!" << slog::endl; - return false; - } + if (add(name, output)) { + addConnect(parent, name); - if (output_names_.find(name) != output_names_.end()) - { - return add(parent, name); + return true; } - output_names_.insert(name); - name_to_output_map_[name] = output; - next_.insert({parent, name}); - - /**< Add pipeline instance to Output instance >**/ - output->setPipeline(this); - return true; + return false; } bool Pipeline::add(const std::string& parent, const std::string& name) @@ -90,30 +86,50 @@ bool Pipeline::add(const std::string& name, std::map>::iterator it = name_to_output_map_.find(name); if (it != name_to_output_map_.end()) { - slog::warn << "inferance instance for [" << name << + slog::warn << "inferance instance for [" << name << "] already exists, update it with new instance." 
<< slog::endl; } name_to_output_map_[name] = output; output_names_.insert(name); /**< Add pipeline instance to Output instance >**/ output->setPipeline(this); - + return true; } +void Pipeline::addConnect(const std::string & parent, const std::string & name) +{ + std::pair::iterator, + std::multimap::iterator> + ret; + ret = next_.equal_range(parent); + + for (std::multimap::iterator it = ret.first; it != ret.second; ++it) { + if (it->second == name) { + slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << + slog::endl; + return; + } + } + slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << + slog::endl; + next_.insert({parent, name}); +} + bool Pipeline::add(const std::string& parent, const std::string& name, std::shared_ptr inference) { - if (name_to_detection_map_.find(parent) == name_to_detection_map_.end() && - input_device_name_ != parent) - { - slog::err << "parent device/detection does not exists!" << slog::endl; + if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) { + slog::err << "ARGuments ERROR when adding inference instance!" << slog::endl; return false; } - next_.insert({parent, name}); - name_to_detection_map_[name] = inference; - ++total_inference_; - return true; + + if (add(name, inference)) { + addConnect(parent, name); + return true; + } + + return false; } bool Pipeline::add(const std::string& name, @@ -122,11 +138,12 @@ bool Pipeline::add(const std::string& name, slog::err << "Item name can't be empty!" << slog::endl; return false; } - - std::map>::iterator it = name_to_detection_map_.find(name); + + std::map>::iterator it = + name_to_detection_map_.find(name); if (it != name_to_detection_map_.end()) { - slog::warn << "inferance instance for [" << name << - "] already exists, update it with new instance." << slog::endl; + slog::warn << "inferance instance for [" << name << + "] already exists, update it with new instance." 
<< slog::endl; } else { ++total_inference_; } @@ -135,21 +152,6 @@ bool Pipeline::add(const std::string& name, return true; } -int Pipeline::getCatagoryOrder(const std::string name) -{ - int order = kCatagoryOrder_Unknown; - if (name == input_device_name_) { - order = kCatagoryOrder_Input; - } else if (name_to_detection_map_.find(name) != name_to_detection_map_.end()) { - order = kCatagoryOrder_Inference; - } else if (name_to_output_map_.find(name) != name_to_output_map_.end()) { - order = kCatagoryOrder_Output; - } - - return order; -} - - bool Pipeline::isLegalConnect(const std::string parent, const std::string child) { int parent_order = getCatagoryOrder(parent); @@ -162,61 +164,58 @@ bool Pipeline::isLegalConnect(const std::string parent, const std::string child) (parent_order <= child_order); } -void Pipeline::addConnect(const std::string & parent, const std::string & name) +int Pipeline::getCatagoryOrder(const std::string name) { - std::pair::iterator, - std::multimap::iterator> - ret; - ret = next_.equal_range(parent); - - for (std::multimap::iterator it = ret.first; it != ret.second; ++it) { - if (it->second == name) { - slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." 
<< - slog::endl; - return; - } + int order = kCatagoryOrder_Unknown; + if (name == input_device_name_) { + order = kCatagoryOrder_Input; + } else if (name_to_detection_map_.find(name) != name_to_detection_map_.end()) { + order = kCatagoryOrder_Inference; + } else if (name_to_output_map_.find(name) != name_to_output_map_.end()) { + order = kCatagoryOrder_Output; } - slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << - slog::endl; - next_.insert({parent, name}); -} + return order; +} void Pipeline::runOnce() { initInferenceCounter(); - if (!input_device_->read(&frame_)) - { + if (!input_device_->read(&frame_)) { // throw std::logic_error("Failed to get frame from cv::VideoCapture"); - slog::warn << "Failed to get frame from input_device." << slog::endl; - return; + // slog::warn << "Failed to get frame from input_device." << slog::endl; + return; //do nothing if now frame read out } - countFPS(); width_ = frame_.cols; height_ = frame_.rows; - for (auto& pair : name_to_output_map_) - { - pair.second->feedFrame(frame_); - } - - for (auto pos = next_.equal_range(input_device_name_); - pos.first != pos.second; ++pos.first) - { + slog::debug << "DEBUG: in Pipeline run process..." 
<< slog::endl; + // auto t0 = std::chrono::high_resolution_clock::now(); + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { std::string detection_name = pos.first->second; auto detection_ptr = name_to_detection_map_[detection_name]; - - detection_ptr->enqueue(frame_, - cv::Rect(width_ / 2, height_ / 2, width_, height_)); + detection_ptr->enqueue(frame_, cv::Rect(width_ / 2, height_ / 2, width_, height_)); increaseInferenceCounter(); detection_ptr->submitRequest(); } - std::unique_lock lock(counter_mutex_); - cv_.wait(lock, [this]() { return this->counter_ == 0; }); - for (auto& pair : name_to_output_map_) + for (auto &pair : name_to_output_map_) { + pair.second->feedFrame(frame_); + } + countFPS(); + + slog::debug << "DEBUG: align inference process, waiting until all inferences done!" << slog::endl; + std::unique_lock lock(counter_mutex_); + cv_.wait(lock, [self = this]() {return self->counter_ == 0;}); + + //auto t1 = std::chrono::high_resolution_clock::now(); + //typedef std::chrono::duration> ms; + + slog::debug << "DEBUG: in Pipeline run process...handleOutput" << slog::endl; + for (auto & pair : name_to_output_map_) { + // slog::info << "Handling Output ..." 
<< pair.first << slog::endl; pair.second->handleOutput(); } } @@ -306,22 +305,13 @@ void Pipeline::decreaseInferenceCounter() void Pipeline::countFPS() { - static int fps = 0; - - static auto t_start = std::chrono::high_resolution_clock::now(); - static int frame_cnt = 0; - - frame_cnt++; - + frame_cnt_++; auto t_end = std::chrono::high_resolution_clock::now(); typedef std::chrono::duration> ms; - ms secondDetection = std::chrono::duration_cast(t_end - t_start); - - if (secondDetection.count() > 1000) { - fps = frame_cnt; - setFPS(fps); - frame_cnt = 0; - t_start = t_end; + ms secondDetection = std::chrono::duration_cast(t_end - t_start_); + if (secondDetection.count() > 1000) { + setFPS(frame_cnt_); + frame_cnt_ = 0; + t_start_ = t_end; } - -} +} diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 07fc1211..8ac16fc1 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -63,12 +63,17 @@ std::shared_ptr PipelineManager::createPipeline( if (params.name == "") { throw std::logic_error("The name of pipeline won't be empty!"); } - PipelineData data; std::shared_ptr pipeline = std::make_shared(params.name); pipeline->getParameters()->update(params); - auto inputs = parseInputDevice(params); + PipelineData data; + data.parent_node = node; + data.pipeline = pipeline; + data.params = params; + data.state = PipelineState_ThreadNotCreated; + + auto inputs = parseInputDevice(data); if (inputs.size() != 1) { slog::err << "currently one pipeline only supports ONE input." 
<< slog::endl; @@ -82,7 +87,7 @@ std::shared_ptr PipelineManager::createPipeline( } } - auto outputs = parseOutput(params); + auto outputs = parseOutput(data); for (auto it = outputs.begin(); it != outputs.end(); ++it) { pipeline->add(it->first, it->second); } @@ -97,9 +102,6 @@ std::shared_ptr PipelineManager::createPipeline( pipeline->add(it->first, it->second); } - data.pipeline = pipeline; - data.params = params; - data.state = PipelineState_ThreadNotCreated; pipelines_.insert({params.name, data}); pipeline->setCallback(); @@ -109,27 +111,32 @@ std::shared_ptr PipelineManager::createPipeline( } std::map> -PipelineManager::parseInputDevice( - const Params::ParamManager::PipelineRawData& params) { +PipelineManager::parseInputDevice(const PipelineData & pdata) +{ std::map> inputs; - for (auto& name : params.inputs) { + for (auto & name : pdata.params.inputs) { slog::info << "Parsing InputDvice: " << name << slog::endl; std::shared_ptr device = nullptr; if (name == kInputType_RealSenseCamera) { device = std::make_shared(); } else if (name == kInputType_StandardCamera) { device = std::make_shared(); - } else if (name == kInputType_CameraTopic) { - device = std::make_shared(); - std::cout <<"register yaml"<(pdata.params.input_meta); + } + } else if (name == kInputType_CameraTopic || name == kInputType_ImageTopic) { + device = std::make_shared(pdata.parent_node); } else if (name == kInputType_Video) { - if (params.input_meta != "") { - device = std::make_shared(params.input_meta); + if (pdata.params.input_meta != "") { + device = std::make_shared(pdata.params.input_meta); } } else if (name == kInputType_Image) { - if (params.input_meta != "") { - device = std::make_shared(params.input_meta); + if (pdata.params.input_meta != "") { + device = std::make_shared(pdata.params.input_meta); } + } else { + slog::err << "Invalid input device name: " << name << slog::endl; } if (device != nullptr) { @@ -143,20 +150,20 @@ PipelineManager::parseInputDevice( } std::map> 
-PipelineManager::parseOutput( - const Params::ParamManager::PipelineRawData& params) { +PipelineManager::parseOutput(const PipelineData & pdata) +{ std::map> outputs; - for (auto& name : params.outputs) { + for (auto & name : pdata.params.outputs) { slog::info << "Parsing Output: " << name << slog::endl; std::shared_ptr object = nullptr; if (name == kOutputTpye_RosTopic) { - object = std::make_shared(params.name); + object = std::make_shared(pdata.params.name, pdata.parent_node); } else if (name == kOutputTpye_ImageWindow) { - object = std::make_shared(params.name); + object = std::make_shared(pdata.params.name); } else if (name == kOutputTpye_RViz) { - object = std::make_shared(params.name); + object = std::make_shared(pdata.params.name, pdata.parent_node); } else if (name == kOutputTpye_RosService) { - object = std::make_shared(params.name); + object = std::make_shared(pdata.params.name); } else { slog::err << "Invalid output name: " << name << slog::endl; } @@ -170,19 +177,10 @@ PipelineManager::parseOutput( } std::map> -PipelineManager::parseInference( - const Params::ParamManager::PipelineRawData& params) { - /**< update plugins for devices >**/ - auto pcommon = Params::ParamManager::getInstance().getCommon(); - std::string FLAGS_l = pcommon.custom_cpu_library; - std::string FLAGS_c = pcommon.custom_cldnn_library; - //std::string FLAGS_c = "/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/lib/ubuntu_16.04/intel64/cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml"; - - bool FLAGS_pc = pcommon.enable_performance_count; - - std::map> - inferences; - for (auto& infer : params.infers) { +PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & params) +{ + std::map> inferences; + for (auto & infer : params.infers) { if (infer.name.empty() || infer.model.empty()) { continue; } @@ -240,26 +238,15 @@ PipelineManager::parseInference( std::shared_ptr PipelineManager::createFaceDetection( - const 
Params::ParamManager::InferenceRawData& infer) { - // TODO: add batch size in param_manager - auto face_detection_model = - std::make_shared(infer.model, 1, 1, infer.batch); - face_detection_model->modelInit(); - auto face_detection_engine = engine_manager_.createEngine( - infer.engine, face_detection_model); - auto face_inference_ptr = std::make_shared( - 0.5); // TODO: add output_threshold in param_manager - face_inference_ptr->loadNetwork(face_detection_model); - face_inference_ptr->loadEngine(face_detection_engine); - - return face_inference_ptr; + const Params::ParamManager::InferenceRawData & infer) +{ + return createObjectDetection(infer); } std::shared_ptr -PipelineManager::createAgeGenderRecognition( - const Params::ParamManager::InferenceRawData& param) { - auto model = - std::make_shared(param.model, 1, 2, param.batch); +PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & param) +{ + auto model = std::make_shared(param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -270,10 +257,9 @@ PipelineManager::createAgeGenderRecognition( } std::shared_ptr -PipelineManager::createEmotionRecognition( - const Params::ParamManager::InferenceRawData& param) { - auto model = - std::make_shared(param.model, 1, 1, param.batch); +PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData & param) +{ + auto model = std::make_shared(param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -284,10 +270,9 @@ PipelineManager::createEmotionRecognition( } std::shared_ptr -PipelineManager::createHeadPoseEstimation( - const Params::ParamManager::InferenceRawData& param) { - auto model = - std::make_shared(param.model, 1, 3, param.batch); +PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & param) 
+{ + auto model = std::make_shared(param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); auto infer = std::make_shared(); @@ -303,25 +288,20 @@ const Params::ParamManager::InferenceRawData & infer) { std::shared_ptr object_detection_model; std::shared_ptr object_inference_ptr; - - if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) - { - object_detection_model = - std::make_shared(infer.model, 1, 1, infer.batch); - - object_inference_ptr = std::make_shared( - 0.5); // To-do theshold configuration + slog::debug << "for test in createObjectDetection()" << slog::endl; + if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) { + object_detection_model = + std::make_shared(infer.model, infer.batch); } - - if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2voc) - { - object_detection_model = - std::make_shared(infer.model, 1, 1, infer.batch); - - object_inference_ptr = std::make_shared( - 0.5); // To-do theshold configuration + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2) { + object_detection_model = + std::make_shared(infer.model, infer.batch); } + slog::debug << "for test in createObjectDetection(), Created SSDModel" << slog::endl; + object_inference_ptr = std::make_shared( + infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration + slog::debug << "for test in createObjectDetection(), before modelInit()" << slog::endl; object_detection_model->modelInit(); auto object_detection_engine = engine_manager_.createEngine( infer.engine, object_detection_model); @@ -335,7 +315,7 @@ std::shared_ptr PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) { auto model = - std::make_shared(infer.model, 2, 2, infer.batch); + std::make_shared(infer.model, infer.batch); model->modelInit(); slog::info << "Segmentation model initialized." 
<< slog::endl; auto engine = engine_manager_.createEngine(infer.engine, model); @@ -353,12 +333,15 @@ std::shared_ptr PipelineManager::createPersonReidentification( const Params::ParamManager::InferenceRawData & infer) { - auto person_reidentification_model = - std::make_shared(infer.model, 1, 1, infer.batch); + std::shared_ptr person_reidentification_model; + std::shared_ptr reidentification_inference_ptr; + slog::debug << "for test in createPersonReidentification()"<(infer.model, infer.batch); person_reidentification_model->modelInit(); - auto person_reidentification_engine = engine_manager_.createEngine( - infer.engine, person_reidentification_model); - auto reidentification_inference_ptr = + slog::info << "Reidentification model initialized" << slog::endl; + auto person_reidentification_engine = engine_manager_.createEngine(infer.engine, person_reidentification_model); + reidentification_inference_ptr = std::make_shared(infer.confidence_threshold); reidentification_inference_ptr->loadNetwork(person_reidentification_model); reidentification_inference_ptr->loadEngine(person_reidentification_engine); @@ -366,73 +349,18 @@ PipelineManager::createPersonReidentification( return reidentification_inference_ptr; } -std::shared_ptr -PipelineManager::createFaceReidentification( - const Params::ParamManager::InferenceRawData & infer) -{ - auto face_reidentification_model = - std::make_shared(infer.model, 1, 1, infer.batch); - face_reidentification_model->modelInit(); - auto face_reidentification_engine = engine_manager_.createEngine( - infer.engine, face_reidentification_model); - auto face_reid_ptr = - std::make_shared(infer.confidence_threshold); - face_reid_ptr->loadNetwork(face_reidentification_model); - face_reid_ptr->loadEngine(face_reidentification_engine); - - return face_reid_ptr; -} - -std::shared_ptr -PipelineManager::createPersonAttribsDetection( - const Params::ParamManager::InferenceRawData & infer) -{ - auto person_attribs_detection_model = - 
std::make_shared(infer.model, 1, 1, infer.batch); - person_attribs_detection_model->modelInit(); - auto person_attribs_detection_engine = engine_manager_.createEngine( - infer.engine, person_attribs_detection_model); - auto attribs_inference_ptr = - std::make_shared(infer.confidence_threshold); - attribs_inference_ptr->loadNetwork(person_attribs_detection_model); - attribs_inference_ptr->loadEngine(person_attribs_detection_engine); - - return attribs_inference_ptr; -} - -std::shared_ptr -PipelineManager::createLandmarksDetection( - const Params::ParamManager::InferenceRawData & infer) -{ - auto landmarks_detection_model = - std::make_shared(infer.model, 1, 1, infer.batch); - landmarks_detection_model->modelInit(); - auto landmarks_detection_engine = engine_manager_.createEngine( - infer.engine, landmarks_detection_model); - auto landmarks_inference_ptr = - std::make_shared(); - landmarks_inference_ptr->loadNetwork(landmarks_detection_model); - landmarks_inference_ptr->loadEngine(landmarks_detection_engine); - - return landmarks_inference_ptr; -} - - - - std::shared_ptr PipelineManager::createVehicleAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { - auto vehicle_attribs_model = - std::make_shared(infer.model, 1, 2, infer.batch); - vehicle_attribs_model->modelInit(); - auto vehicle_attribs_engine = engine_manager_.createEngine( - infer.engine, vehicle_attribs_model); + auto model = + std::make_shared(infer.model, infer.batch); + model->modelInit(); + auto engine = engine_manager_.createEngine(infer.engine, model); auto vehicle_attribs_ptr = std::make_shared(); - vehicle_attribs_ptr->loadNetwork(vehicle_attribs_model); - vehicle_attribs_ptr->loadEngine(vehicle_attribs_engine); + vehicle_attribs_ptr->loadNetwork(model); + vehicle_attribs_ptr->loadEngine(engine); return vehicle_attribs_ptr; } @@ -441,19 +369,35 @@ std::shared_ptr PipelineManager::createLicensePlateDetection( const Params::ParamManager::InferenceRawData & infer) { - auto 
license_plate_model = - std::make_shared(infer.model, 2, 1, infer.batch); - license_plate_model->modelInit(); - auto license_plate_engine = engine_manager_.createEngine( - infer.engine, license_plate_model); + auto model = + std::make_shared(infer.model, infer.batch); + model->modelInit(); + auto engine = engine_manager_.createEngine(infer.engine, model); auto license_plate_ptr = std::make_shared(); - license_plate_ptr->loadNetwork(license_plate_model); - license_plate_ptr->loadEngine(license_plate_engine); + license_plate_ptr->loadNetwork(model); + license_plate_ptr->loadEngine(engine); return license_plate_ptr; } +std::shared_ptr +PipelineManager::createPersonAttribsDetection( + const Params::ParamManager::InferenceRawData & infer) +{ + auto model = + std::make_shared(infer.model, infer.batch); + slog::debug << "for test in createPersonAttributesDetection()"<modelInit(); + auto engine = engine_manager_.createEngine(infer.engine, model); + auto attribs_inference_ptr = + std::make_shared(infer.confidence_threshold); + attribs_inference_ptr->loadNetwork(model); + attribs_inference_ptr->loadEngine(engine); + + return attribs_inference_ptr; +} + void PipelineManager::threadPipeline(const char* name) { PipelineData& p = pipelines_[name]; while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/dynamic_vino_lib/src/pipeline_params.cpp index 49f7a30c..9b7b60f1 100644 --- a/dynamic_vino_lib/src/pipeline_params.cpp +++ b/dynamic_vino_lib/src/pipeline_params.cpp @@ -79,8 +79,11 @@ bool PipelineParams::isOutputTo(std::string& output) bool PipelineParams::isGetFps() { /**< Only "Image" input can't computing FPS >**/ - return std::find(params_.inputs.begin(), params_.inputs.end(), - kInputType_Image) == params_.inputs.end(); + if (params_.inputs.size() == 0) { + return false; + } + return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == + params_.inputs.end(); } 
std::string PipelineParams::findFilterConditions( @@ -92,4 +95,4 @@ std::string PipelineParams::findFilterConditions( } } return ""; -} \ No newline at end of file +} From 561a84dec851b4fc8d6e2ef6ccb1ad5ad6d0172f Mon Sep 17 00:00:00 2001 From: Lewis Liu Date: Mon, 11 Jan 2021 15:22:22 +0800 Subject: [PATCH 37/63] fix some build errors. Signed-off-by: Lewis Liu --- dynamic_vino_lib/CMakeLists.txt | 12 +- .../dynamic_vino_lib/inputs/standard_camera.h | 2 +- .../models/attributes/base_attribute.h | 199 ++++++++++++++++++ .../dynamic_vino_lib/models/base_model.h | 3 +- .../dynamic_vino_lib/outputs/base_output.h | 2 - .../outputs/ros_service_output.h | 2 +- .../services/frame_processing_server.h | 4 +- .../services/pipeline_processing_server.h | 4 +- .../dynamic_vino_lib/utils/version_info.hpp | 15 +- .../src/services/frame_processing_server.cpp | 3 +- .../services/pipeline_processing_server.cpp | 4 +- pipeline_srv_msgs/CMakeLists.txt | 2 +- vino_launch/CMakeLists.txt | 2 +- 13 files changed, 222 insertions(+), 32 deletions(-) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index f5b428ec..f0acaec8 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -23,7 +23,7 @@ project(dynamic_vino_lib) #################################### message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") -find_package(InferenceEngine 1.1) +find_package(InferenceEngine REQUIRED) if (NOT InferenceEngine_FOUND) message(FATAL_ERROR "") endif() @@ -188,11 +188,11 @@ add_library(${PROJECT_NAME} SHARED src/outputs/ros_service_output.cpp ) -add_dependencies(${PROJECT_NAME} - ${${PROJECT_NAME}_EXPORTED_TARGETS} - ${catkin_EXPORTED_TARGETS} - "object_msgs" -) +#add_dependencies(${PROJECT_NAME} +# ${${PROJECT_NAME}_EXPORTED_TARGETS} +# ${catkin_EXPORTED_TARGETS} +# "object_msgs" +#) 
target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h index 5c69eb5e..78fc408f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h @@ -24,7 +24,7 @@ #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "dynamic_vino_lib/inputs/base_input.h" #include #include #include diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h new file mode 100644 index 00000000..453470d7 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h @@ -0,0 +1,199 @@ +// Copyright (c) 2018-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for ModelAttribute class. 
+ * @file base_attribute.hpp + */ + +#ifndef DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ +#define DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ + +#include +#include +#include +#include + +#include "inference_engine.hpp" +#include "dynamic_vino_lib/slog.h" + +namespace Models +{ +/** + * @class ModelAttribute + * @brief This class represents the network given by .xml and .bin file + */ +class ModelAttribute +{ +public: + using Ptr = std::shared_ptr; + struct ModelAttr { + int max_proposal_count = 0; + int object_size = 0; + int input_height = 0; + int input_width = 0; + std::string model_name; + std::map input_names; + std::map output_names; + std::vector labels; + }; + + ModelAttribute(const std::string model_name) + { + attr_.model_name = model_name; + } + + inline bool isVerified() + { + return (attr_.max_proposal_count > 0 && attr_.object_size > 0 && attr_.input_height > 0 + && attr_.input_width > 0 && attr_.input_names.empty() && attr_.output_names.empty()); + } + inline void printAttribute() + { + slog::info << "----Attributes for Model " << attr_.model_name << "----" << slog::endl; + slog::info << "| model_name: " << attr_.model_name << slog::endl; + slog::info << "| max_proposal_count: " << attr_.max_proposal_count << slog::endl; + slog::info << "| object_size: " << attr_.object_size << slog::endl; + slog::info << "| input_height: " << attr_.input_height << slog::endl; + slog::info << "| input_width: " << attr_.input_width << slog::endl; + slog::info << "| input_names: " << slog::endl; + for (auto & item: attr_.input_names) { + slog::info << "| " << item.first << "-->" << item.second << slog::endl; + } + slog::info << "| output_names: " << slog::endl; + for (auto & item: attr_.output_names) { + slog::info << "| " << item.first << "-->" << item.second << slog::endl; + } + + if(attr_.max_proposal_count <= 0 || attr_.object_size <= 0 || attr_.input_height <= 0 + || attr_.input_width <= 0 || attr_.input_names.empty() || 
attr_.output_names.empty()){ + slog::info << "--------" << slog::endl; + slog::warn << "Not all attributes are set correctly! not 0 or empty is allowed in" + << " the above list." << slog::endl; + } + slog::info << "--------------------------------" << slog::endl; + } + + virtual bool updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr &) + { return false; } + + inline std::string getModelName() const + { + return attr_.model_name; + } + + inline void setModelName(std::string name) + { + attr_.model_name = name; + } + + inline std::string getInputName(std::string name = "input") const + { + // std::map::iterator it; + auto it = attr_.input_names.find(name); + if(it == attr_.input_names.end()){ + slog::warn << "No input named: " << name << slog::endl; + return std::string(""); + } + + return it->second; + } + + inline std::string getOutputName(std::string name = "output") const + { + //std::map::iterator it; + auto it = attr_.output_names.find(name); + if(it == attr_.output_names.end()){ + slog::warn << "No output named: " << name << slog::endl; + return std::string(""); + } + + return it->second; + } + + inline int getMaxProposalCount() const + { + return attr_.max_proposal_count; + } + + inline int getObjectSize() const + { + return attr_.object_size; + } + + inline void loadLabelsFromFile(const std::string file_path) + { + std::ifstream input_file(file_path); + std::copy(std::istream_iterator(input_file), + std::istream_iterator(), + std::back_inserter(attr_.labels)); + } + + inline std::vector& getLabels() + { + return attr_.labels; + } + + inline void addInputInfo(std::string key, std::string value) + { + attr_.input_names[key] = value; + } + + inline void addOutputInfo(std::string key, std::string value) + { + attr_.output_names[key] = value; + } + + inline void setInputHeight(const int height) + { + attr_.input_height = height; + } + + inline void setInputWidth(const int width) + { + attr_.input_width = width; + } + + inline void 
setMaxProposalCount(const int max) + { + attr_.max_proposal_count = max; + } + + inline void setObjectSize(const int size) + { + attr_.object_size = size; + } + +protected: + ModelAttr attr_; + +}; + +#if 0 //not used +class SSDModelAttr : public ModelAttribute +{ +public: + explicit SSDModelAttr(const std::string model_name = "SSDNet-like"); + + bool updateLayerProperty( + const InferenceEngine::CNNNetReader::Ptr &); + +}; +#endif + + +} // namespace Models + +#endif // DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index c1f7d645..826e5d1b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -31,6 +31,7 @@ #include #include "inference_engine.hpp" +#include "dynamic_vino_lib/models/attributes/base_attribute.h" namespace Engines { @@ -140,6 +141,4 @@ namespace Models } // namespace Models -} // namespace Models - #endif // DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index caaf7e53..f0f1d353 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -30,8 +30,6 @@ #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/inferences/face_detection.h" #include "dynamic_vino_lib/inferences/head_pose_detection.h" -#include "dynamic_vino_lib/inferences/object_detection_ssd.h" -#include "dynamic_vino_lib/inferences/object_detection_yolov2_voc.h" #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/inferences/person_reidentification.h" #include "dynamic_vino_lib/inferences/landmarks_detection.h" diff --git 
a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index 4433ce3c..170ab96b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h index 53e30f1d..cba664a8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -59,4 +59,4 @@ class FrameProcessingServer }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h index bd638aa2..3437f2c4 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -71,4 +71,4 @@ class PipelineProcessingServer }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp index 735f3406..431fa86d 
100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp @@ -24,7 +24,9 @@ #include #include #include +#if(defined(USE_OLD_E_PLUGIN_API)) #include +#endif #include #include #include @@ -54,17 +56,6 @@ inline std::string & trim(std::string & s) return s; } -/** -* @brief Converts string to TargetDevice -* @param deviceName - string value representing device -* @return TargetDevice value that corresponds to input string. -* eDefault in case no corresponding value was found -*/ -static InferenceEngine::TargetDevice getDeviceFromStr(const std::string & deviceName) -{ - return InferenceEngine::TargetDeviceInfo::fromStr(deviceName); -} - static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) { os << "\n\tAPI version ............ "; @@ -84,6 +75,7 @@ static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Versi return os; } +#if(defined(USE_OLD_E_PLUGIN_API)) /** * @class PluginVersion * @brief A PluginVersion class stores plugin version and initialization status @@ -139,5 +131,6 @@ inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, st ptr->GetVersion((const InferenceEngine::Version * &)pluginVersion); stream << pluginVersion << std::endl; } +#endif // (defined(USE_OLD_E_PLUGIN_API)) #endif // DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index d55405db..095f9885 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -16,7 +16,8 @@ #include #include #include -#include +#include +#include #include #include #include diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp index 1745e479..28554164 100644 --- 
a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp +++ b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp @@ -16,8 +16,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/pipeline_srv_msgs/CMakeLists.txt b/pipeline_srv_msgs/CMakeLists.txt index f169b913..6144488e 100644 --- a/pipeline_srv_msgs/CMakeLists.txt +++ b/pipeline_srv_msgs/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required(VERSION 3.0.2) project(pipeline_srv_msgs) diff --git a/vino_launch/CMakeLists.txt b/vino_launch/CMakeLists.txt index b463d711..b6bd5adc 100644 --- a/vino_launch/CMakeLists.txt +++ b/vino_launch/CMakeLists.txt @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -cmake_minimum_required(VERSION 2.8.3) +cmake_minimum_required (VERSION 3.0.2) project(vino_launch) From 7532d0f4da481a48d3269d76211756ef03ff07b3 Mon Sep 17 00:00:00 2001 From: lewisliu Date: Tue, 12 Jan 2021 19:21:54 +0800 Subject: [PATCH 38/63] update slog to fix slog errors for slog::debug Signed-off-by: lewisliu --- .../include/dynamic_vino_lib/slog.h | 100 ++++++++++++++++-- 1 file changed, 91 insertions(+), 9 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/slog.h b/dynamic_vino_lib/include/dynamic_vino_lib/slog.h index 0466457a..56d7ebb8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/slog.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/slog.h @@ -28,6 +28,36 @@ namespace slog { +#if 1 + enum COLOR { + RESET = 0, + BLUE = 1, + GREEN = 2, + YELLOW = 3, + RED = 4, + }; + +#else +//the following are UBUNTU/LINUX ONLY terminal color codes. 
+#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ +#endif + /** * @class LogStreamEndLine * @brief The LogStreamEndLine class implements an end line marker for a log @@ -48,14 +78,16 @@ class LogStream std::string _prefix; std::ostream* _log_stream; bool _new_line; + int _color_id; public: /** * @brief A constructor. 
Creates an LogStream object * @param prefix The prefix to print */ - LogStream(const std::string& prefix, std::ostream& log_stream) - : _prefix(prefix), _new_line(true) + LogStream(const std::string & prefix, std::ostream & log_stream, + const int color_id = -1) + : _prefix(prefix), _new_line(true), _color_id(color_id) { _log_stream = &log_stream; } @@ -67,9 +99,9 @@ class LogStream template LogStream& operator<<(const T& arg) { - if (_new_line) - { - (*_log_stream) << "[ " << _prefix << " ] "; + if (_new_line) { + setLineColor(); + (*_log_stream) << "[ " << _prefix << " ] "; _new_line = false; } @@ -81,15 +113,65 @@ class LogStream LogStream& operator<<(const LogStreamEndLine& arg) { _new_line = true; - + resetLineColor(); (*_log_stream) << std::endl; return *this; } + + void setLineColor() + { + switch(_color_id){ + case BLUE: + (*_log_stream) << "\033[34m"; + break; + case GREEN: + (*_log_stream) << "\033[32m"; + break; + case YELLOW: + (*_log_stream) << "\033[33m"; + break; + case RED: + (*_log_stream) << "\033[31m"; + break; + default: + break; + } + } + + void resetLineColor() + { + if(_color_id > 0){ + (*_log_stream) << "\033[0m"; //RESET + } + } +}; + +class NullStream +{ +public: + NullStream(){} + + NullStream(const std::string & prefix, std::ostream & log_stream) + { + (void)prefix; + (void)log_stream; + } + + template + NullStream & operator<<(const T & arg) + { + return *this; + } }; -static LogStream info("INFO", std::cout); -static LogStream warn("WARNING", std::cout); -static LogStream err("ERROR", std::cerr); +#ifdef LOG_LEVEL_DEBUG + static LogStream debug("DEBUG", std::cout, GREEN); +#else + static NullStream debug; +#endif +static LogStream info("INFO", std::cout, BLUE); +static LogStream warn("WARNING", std::cout, YELLOW); +static LogStream err("ERROR", std::cerr, RED); } // namespace slog #endif // DYNAMIC_VINO_LIB_SLOG_H From 3eb5edf35089fb2202d2bb853b580160e4e350cc Mon Sep 17 00:00:00 2001 From: lewisliu Date: Mon, 1 Feb 2021 09:08:21 +0800 
Subject: [PATCH 39/63] add missing header. Signed-off-by: lewisliu --- .../dynamic_vino_lib/inputs/image_topic.h | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h new file mode 100644 index 00000000..77cb8c2a --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h @@ -0,0 +1,55 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for ImageTopic class + * @file image_topic.h + */ + +#ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#define DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ + +#include +#include +#include +#include +#include "dynamic_vino_lib/utils/mutex_counter.hpp" +#include "dynamic_vino_lib/inputs/base_input.hpp" + +namespace Input +{ +/** + * @class ImageTopic + * @brief Class for recieving a realsense camera topic as input. 
+ */ +class ImageTopic : public BaseInputDevice +{ +public: + ImageTopic(rclcpp::Node::SharedPtr node = nullptr); + bool initialize() override; + bool initialize(size_t width, size_t height) override; + bool read(cv::Mat * frame) override; + +private: + ros::NodeHandle nh_; + image_transport::Subscriber sub_; + cv::Mat image_; + MutexCounter image_count_; + rclcpp::Node::SharedPtr node_ = nullptr; + + void cb(const sensor_msgs::ImageConstPtr& image_msg); +}; +} // namespace Input + +#endif // DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ From f0e377a81e7150c7a831ecfbe1bf35af92fc85f0 Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Sat, 20 Feb 2021 15:12:40 +0800 Subject: [PATCH 40/63] Fix ROS cpp format --- .../include/dynamic_vino_lib/args_helper.h | 15 +- .../include/dynamic_vino_lib/common.h | 552 ++++++++++-------- .../include/dynamic_vino_lib/engines/engine.h | 6 +- .../dynamic_vino_lib/engines/engine_manager.h | 20 +- .../inferences/age_gender_detection.h | 13 +- .../dynamic_vino_lib/inferences/base_filter.h | 111 ++-- .../inferences/base_inference.h | 50 +- .../inferences/base_reidentification.h | 11 +- .../inferences/emotions_detection.h | 16 +- .../inferences/face_detection.h | 2 +- .../inferences/face_reidentification.h | 16 +- .../inferences/head_pose_detection.h | 14 +- .../inferences/inference_manager.h | 49 +- .../inferences/landmarks_detection.h | 11 +- .../inferences/license_plate_detection.h | 96 ++- .../inferences/object_detection.h | 34 +- .../inferences/object_segmentation.h | 28 +- .../inferences/person_attribs_detection.h | 16 +- .../inferences/person_reidentification.h | 16 +- .../inferences/vehicle_attribs_detection.h | 17 +- .../dynamic_vino_lib/inputs/base_input.h | 16 +- .../dynamic_vino_lib/inputs/image_input.h | 6 +- .../dynamic_vino_lib/inputs/image_topic.h | 2 +- .../inputs/realsense_camera.h | 4 +- .../dynamic_vino_lib/inputs/ros_handler.h | 5 +- .../dynamic_vino_lib/inputs/standard_camera.h | 4 +- 
.../dynamic_vino_lib/inputs/video_input.h | 4 +- .../models/age_gender_detection_model.h | 3 +- .../models/attributes/base_attribute.h | 60 +- .../dynamic_vino_lib/models/base_model.h | 171 +++--- .../models/emotion_detection_model.h | 5 +- .../models/face_detection_model.h | 4 +- .../models/face_reidentification_model.h | 12 +- .../models/head_pose_detection_model.h | 4 +- .../models/landmarks_detection_model.h | 12 +- .../models/license_plate_detection_model.h | 22 +- .../models/object_detection_ssd_model.h | 25 +- .../models/object_detection_yolov2voc_model.h | 24 +- .../models/object_segmentation_model.h | 9 +- .../models/person_attribs_detection_model.h | 10 +- .../models/person_reidentification_model.h | 16 +- .../models/vehicle_attribs_detection_model.h | 21 +- .../dynamic_vino_lib/outputs/base_output.h | 71 ++- .../outputs/image_window_output.h | 106 ++-- .../outputs/ros_service_output.h | 11 +- .../outputs/ros_topic_output.h | 28 +- .../dynamic_vino_lib/outputs/rviz_output.h | 47 +- .../include/dynamic_vino_lib/pipeline.h | 26 +- .../dynamic_vino_lib/pipeline_manager.h | 84 ++- .../dynamic_vino_lib/pipeline_params.h | 15 +- .../services/frame_processing_server.h | 17 +- .../services/pipeline_processing_server.h | 24 +- .../include/dynamic_vino_lib/slog.h | 115 ++-- .../dynamic_vino_lib/utils/version_info.hpp | 72 ++- dynamic_vino_lib/src/engines/engine.cpp | 12 +- .../src/engines/engine_manager.cpp | 70 +-- .../src/inferences/age_gender_detection.cpp | 49 +- .../src/inferences/base_filter.cpp | 170 ++++-- .../src/inferences/base_inference.cpp | 20 +- .../src/inferences/base_reidentification.cpp | 95 +-- .../src/inferences/emotions_detection.cpp | 55 +- .../src/inferences/face_detection.cpp | 10 +- .../src/inferences/face_reidentification.cpp | 62 +- .../src/inferences/head_pose_detection.cpp | 52 +- .../src/inferences/landmarks_detection.cpp | 69 ++- .../inferences/license_plate_detection.cpp | 71 ++- .../src/inferences/object_detection.cpp | 89 ++- 
.../src/inferences/object_segmentation.cpp | 106 ++-- .../inferences/person_attribs_detection.cpp | 72 ++- .../inferences/person_reidentification.cpp | 64 +- .../inferences/vehicle_attribs_detection.cpp | 67 ++- dynamic_vino_lib/src/inputs/image_input.cpp | 15 +- dynamic_vino_lib/src/inputs/image_topic.cpp | 33 +- .../src/inputs/realsense_camera.cpp | 28 +- .../src/inputs/standard_camera.cpp | 20 +- dynamic_vino_lib/src/inputs/video_input.cpp | 6 +- .../src/models/age_gender_detection_model.cpp | 51 +- dynamic_vino_lib/src/models/base_model.cpp | 15 +- .../src/models/emotion_detection_model.cpp | 40 +- .../src/models/face_detection_model.cpp | 5 +- .../models/face_reidentification_model.cpp | 23 +- .../src/models/head_pose_detection_model.cpp | 29 +- .../src/models/landmarks_detection_model.cpp | 33 +- .../models/license_plate_detection_model.cpp | 25 +- .../src/models/object_detection_ssd_model.cpp | 126 ++-- .../object_detection_yolov2voc_model.cpp | 207 ++++--- .../src/models/object_segmentation_model.cpp | 130 ++--- .../models/person_attribs_detection_model.cpp | 26 +- .../models/person_reidentification_model.cpp | 19 +- .../vehicle_attribs_detection_model.cpp | 31 +- dynamic_vino_lib/src/outputs/base_output.cpp | 7 +- .../src/outputs/image_window_output.cpp | 170 +++--- .../src/outputs/ros_service_output.cpp | 84 +-- .../src/outputs/ros_topic_output.cpp | 126 ++-- dynamic_vino_lib/src/outputs/rviz_output.cpp | 39 +- dynamic_vino_lib/src/pipeline.cpp | 149 +++-- dynamic_vino_lib/src/pipeline_manager.cpp | 308 +++++----- dynamic_vino_lib/src/pipeline_params.cpp | 36 +- .../src/services/frame_processing_server.cpp | 50 +- .../services/pipeline_processing_server.cpp | 95 +-- sample/include/sample/utility.hpp | 72 +-- sample/src/image_object_client.cpp | 58 +- sample/src/image_object_server.cpp | 13 +- sample/src/image_people_client.cpp | 58 +- sample/src/image_people_server.cpp | 15 +- sample/src/image_reidentification_client.cpp | 41 +- 
sample/src/image_reidentification_server.cpp | 14 +- sample/src/image_segmentation_client.cpp | 45 +- sample/src/image_segmentation_server.cpp | 15 +- sample/src/pipeline_service_client.cpp | 84 +-- sample/src/pipeline_with_params.cpp | 12 +- .../include/vino_param_lib/param_manager.h | 11 +- vino_param_lib/include/vino_param_lib/slog.h | 5 +- vino_param_lib/src/param_manager.cpp | 85 ++- 114 files changed, 2977 insertions(+), 2758 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.h b/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.h index cc163f47..b02bc4da 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.h @@ -37,14 +37,12 @@ /** * @brief This function check input args and find images in given folder */ -void readImagesArguments(std::vector& images, - const std::string& arg) +void readImagesArguments(std::vector& images, const std::string& arg) { struct stat sb; if (stat(arg.c_str(), &sb) != 0) { - std::cout << "[ WARNING ] File " << arg << " cannot be opened!" - << std::endl; + std::cout << "[ WARNING ] File " << arg << " cannot be opened!" << std::endl; return; } if (S_ISDIR(sb.st_mode)) @@ -53,8 +51,7 @@ void readImagesArguments(std::vector& images, dp = opendir(arg.c_str()); if (dp == nullptr) { - std::cout << "[ WARNING ] Directory " << arg << " cannot be opened!" - << std::endl; + std::cout << "[ WARNING ] Directory " << arg << " cannot be opened!" << std::endl; return; } @@ -62,9 +59,9 @@ void readImagesArguments(std::vector& images, while (nullptr != (ep = readdir(dp))) { std::string fileName = ep->d_name; - if (fileName == "." || fileName == "..") continue; - std::cout << "[ INFO ] Add file " << ep->d_name << " from directory " - << arg << "." << std::endl; + if (fileName == "." || fileName == "..") + continue; + std::cout << "[ INFO ] Add file " << ep->d_name << " from directory " << arg << "." 
<< std::endl; images.push_back(arg + "/" + ep->d_name); } } diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/common.h b/dynamic_vino_lib/include/dynamic_vino_lib/common.h index f97eb4a2..afe1d785 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/common.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/common.h @@ -57,14 +57,10 @@ * @param s - string to trim * @return trimmed string */ -inline std::string &trim(std::string &s) { - s.erase(s.begin(), - std::find_if(s.begin(), s.end(), - std::not1(std::ptr_fun(std::isspace)))); - s.erase(std::find_if(s.rbegin(), s.rend(), - std::not1(std::ptr_fun(std::isspace))) - .base(), - s.end()); +inline std::string& trim(std::string& s) +{ + s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), s.end()); return s; } @@ -74,8 +70,8 @@ inline std::string &trim(std::string &s) { * @return TargetDevice value that corresponds to input string. 
* eDefault in case no corresponding value was found */ -static InferenceEngine::TargetDevice getDeviceFromStr( - const std::string &deviceName) { +static InferenceEngine::TargetDevice getDeviceFromStr(const std::string& deviceName) +{ return InferenceEngine::TargetDeviceInfo::fromStr(deviceName); } @@ -86,14 +82,18 @@ static InferenceEngine::TargetDevice getDeviceFromStr( * @param device - device to infer on * @return Plugin pointer */ -static InferenceEngine::InferenceEnginePluginPtr selectPlugin( - const std::vector &pluginDirs, const std::string &plugin, - InferenceEngine::TargetDevice device) { +static InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector& pluginDirs, + const std::string& plugin, + InferenceEngine::TargetDevice device) +{ InferenceEngine::PluginDispatcher dispatcher(pluginDirs); - if (!plugin.empty()) { + if (!plugin.empty()) + { return dispatcher.getPluginByName(plugin); - } else { + } + else + { return dispatcher.getSuitablePlugin(device); } } @@ -105,9 +105,10 @@ static InferenceEngine::InferenceEnginePluginPtr selectPlugin( * @param device - string representation of device to infer on * @return Plugin pointer */ -static UNUSED InferenceEngine::InferenceEnginePluginPtr selectPlugin( - const std::vector &pluginDirs, const std::string &plugin, - const std::string &device) { +static UNUSED InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector& pluginDirs, + const std::string& plugin, + const std::string& device) +{ return selectPlugin(pluginDirs, plugin, getDeviceFromStr(device)); } @@ -116,9 +117,11 @@ static UNUSED InferenceEngine::InferenceEnginePluginPtr selectPlugin( * @param filepath - full file name * @return filename without extension */ -static UNUSED std::string fileNameNoExt(const std::string &filepath) { +static UNUSED std::string fileNameNoExt(const std::string& filepath) +{ auto pos = filepath.rfind('.'); - if (pos == std::string::npos) return filepath; + if (pos == std::string::npos) + return 
filepath; return filepath.substr(0, pos); } @@ -127,24 +130,31 @@ static UNUSED std::string fileNameNoExt(const std::string &filepath) { * @param filename - name of the file which extension should be extracted * @return string with extracted file extension */ -inline std::string fileExt(const std::string &filename) { +inline std::string fileExt(const std::string& filename) +{ auto pos = filename.rfind('.'); - if (pos == std::string::npos) return ""; + if (pos == std::string::npos) + return ""; return filename.substr(pos + 1); } -static UNUSED std::ostream &operator<<( - std::ostream &os, const InferenceEngine::Version *version) { +static UNUSED std::ostream& operator<<(std::ostream& os, const InferenceEngine::Version* version) +{ os << "\n\tAPI version ............ "; - if (nullptr == version) { + if (nullptr == version) + { os << "UNKNOWN"; - } else { + } + else + { os << version->apiVersion.major << "." << version->apiVersion.minor; - if (nullptr != version->buildNumber) { + if (nullptr != version->buildNumber) + { os << "\n\t" << "Build .................. " << version->buildNumber; } - if (nullptr != version->description) { + if (nullptr != version->description) + { os << "\n\t" << "Description ....... 
" << version->description; } @@ -156,71 +166,91 @@ static UNUSED std::ostream &operator<<( * @class PluginVersion * @brief A PluginVersion class stores plugin version and initialization status */ -struct PluginVersion : public InferenceEngine::Version { +struct PluginVersion : public InferenceEngine::Version +{ bool initialized = false; - explicit PluginVersion(const InferenceEngine::Version *ver) { - if (nullptr == ver) { + explicit PluginVersion(const InferenceEngine::Version* ver) + { + if (nullptr == ver) + { return; } InferenceEngine::Version::operator=(*ver); initialized = true; } - operator bool() const noexcept { return initialized; } + operator bool() const noexcept + { + return initialized; + } }; -static UNUSED std::ostream &operator<<(std::ostream &os, - const PluginVersion &version) { +static UNUSED std::ostream& operator<<(std::ostream& os, const PluginVersion& version) +{ os << "\tPlugin version ......... "; - if (!version) { + if (!version) + { os << "UNKNOWN"; - } else { + } + else + { os << version.apiVersion.major << "." << version.apiVersion.minor; } os << "\n\tPlugin name ............ "; - if (!version || version.description == nullptr) { + if (!version || version.description == nullptr) + { os << "UNKNOWN"; - } else { + } + else + { os << version.description; } os << "\n\tPlugin build ........... 
"; - if (!version || version.buildNumber == nullptr) { + if (!version || version.buildNumber == nullptr) + { os << "UNKNOWN"; - } else { + } + else + { os << version.buildNumber; } return os; } -inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, - std::ostream &stream) { - const PluginVersion *pluginVersion; - ptr->GetVersion((const InferenceEngine::Version *&)pluginVersion); +inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream& stream) +{ + const PluginVersion* pluginVersion; + ptr->GetVersion((const InferenceEngine::Version*&)pluginVersion); stream << pluginVersion << std::endl; } -static UNUSED std::vector> blobToImageOutputArray( - InferenceEngine::TBlob::Ptr output, size_t *pWidth, size_t *pHeight, - size_t *pChannels) { +static UNUSED std::vector> blobToImageOutputArray(InferenceEngine::TBlob::Ptr output, + size_t* pWidth, size_t* pHeight, + size_t* pChannels) +{ std::vector> outArray; size_t W = output->dims().at(0); size_t H = output->dims().at(1); size_t C = output->dims().at(2); // Get classes - const float *outData = output->data(); - for (unsigned h = 0; h < H; h++) { + const float* outData = output->data(); + for (unsigned h = 0; h < H; h++) + { std::vector row; - for (unsigned w = 0; w < W; w++) { + for (unsigned w = 0; w < W; w++) + { float max_value = outData[h * W + w]; size_t index = 0; - for (size_t c = 1; c < C; c++) { + for (size_t c = 1; c < C; c++) + { size_t dataIndex = c * H * W + h * W + w; - if (outData[dataIndex] > max_value) { + if (outData[dataIndex] > max_value) + { index = c; max_value = outData[dataIndex]; } @@ -230,9 +260,12 @@ static UNUSED std::vector> blobToImageOutputArray( outArray.push_back(row); } - if (pWidth != nullptr) *pWidth = W; - if (pHeight != nullptr) *pHeight = H; - if (pChannels != nullptr) *pChannels = C; + if (pWidth != nullptr) + *pWidth = W; + if (pHeight != nullptr) + *pHeight = H; + if (pChannels != nullptr) + *pChannels = C; return outArray; 
} @@ -241,27 +274,38 @@ static UNUSED std::vector> blobToImageOutputArray( * @class Color * @brief A Color class stores channels of a given color */ -class Color { - private: +class Color +{ +private: unsigned char _r; unsigned char _g; unsigned char _b; - public: +public: /** * A default constructor. * @param r - value for red channel * @param g - value for green channel * @param b - value for blue channel */ - Color(unsigned char r, unsigned char g, unsigned char b) - : _r(r), _g(g), _b(b) {} + Color(unsigned char r, unsigned char g, unsigned char b) : _r(r), _g(g), _b(b) + { + } - inline unsigned char red() { return _r; } + inline unsigned char red() + { + return _r; + } - inline unsigned char blue() { return _b; } + inline unsigned char blue() + { + return _b; + } - inline unsigned char green() { return _g; } + inline unsigned char green() + { + return _g; + } }; // TODO : keep only one version of writeOutputBMP @@ -273,19 +317,19 @@ class Color { * @param classesNum - the number of classes * @return false if error else true */ -static UNUSED void writeOutputBmp(std::vector> data, - size_t classesNum, std::ostream &outFile) { +static UNUSED void writeOutputBmp(std::vector> data, size_t classesNum, std::ostream& outFile) +{ unsigned int seed = (unsigned int)time(NULL); // Known colors for training classes from Cityscape dataset - static std::vector colors = { - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, - {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, - {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, - {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, {100, 60, 0}, - {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, - {81, 0, 81}}; - - while (classesNum > colors.size()) { + static std::vector colors = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 
}, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; + + while (classesNum > colors.size()) + { static std::mt19937 rng(seed); std::uniform_int_distribution dist(0, 255); Color color(dist(rng), dist(rng), dist(rng)); @@ -293,31 +337,31 @@ static UNUSED void writeOutputBmp(std::vector> data, } unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40 + 14, 0, 0, 0 // start of data offset + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset }; unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors }; auto height = data.size(); auto width = data.at(0).size(); - if (height > (size_t)std::numeric_limits::max || - width > (size_t)std::numeric_limits::max) { + if (height > (size_t)std::numeric_limits::max || width > (size_t)std::numeric_limits::max) + { THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; } @@ -346,21 +390,23 @@ static UNUSED void writeOutputBmp(std::vector> data, info[22] = (unsigned char)(sizeData >> 16); info[23] = (unsigned 
char)(sizeData >> 24); - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); - unsigned char pad[3] = {0, 0, 0}; + unsigned char pad[3] = { 0, 0, 0 }; - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { + for (size_t y = 0; y < height; y++) + { + for (size_t x = 0; x < width; x++) + { unsigned char pixel[3]; size_t index = data.at(y).at(x); pixel[0] = colors.at(index).red(); pixel[1] = colors.at(index).green(); pixel[2] = colors.at(index).blue(); - outFile.write(reinterpret_cast(pixel), 3); + outFile.write(reinterpret_cast(pixel), 3); } - outFile.write(reinterpret_cast(pad), padSize); + outFile.write(reinterpret_cast(pad), padSize); } } @@ -372,37 +418,38 @@ static UNUSED void writeOutputBmp(std::vector> data, * @param width - width of the target image * @return false if error else true */ -static UNUSED bool writeOutputBmp(std::string name, unsigned char *data, - size_t height, size_t width) { +static UNUSED bool writeOutputBmp(std::string name, unsigned char* data, size_t height, size_t width) +{ std::ofstream outFile; outFile.open(name, std::ofstream::binary); - if (!outFile.is_open()) { + if (!outFile.is_open()) + { return false; } unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40 + 14, 0, 0, 0 // start of data offset + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset }; unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 
72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors }; - if (height > (size_t)std::numeric_limits::max || - width > (size_t)std::numeric_limits::max) { + if (height > (size_t)std::numeric_limits::max || width > (size_t)std::numeric_limits::max) + { THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; } @@ -431,21 +478,23 @@ static UNUSED bool writeOutputBmp(std::string name, unsigned char *data, info[22] = (unsigned char)(sizeData >> 16); info[23] = (unsigned char)(sizeData >> 24); - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); - unsigned char pad[3] = {0, 0, 0}; + unsigned char pad[3] = { 0, 0, 0 }; - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { + for (size_t y = 0; y < height; y++) + { + for (size_t x = 0; x < width; x++) + { unsigned char pixel[3]; pixel[0] = data[y * width * 3 + x * 3]; pixel[1] = data[y * width * 3 + x * 3 + 1]; pixel[2] = data[y * width * 3 + x * 3 + 2]; - outFile.write(reinterpret_cast(pixel), 3); + outFile.write(reinterpret_cast(pixel), 3); } - outFile.write(reinterpret_cast(pad), padSize); + outFile.write(reinterpret_cast(pad), padSize); } return true; } @@ -458,31 +507,31 @@ static UNUSED bool writeOutputBmp(std::string name, unsigned char *data, * \return false if error else true */ -static UNUSED bool writeOutputBmp(unsigned char *data, size_t height, - size_t width, std::ostream &outFile) { 
+static UNUSED bool writeOutputBmp(unsigned char* data, size_t height, size_t width, std::ostream& outFile) +{ unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40 + 14, 0, 0, 0 // start of data offset + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset }; unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors }; - if (height > (size_t)std::numeric_limits::max || - width > (size_t)std::numeric_limits::max) { + if (height > (size_t)std::numeric_limits::max || width > (size_t)std::numeric_limits::max) + { THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; } @@ -511,55 +560,60 @@ static UNUSED bool writeOutputBmp(unsigned char *data, size_t height, info[22] = (unsigned char)(sizeData >> 16); info[23] = (unsigned char)(sizeData >> 24); - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); - unsigned char pad[3] = {0, 0, 0}; + unsigned char pad[3] = { 0, 0, 0 }; - for 
(size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { + for (size_t y = 0; y < height; y++) + { + for (size_t x = 0; x < width; x++) + { unsigned char pixel[3]; pixel[0] = data[y * width * 3 + x * 3]; pixel[1] = data[y * width * 3 + x * 3 + 1]; pixel[2] = data[y * width * 3 + x * 3 + 2]; - outFile.write(reinterpret_cast(pixel), 3); + outFile.write(reinterpret_cast(pixel), 3); } - outFile.write(reinterpret_cast(pad), padSize); + outFile.write(reinterpret_cast(pad), padSize); } return true; } -inline double getDurationOf(std::function func) { +inline double getDurationOf(std::function func) +{ auto t0 = std::chrono::high_resolution_clock::now(); func(); auto t1 = std::chrono::high_resolution_clock::now(); std::chrono::duration fs = t1 - t0; - return std::chrono::duration_cast< - std::chrono::duration>>(fs) - .count(); + return std::chrono::duration_cast>>(fs).count(); } -static UNUSED void printPerformanceCounts( - const std::map - &performanceMap, - std::ostream &stream, bool bshowHeader = true) { +static UNUSED void +printPerformanceCounts(const std::map& performanceMap, + std::ostream& stream, bool bshowHeader = true) +{ long long totalTime = 0; // Print performance counts - if (bshowHeader) { + if (bshowHeader) + { stream << std::endl << "performance counts:" << std::endl << std::endl; } - for (const auto &it : performanceMap) { + for (const auto& it : performanceMap) + { std::string toPrint(it.first); const int maxLayerName = 30; - if (it.first.length() >= maxLayerName) { + if (it.first.length() >= maxLayerName) + { toPrint = it.first.substr(0, maxLayerName - 4); toPrint += "..."; } stream << std::setw(maxLayerName) << std::left << toPrint; - switch (it.second.status) { + switch (it.second.status) + { case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: stream << std::setw(15) << std::left << "EXECUTED"; break; @@ -570,24 +624,20 @@ static UNUSED void printPerformanceCounts( stream << std::setw(15) << std::left << "OPTIMIZED_OUT"; 
break; } - stream << std::setw(30) << std::left - << "layerType: " + std::string(it.second.layer_type) + " "; - stream << std::setw(20) << std::left - << "realTime: " + std::to_string(it.second.realTime_uSec); - stream << std::setw(20) << std::left - << " cpu: " + std::to_string(it.second.cpu_uSec); + stream << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " "; + stream << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec); + stream << std::setw(20) << std::left << " cpu: " + std::to_string(it.second.cpu_uSec); stream << " execType: " << it.second.exec_type << std::endl; - if (it.second.realTime_uSec > 0) { + if (it.second.realTime_uSec > 0) + { totalTime += it.second.realTime_uSec; } } - stream << std::setw(20) << std::left - << "Total time: " + std::to_string(totalTime) << " microseconds" - << std::endl; + stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl; } -static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request, - std::ostream &stream) { +static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request, std::ostream& stream) +{ auto perfomanceMap = request.GetPerformanceCounts(); printPerformanceCounts(perfomanceMap, stream); } @@ -595,10 +645,9 @@ static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request, /** * @deprecated */ -static UNUSED void printPerformanceCountsPlugin( - InferenceEngine::InferenceEnginePluginPtr plugin, std::ostream &stream) { - std::map - perfomanceMap; +static UNUSED void printPerformanceCountsPlugin(InferenceEngine::InferenceEnginePluginPtr plugin, std::ostream& stream) +{ + std::map perfomanceMap; plugin->GetPerformanceCounts(perfomanceMap, nullptr); printPerformanceCounts(perfomanceMap, stream); } @@ -607,47 +656,46 @@ static UNUSED void printPerformanceCountsPlugin( * @brief This class represents an object that is found by an object 
detection * net */ -class DetectedObject { - public: +class DetectedObject +{ +public: int objectType; float xmin, xmax, ymin, ymax, prob; bool difficult; - DetectedObject(int objectType, float xmin, float ymin, float xmax, float ymax, - float prob, bool difficult = false) - : objectType(objectType), - xmin(xmin), - xmax(xmax), - ymin(ymin), - ymax(ymax), - prob(prob), - difficult(difficult) {} + DetectedObject(int objectType, float xmin, float ymin, float xmax, float ymax, float prob, bool difficult = false) + : objectType(objectType), xmin(xmin), xmax(xmax), ymin(ymin), ymax(ymax), prob(prob), difficult(difficult) + { + } - DetectedObject(const DetectedObject &other) = default; + DetectedObject(const DetectedObject& other) = default; - static float ioU(const DetectedObject &detectedObject1_, - const DetectedObject &detectedObject2_) { + static float ioU(const DetectedObject& detectedObject1_, const DetectedObject& detectedObject2_) + { // Add small space to eliminate empty squares float epsilon = 0; // 1e-5f; - DetectedObject detectedObject1( - detectedObject1_.objectType, (detectedObject1_.xmin - epsilon), - (detectedObject1_.ymin - epsilon), (detectedObject1_.xmax - epsilon), - (detectedObject1_.ymax - epsilon), detectedObject1_.prob); - DetectedObject detectedObject2( - detectedObject2_.objectType, (detectedObject2_.xmin + epsilon), - (detectedObject2_.ymin + epsilon), (detectedObject2_.xmax), - (detectedObject2_.ymax), detectedObject2_.prob); + DetectedObject detectedObject1(detectedObject1_.objectType, (detectedObject1_.xmin - epsilon), + (detectedObject1_.ymin - epsilon), (detectedObject1_.xmax - epsilon), + (detectedObject1_.ymax - epsilon), detectedObject1_.prob); + DetectedObject detectedObject2(detectedObject2_.objectType, (detectedObject2_.xmin + epsilon), + (detectedObject2_.ymin + epsilon), (detectedObject2_.xmax), (detectedObject2_.ymax), + detectedObject2_.prob); - if (detectedObject1.objectType != detectedObject2.objectType) { + if 
(detectedObject1.objectType != detectedObject2.objectType) + { // objects are different, so the result is 0 return 0.0f; } - if (detectedObject1.xmax < detectedObject1.xmin) return 0.0; - if (detectedObject1.ymax < detectedObject1.ymin) return 0.0; - if (detectedObject2.xmax < detectedObject2.xmin) return 0.0; - if (detectedObject2.ymax < detectedObject2.ymin) return 0.0; + if (detectedObject1.xmax < detectedObject1.xmin) + return 0.0; + if (detectedObject1.ymax < detectedObject1.ymin) + return 0.0; + if (detectedObject2.xmax < detectedObject2.xmin) + return 0.0; + if (detectedObject2.ymax < detectedObject2.ymin) + return 0.0; float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin); float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin); @@ -663,9 +711,12 @@ class DetectedObject { // intersection float intr; - if ((xmax >= xmin) && (ymax >= ymin)) { + if ((xmax >= xmin) && (ymax >= ymin)) + { intr = (addendum + xmax - xmin) * (addendum + ymax - ymin); - } else { + } + else + { intr = 0.0f; } @@ -680,33 +731,42 @@ class DetectedObject { return static_cast(intr) / unn; } - DetectedObject scale(float scale_x, float scale_y) const { - return DetectedObject(objectType, xmin * scale_x, ymin * scale_y, - xmax * scale_x, ymax * scale_y, prob, difficult); + DetectedObject scale(float scale_x, float scale_y) const + { + return DetectedObject(objectType, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y, prob, difficult); } }; -class ImageDescription { - public: +class ImageDescription +{ +public: const std::list alist; const bool check_probs; - explicit ImageDescription(const std::list &alist, - bool check_probs = false) - : alist(alist), check_probs(check_probs) {} + explicit ImageDescription(const std::list& alist, bool check_probs = false) + : alist(alist), check_probs(check_probs) + { + } - ImageDescription scale(float scale_x, float scale_y) const { + ImageDescription scale(float scale_x, float scale_y) const + { std::list slist; - 
for (auto &dob : alist) { + for (auto& dob : alist) + { slist.push_back(dob.scale(scale_x, scale_y)); } return ImageDescription(slist, check_probs); } }; -struct AveragePrecisionCalculator { - private: - enum MatchKind { TruePositive, FalsePositive }; +struct AveragePrecisionCalculator +{ +private: + enum MatchKind + { + TruePositive, + FalsePositive + }; /** * Here we count all TP and FP matches for all the classes in all the images. @@ -717,18 +777,20 @@ struct AveragePrecisionCalculator { double threshold; - static bool SortBBoxDescend(const DetectedObject &bbox1, - const DetectedObject &bbox2) { + static bool SortBBoxDescend(const DetectedObject& bbox1, const DetectedObject& bbox2) + { return bbox1.prob > bbox2.prob; } - static bool SortPairDescend(const std::pair &p1, - const std::pair &p2) { + static bool SortPairDescend(const std::pair& p1, const std::pair& p2) + { return p1.first > p2.first; } - public: - AveragePrecisionCalculator(double threshold) : threshold(threshold) {} +public: + AveragePrecisionCalculator(double threshold) : threshold(threshold) + { + } // gt_bboxes -> des // bboxes -> det @@ -741,18 +803,18 @@ struct AveragePrecisionCalculator { * @param width - width of the rectangle * @param detectedObjects - vector of detected objects */ -static UNUSED void addRectangles(unsigned char *data, size_t height, - size_t width, - std::vector detectedObjects) { - std::vector colors = { - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, - {153, 153, 190}, {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, - {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, - {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, {100, 60, 0}, - {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, - {81, 0, 81}}; - - for (size_t i = 0; i < detectedObjects.size(); i++) { +static UNUSED void addRectangles(unsigned char* data, size_t height, size_t width, + std::vector detectedObjects) +{ + std::vector colors = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 
156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; + + for (size_t i = 0; i < detectedObjects.size(); i++) + { int cls = detectedObjects[i].objectType % colors.size(); int xmin = detectedObjects[i].xmin * width; @@ -762,7 +824,8 @@ static UNUSED void addRectangles(unsigned char *data, size_t height, size_t shift_first = ymin * width * 3; size_t shift_second = ymax * width * 3; - for (int x = xmin; x < xmax; x++) { + for (int x = xmin; x < xmax; x++) + { data[shift_first + x * 3] = colors.at(cls).red(); data[shift_first + x * 3 + 1] = colors.at(cls).green(); data[shift_first + x * 3 + 2] = colors.at(cls).blue(); @@ -773,7 +836,8 @@ static UNUSED void addRectangles(unsigned char *data, size_t height, shift_first = xmin * 3; shift_second = xmax * 3; - for (int y = ymin; y < ymax; y++) { + for (int y = ymin; y < ymax; y++) + { data[shift_first + y * width * 3] = colors.at(cls).red(); data[shift_first + y * width * 3 + 1] = colors.at(cls).green(); data[shift_first + y * width * 3 + 2] = colors.at(cls).blue(); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h index 0faff891..a467a02a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.h @@ -36,7 +36,7 @@ namespace Engines class Engine { public: -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) /** * DEPRECATED! instead of using Engine(InferenceEngine::InferRequest::Ptr &) * @brief Create an NetworkEngine instance @@ -48,7 +48,7 @@ class Engine /** * @brief Using an Inference Request to initialize the inference Engine. 
*/ - Engine(InferenceEngine::InferRequest::Ptr &); + Engine(InferenceEngine::InferRequest::Ptr&); /** * @brief Get the inference request this instance holds. * @return The inference request this instance holds. @@ -68,7 +68,7 @@ class Engine request_->SetCompletionCallback(callbackToSet); } - private: +private: InferenceEngine::InferRequest::Ptr request_ = nullptr; }; } // namespace Engines diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h index 6d1cb9b7..cf6a8896 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.h @@ -38,25 +38,21 @@ class EngineManager * @brief Create InferenceEngine instance by given Engine Name and Network. * @return The shared pointer of created Engine instance. */ - std::shared_ptr createEngine( - const std::string &, const std::shared_ptr &); + std::shared_ptr createEngine(const std::string&, const std::shared_ptr&); private: -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) std::map plugins_for_devices_; - std::unique_ptr - makePluginByName( - const std::string & device_name, const std::string & custom_cpu_library_message, - const std::string & custom_cldnn_message, bool performance_message); + std::unique_ptr makePluginByName(const std::string& device_name, + const std::string& custom_cpu_library_message, + const std::string& custom_cldnn_message, + bool performance_message); - std::shared_ptr createEngine_beforeV2019R2( - const std::string &, const std::shared_ptr &); + std::shared_ptr createEngine_beforeV2019R2(const std::string&, const std::shared_ptr&); #endif - std::shared_ptr createEngine_V2019R2_plus( - const std::string &, const std::shared_ptr &); - + std::shared_ptr createEngine_V2019R2_plus(const std::string&, const std::shared_ptr&); }; } // namespace Engines diff --git 
a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h index 8b991aa2..dc25b836 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.h @@ -44,7 +44,7 @@ namespace dynamic_vino_lib */ class AgeGenderResult : public Result { - public: +public: explicit AgeGenderResult(const cv::Rect& location); /** * @brief Get the age of the detected person from the result. @@ -75,7 +75,7 @@ class AgeGenderResult : public Result */ class AgeGenderDetection : public BaseInference { - public: +public: using Result = dynamic_vino_lib::AgeGenderResult; AgeGenderDetection(); ~AgeGenderDetection() override; @@ -124,14 +124,11 @@ class AgeGenderDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic. */ - void observeOutput( - const std::shared_ptr& output) override; + void observeOutput(const std::shared_ptr& output) override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; - - private: +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h index 1695b9f7..ded5ebd6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.h @@ -27,7 +27,6 @@ namespace dynamic_vino_lib { - /** * @class BaseFilter * @brief Base class for result filter. @@ -52,34 +51,34 @@ class BaseFilter * @param[in] Filter conditions. * @return true if some of the conditions are valid, otherwise false. 
*/ - bool isValidFilterConditions(const std::string &); + bool isValidFilterConditions(const std::string&); /** * @brief Accept the filter conditions for filtering. * @param[in] Filter conditions. */ - void acceptFilterConditions(const std::string &); + void acceptFilterConditions(const std::string&); /** * @brief Decide whether the input string is a relational operator or not. * @param[in] A string to be decided. * @return True if the input string is a relational operator, false if not. */ - bool isRelationOperator(const std::string &); + bool isRelationOperator(const std::string&); /** * @brief Decide whether the input string is a logic operator or not. * @param[in] A string to be decided. * @return True if the input string is a logic operator, false if not. */ - bool isLogicOperator(const std::string &); + bool isLogicOperator(const std::string&); /** * @brief Decide whether the an operator has a higher priority than anthor. * @param[in] The two operators. * @return True if the first operator has higher priority, false if not. */ - bool isPriorTo(const std::string &, const std::string &); + bool isPriorTo(const std::string&, const std::string&); /** * @brief Convert the input bool variable to a string type. @@ -93,77 +92,85 @@ class BaseFilter * @param[in] A string type to be converted. * @return A converted bool result. */ - bool strToBool(const std::string &); + bool strToBool(const std::string&); /** * @brief Get the filter conditions in the suffix order. * @return A vector with suffix-order filter conditions. */ - const std::vector & getSuffixConditions() const; + const std::vector& getSuffixConditions() const; /** * @brief Do logic operation with the given bool values and the operator. * @param[in] A bool string, an logic operator, the other bool string. * @return The logic operation result. 
*/ - bool logicOperation(const std::string &, const std::string &, const std::string &); + bool logicOperation(const std::string&, const std::string&, const std::string&); /** * @brief Compare two strings with a given relational operator. * @param[in] A string, an relational operator, the other string. * @return True if valid, false if not. */ - static bool stringCompare(const std::string &, const std::string &, const std::string &); + static bool stringCompare(const std::string&, const std::string&, const std::string&); /** * @brief Compare two floats with a given relational operator. * @param[in] A float number, an relational operator, the other float number. * @return True if valid, false if not. */ - static bool floatCompare(float, const std::string &, float); + static bool floatCompare(float, const std::string&, float); /** * @brief Convert a string into a float number. * @param[in] A string to be converted. * @return The converted float number, 0 if string is invalid. */ - static float stringToFloat(const std::string &); + static float stringToFloat(const std::string&); - /** - * @brief A macro to decide whether a given result satisfies the filter condition. - * @param[in] A key to function mapping, a given result. - * @return True if valid, false if not. - */ - #define ISVALIDRESULT(key_to_function, result) \ - { \ - std::vector suffix_conditons = getSuffixConditions(); \ - std::stack result_stack; \ - for (auto elem : suffix_conditons) { \ - if (!isRelationOperator(elem) && !isLogicOperator(elem)) { \ - result_stack.push(elem); \ - } \ - else { \ - try { \ - std::string str1 = result_stack.top(); \ - result_stack.pop(); \ - std::string str2 = result_stack.top(); \ - result_stack.pop(); \ - if (key_to_function.count(str2)) { \ - result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \ - } \ - else { \ - result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \ - } \ - } \ - catch (...) 
{ \ - slog::err << "Invalid filter conditions format!" << slog::endl; \ - } \ - } \ - } \ - if (result_stack.empty()) { \ - return true; \ - } \ - return strToBool(result_stack.top()); \ +/** + * @brief A macro to decide whether a given result satisfies the filter condition. + * @param[in] A key to function mapping, a given result. + * @return True if valid, false if not. + */ +#define ISVALIDRESULT(key_to_function, result) \ + { \ + std::vector suffix_conditons = getSuffixConditions(); \ + std::stack result_stack; \ + for (auto elem : suffix_conditons) \ + { \ + if (!isRelationOperator(elem) && !isLogicOperator(elem)) \ + { \ + result_stack.push(elem); \ + } \ + else \ + { \ + try \ + { \ + std::string str1 = result_stack.top(); \ + result_stack.pop(); \ + std::string str2 = result_stack.top(); \ + result_stack.pop(); \ + if (key_to_function.count(str2)) \ + { \ + result_stack.push(boolToStr(key_to_function[str2](result, elem, str1))); \ + } \ + else \ + { \ + result_stack.push(boolToStr(logicOperation(str1, elem, str2))); \ + } \ + } \ + catch (...) \ + { \ + slog::err << "Invalid filter conditions format!" << slog::endl; \ + } \ + } \ + } \ + if (result_stack.empty()) \ + { \ + return true; \ + } \ + return strToBool(result_stack.top()); \ } private: @@ -172,25 +179,25 @@ class BaseFilter * @param[in] A string form filter conditions. * @return The vector form filter conditions. */ - std::vector split(const std::string & filter_conditions); + std::vector split(const std::string& filter_conditions); /** * @brief Convert the infix expression into suffix expression. * @param[in] The infix form filter conditions. */ - void infixToSuffix(std::vector&infix_conditions); + void infixToSuffix(std::vector& infix_conditions); /** * @brief Strip the extra space in a string. * @param[in] A string to be striped. * @return The striped string. 
*/ - std::string strip(const std::string & str); + std::string strip(const std::string& str); std::string striped_conditions_ = ""; std::vector suffix_conditons_; - std::vector relation_operators_ = {"==", "!=", "<=", ">=", "<", ">"}; - std::vector logic_operators_ = {"&&", "||"}; + std::vector relation_operators_ = { "==", "!=", "<=", ">=", "<", ">" }; + std::vector logic_operators_ = { "&&", "||" }; }; } // namespace dynamic_vino_lib diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h index 2fcbd3bb..0fa21adf 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.h @@ -42,28 +42,31 @@ class BaseOutput; * @param[in] scale_factor Scale factor for loading. * @param[in] batch_index Indicates the batch index for the frame. */ -template -void matU8ToBlob( - const cv::Mat & orig_image, InferenceEngine::Blob::Ptr & blob, - float scale_factor = 1.0, int batch_index = 0) +template +void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, float scale_factor = 1.0, + int batch_index = 0) { InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); const size_t width = blob_size[3]; const size_t height = blob_size[2]; const size_t channels = blob_size[1]; - T * blob_data = blob->buffer().as(); + T* blob_data = blob->buffer().as(); cv::Mat resized_image(orig_image); - if (width != (size_t)orig_image.size().width || height != (size_t)orig_image.size().height) { + if (width != (size_t)orig_image.size().width || height != (size_t)orig_image.size().height) + { cv::resize(orig_image, resized_image, cv::Size(width, height)); } int batchOffset = batch_index * width * height * channels; - for (size_t c = 0; c < channels; c++) { - for (size_t h = 0; h < height; h++) { - for (size_t w = 0; w < width; w++) { + for (size_t c = 0; c < channels; c++) + { 
+ for (size_t h = 0; h < height; h++) + { + for (size_t w = 0; w < width; w++) + { blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at(h, w)[c] * scale_factor; + resized_image.at(h, w)[c] * scale_factor; } } } @@ -79,7 +82,7 @@ class Result { public: friend class BaseInference; - explicit Result(const cv::Rect & location); + explicit Result(const cv::Rect& location); inline const cv::Rect getLocation() const { return location_; @@ -129,7 +132,7 @@ class BaseInference return max_batch_size_; } - inline void setMaxBatchSize(int max) + inline void setMaxBatchSize(int max) { max_batch_size_ = max; } @@ -142,7 +145,7 @@ class BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - virtual bool enqueue(const cv::Mat & frame, const cv::Rect & input_frame_loc) = 0; + virtual bool enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) = 0; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -150,7 +153,7 @@ class BaseInference virtual bool submitRequest(); virtual bool SynchronousRequest(); - virtual void observeOutput(const std::shared_ptr & output) = 0; + virtual void observeOutput(const std::shared_ptr& output) = 0; /** * @brief This function will fetch the results of the previous inference and @@ -168,15 +171,14 @@ class BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - virtual const dynamic_vino_lib::Result * getLocationResult(int idx) const = 0; + virtual const dynamic_vino_lib::Result* getLocationResult(int idx) const = 0; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ virtual const std::string getName() const = 0; - virtual const std::vector getFilteredROIs( - const std::string filter_conditions) const = 0; + virtual const std::vector getFilteredROIs(const std::string filter_conditions) const = 0; void addCandidatedModel(std::shared_ptr model); @@ -186,14 +188,14 @@ class BaseInference * device. Check OpenVINO document for detailed information. * @return Whether this operation is successful. */ - template - bool enqueue( - const cv::Mat & frame, const cv::Rect &, float scale_factor, int batch_index, - const std::string & input_name) + template + bool enqueue(const cv::Mat& frame, const cv::Rect&, float scale_factor, int batch_index, + const std::string& input_name) { - if (enqueued_frames_ == max_batch_size_) { - slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ << - ") processed by inference" << slog::endl; + if (enqueued_frames_ == max_batch_size_) + { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; return false; } InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h index 17fd90f0..4ebfd732 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.h @@ -41,7 +41,7 @@ class Tracker * @param[in] feature The new detected track feature. * @return The detected track ID. */ - int processNewTrack(const std::vector & feature); + int processNewTrack(const std::vector& feature); private: /** @@ -50,13 +50,13 @@ class Tracker * @param[in] most similar track's ID to be recorded. * @return similarity with the most similar track. 
*/ - double findMostSimilarTrack(const std::vector & feature, int & most_similar_id); + double findMostSimilarTrack(const std::vector& feature, int& most_similar_id); /** * @brief Update the matched track's feature by the new track. * @param[in] track_id The matched track ID. * @param[in] feature The matched track's feature */ - void updateMatchTrack(int track_id, const std::vector & feature); + void updateMatchTrack(int track_id, const std::vector& feature); /** * @brief Remove the earlest track from the recorded tracks. */ @@ -66,13 +66,12 @@ class Tracker * @param[in] feature A track's feature. * @return new added track's ID. */ - int addNewTrack(const std::vector & feature); + int addNewTrack(const std::vector& feature); /** * @brief Calculate the cosine similarity between two features. * @return The simlarity result. */ - double calcSimilarity( - const std::vector & feature_a, const std::vector & feature_b); + double calcSimilarity(const std::vector& feature_a, const std::vector& feature_b); /** * @brief get the current millisecond count since epoch. * @return millisecond count since epoch. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h index dbaf592f..7de27d47 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.h @@ -43,7 +43,7 @@ namespace dynamic_vino_lib */ class EmotionsResult : public Result { - public: +public: friend class EmotionsDetection; explicit EmotionsResult(const cv::Rect& location); /** @@ -55,7 +55,7 @@ class EmotionsResult : public Result return label_; } - private: +private: std::string label_ = ""; float confidence_ = -1; }; @@ -66,7 +66,7 @@ class EmotionsResult : public Result */ class EmotionsDetection : public BaseInference { - public: +public: using Result = dynamic_vino_lib::EmotionsResult; EmotionsDetection(); ~EmotionsDetection() override; @@ -115,17 +115,15 @@ class EmotionsDetection : public BaseInference * @brief Show the observed detection result either through image window * or ROS topic. 
*/ - void observeOutput( - const std::shared_ptr& output) override; + void observeOutput(const std::shared_ptr& output) override; std::vector getResults() { return results_; } - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; - - private: + const std::vector getFilteredROIs(const std::string filter_conditions) const override; + +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h index f1e484ea..b63ee76a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h @@ -46,7 +46,7 @@ namespace dynamic_vino_lib class FaceDetectionResult : public ObjectDetectionResult { public: - explicit FaceDetectionResult(const cv::Rect & location); + explicit FaceDetectionResult(const cv::Rect& location); }; /** diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h index 5b8487e4..fe2b9bfc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.h @@ -38,8 +38,11 @@ class FaceReidentificationResult : public Result { public: friend class FaceReidentification; - explicit FaceReidentificationResult(const cv::Rect & location); - std::string getFaceID() const {return face_id_;} + explicit FaceReidentificationResult(const cv::Rect& location); + std::string getFaceID() const + { + return face_id_; + } private: std::string face_id_ = "No.#"; @@ -67,7 +70,7 @@ class FaceReidentification : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. 
*/ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -90,19 +93,18 @@ class FaceReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed reidentification result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h index ef16d415..f5ef466a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.h @@ -39,7 +39,7 @@ namespace dynamic_vino_lib */ class HeadPoseResult : public Result { - public: +public: friend class HeadPoseDetection; explicit HeadPoseResult(const cv::Rect& location); /** @@ -67,7 +67,7 @@ class HeadPoseResult : public Result return angle_r_; } - private: +private: float angle_y_ = -1; float angle_p_ = -1; float angle_r_ = -1; @@ -79,7 +79,7 @@ class HeadPoseResult : public Result */ class HeadPoseDetection : public BaseInference { - public: +public: using Result = dynamic_vino_lib::HeadPoseResult; 
HeadPoseDetection(); ~HeadPoseDetection() override; @@ -128,17 +128,15 @@ class HeadPoseDetection : public BaseInference * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput( - const std::shared_ptr& output) override; + void observeOutput(const std::shared_ptr& output) override; std::vector getResults() { return results_; } - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; - private: +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h index 8151f052..4fd33f8a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.h @@ -42,18 +42,15 @@ class InferenceManager * The instance will be created when first call. * @return The reference of InferenceManager instance. 
*/ - static InferenceManager & getInstance() + static InferenceManager& getInstance() { static InferenceManager manager_; return manager_; } - std::shared_ptr createPipeline( - const Params::ParamManager::PipelineRawData & params); - void removePipeline(const std::string & name); - InferenceManager & updatePipeline( - const std::string & name, - const Params::ParamManager::PipelineRawData & params); + std::shared_ptr createPipeline(const Params::ParamManager::PipelineRawData& params); + void removePipeline(const std::string& name); + InferenceManager& updatePipeline(const std::string& name, const Params::ParamManager::PipelineRawData& params); void runAll(); void stopAll(); @@ -76,26 +73,28 @@ class InferenceManager }; private: - InferenceManager() {} - InferenceManager(InferenceManager const &); - void operator=(InferenceManager const &); - void threadPipeline(const char * name); + InferenceManager() + { + } + InferenceManager(InferenceManager const&); + void operator=(InferenceManager const&); + void threadPipeline(const char* name); std::map> - parseInputDevice(const Params::ParamManager::PipelineRawData & params); - std::map> parseOutput( - const Params::ParamManager::PipelineRawData & params); + parseInputDevice(const Params::ParamManager::PipelineRawData& params); + std::map> + parseOutput(const Params::ParamManager::PipelineRawData& params); std::map> - parseInference(const Params::ParamManager::PipelineRawData & params); - std::shared_ptr createFaceDetection( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createAgeGenderRecognition( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createEmotionRecognition( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createHeadPoseEstimation( - const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createObjectDetection( - const Params::ParamManager::InferenceParams & infer); + parseInference(const 
Params::ParamManager::PipelineRawData& params); + std::shared_ptr + createFaceDetection(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr + createAgeGenderRecognition(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr + createEmotionRecognition(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr + createHeadPoseEstimation(const Params::ParamManager::InferenceParams& infer); + std::shared_ptr + createObjectDetection(const Params::ParamManager::InferenceParams& infer); std::map pipelines_; // std::map plugins_for_devices_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h index 9e697ece..a69e9e0f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.h @@ -37,7 +37,7 @@ class LandmarksDetectionResult : public Result { public: friend class LandmarksDetection; - explicit LandmarksDetectionResult(const cv::Rect & location); + explicit LandmarksDetectionResult(const cv::Rect& location); std::vector getLandmarks() const { return landmark_points_; @@ -68,7 +68,7 @@ class LandmarksDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -91,19 +91,18 @@ class LandmarksDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. 
*/ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h index 9de41af6..0d5b80ec 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.h @@ -37,7 +37,7 @@ class LicensePlateDetectionResult : public Result { public: friend class LicensePlateDetection; - explicit LicensePlateDetectionResult(const cv::Rect & location); + explicit LicensePlateDetectionResult(const cv::Rect& location); std::string getLicense() const { return license_; @@ -68,7 +68,7 @@ class LicensePlateDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Set the sequence input blob */ @@ -95,38 +95,92 @@ class LicensePlateDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. 
*/ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; - const std::vector licenses_ = { - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", "", "", - "", "", - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", - "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", - "U", "V", "W", "X", "Y", "Z" - }; + const std::vector licenses_ = { "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "H", + "I", + "J", + "K", + "L", + "M", + "N", + "O", + "P", + "Q", + "R", + "S", + "T", + "U", + "V", + "W", + "X", + "Y", + "Z" }; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h index 75c5b85f..8890c69b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.h @@ -42,13 +42,13 @@ class ObjectDetectionResult : public Result { public: friend class ObjectDetection; - explicit ObjectDetectionResult(const cv::Rect & location); + explicit ObjectDetectionResult(const cv::Rect& location); std::string getLabel() const { return label_; } - void setLabel(const std::string & label) + void setLabel(const std::string& label) { label_ = label; } @@ -61,12 +61,12 @@ class ObjectDetectionResult : public Result return confidence_; } - void setConfidence(const float & con) + void setConfidence(const float& con) { confidence_ = con; } - bool operator<(const ObjectDetectionResult & s2) const + bool operator<(const ObjectDetectionResult& s2) const { return this->confidence_ > s2.confidence_; } @@ -95,7 +95,7 @@ class ObjectDetectionResultFilter : public BaseFilter * @brief Set the object detection results into filter. * @param[in] The object detection results. */ - void acceptResults(const std::vector & results); + void acceptResults(const std::vector& results); /** * @brief Get the filtered results' ROIs. * @return The filtered ROIs. @@ -108,27 +108,22 @@ class ObjectDetectionResultFilter : public BaseFilter * @param[in] Result to be decided, filter operator, target label value. * @return True if valid, false if not. */ - static bool isValidLabel( - const Result & result, - const std::string & op, const std::string & target); + static bool isValidLabel(const Result& result, const std::string& op, const std::string& target); /** * @brief Decide whether a result is valid for confidence filter condition. * @param[in] Result to be decided, filter operator, target confidence value. * @return True if valid, false if not. 
*/ - static bool isValidConfidence( - const Result & result, - const std::string & op, const std::string & target); + static bool isValidConfidence(const Result& result, const std::string& op, const std::string& target); /** * @brief Decide whether a result is valid. * @param[in] Result to be decided. * @return True if valid, false if not. */ - bool isValidResult(const Result & result); + bool isValidResult(const Result& result); - std::map key_to_function_; + std::map key_to_function_; std::vector results_; }; @@ -155,7 +150,7 @@ class ObjectDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief This function will fetch the results of the previous inference and @@ -174,25 +169,24 @@ class ObjectDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const Result * getLocationResult(int idx) const override; + const Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; /** * @brief Calculate the IoU ratio for the given rectangles. * @return IoU Ratio of the given rectangles. 
*/ - static double calcIoU(const cv::Rect & box_1, const cv::Rect & box_2); + static double calcIoU(const cv::Rect& box_1, const cv::Rect& box_2); private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h index 41b548df..eac05d0c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.h @@ -40,7 +40,7 @@ class ObjectSegmentationResult : public Result { public: friend class ObjectSegmentation; - explicit ObjectSegmentationResult(const cv::Rect & location); + explicit ObjectSegmentationResult(const cv::Rect& location); std::string getLabel() const { return label_; @@ -85,10 +85,10 @@ class ObjectSegmentation : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; - //Deprecated!! - bool enqueue_for_one_input(const cv::Mat &, const cv::Rect &); + // Deprecated!! + bool enqueue_for_one_input(const cv::Mat&, const cv::Rect&); /** * @brief Start inference for all buffered frames. @@ -112,19 +112,18 @@ class ObjectSegmentation : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; @@ -133,13 +132,12 @@ class ObjectSegmentation : public BaseInference int height_ = 0; double show_output_thresh_ = 0; - std::vector colors_ = { - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, - {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, - {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, - {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, - {81, 0, 81} - }; + std::vector colors_ = { { 128, 64, 128 }, { 232, 35, 244 }, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h index b4d8c2be..ae170a57 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.h @@ -37,7 +37,7 @@ class PersonAttribsDetectionResult : public Result { public: friend class PersonAttribsDetection; - explicit PersonAttribsDetectionResult(const cv::Rect & location); + explicit PersonAttribsDetectionResult(const cv::Rect& location); std::string getAttributes() const { return attributes_; @@ -84,7 +84,7 @@ class PersonAttribsDetection : public BaseInference * to the frame 
generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -107,26 +107,26 @@ class PersonAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; double attribs_confidence_; - const std::vector net_attributes_ = {"is male", "has_bag", "has_backpack" , "has hat", - "has longsleeves", "has longpants", "has longhair", "has coat_jacket"}; + const std::vector net_attributes_ = { "is male", "has_bag", "has_backpack", + "has hat", "has longsleeves", "has longpants", + "has longhair", "has coat_jacket" }; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h index 8a728ec1..051bd4f0 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h @@ -37,8 +37,11 @@ class PersonReidentificationResult : public Result { public: friend class PersonReidentification; - explicit PersonReidentificationResult(const cv::Rect & location); - std::string getPersonID() const {return person_id_;} + explicit PersonReidentificationResult(const cv::Rect& location); + std::string getPersonID() const + { + return person_id_; + } private: std::string person_id_ = "No.#"; @@ -65,7 +68,7 @@ class PersonReidentification : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -88,19 +91,18 @@ class PersonReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h index ca2cdc4b..e32812b4 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.h @@ -37,7 +37,7 @@ class VehicleAttribsDetectionResult : public Result { public: friend class VehicleAttribsDetection; - explicit VehicleAttribsDetectionResult(const cv::Rect & location); + explicit VehicleAttribsDetectionResult(const cv::Rect& location); std::string getColor() const { return color_; @@ -73,7 +73,7 @@ class VehicleAttribsDetection : public BaseInference * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat &, const cv::Rect &) override; + bool enqueue(const cv::Mat&, const cv::Rect&) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -96,27 +96,24 @@ class VehicleAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const dynamic_vino_lib::Result* getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - void observeOutput(const std::shared_ptr & output); + void observeOutput(const std::shared_ptr& output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - const std::vector getFilteredROIs( - const std::string filter_conditions) const override; + const std::vector getFilteredROIs(const std::string filter_conditions) const override; private: std::shared_ptr valid_model_; std::vector results_; - const std::vector types_ = { - "car", "van", "truck", "bus"}; - const std::vector colors_ = { - "white", "gray", "yellow", "red", "green", "blue", "black"}; + const std::vector types_ = { "car", "van", "truck", "bus" }; + const std::vector colors_ = { "white", "gray", "yellow", "red", "green", "blue", "black" }; }; } // namespace dynamic_vino_lib #endif // DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h index 527d3ad2..0ef23427 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.h @@ -38,7 +38,7 @@ struct Config class BaseInputDevice : public Ros2Handler { - public: +public: /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -55,12 +55,14 @@ class BaseInputDevice : public Ros2Handler * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - virtual bool read(cv::Mat * frame) = 0; - virtual bool readService(cv::Mat * frame, std::string config_path) + virtual bool read(cv::Mat* frame) = 0; + virtual bool readService(cv::Mat* frame, std::string config_path) { return true; } - virtual void config(const Config &) {} + virtual void config(const Config&) + { + } virtual ~BaseInputDevice() = default; /** * @brief Get the width of the frame read from input device. 
@@ -111,9 +113,9 @@ class BaseInputDevice : public Ros2Handler is_init_ = is_init; } - private: - size_t width_ = 0; // 0 means using the original size - size_t height_ = 0; // 0 means using the original size +private: + size_t width_ = 0; // 0 means using the original size + size_t height_ = 0; // 0 means using the original size bool is_init_ = false; }; } // namespace Input diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h index 03558a9e..4711ddc9 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.h @@ -33,7 +33,7 @@ namespace Input */ class Image : public BaseInputDevice { - public: +public: explicit Image(const std::string&); /** * @brief Read an image file from the file path. @@ -56,9 +56,9 @@ class Image : public BaseInputDevice */ bool read(cv::Mat* frame) override; - void config(const Config &) override; + void config(const Config&) override; - private: +private: cv::Mat image_; std::string file_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h index 77cb8c2a..28d4e405 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h @@ -39,7 +39,7 @@ class ImageTopic : public BaseInputDevice ImageTopic(rclcpp::Node::SharedPtr node = nullptr); bool initialize() override; bool initialize(size_t width, size_t height) override; - bool read(cv::Mat * frame) override; + bool read(cv::Mat* frame) override; private: ros::NodeHandle nh_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h index b2391543..423dcb96 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.h @@ -34,7 +34,7 @@ namespace Input */ class RealSenseCamera : public BaseInputDevice { - public: +public: /** * @brief Initialize the input device, turn the * camera on and get ready to read frames. @@ -52,7 +52,7 @@ class RealSenseCamera : public BaseInputDevice */ bool read(cv::Mat* frame) override; - private: +private: void bypassFewFramesOnceInited(); std::string getCameraSN(); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros_handler.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros_handler.h index 4003b699..e2314619 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros_handler.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros_handler.h @@ -26,10 +26,9 @@ namespace Input { - class Ros2Handler { - public: +public: void setHandler(const std::shared_ptr& node) { node_ = node; @@ -39,7 +38,7 @@ class Ros2Handler return node_; } - private: +private: std::shared_ptr node_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h index 78fc408f..265493be 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.h @@ -39,7 +39,7 @@ namespace Input */ class StandardCamera : public BaseInputDevice { - public: +public: /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -58,7 +58,7 @@ class StandardCamera : public BaseInputDevice */ bool read(cv::Mat* frame) override; - private: +private: int getCameraId(); cv::VideoCapture cap; int camera_id_ = -1; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h index e70337a3..8bac0265 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h +++ 
b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.h @@ -33,7 +33,7 @@ namespace Input */ class Video : public BaseInputDevice { - public: +public: explicit Video(const std::string&); /** * @brief Read a video file from the file path. @@ -53,7 +53,7 @@ class Video : public BaseInputDevice */ bool read(cv::Mat* frame) override; - private: +private: cv::VideoCapture cap; std::string video_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h index fcaeae74..1bd47be8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.h @@ -34,7 +34,7 @@ namespace Models class AgeGenderDetectionModel : public BaseModel { public: - AgeGenderDetectionModel(const std::string & model_loc, int batch_size = 1); + AgeGenderDetectionModel(const std::string& model_loc, int batch_size = 1); /** * @brief Get the input name. * @return Input name. 
@@ -64,7 +64,6 @@ class AgeGenderDetectionModel : public BaseModel protected: bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - }; } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h index 453470d7..61326acb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.h @@ -38,7 +38,8 @@ class ModelAttribute { public: using Ptr = std::shared_ptr; - struct ModelAttr { + struct ModelAttr + { int max_proposal_count = 0; int object_size = 0; int input_height = 0; @@ -48,7 +49,7 @@ class ModelAttribute std::map output_names; std::vector labels; }; - + ModelAttribute(const std::string model_name) { attr_.model_name = model_name; @@ -56,8 +57,8 @@ class ModelAttribute inline bool isVerified() { - return (attr_.max_proposal_count > 0 && attr_.object_size > 0 && attr_.input_height > 0 - && attr_.input_width > 0 && attr_.input_names.empty() && attr_.output_names.empty()); + return (attr_.max_proposal_count > 0 && attr_.object_size > 0 && attr_.input_height > 0 && attr_.input_width > 0 && + attr_.input_names.empty() && attr_.output_names.empty()); } inline void printAttribute() { @@ -68,26 +69,30 @@ class ModelAttribute slog::info << "| input_height: " << attr_.input_height << slog::endl; slog::info << "| input_width: " << attr_.input_width << slog::endl; slog::info << "| input_names: " << slog::endl; - for (auto & item: attr_.input_names) { + for (auto& item : attr_.input_names) + { slog::info << "| " << item.first << "-->" << item.second << slog::endl; } slog::info << "| output_names: " << slog::endl; - for (auto & item: attr_.output_names) { + for (auto& item : attr_.output_names) + { slog::info << "| " << item.first << "-->" << item.second << slog::endl; } - if(attr_.max_proposal_count <= 0 || 
attr_.object_size <= 0 || attr_.input_height <= 0 - || attr_.input_width <= 0 || attr_.input_names.empty() || attr_.output_names.empty()){ + if (attr_.max_proposal_count <= 0 || attr_.object_size <= 0 || attr_.input_height <= 0 || attr_.input_width <= 0 || + attr_.input_names.empty() || attr_.output_names.empty()) + { slog::info << "--------" << slog::endl; slog::warn << "Not all attributes are set correctly! not 0 or empty is allowed in" - << " the above list." << slog::endl; + << " the above list." << slog::endl; } slog::info << "--------------------------------" << slog::endl; } - virtual bool updateLayerProperty( - const InferenceEngine::CNNNetReader::Ptr &) - { return false; } + virtual bool updateLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) + { + return false; + } inline std::string getModelName() const { @@ -103,23 +108,25 @@ class ModelAttribute { // std::map::iterator it; auto it = attr_.input_names.find(name); - if(it == attr_.input_names.end()){ + if (it == attr_.input_names.end()) + { slog::warn << "No input named: " << name << slog::endl; return std::string(""); } - + return it->second; } inline std::string getOutputName(std::string name = "output") const { - //std::map::iterator it; + // std::map::iterator it; auto it = attr_.output_names.find(name); - if(it == attr_.output_names.end()){ + if (it == attr_.output_names.end()) + { slog::warn << "No output named: " << name << slog::endl; return std::string(""); } - + return it->second; } @@ -136,15 +143,14 @@ class ModelAttribute inline void loadLabelsFromFile(const std::string file_path) { std::ifstream input_file(file_path); - std::copy(std::istream_iterator(input_file), - std::istream_iterator(), - std::back_inserter(attr_.labels)); - } + std::copy(std::istream_iterator(input_file), std::istream_iterator(), + std::back_inserter(attr_.labels)); + } - inline std::vector& getLabels() - { - return attr_.labels; - } + inline std::vector& getLabels() + { + return attr_.labels; + } inline void 
addInputInfo(std::string key, std::string value) { @@ -178,10 +184,9 @@ class ModelAttribute protected: ModelAttr attr_; - }; -#if 0 //not used +#if 0 // not used class SSDModelAttr : public ModelAttribute { public: @@ -193,7 +198,6 @@ class SSDModelAttr : public ModelAttribute }; #endif - } // namespace Models #endif // DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h index 826e5d1b..b49859fd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.h @@ -35,12 +35,12 @@ namespace Engines { - class Engine; +class Engine; } namespace dynamic_vino_lib { - class ObjectDetectionResult; +class ObjectDetectionResult; } namespace Models @@ -49,96 +49,97 @@ namespace Models * @class BaseModel * @brief This class represents the network given by .xml and .bin file */ - class BaseModel : public ModelAttribute +class BaseModel : public ModelAttribute +{ +public: + using Ptr = std::shared_ptr; + /** + * @brief Initialize the class with given .xml, .bin and .labels file. It will + * also check whether the number of input and output are fit. + * @param[in] model_loc The location of model' s .xml file + * (model' s bin file should be the same as .xml file except for extension) + * @param[in] input_num The number of input the network should have. + * @param[in] output_num The number of output the network should have. + * @param[in] batch_size The number of batch size (default: 1) the network should have. + * @return Whether the input device is successfully turned on. + */ + BaseModel(const std::string& model_loc, int batch_size = 1); + + /** + * @brief Get the maximum batch size of the model. + * @return The maximum batch size of the model. 
+ */ + inline int getMaxBatchSize() const { - public: - using Ptr = std::shared_ptr; - /** - * @brief Initialize the class with given .xml, .bin and .labels file. It will - * also check whether the number of input and output are fit. - * @param[in] model_loc The location of model' s .xml file - * (model' s bin file should be the same as .xml file except for extension) - * @param[in] input_num The number of input the network should have. - * @param[in] output_num The number of output the network should have. - * @param[in] batch_size The number of batch size (default: 1) the network should have. - * @return Whether the input device is successfully turned on. - */ - BaseModel(const std::string &model_loc, int batch_size = 1); + return max_batch_size_; + } + inline void setMaxBatchSize(int max_batch_size) + { + max_batch_size_ = max_batch_size; + } - /** - * @brief Get the maximum batch size of the model. - * @return The maximum batch size of the model. - */ - inline int getMaxBatchSize() const - { - return max_batch_size_; - } - inline void setMaxBatchSize(int max_batch_size) - { - max_batch_size_ = max_batch_size; - } - - virtual bool enqueue( - const std::shared_ptr &engine, - const cv::Mat &frame, - const cv::Rect &input_frame_loc) { return true; } - /** - * @brief Initialize the model. During the process the class will check - * the network input, output size, check layer property and - * set layer property. - */ + virtual bool enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) + { + return true; + } + /** + * @brief Initialize the model. During the process the class will check + * the network input, output size, check layer property and + * set layer property. + */ void modelInit(); /** * @brief Get the name of the model. * @return The name of the model. 
*/ - virtual const std::string getModelCategory() const = 0; - inline ModelAttr getAttribute() { return attr_; } - - inline InferenceEngine::CNNNetReader::Ptr getNetReader() const - { - return net_reader_; - } - - protected: - /** - * New infterface to check and update Layer Property - * @brief Set the layer property (layer layout, layer precision, etc.). - * @param[in] network_reader The reader of the network to be set. - */ - virtual bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; - - InferenceEngine::CNNNetReader::Ptr net_reader_; - void setFrameSize(const int &w, const int &h) - { - frame_size_.width = w; - frame_size_.height = h; - } - cv::Size getFrameSize() - { - return frame_size_; - } - - private: - int max_batch_size_; - std::string model_loc_; - cv::Size frame_size_; - }; - - class ObjectDetectionModel : public BaseModel + virtual const std::string getModelCategory() const = 0; + inline ModelAttr getAttribute() + { + return attr_; + } + + inline InferenceEngine::CNNNetReader::Ptr getNetReader() const + { + return net_reader_; + } + +protected: + /** + * New infterface to check and update Layer Property + * @brief Set the layer property (layer layout, layer precision, etc.). + * @param[in] network_reader The reader of the network to be set. 
+ */ + virtual bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; + + InferenceEngine::CNNNetReader::Ptr net_reader_; + void setFrameSize(const int& w, const int& h) + { + frame_size_.width = w; + frame_size_.height = h; + } + cv::Size getFrameSize() { - public: - ObjectDetectionModel(const std::string &model_loc, int batch_size = 1); - virtual bool fetchResults( - const std::shared_ptr &engine, - std::vector &result, - const float &confidence_thresh = 0.3, - const bool &enable_roi_constraint = false) = 0; - virtual bool matToBlob( - const cv::Mat &orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr &engine) = 0; - }; - -} // namespace Models + return frame_size_; + } + +private: + int max_batch_size_; + std::string model_loc_; + cv::Size frame_size_; +}; + +class ObjectDetectionModel : public BaseModel +{ +public: + ObjectDetectionModel(const std::string& model_loc, int batch_size = 1); + virtual bool fetchResults(const std::shared_ptr& engine, + std::vector& result, + const float& confidence_thresh = 0.3, const bool& enable_roi_constraint = false) = 0; + virtual bool matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index, + const std::shared_ptr& engine) = 0; +}; + +} // namespace Models #endif // DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h index 1f749e83..1a64a28e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.h @@ -34,7 +34,7 @@ namespace Models class EmotionDetectionModel : public BaseModel { public: - EmotionDetectionModel(const std::string & model_loc, int batch_size = 1); + EmotionDetectionModel(const std::string& model_loc, int batch_size = 1); /** * @brief Get the name of 
this detection model. @@ -44,8 +44,7 @@ class EmotionDetectionModel : public BaseModel bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; private: - bool verifyOutputLayer(const InferenceEngine::DataPtr & ptr); - + bool verifyOutputLayer(const InferenceEngine::DataPtr& ptr); }; } // namespace Models diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h index 59086c20..d384432b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.h @@ -34,8 +34,8 @@ namespace Models class FaceDetectionModel : public ObjectDetectionModel { public: - FaceDetectionModel(const std::string & model_loc, int batch_size = 1); - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + FaceDetectionModel(const std::string& model_loc, int batch_size = 1); + // void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; /** * @brief Get the name of this detection model. * @return Name of the model. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h index 31f03010..b1d895a1 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.h @@ -29,9 +29,15 @@ namespace Models class FaceReidentificationModel : public BaseModel { public: - FaceReidentificationModel(const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + FaceReidentificationModel(const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h index fab5b6b0..4297603d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.h @@ -34,7 +34,7 @@ namespace Models class HeadPoseDetectionModel : public BaseModel { public: - HeadPoseDetectionModel(const std::string & model_loc, int batch_size = 1); + HeadPoseDetectionModel(const std::string& model_loc, int batch_size = 1); /** * @brief Get the output angle roll. 
@@ -67,7 +67,7 @@ class HeadPoseDetectionModel : public BaseModel const std::string getModelCategory() const override; bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - private: +private: std::string output_angle_r_ = "angle_r_fc"; std::string output_angle_p_ = "angle_p_fc"; std::string output_angle_y_ = "angle_y_fc"; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h index 49149c67..24b72a26 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.h @@ -29,9 +29,15 @@ namespace Models class LandmarksDetectionModel : public BaseModel { public: - LandmarksDetectionModel(const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + LandmarksDetectionModel(const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h index 17f90dd3..2eb0b782 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.h @@ -29,11 +29,23 @@ namespace Models class LicensePlateDetectionModel : public BaseModel { public: - LicensePlateDetectionModel(const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getSeqInputName() {return seq_input_;} - inline const std::string getOutputName() {return output_;} - inline int getMaxSequenceSize() const {return max_sequence_size_;} + LicensePlateDetectionModel(const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getSeqInputName() + { + return seq_input_; + } + inline const std::string getOutputName() + { + return output_; + } + inline int getMaxSequenceSize() const + { + return max_sequence_size_; + } /** * @brief Get the name of this detection model. * @return Name of the model. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h index bf023cea..59d1c971 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.h @@ -21,7 +21,8 @@ #define DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H #include #include "dynamic_vino_lib/models/base_model.h" -namespace Models { +namespace Models +{ /** * @class ObjectDetectionModel * @brief This class generates the face detection model. 
@@ -31,22 +32,17 @@ class ObjectDetectionSSDModel : public ObjectDetectionModel using Result = dynamic_vino_lib::ObjectDetectionResult; public: - ObjectDetectionSSDModel(const std::string & model_loc, int batch_size = 1); + ObjectDetectionSSDModel(const std::string& model_loc, int batch_size = 1); - bool fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh = 0.3, - const bool & enable_roi_constraint = false) override; + bool fetchResults(const std::shared_ptr& engine, + std::vector& results, const float& confidence_thresh = 0.3, + const bool& enable_roi_constraint = false) override; - bool enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) override; + bool enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) override; - bool matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) override; + bool matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index, + const std::shared_ptr& engine) override; /** * @brief Get the name of this detection model. 
@@ -55,7 +51,6 @@ class ObjectDetectionSSDModel : public ObjectDetectionModel const std::string getModelCategory() const override; bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - }; } // namespace Models #endif // DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_SSD_MODEL_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h index 0a9bd45d..ff886514 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2voc_model.h @@ -21,7 +21,8 @@ #define DYNAMIC_VINO_LIB_MODELS_OBJECT_DETECTION_YOLOV2VOC_MODEL_H #include #include "dynamic_vino_lib/models/base_model.h" -namespace Models { +namespace Models +{ /** * @class ObjectDetectionModel * @brief This class generates the face detection model. @@ -31,22 +32,17 @@ class ObjectDetectionYolov2Model : public ObjectDetectionModel using Result = dynamic_vino_lib::ObjectDetectionResult; public: - ObjectDetectionYolov2Model(const std::string & model_loc, int batch_size = 1); + ObjectDetectionYolov2Model(const std::string& model_loc, int batch_size = 1); - bool fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh = 0.3, - const bool & enable_roi_constraint = false) override; + bool fetchResults(const std::shared_ptr& engine, + std::vector& results, const float& confidence_thresh = 0.3, + const bool& enable_roi_constraint = false) override; - bool enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) override; + bool enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) override; - bool matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) override; + 
bool matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, int batch_index, + const std::shared_ptr& engine) override; /** * @brief Get the name of this detection model. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h index 6859ecbe..e13a8ce9 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.h @@ -28,7 +28,7 @@ namespace Models class ObjectSegmentationModel : public BaseModel { public: - ObjectSegmentationModel(const std::string & model_loc, int batch_size = 1); + ObjectSegmentationModel(const std::string& model_loc, int batch_size = 1); inline int getMaxProposalCount() const { return max_proposal_count_; @@ -38,12 +38,9 @@ class ObjectSegmentationModel : public BaseModel return object_size_; } - bool enqueue(const std::shared_ptr & ,const cv::Mat &, - const cv::Rect & ) override; + bool enqueue(const std::shared_ptr&, const cv::Mat&, const cv::Rect&) override; - bool matToBlob( - const cv::Mat & , const cv::Rect &, float , - int , const std::shared_ptr & ); + bool matToBlob(const cv::Mat&, const cv::Rect&, float, int, const std::shared_ptr&); /** * @brief Get the name of this segmentation model. 
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h index 11d8c384..ebd90be8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.h @@ -29,9 +29,9 @@ namespace Models class PersonAttribsDetectionModel : public BaseModel { public: - PersonAttribsDetectionModel(const std::string & model_loc, int batch_size = 1); - //inline const std::string getInputName() {return input_;} - //inline const std::string getOutputName() {return output_;} + PersonAttribsDetectionModel(const std::string& model_loc, int batch_size = 1); + // inline const std::string getInputName() {return input_;} + // inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. @@ -39,8 +39,8 @@ class PersonAttribsDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + // void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + // void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h index 450c0caa..b1f801f5 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.h @@ -29,9 +29,15 @@ namespace Models class PersonReidentificationModel : public 
BaseModel { public: - PersonReidentificationModel(const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getOutputName() {return output_;} + PersonReidentificationModel(const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. @@ -40,8 +46,8 @@ class PersonReidentificationModel : public BaseModel protected: bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + // void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + // void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h index 884f0a66..e716b5bb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.h @@ -29,10 +29,19 @@ namespace Models class VehicleAttribsDetectionModel : public BaseModel { public: - VehicleAttribsDetectionModel(const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} - inline const std::string getColorOutputName() {return color_output_;} - inline const std::string getTypeOutputName() {return type_output_;} + VehicleAttribsDetectionModel(const std::string& model_loc, int batch_size = 1); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getColorOutputName() 
+ { + return color_output_; + } + inline const std::string getTypeOutputName() + { + return type_output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. @@ -40,8 +49,8 @@ class VehicleAttribsDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + // void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; + // void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; bool updateLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string color_output_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index f0f1d353..ab5f0d2a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -52,50 +52,49 @@ namespace Outputs class BaseOutput { public: - explicit BaseOutput(std::string output_name) - : output_name_(output_name) {} + explicit BaseOutput(std::string output_name) : output_name_(output_name) + { + } /** * @brief Generate output content according to the license plate detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the vehicle attributes detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the person atrribute detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the face reidentification result. 
*/ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the landmarks detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the face detection result. */ - virtual void accept( - const std::vector&) + virtual void accept(const std::vector&) { } /** * @brief Generate output content according to the face detection result. */ - virtual void accept( - const std::vector&) + virtual void accept(const std::vector&) { } /** @@ -126,7 +125,7 @@ class BaseOutput /** * @brief Generate output content according to the person reidentification result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector&) { } /** @@ -142,29 +141,39 @@ class BaseOutput virtual void handleOutput() = 0; void setPipeline(Pipeline* const pipeline); - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponseForFace( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} - virtual void setServiceResponse( - boost::shared_ptr response) {} + virtual void setServiceResponse(boost::shared_ptr response) + { + } + virtual void setServiceResponseForFace(boost::shared_ptr response) + { + } + virtual void setServiceResponse(boost::shared_ptr response) + { + } + virtual void setServiceResponse(boost::shared_ptr response) + { + } + virtual void setServiceResponse(boost::shared_ptr response) + { + } + virtual void setServiceResponse(boost::shared_ptr response) + { + } + virtual void setServiceResponse(boost::shared_ptr response) + { + } + 
virtual void setServiceResponse(boost::shared_ptr response) + { + } Pipeline* getPipeline() const; cv::Mat getFrame() const; - virtual void clearData() {} + virtual void clearData() + { + } - protected: +protected: cv::Mat frame_; - Pipeline * pipeline_; + Pipeline* pipeline_; std::string output_name_; }; } // namespace Outputs diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index a162772e..e8819d9d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -34,9 +34,8 @@ namespace Outputs */ class ImageWindowOutput : public BaseOutput { - public: - explicit ImageWindowOutput(const std::string& window_name, - int focal_length = 950); +public: + explicit ImageWindowOutput(const std::string& window_name, int focal_length = 950); /** * @brief Calculate the camera matrix of a frame for image * window output. @@ -57,45 +56,39 @@ class ImageWindowOutput : public BaseOutput * the license plate detection result. * @param[in] A license plate detection result objetc. */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the vehicle attributes detection result. * @param[in] A vehicle attributes detection result objetc. */ - void accept( - const std::vector &) override; - /** - * @brief Generate image window output content according to - * the landmarks detection result. - * @param[in] A landmarks detection result objetc. - */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; + /** +*@brief Generate image window output content according to +*the landmarks detection result. +*@param[in] A landmarks detection result objetc. 
+*/ + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the person attributes detection result. * @param[in] A person attributes detection result objetc. */ - void accept( - const std::vector &) override; - /** - * @brief Generate image window output content according to - * the landmarks detetection result. - * the face reidentification result. - * @param[in] A face reidentification result objetc. - */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; + /** + * @brief Generate image window output content according to + * the landmarks detetection result. + * the face reidentification result. + * @param[in] A face reidentification result objetc. + */ + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the face detection result. * @param[in] A face detection result objetc. */ - - void accept( - const std::vector&) override; + + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the emotion detection result. @@ -126,22 +119,22 @@ class ImageWindowOutput : public BaseOutput * @param[in] An object segmentation result objetc. */ void accept(const std::vector&) override; - /** - * @brief Generate image window output content according to - * the person re-ID result. - * @param[in] An object segmentation result objetc. - */ - void accept(const std::vector &) override; - /** - * @brief Merge mask for image window ouput - * the object segmentation result. - * @param[in] An object segmentation result objetc. - */ - void mergeMask(const std::vector &); - private: + /** + * @brief Generate image window output content according to + * the person re-ID result. + * @param[in] An object segmentation result objetc. + */ + void accept(const std::vector&) override; + /** + * @brief Merge mask for image window ouput + * the object segmentation result. 
+ * @param[in] An object segmentation result objetc. + */ + void mergeMask(const std::vector&); - unsigned findOutput(const cv::Rect &); - //void initOutputs(unsigned size); +private: + unsigned findOutput(const cv::Rect&); + // void initOutputs(unsigned size); /** * @brief Calculate the axises of the coordinates for showing * the image window. @@ -155,20 +148,20 @@ class ImageWindowOutput : public BaseOutput */ cv::Mat getRotationTransform(double yaw, double pitch, double roll); - void mergeMask(const std::vector &); + void mergeMask(const std::vector&); struct OutputData { std::string desc; cv::Rect rect; cv::Scalar scalar; - cv::Point hp_cp; // for headpose, center point - cv::Point hp_x; // for headpose, end point of xAxis - cv::Point hp_y; // for headpose, end point of yAxis - cv::Point hp_zs; // for headpose, start point of zAxis - cv::Point hp_ze; // for headpose, end point of zAxis - cv::Point pa_top; // for person attributes, top position - cv::Point pa_bottom; //for person attributes, bottom position + cv::Point hp_cp; // for headpose, center point + cv::Point hp_x; // for headpose, end point of xAxis + cv::Point hp_y; // for headpose, end point of yAxis + cv::Point hp_zs; // for headpose, start point of zAxis + cv::Point hp_ze; // for headpose, end point of zAxis + cv::Point pa_top; // for person attributes, top position + cv::Point pa_bottom; // for person attributes, bottom position std::vector landmarks; }; @@ -176,13 +169,12 @@ class ImageWindowOutput : public BaseOutput const std::string window_name_; float focal_length_; cv::Mat camera_matrix_; - std::vector> colors_ = { - {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, - {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, - {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, - {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, - {81, 0, 81} - }; + std::vector> colors_ = { { 128, 64, 128 }, { 232, 35, 244 
}, { 70, 70, 70 }, { 156, 102, 102 }, + { 153, 153, 190 }, { 153, 153, 153 }, { 30, 170, 250 }, { 0, 220, 220 }, + { 35, 142, 107 }, { 152, 251, 152 }, { 180, 130, 70 }, { 60, 20, 220 }, + { 0, 0, 255 }, { 142, 0, 0 }, { 70, 0, 0 }, { 100, 60, 0 }, + { 90, 0, 0 }, { 230, 0, 0 }, { 32, 11, 119 }, { 0, 74, 111 }, + { 81, 0, 81 } }; }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB_OUTPUTS_IMAGE_WINDOW_OUTPUT_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index 170ab96b..d65e9e06 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -42,7 +42,6 @@ #include #include - #include #include @@ -61,13 +60,17 @@ namespace Outputs class RosServiceOutput : public RosTopicOutput { public: - RosServiceOutput(std::string pipeline_name): pipeline_name_(pipeline_name) {} + RosServiceOutput(std::string pipeline_name) : pipeline_name_(pipeline_name) + { + } /** * @brief Publish all the detected infomations generated by the accept * functions with ros topic. 
*/ - void handleOutput() override {} + void handleOutput() override + { + } void clearData(); void setServiceResponse(boost::shared_ptr response); @@ -78,7 +81,7 @@ class RosServiceOutput : public RosTopicOutput void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); - + private: const std::string service_name_; const std::string pipeline_name_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h index bfaa8e4b..2433c7ff 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.h @@ -59,7 +59,7 @@ namespace Outputs */ class RosTopicOutput : public BaseOutput { - public: +public: RosTopicOutput(){}; RosTopicOutput(std::string pipeline_name); /** @@ -77,46 +77,43 @@ class RosTopicOutput : public BaseOutput * the license plate detection result. * @param[in] A license plate detection result objetc. */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the vehicle attributes detection result. * @param[in] A vehicle attributes detection result objetc. */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the person attributes detection result. * @param[in] results a bundle of person attributes detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the face reidentification result. * @param[in] results a bundle of face reidentification results. 
*/ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the person reidentification result. * @param[in] results a bundle of person reidentification results. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the object segmentation result. * @param[in] results a bundle of object segmentation results. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the face detection result. * @param[in] An face detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector&) override; /** * @brief Generate ros topic infomation according to * the emotion detection result. @@ -128,7 +125,7 @@ class RosTopicOutput : public BaseOutput * the age gender detection result. * @param[in] An age gender detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /**detected_objects_topic_ * @brief Generate ros topic infomation according to * the headpose detection result. 
@@ -142,14 +139,14 @@ class RosTopicOutput : public BaseOutput */ void accept(const std::vector&) override; - - private: +private: std_msgs::Header getHeader(); const std::string pipeline_name_; const std::string topic_name_; cv::Mat frame_; ros::NodeHandle nh_; - protected: + +protected: ros::Publisher pub_face_; std::shared_ptr faces_topic_; ros::Publisher pub_emotion_; @@ -172,7 +169,6 @@ class RosTopicOutput : public BaseOutput std::shared_ptr license_plate_topic_; ros::Publisher pub_vehicle_attribs_; std::shared_ptr vehicle_attribs_topic_; - }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB_OUTPUTS_ROS_TOPIC_OUTPUT_H diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h index 6f944005..3b62186e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.h @@ -20,8 +20,6 @@ #ifndef DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ #define DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ - - #include #include #include @@ -43,7 +41,7 @@ class RvizOutput : public BaseOutput * @brief Construct frame for rviz * @param[in] A frame. */ - void feedFrame(const cv::Mat &) override; + void feedFrame(const cv::Mat&) override; /** * @brief Show all the contents generated by the accept * functions with rviz. @@ -54,57 +52,55 @@ class RvizOutput : public BaseOutput * the license plate detection result. * @param[in] A license plate detection result objetc. */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate image window output content according to * the vehicle attributes detection result. * @param[in] A vehicle attributes detection result objetc. */ - void accept( - const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the person attributes detection result. 
* @param[in] A person attributes detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the face reidentification result. * @param[in] A face reidentification result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the face detection result. * @param[in] A face detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the object detection result. * @param[in] results A bundle of object detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the object segmentation result. * @param[in] results A bundle of object segmentation results. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the age and gender detection result. * @param[in] A head pose detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to * the headpose detection result. * @param[in] An age gender detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector&) override; /** * @brief Generate rviz output content according to @@ -112,17 +108,17 @@ class RvizOutput : public BaseOutput * @param[in] An object segmentation result objetc. */ void accept(const std::vector&) override; - /** - * @brief Generate rviz output content according to - * the person re-ID result. - * @param[in] An object segmentation result objetc. 
- */ - void accept(const std::vector &) override; - /** - * @brief Merge mask for image window ouput - * the object segmentation result. - * @param[in] An object segmentation result objetc. - */ + /** + * @brief Generate rviz output content according to + * the person re-ID result. + * @param[in] An object segmentation result objetc. + */ + void accept(const std::vector&) override; + /** + * @brief Merge mask for image window ouput + * the object segmentation result. + * @param[in] An object segmentation result objetc. + */ private: std_msgs::Header getHeader(); @@ -133,4 +129,3 @@ class RvizOutput : public BaseOutput }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ - diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h index e8a06b6b..0120e7c6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h @@ -43,7 +43,7 @@ */ class Pipeline { - public: +public: explicit Pipeline(const std::string& name = "pipeline"); /** * @brief Add input device to the pipeline. @@ -51,8 +51,7 @@ class Pipeline * @param[in] input_device the input device instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& name, - std::shared_ptr input_device); + bool add(const std::string& name, std::shared_ptr input_device); /** * @brief Add inference network to the pipeline. * @param[in] parent name of the parent device or inference. @@ -68,8 +67,7 @@ class Pipeline * @param[in] inference the inference instance to be added. * @return whether the add operation is successful */ - bool add(const std::string & name, - std::shared_ptr inference); + bool add(const std::string& name, std::shared_ptr inference); /** * @brief Add output device to the pipeline. * @param[in] parent name of the parent inference. 
@@ -77,16 +75,14 @@ class Pipeline * @param[in] output the output instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& parent, const std::string& name, - std::shared_ptr output); + bool add(const std::string& parent, const std::string& name, std::shared_ptr output); /** * @brief Add output device to the pipeline. * @param[in] name name of the current output device. * @param[in] output the output instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& name, - std::shared_ptr output); + bool add(const std::string& name, std::shared_ptr output); /** * @brief Add inference network-output device edge to the pipeline. * @param[in] parent name of the parent inference. @@ -99,7 +95,7 @@ class Pipeline * @param[in] name of the instance. * @return the category order of this instance. */ - void addConnect(const std::string & parent, const std::string & name); + void addConnect(const std::string& parent, const std::string& name); /** * @brief Do the inference once. * Data flow from input device to inference network, then to output device. 
@@ -146,12 +142,12 @@ class Pipeline return fps_; } - std::string findFilterConditions(const std::string & input, const std::string & output) + std::string findFilterConditions(const std::string& input, const std::string& output) { return params_->findFilterConditions(input, output); } - private: +private: void initInferenceCounter(); void increaseInferenceCounter(); void decreaseInferenceCounter(); @@ -174,10 +170,8 @@ class Pipeline std::shared_ptr input_device_; std::string input_device_name_; std::multimap next_; - std::map> - name_to_detection_map_; - std::map> - name_to_output_map_; + std::map> name_to_detection_map_; + std::map> name_to_output_map_; int total_inference_ = 0; std::set output_names_; int width_ = 0; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h index 27c44fe8..d131c530 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.h @@ -37,30 +37,30 @@ * @class PipelineManager * @brief This class manages the lifecycles of pipelines. */ -class PipelineManager { - public: +class PipelineManager +{ +public: /** * @brief Get the singleton instance of PipelineManager class. * The instance will be created when first call. * @return The reference of PipelineManager instance. 
*/ - static PipelineManager& getInstance() { + static PipelineManager& getInstance() + { static PipelineManager manager_; return manager_; }; - std::shared_ptr createPipeline( - const Params::ParamManager::PipelineRawData & params); + std::shared_ptr createPipeline(const Params::ParamManager::PipelineRawData& params); void removePipeline(const std::string& name); - PipelineManager& updatePipeline( - const std::string& name, - const Params::ParamManager::PipelineRawData & params); - + PipelineManager& updatePipeline(const std::string& name, const Params::ParamManager::PipelineRawData& params); + void runAll(); void stopAll(); void joinAll(); - - enum PipelineState { + + enum PipelineState + { PipelineState_ThreadNotCreated = 0, PipelineState_ThreadStopped = 1, PipelineState_ThreadRunning = 2, @@ -68,14 +68,15 @@ class PipelineManager { PipelineState_Error = 4 }; - struct PipelineData { + struct PipelineData + { Params::ParamManager::PipelineRawData params; std::shared_ptr pipeline; std::vector> spin_nodes; std::shared_ptr thread; PipelineState state; }; - void runService(); + void runService(); std::map getPipelines() { return pipelines_; @@ -85,42 +86,39 @@ class PipelineManager { return &pipelines_; } - private: +private: PipelineManager(){}; PipelineManager(PipelineManager const&); void operator=(PipelineManager const&); void threadPipeline(const char* name); - std::map> - parseInputDevice(const PipelineData & params); - std::map> - parseOutput(const PipelineData & pdata); + std::map> parseInputDevice(const PipelineData& params); + std::map> parseOutput(const PipelineData& pdata); std::map> parseInference(const Params::ParamManager::PipelineRawData& params); - std::shared_ptr createFaceDetection( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createAgeGenderRecognition( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createEmotionRecognition( - const Params::ParamManager::InferenceRawData& infer); - 
std::shared_ptr createHeadPoseEstimation( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createObjectDetection( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createObjectSegmentation( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createPersonReidentification( - const Params::ParamManager::InferenceRawData& infer); - std::shared_ptr createFaceReidentification( - const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr createPersonAttribsDetection( - const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr createVehicleAttribsDetection( - const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr createLicensePlateDetection( - const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr createLandmarksDetection( - const Params::ParamManager::InferenceRawData & infer); - + std::shared_ptr + createFaceDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createAgeGenderRecognition(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createEmotionRecognition(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createHeadPoseEstimation(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createObjectSegmentation(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createPersonReidentification(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createFaceReidentification(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createPersonAttribsDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createLicensePlateDetection(const 
Params::ParamManager::InferenceRawData& infer); + std::shared_ptr + createLandmarksDetection(const Params::ParamManager::InferenceRawData& infer); std::map pipelines_; Engines::EngineManager engine_manager_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h index 7718c273..f1480d06 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.h @@ -35,7 +35,6 @@ #include "opencv2/opencv.hpp" #include "vino_param_lib/param_manager.h" - const char kInputType_Image[] = "Image"; const char kInputType_Video[] = "Video"; const char kInputType_StandardCamera[] = "StandardCamera"; @@ -71,21 +70,21 @@ const char kInferTpye_LicensePlateDetection[] = "LicensePlateDetection"; */ class PipelineParams { - public: +public: explicit PipelineParams(const std::string& name); - explicit PipelineParams(const Params::ParamManager::PipelineRawData & params); - Params::ParamManager::PipelineRawData getPipeline(const std::string & name); - PipelineParams & operator=(const Params::ParamManager::PipelineRawData & params); + explicit PipelineParams(const Params::ParamManager::PipelineRawData& params); + Params::ParamManager::PipelineRawData getPipeline(const std::string& name); + PipelineParams& operator=(const Params::ParamManager::PipelineRawData& params); void update(); - void update(const Params::ParamManager::PipelineRawData & params); + void update(const Params::ParamManager::PipelineRawData& params); bool isOutputTo(std::string& name); bool isGetFps(); - std::string findFilterConditions(const std::string & input, const std::string & output); + std::string findFilterConditions(const std::string& input, const std::string& output); const std::string kInputType_Image = "Image"; const std::string kOutputTpye_RViz = "RViz"; - private: +private: Params::ParamManager::PipelineRawData params_; }; diff --git 
a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h index cba664a8..6924ebec 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h @@ -40,23 +40,20 @@ namespace vino_service { -template -class FrameProcessingServer +template +class FrameProcessingServer { public: - explicit FrameProcessingServer( - const std::string & service_name, - const std::string & config_path); + explicit FrameProcessingServer(const std::string& service_name, const std::string& config_path); void initService(); + private: std::string service_name_; std::string config_path_; std::shared_ptr nh_; - std::shared_ptr service_; - - bool cbService(ros::ServiceEvent& event); + std::shared_ptr service_; - + bool cbService(ros::ServiceEvent& event); }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h index 3437f2c4..76d35adc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.h @@ -41,34 +41,28 @@ namespace vino_service { -template -class PipelineProcessingServer +template +class PipelineProcessingServer { public: - explicit PipelineProcessingServer( - const std::string & service_name); + explicit PipelineProcessingServer(const std::string& service_name); private: - - void initPipelineService(); + void initPipelineService(); std::shared_ptr nh_; - bool cbService(ros::ServiceEvent& event); + bool cbService(ros::ServiceEvent& event); + + void setResponse(ros::ServiceEvent& 
event); - void setResponse( - ros::ServiceEvent& event); - void setPipelineByRequest(std::string pipeline_name, PipelineManager::PipelineState state); std::shared_ptr service_; - std::map * pipelines_; + std::map* pipelines_; std::string service_name_; - - - }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/slog.h b/dynamic_vino_lib/include/dynamic_vino_lib/slog.h index 56d7ebb8..1d81bf29 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/slog.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/slog.h @@ -29,33 +29,34 @@ namespace slog { #if 1 - enum COLOR { - RESET = 0, - BLUE = 1, - GREEN = 2, - YELLOW = 3, - RED = 4, - }; +enum COLOR +{ + RESET = 0, + BLUE = 1, + GREEN = 2, + YELLOW = 3, + RED = 4, +}; #else -//the following are UBUNTU/LINUX ONLY terminal color codes. -#define RESET "\033[0m" -#define BLACK "\033[30m" /* Black */ -#define RED "\033[31m" /* Red */ -#define GREEN "\033[32m" /* Green */ -#define YELLOW "\033[33m" /* Yellow */ -#define BLUE "\033[34m" /* Blue */ -#define MAGENTA "\033[35m" /* Magenta */ -#define CYAN "\033[36m" /* Cyan */ -#define WHITE "\033[37m" /* White */ -#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ -#define BOLDRED "\033[1m\033[31m" /* Bold Red */ -#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ -#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ -#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ -#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ -#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ -#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ +// the following are UBUNTU/LINUX ONLY terminal color codes. 
+#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ #endif /** @@ -80,14 +81,13 @@ class LogStream bool _new_line; int _color_id; - public: +public: /** * @brief A constructor. Creates an LogStream object * @param prefix The prefix to print */ - LogStream(const std::string & prefix, std::ostream & log_stream, - const int color_id = -1) - : _prefix(prefix), _new_line(true), _color_id(color_id) + LogStream(const std::string& prefix, std::ostream& log_stream, const int color_id = -1) + : _prefix(prefix), _new_line(true), _color_id(color_id) { _log_stream = &log_stream; } @@ -99,9 +99,10 @@ class LogStream template LogStream& operator<<(const T& arg) { - if (_new_line) { + if (_new_line) + { setLineColor(); - (*_log_stream) << "[ " << _prefix << " ] "; + (*_log_stream) << "[ " << _prefix << " ] "; _new_line = false; } @@ -120,28 +121,30 @@ class LogStream void setLineColor() { - switch(_color_id){ - case BLUE: - (*_log_stream) << "\033[34m"; - break; - case GREEN: - (*_log_stream) << "\033[32m"; - break; - case YELLOW: - (*_log_stream) << "\033[33m"; - break; - case RED: - (*_log_stream) << "\033[31m"; - break; - default: - break; - } + switch (_color_id) + { + case BLUE: + (*_log_stream) << "\033[34m"; + break; + case GREEN: + (*_log_stream) << "\033[32m"; + break; + case 
YELLOW: + (*_log_stream) << "\033[33m"; + break; + case RED: + (*_log_stream) << "\033[31m"; + break; + default: + break; + } } void resetLineColor() { - if(_color_id > 0){ - (*_log_stream) << "\033[0m"; //RESET + if (_color_id > 0) + { + (*_log_stream) << "\033[0m"; // RESET } } }; @@ -149,25 +152,27 @@ class LogStream class NullStream { public: - NullStream(){} + NullStream() + { + } - NullStream(const std::string & prefix, std::ostream & log_stream) + NullStream(const std::string& prefix, std::ostream& log_stream) { (void)prefix; (void)log_stream; } - template - NullStream & operator<<(const T & arg) + template + NullStream& operator<<(const T& arg) { return *this; } }; #ifdef LOG_LEVEL_DEBUG - static LogStream debug("DEBUG", std::cout, GREEN); +static LogStream debug("DEBUG", std::cout, GREEN); #else - static NullStream debug; +static NullStream debug; #endif static LogStream info("INFO", std::cout, BLUE); static LogStream warn("WARNING", std::cout, YELLOW); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp index 431fa86d..d928f67a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp @@ -24,7 +24,7 @@ #include #include #include -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) #include #endif #include @@ -46,36 +46,38 @@ * @param s - string to trim * @return trimmed string */ -inline std::string & trim(std::string & s) +inline std::string& trim(std::string& s) { - s.erase(s.begin(), - std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); - s.erase( - std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), - s.end()); + s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), 
s.end()); return s; } -static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) +static std::ostream& operator<<(std::ostream& os, const InferenceEngine::Version* version) { os << "\n\tAPI version ............ "; - if (nullptr == version) { + if (nullptr == version) + { os << "UNKNOWN"; - } else { + } + else + { os << version->apiVersion.major << "." << version->apiVersion.minor; - if (nullptr != version->buildNumber) { - os << "\n\t" << - "Build .................. " << version->buildNumber; + if (nullptr != version->buildNumber) + { + os << "\n\t" + << "Build .................. " << version->buildNumber; } - if (nullptr != version->description) { - os << "\n\t" << - "Description ............ " << version->description; + if (nullptr != version->description) + { + os << "\n\t" + << "Description ............ " << version->description; } } return os; } -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) /** * @class PluginVersion * @brief A PluginVersion class stores plugin version and initialization status @@ -84,9 +86,10 @@ struct PluginVersion : public InferenceEngine::Version { bool initialized = false; - explicit PluginVersion(const InferenceEngine::Version * ver) + explicit PluginVersion(const InferenceEngine::Version* ver) { - if (nullptr == ver) { + if (nullptr == ver) + { return; } InferenceEngine::Version::operator=(*ver); @@ -99,38 +102,47 @@ struct PluginVersion : public InferenceEngine::Version } }; -static UNUSED std::ostream & operator<<(std::ostream & os, const PluginVersion & version) +static UNUSED std::ostream& operator<<(std::ostream& os, const PluginVersion& version) { os << "\tPlugin version ......... "; - if (!version) { + if (!version) + { os << "UNKNOWN"; - } else { + } + else + { os << version.apiVersion.major << "." << version.apiVersion.minor; } os << "\n\tPlugin name ............ 
"; - if (!version || version.description == nullptr) { + if (!version || version.description == nullptr) + { os << "UNKNOWN"; - } else { + } + else + { os << version.description; } os << "\n\tPlugin build ........... "; - if (!version || version.buildNumber == nullptr) { + if (!version || version.buildNumber == nullptr) + { os << "UNKNOWN"; - } else { + } + else + { os << version.buildNumber; } return os; } -inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream & stream) +inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream& stream) { - const PluginVersion * pluginVersion = nullptr; - ptr->GetVersion((const InferenceEngine::Version * &)pluginVersion); + const PluginVersion* pluginVersion = nullptr; + ptr->GetVersion((const InferenceEngine::Version*&)pluginVersion); stream << pluginVersion << std::endl; } -#endif // (defined(USE_OLD_E_PLUGIN_API)) +#endif // (defined(USE_OLD_E_PLUGIN_API)) #endif // DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/dynamic_vino_lib/src/engines/engine.cpp index 2f1eaf29..04fe6d77 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/dynamic_vino_lib/src/engines/engine.cpp @@ -20,18 +20,14 @@ */ #include "dynamic_vino_lib/engines/engine.h" -#if(defined(USE_OLD_E_PLUGIN_API)) -Engines::Engine::Engine( - InferenceEngine::InferencePlugin plg, - const Models::BaseModel::Ptr base_model) +#if (defined(USE_OLD_E_PLUGIN_API)) +Engines::Engine::Engine(InferenceEngine::InferencePlugin plg, const Models::BaseModel::Ptr base_model) { - request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})) - .CreateInferRequestPtr(); + request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})).CreateInferRequestPtr(); } #endif -Engines::Engine::Engine( - InferenceEngine::InferRequest::Ptr & request) +Engines::Engine::Engine(InferenceEngine::InferRequest::Ptr& request) { request_ = request; } diff --git 
a/dynamic_vino_lib/src/engines/engine_manager.cpp b/dynamic_vino_lib/src/engines/engine_manager.cpp index 27b6ce9c..591b12e2 100644 --- a/dynamic_vino_lib/src/engines/engine_manager.cpp +++ b/dynamic_vino_lib/src/engines/engine_manager.cpp @@ -23,14 +23,14 @@ #include "dynamic_vino_lib/utils/version_info.hpp" #include #include -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) #include #endif -std::shared_ptr Engines::EngineManager::createEngine( - const std::string & device, const std::shared_ptr & model) +std::shared_ptr Engines::EngineManager::createEngine(const std::string& device, + const std::shared_ptr& model) { -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) return createEngine_beforeV2019R2(device, model); #else return createEngine_V2019R2_plus(device, model); @@ -38,7 +38,7 @@ std::shared_ptr Engines::EngineManager::createEngine( } std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( - const std::string & device, const std::shared_ptr & model) + const std::string& device, const std::shared_ptr& model) { InferenceEngine::Core core; auto executable_network = core.LoadNetwork(model->getNetReader()->getNetwork(), device); @@ -47,64 +47,64 @@ std::shared_ptr Engines::EngineManager::createEngine_V2019R2_pl return std::make_shared(request); } -#if(defined(USE_OLD_E_PLUGIN_API)) +#if (defined(USE_OLD_E_PLUGIN_API)) std::shared_ptr Engines::EngineManager::createEngine_beforeV2019R2( - const std::string & device, const std::shared_ptr & model) + const std::string& device, const std::shared_ptr& model) { - if(plugins_for_devices_.find(device) == plugins_for_devices_.end()) { - auto pcommon = Params::ParamManager::getInstance().getCommon(); - plugins_for_devices_[device] = *makePluginByName(device, pcommon.custom_cpu_library, - pcommon.custom_cldnn_library, pcommon.enable_performance_count); - slog::info << "Created plugin for " << device << slog::endl; + if (plugins_for_devices_.find(device) == 
plugins_for_devices_.end()) + { + auto pcommon = Params::ParamManager::getInstance().getCommon(); + plugins_for_devices_[device] = *makePluginByName(device, pcommon.custom_cpu_library, pcommon.custom_cldnn_library, + pcommon.enable_performance_count); + slog::info << "Created plugin for " << device << slog::endl; } - auto executeable_network = - plugins_for_devices_[device].LoadNetwork(model->getNetReader()->getNetwork(), {}); + auto executeable_network = plugins_for_devices_[device].LoadNetwork(model->getNetReader()->getNetwork(), {}); auto request = executeable_network.CreateInferRequestPtr(); return std::make_shared(request); } std::unique_ptr -Engines::EngineManager::makePluginByName( - const std::string & device_name, const std::string & custom_cpu_library_message, - const std::string & custom_cldnn_message, bool performance_message) +Engines::EngineManager::makePluginByName(const std::string& device_name, const std::string& custom_cpu_library_message, + const std::string& custom_cldnn_message, bool performance_message) { slog::info << "Creating plugin for " << device_name << slog::endl; InferenceEngine::InferencePlugin plugin = - InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}) - .getPluginByDevice(device_name); + InferenceEngine::PluginDispatcher({ "../../../lib/intel64", "" }).getPluginByDevice(device_name); /** Printing plugin version **/ printPluginVersion(plugin, std::cout); /** Load extensions for the CPU plugin **/ - if ((device_name.find("CPU") != std::string::npos)) { + if ((device_name.find("CPU") != std::string::npos)) + { plugin.AddExtension(std::make_shared()); - if (!custom_cpu_library_message.empty()) { - slog::info << "custom cpu library is not empty, tyring to use this extension:" - << custom_cpu_library_message << slog::endl; + if (!custom_cpu_library_message.empty()) + { + slog::info << "custom cpu library is not empty, tyring to use this extension:" << custom_cpu_library_message + << slog::endl; // CPU(MKLDNN) extensions are 
loaded as a shared library and passed as a // pointer to base // extension - auto extension_ptr = - InferenceEngine::make_so_pointer(custom_cpu_library_message); + auto extension_ptr = InferenceEngine::make_so_pointer(custom_cpu_library_message); plugin.AddExtension(extension_ptr); } - } else if (!custom_cldnn_message.empty()) { - slog::info << "custom cldnn library is not empty, tyring to use this extension:" - << custom_cldnn_message << slog::endl; + } + else if (!custom_cldnn_message.empty()) + { + slog::info << "custom cldnn library is not empty, tyring to use this extension:" << custom_cldnn_message + << slog::endl; // Load Extensions for other plugins not CPU - plugin.SetConfig( - {{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); + plugin.SetConfig({ { InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message } }); } - if (performance_message) { - plugin.SetConfig({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, - InferenceEngine::PluginConfigParams::YES}}); + if (performance_message) + { + plugin.SetConfig( + { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } }); } - return std::make_unique( - InferenceEngine::InferenceEnginePluginPtr(plugin)); + return std::make_unique(InferenceEngine::InferenceEnginePluginPtr(plugin)); } #endif diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp index b8393b91..ea27b369 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp @@ -27,37 +27,33 @@ #include "dynamic_vino_lib/outputs/base_output.h" // AgeGenderResult -dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect& location) - : Result(location) +dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect& location) : Result(location) { } // AgeGender Detection 
-dynamic_vino_lib::AgeGenderDetection::AgeGenderDetection() - : dynamic_vino_lib::BaseInference() +dynamic_vino_lib::AgeGenderDetection::AgeGenderDetection() : dynamic_vino_lib::BaseInference() { } dynamic_vino_lib::AgeGenderDetection::~AgeGenderDetection() = default; -void dynamic_vino_lib::AgeGenderDetection::loadNetwork( - std::shared_ptr network) +void dynamic_vino_lib::AgeGenderDetection::loadNetwork(std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::AgeGenderDetection::enqueue( - const cv::Mat& frame, const cv::Rect& input_frame_loc) +bool dynamic_vino_lib::AgeGenderDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), - valid_model_->getInputName()); - if (!succeed) return false; + bool succeed = dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); + if (!succeed) + return false; Result r(input_frame_loc); results_.emplace_back(r); return true; @@ -71,12 +67,11 @@ bool dynamic_vino_lib::AgeGenderDetection::submitRequest() bool dynamic_vino_lib::AgeGenderDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) + return false; auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr genderBlob = - request->GetBlob(valid_model_->getOutputGenderName()); - InferenceEngine::Blob::Ptr ageBlob = - request->GetBlob(valid_model_->getOutputAgeName()); + InferenceEngine::Blob::Ptr genderBlob = request->GetBlob(valid_model_->getOutputGenderName()); + InferenceEngine::Blob::Ptr ageBlob = request->GetBlob(valid_model_->getOutputAgeName()); for (size_t i = 0; i < results_.size(); ++i) { @@ -91,8 +86,7 @@ int 
dynamic_vino_lib::AgeGenderDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -102,8 +96,7 @@ const std::string dynamic_vino_lib::AgeGenderDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::AgeGenderDetection::observeOutput( - const std::shared_ptr& output) +void dynamic_vino_lib::AgeGenderDetection::observeOutput(const std::shared_ptr& output) { if (output != nullptr) { @@ -111,15 +104,17 @@ void dynamic_vino_lib::AgeGenderDetection::observeOutput( } } -const std::vector dynamic_vino_lib::AgeGenderDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::AgeGenderDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Age gender detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Age gender detection does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/base_filter.cpp b/dynamic_vino_lib/src/inferences/base_filter.cpp index cf308114..0e9a22b8 100644 --- a/dynamic_vino_lib/src/inferences/base_filter.cpp +++ b/dynamic_vino_lib/src/inferences/base_filter.cpp @@ -22,48 +22,47 @@ #include #include -dynamic_vino_lib::BaseFilter::BaseFilter() {} +dynamic_vino_lib::BaseFilter::BaseFilter() +{ +} -bool dynamic_vino_lib::BaseFilter::isValidFilterConditions( - const std::string & filter_conditions) +bool dynamic_vino_lib::BaseFilter::isValidFilterConditions(const std::string& filter_conditions) { return strip(filter_conditions) != ""; } -void dynamic_vino_lib::BaseFilter::acceptFilterConditions( - const std::string & filter_conditions) +void dynamic_vino_lib::BaseFilter::acceptFilterConditions(const std::string& filter_conditions) { - if (striped_conditions_.empty()) { + if (striped_conditions_.empty()) + { striped_conditions_ = strip(filter_conditions); std::vector infix_conditions = split(striped_conditions_); infixToSuffix(infix_conditions); } } -bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string & str) +bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string& str) { - if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != - relation_operators_.end()) + if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != relation_operators_.end()) { return true; } return false; } -bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string & str) +bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string& str) { - if (std::find(logic_operators_.begin(), logic_operators_.end(), str) != - logic_operators_.end()) + if (std::find(logic_operators_.begin(), 
logic_operators_.end(), str) != logic_operators_.end()) { return true; } return false; } -bool dynamic_vino_lib::BaseFilter::isPriorTo( - const std::string & operator1, const std::string & operator2) +bool dynamic_vino_lib::BaseFilter::isPriorTo(const std::string& operator1, const std::string& operator2) { - if (isRelationOperator(operator1) && isLogicOperator(operator2)) { + if (isRelationOperator(operator1) && isLogicOperator(operator2)) + { return true; } return false; @@ -71,82 +70,111 @@ bool dynamic_vino_lib::BaseFilter::isPriorTo( std::string dynamic_vino_lib::BaseFilter::boolToStr(bool value) { - if (value) {return "true";} + if (value) + { + return "true"; + } return "false"; } -bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) +bool dynamic_vino_lib::BaseFilter::strToBool(const std::string& value) { - if (!value.compare("true")) {return true;} else if (!value.compare("false")) { + if (!value.compare("true")) + { + return true; + } + else if (!value.compare("false")) + { return false; - } else { + } + else + { slog::err << "Invalid string: " << value << " for bool conversion!" << slog::endl; } return false; } -const std::vector & -dynamic_vino_lib::BaseFilter::getSuffixConditions() const +const std::vector& dynamic_vino_lib::BaseFilter::getSuffixConditions() const { return suffix_conditons_; } -bool dynamic_vino_lib::BaseFilter::logicOperation( - const std::string & logic1, const std::string & op, const std::string & logic2) +bool dynamic_vino_lib::BaseFilter::logicOperation(const std::string& logic1, const std::string& op, + const std::string& logic2) { - if (!op.compare("&&")) { + if (!op.compare("&&")) + { return strToBool(logic1) && strToBool(logic2); - } else if (!op.compare("||")) { + } + else if (!op.compare("||")) + { return strToBool(logic1) || strToBool(logic2); - } else { + } + else + { slog::err << "Invalid operator: " << op << " for logic operation!" 
<< slog::endl; return false; } } -bool dynamic_vino_lib::BaseFilter::stringCompare( - const std::string & candidate, const std::string & op, const std::string & target) +bool dynamic_vino_lib::BaseFilter::stringCompare(const std::string& candidate, const std::string& op, + const std::string& target) { - if (!op.compare("==")) { + if (!op.compare("==")) + { return !target.compare(candidate); - } else if (!op.compare("!=")) { + } + else if (!op.compare("!=")) + { return target.compare(candidate); - } else { + } + else + { slog::err << "Invalid operator " << op << " for label comparsion" << slog::endl; return false; } } -bool dynamic_vino_lib::BaseFilter::floatCompare( - float candidate, const std::string & op, float target) +bool dynamic_vino_lib::BaseFilter::floatCompare(float candidate, const std::string& op, float target) { - if (!op.compare("<=")) { + if (!op.compare("<=")) + { return candidate <= target; - } else if (!op.compare(">=")) { + } + else if (!op.compare(">=")) + { return candidate >= target; - } else if (!op.compare("<")) { + } + else if (!op.compare("<")) + { return candidate < target; - } else if (!op.compare(">")) { + } + else if (!op.compare(">")) + { return candidate > target; - } else { + } + else + { slog::err << "Invalid operator " << op << " for confidence comparsion" << slog::endl; return false; } } -float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) +float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string& candidate) { float result = 0; - try { + try + { result = std::stof(candidate); - } catch (...) { + } + catch (...) 
+ { slog::err << "Failed when converting string " << candidate << " to float" << slog::endl; } return result; } -std::vector dynamic_vino_lib::BaseFilter::split( - const std::string & filter_conditions) +std::vector dynamic_vino_lib::BaseFilter::split(const std::string& filter_conditions) { std::vector seperators; seperators.insert(seperators.end(), relation_operators_.begin(), relation_operators_.end()); @@ -155,11 +183,15 @@ std::vector dynamic_vino_lib::BaseFilter::split( seperators.push_back(")"); std::vector infix_conditions; int last_pos = 0, pos = 0; - for (pos = 0; pos < filter_conditions.length(); pos++) { - for (auto sep : seperators) { - if (!sep.compare(filter_conditions.substr(pos, sep.length()))) { + for (pos = 0; pos < filter_conditions.length(); pos++) + { + for (auto sep : seperators) + { + if (!sep.compare(filter_conditions.substr(pos, sep.length()))) + { std::string elem = filter_conditions.substr(last_pos, pos - last_pos); - if (!elem.empty()) { + if (!elem.empty()) + { infix_conditions.push_back(elem); } elem = filter_conditions.substr(pos, sep.length()); @@ -170,49 +202,63 @@ std::vector dynamic_vino_lib::BaseFilter::split( } } } - if (last_pos != pos) { + if (last_pos != pos) + { infix_conditions.push_back(filter_conditions.substr(last_pos, pos - last_pos)); } return infix_conditions; } -void dynamic_vino_lib::BaseFilter::infixToSuffix( - std::vector & infix_conditions) +void dynamic_vino_lib::BaseFilter::infixToSuffix(std::vector& infix_conditions) { std::stack operator_stack; - for (auto elem : infix_conditions) { - if (!elem.compare("(")) { + for (auto elem : infix_conditions) + { + if (!elem.compare("(")) + { operator_stack.push(elem); - } else if (!elem.compare(")")) { - while (!operator_stack.empty() && operator_stack.top().compare("(")) { + } + else if (!elem.compare(")")) + { + while (!operator_stack.empty() && operator_stack.top().compare("(")) + { suffix_conditons_.push_back(operator_stack.top()); operator_stack.pop(); } - if 
(operator_stack.empty()) { + if (operator_stack.empty()) + { slog::err << "Brackets mismatch in filter_conditions!" << slog::endl; } operator_stack.pop(); - } else if (isRelationOperator(elem) || isLogicOperator(elem)) { - while (!operator_stack.empty() && isPriorTo(operator_stack.top(), elem)) { + } + else if (isRelationOperator(elem) || isLogicOperator(elem)) + { + while (!operator_stack.empty() && isPriorTo(operator_stack.top(), elem)) + { suffix_conditons_.push_back(operator_stack.top()); operator_stack.pop(); } operator_stack.push(elem); - } else { + } + else + { suffix_conditons_.push_back(elem); } } - while (!operator_stack.empty()) { + while (!operator_stack.empty()) + { suffix_conditons_.push_back(operator_stack.top()); operator_stack.pop(); } } -std::string dynamic_vino_lib::BaseFilter::strip(const std::string & str) +std::string dynamic_vino_lib::BaseFilter::strip(const std::string& str) { std::string stripped_string = ""; - for (auto character : str) { - if (character != ' ') { + for (auto character : str) + { + if (character != ' ') + { stripped_string += character; } } diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index 33e4ac01..10058cd9 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -24,7 +24,7 @@ #include "dynamic_vino_lib/inferences/base_inference.h" // Result -dynamic_vino_lib::Result::Result(const cv::Rect & location) +dynamic_vino_lib::Result::Result(const cv::Rect& location) { location_ = location; } @@ -41,10 +41,12 @@ void dynamic_vino_lib::BaseInference::loadEngine(const std::shared_ptrgetRequest() == nullptr) { + if (engine_->getRequest() == nullptr) + { return false; } - if (!enqueued_frames_) { + if (!enqueued_frames_) + { return false; } enqueued_frames_ = 0; @@ -55,10 +57,12 @@ bool dynamic_vino_lib::BaseInference::submitRequest() bool dynamic_vino_lib::BaseInference::SynchronousRequest() { 
- if (engine_->getRequest() == nullptr) { + if (engine_->getRequest() == nullptr) + { return false; } - if (!enqueued_frames_) { + if (!enqueued_frames_) + { return false; } enqueued_frames_ = 0; @@ -69,7 +73,8 @@ bool dynamic_vino_lib::BaseInference::SynchronousRequest() bool dynamic_vino_lib::BaseInference::fetchResults() { - if (results_fetched_) { + if (results_fetched_) + { return false; } results_fetched_ = true; @@ -79,7 +84,8 @@ bool dynamic_vino_lib::BaseInference::fetchResults() void dynamic_vino_lib::BaseInference::addCandidatedModel(std::shared_ptr model) { slog::info << "TESTING in addCandidatedModel()" << slog::endl; - if (model != nullptr) { + if (model != nullptr) + { slog::info << "adding new Model Candidate..." << slog::endl; candidated_models_.push_back(model); } diff --git a/dynamic_vino_lib/src/inferences/base_reidentification.cpp b/dynamic_vino_lib/src/inferences/base_reidentification.cpp index 36ded051..1e2c0a8d 100644 --- a/dynamic_vino_lib/src/inferences/base_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/base_reidentification.cpp @@ -27,32 +27,35 @@ #include "dynamic_vino_lib/slog.h" // Tracker -dynamic_vino_lib::Tracker::Tracker( - int max_record_size, double same_track_thresh, double new_track_thresh) -: max_record_size_(max_record_size), - same_track_thresh_(same_track_thresh), - new_track_thresh_(new_track_thresh) {} +dynamic_vino_lib::Tracker::Tracker(int max_record_size, double same_track_thresh, double new_track_thresh) + : max_record_size_(max_record_size), same_track_thresh_(same_track_thresh), new_track_thresh_(new_track_thresh) +{ +} -int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & feature) +int dynamic_vino_lib::Tracker::processNewTrack(const std::vector& feature) { int most_similar_id; double similarity = findMostSimilarTrack(feature, most_similar_id); - if (similarity > same_track_thresh_) { + if (similarity > same_track_thresh_) + { updateMatchTrack(most_similar_id, feature); - } else if 
(similarity < new_track_thresh_) { + } + else if (similarity < new_track_thresh_) + { most_similar_id = addNewTrack(feature); } return most_similar_id; } -double dynamic_vino_lib::Tracker::findMostSimilarTrack( - const std::vector & feature, int & most_similar_id) +double dynamic_vino_lib::Tracker::findMostSimilarTrack(const std::vector& feature, int& most_similar_id) { double max_similarity = 0; most_similar_id = -1; - for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) { + for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) + { double sim = calcSimilarity(feature, iter->second.feature); - if (sim > max_similarity) { + if (sim > max_similarity) + { max_similarity = sim; most_similar_id = iter->first; } @@ -60,37 +63,43 @@ double dynamic_vino_lib::Tracker::findMostSimilarTrack( return max_similarity; } -double dynamic_vino_lib::Tracker::calcSimilarity( - const std::vector & feature_a, const std::vector & feature_b) +double dynamic_vino_lib::Tracker::calcSimilarity(const std::vector& feature_a, + const std::vector& feature_b) { - if (feature_a.size() != feature_b.size()) { - slog::err << "cosine similarity can't be called for vectors of different lengths: " << - "feature_a size = " << std::to_string(feature_a.size()) << - "feature_b size = " << std::to_string(feature_b.size()) << slog::endl; + if (feature_a.size() != feature_b.size()) + { + slog::err << "cosine similarity can't be called for vectors of different lengths: " + << "feature_a size = " << std::to_string(feature_a.size()) + << "feature_b size = " << std::to_string(feature_b.size()) << slog::endl; } float mul_sum, denom_a, denom_b, value_a, value_b; mul_sum = denom_a = denom_b = value_a = value_b = 0; - for (auto i = 0; i < feature_a.size(); i++) { + for (auto i = 0; i < feature_a.size(); i++) + { value_a = feature_a[i]; value_b = feature_b[i]; mul_sum += value_a * value_b; denom_a += value_a * value_a; denom_b += value_b * value_b; } - if 
(denom_a == 0 || denom_b == 0) { + if (denom_a == 0 || denom_b == 0) + { slog::err << "cosine similarity is not defined whenever one or both " - "input vectors are zero-vectors." << slog::endl; + "input vectors are zero-vectors." + << slog::endl; } return mul_sum / (sqrt(denom_a) * sqrt(denom_b)); } -void dynamic_vino_lib::Tracker::updateMatchTrack( - int track_id, const std::vector & feature) +void dynamic_vino_lib::Tracker::updateMatchTrack(int track_id, const std::vector& feature) { - if (recorded_tracks_.find(track_id) != recorded_tracks_.end()) { + if (recorded_tracks_.find(track_id) != recorded_tracks_.end()) + { recorded_tracks_[track_id].feature.assign(feature.begin(), feature.end()); recorded_tracks_[track_id].lastest_update_time = getCurrentTime(); - } else { + } + else + { slog::err << "updating a non-existing track." << slog::endl; } } @@ -100,8 +109,10 @@ void dynamic_vino_lib::Tracker::removeEarlestTrack() std::lock_guard lk(tracks_mtx_); int64_t earlest_time = LONG_MAX; auto remove_iter = recorded_tracks_.begin(); - for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) { - if (iter->second.lastest_update_time < earlest_time) { + for (auto iter = recorded_tracks_.begin(); iter != recorded_tracks_.end(); iter++) + { + if (iter->second.lastest_update_time < earlest_time) + { earlest_time = iter->second.lastest_update_time; remove_iter = iter; } @@ -109,10 +120,10 @@ void dynamic_vino_lib::Tracker::removeEarlestTrack() recorded_tracks_.erase(remove_iter); } - -int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) +int dynamic_vino_lib::Tracker::addNewTrack(const std::vector& feature) { - if (recorded_tracks_.size() >= max_record_size_) { + if (recorded_tracks_.size() >= max_record_size_) + { std::thread remove_thread(std::bind(&Tracker::removeEarlestTrack, this)); remove_thread.detach(); } @@ -127,22 +138,23 @@ int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) int64_t 
dynamic_vino_lib::Tracker::getCurrentTime() { - auto tp = std::chrono::time_point_cast( - std::chrono::system_clock::now()); + auto tp = std::chrono::time_point_cast(std::chrono::system_clock::now()); return static_cast(tp.time_since_epoch().count()); } bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) { std::ofstream outfile(filepath); - if (!outfile.is_open()) { + if (!outfile.is_open()) + { slog::err << "file not exists in file path: " << filepath << slog::endl; return false; } - for (auto record : recorded_tracks_) { - outfile << record.first << " " << - record.second.lastest_update_time << " "; - for (auto elem : record.second.feature) { + for (auto record : recorded_tracks_) + { + outfile << record.first << " " << record.second.lastest_update_time << " "; + for (auto elem : record.second.feature) + { outfile << elem << " "; } outfile << "\n"; @@ -155,17 +167,20 @@ bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) bool dynamic_vino_lib::Tracker::loadTracksFromFile(std::string filepath) { std::ifstream infile(filepath); - if (!infile.is_open()) { + if (!infile.is_open()) + { slog::err << "file not exists in file path: " << filepath << slog::endl; return false; } recorded_tracks_.clear(); - while (!infile.eof()) { + while (!infile.eof()) + { int track_id; int64_t lastest_update_time; std::vector feature; infile >> track_id >> lastest_update_time; - for (int num = 0; num < 256; num++) { + for (int num = 0; num < 256; num++) + { float elem; infile >> elem; feature.push_back(elem); diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp index 3a870cdb..54ef3d43 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/dynamic_vino_lib/src/inferences/emotions_detection.cpp @@ -22,43 +22,38 @@ #include #include - #include +#include #include "dynamic_vino_lib/inferences/emotions_detection.h" #include "dynamic_vino_lib/outputs/base_output.h" 
#include "dynamic_vino_lib/slog.h" // EmotionsResult -dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect& location) - : Result(location) +dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect& location) : Result(location) { } // Emotions Detection -dynamic_vino_lib::EmotionsDetection::EmotionsDetection() - : dynamic_vino_lib::BaseInference() +dynamic_vino_lib::EmotionsDetection::EmotionsDetection() : dynamic_vino_lib::BaseInference() { } dynamic_vino_lib::EmotionsDetection::~EmotionsDetection() = default; -void dynamic_vino_lib::EmotionsDetection::loadNetwork( - const std::shared_ptr network) +void dynamic_vino_lib::EmotionsDetection::loadNetwork(const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::EmotionsDetection::enqueue( - const cv::Mat& frame, const cv::Rect& input_frame_loc) +bool dynamic_vino_lib::EmotionsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), - valid_model_->getInputName()); + bool succeed = dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); if (!succeed) { slog::err << "Failed enqueue Emotion frame." 
<< slog::endl; @@ -78,21 +73,19 @@ bool dynamic_vino_lib::EmotionsDetection::submitRequest() bool dynamic_vino_lib::EmotionsDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) + return false; int label_length = static_cast(valid_model_->getLabels().size()); std::string output_name = valid_model_->getOutputName(); - InferenceEngine::Blob::Ptr emotions_blob = - getEngine()->getRequest()->GetBlob(output_name); + InferenceEngine::Blob::Ptr emotions_blob = getEngine()->getRequest()->GetBlob(output_name); /** emotions vector must have the same size as number of channels in model output. Default output format is NCHW so we check index 1 */ int64 num_of_channels = emotions_blob->getTensorDesc().getDims().at(1); if (num_of_channels != label_length) { - slog::err << "Output size (" << num_of_channels - << ") of the Emotions Recognition network is not equal " - << "to used emotions vector size (" << label_length << ")" - << slog::endl; + slog::err << "Output size (" << num_of_channels << ") of the Emotions Recognition network is not equal " + << "to used emotions vector size (" << label_length << ")" << slog::endl; throw std::logic_error("Output size (" + std::to_string(num_of_channels) + ") of the Emotions Recognition network is not equal " "to used emotions vector size (" + @@ -105,9 +98,7 @@ bool dynamic_vino_lib::EmotionsDetection::fetchResults() for (unsigned int idx = 0; idx < results_.size(); ++idx) { auto output_idx_pos = emotions_values + idx; - int64 max_prob_emotion_idx = - std::max_element(output_idx_pos, output_idx_pos + label_length) - - output_idx_pos; + int64 max_prob_emotion_idx = std::max_element(output_idx_pos, output_idx_pos + label_length) - output_idx_pos; results_[idx].label_ = valid_model_->getLabels()[max_prob_emotion_idx]; } @@ -119,8 +110,7 @@ int dynamic_vino_lib::EmotionsDetection::getResultsLength() const return static_cast(results_.size()); } -const 
dynamic_vino_lib::Result* -dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -130,8 +120,7 @@ const std::string dynamic_vino_lib::EmotionsDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::EmotionsDetection::observeOutput( - const std::shared_ptr& output) +void dynamic_vino_lib::EmotionsDetection::observeOutput(const std::shared_ptr& output) { if (output != nullptr) { @@ -139,15 +128,17 @@ void dynamic_vino_lib::EmotionsDetection::observeOutput( } } -const std::vector dynamic_vino_lib::EmotionsDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::EmotionsDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Emotion detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Emotion detection does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/dynamic_vino_lib/src/inferences/face_detection.cpp index 60cad5a6..7fdb62a7 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/dynamic_vino_lib/src/inferences/face_detection.cpp @@ -29,16 +29,12 @@ #include "dynamic_vino_lib/slog.h" // FaceDetectionResult -dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) -: ObjectDetectionResult(location) +dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect& location) : ObjectDetectionResult(location) { } // FaceDetection -dynamic_vino_lib::FaceDetection::FaceDetection( - bool enable_roi_constraint, - double show_output_thresh) -: ObjectDetection(enable_roi_constraint, show_output_thresh) +dynamic_vino_lib::FaceDetection::FaceDetection(bool enable_roi_constraint, double show_output_thresh) + : ObjectDetection(enable_roi_constraint, show_output_thresh) { } - diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/dynamic_vino_lib/src/inferences/face_reidentification.cpp index b3e18f88..c8f3d83b 100644 --- a/dynamic_vino_lib/src/inferences/face_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/face_reidentification.cpp @@ -25,33 +25,31 @@ #include "dynamic_vino_lib/slog.h" // FaceReidentificationResult -dynamic_vino_lib::FaceReidentificationResult::FaceReidentificationResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::FaceReidentificationResult::FaceReidentificationResult(const cv::Rect& location) : Result(location) +{ +} // FaceReidentification -dynamic_vino_lib::FaceReidentification::FaceReidentification(double match_thresh) -: dynamic_vino_lib::BaseInference() 
+dynamic_vino_lib::FaceReidentification::FaceReidentification(double match_thresh) : dynamic_vino_lib::BaseInference() { face_tracker_ = std::make_shared(1000, match_thresh, 0.3); } dynamic_vino_lib::FaceReidentification::~FaceReidentification() = default; void dynamic_vino_lib::FaceReidentification::loadNetwork( - const std::shared_ptr network) + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::FaceReidentification::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::FaceReidentification::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -68,20 +66,27 @@ bool dynamic_vino_lib::FaceReidentification::submitRequest() bool dynamic_vino_lib::FaceReidentification::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); + const float* output_values = request->GetBlob(output)->buffer().as(); int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; - for (int i = 0; i < getResultsLength(); i++) { - std::vector new_face = std::vector( - output_values + result_length * i, output_values + result_length * (i + 1)); + for (int i = 0; i < getResultsLength(); i++) + { + std::vector new_face = + std::vector(output_values + result_length * i, output_values + result_length * (i + 1)); 
std::string face_id = "No." + std::to_string(face_tracker_->processNewTrack(new_face)); results_[i].face_id_ = face_id; found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) + { + results_.clear(); + } return true; } @@ -90,8 +95,7 @@ int dynamic_vino_lib::FaceReidentification::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const { return &(results_[idx]); } @@ -101,23 +105,25 @@ const std::string dynamic_vino_lib::FaceReidentification::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::FaceReidentification::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::FaceReidentification::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::FaceReidentification::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::FaceReidentification::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Face reidentification does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Face reidentification does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp index ee647674..9b5e4d06 100644 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp @@ -26,37 +26,33 @@ #include "dynamic_vino_lib/outputs/base_output.h" // HeadPoseResult -dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect& location) - : Result(location) +dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect& location) : Result(location) { } // Head Pose Detection -dynamic_vino_lib::HeadPoseDetection::HeadPoseDetection() - : dynamic_vino_lib::BaseInference() +dynamic_vino_lib::HeadPoseDetection::HeadPoseDetection() : dynamic_vino_lib::BaseInference() { } dynamic_vino_lib::HeadPoseDetection::~HeadPoseDetection() = default; -void dynamic_vino_lib::HeadPoseDetection::loadNetwork( - std::shared_ptr network) +void dynamic_vino_lib::HeadPoseDetection::loadNetwork(std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::HeadPoseDetection::enqueue( - const cv::Mat& frame, const cv::Rect& input_frame_loc) +bool dynamic_vino_lib::HeadPoseDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), - valid_model_->getInputName()); - if (!succeed) return false; + bool succeed = dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, getResultsLength(), + valid_model_->getInputName()); + if (!succeed) + return false; Result r(input_frame_loc); results_.emplace_back(r); return true; @@ -70,14 
+66,12 @@ bool dynamic_vino_lib::HeadPoseDetection::submitRequest() bool dynamic_vino_lib::HeadPoseDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) + return false; auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr angle_r = - request->GetBlob(valid_model_->getOutputOutputAngleR()); - InferenceEngine::Blob::Ptr angle_p = - request->GetBlob(valid_model_->getOutputOutputAngleP()); - InferenceEngine::Blob::Ptr angle_y = - request->GetBlob(valid_model_->getOutputOutputAngleY()); + InferenceEngine::Blob::Ptr angle_r = request->GetBlob(valid_model_->getOutputOutputAngleR()); + InferenceEngine::Blob::Ptr angle_p = request->GetBlob(valid_model_->getOutputOutputAngleP()); + InferenceEngine::Blob::Ptr angle_y = request->GetBlob(valid_model_->getOutputOutputAngleY()); for (int i = 0; i < getResultsLength(); ++i) { @@ -93,8 +87,7 @@ int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -104,8 +97,7 @@ const std::string dynamic_vino_lib::HeadPoseDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::HeadPoseDetection::observeOutput( - const std::shared_ptr& output) +void dynamic_vino_lib::HeadPoseDetection::observeOutput(const std::shared_ptr& output) { if (output != nullptr) { @@ -113,15 +105,17 @@ void dynamic_vino_lib::HeadPoseDetection::observeOutput( } } -const std::vector dynamic_vino_lib::HeadPoseDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::HeadPoseDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << 
"Headpose detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Headpose detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp index 4f86e78d..81e40ef7 100644 --- a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp +++ b/dynamic_vino_lib/src/inferences/landmarks_detection.cpp @@ -24,30 +24,29 @@ #include "dynamic_vino_lib/outputs/base_output.h" // LandmarksDetectionResult -dynamic_vino_lib::LandmarksDetectionResult::LandmarksDetectionResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::LandmarksDetectionResult::LandmarksDetectionResult(const cv::Rect& location) : Result(location) +{ +} // LandmarksDetection -dynamic_vino_lib::LandmarksDetection::LandmarksDetection() -: dynamic_vino_lib::BaseInference() {} +dynamic_vino_lib::LandmarksDetection::LandmarksDetection() : dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::LandmarksDetection::~LandmarksDetection() = default; -void dynamic_vino_lib::LandmarksDetection::loadNetwork( - const std::shared_ptr network) +void dynamic_vino_lib::LandmarksDetection::loadNetwork(const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::LandmarksDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::LandmarksDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 
0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -64,16 +63,21 @@ bool dynamic_vino_lib::LandmarksDetection::submitRequest() bool dynamic_vino_lib::LandmarksDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); + const float* output_values = request->GetBlob(output)->buffer().as(); int result_length = request->GetBlob(output)->getTensorDesc().getDims()[1]; - for (int i = 0; i < getResultsLength(); i++) { - std::vector coordinates = std::vector( - output_values + result_length * i, output_values + result_length * (i + 1)); - for (int j = 0; j < result_length; j += 2) { + for (int i = 0; i < getResultsLength(); i++) + { + std::vector coordinates = + std::vector(output_values + result_length * i, output_values + result_length * (i + 1)); + for (int j = 0; j < result_length; j += 2) + { cv::Rect rect = results_[i].getLocation(); int col = static_cast(coordinates[j] * rect.width); int row = static_cast(coordinates[j + 1] * rect.height); @@ -82,7 +86,10 @@ bool dynamic_vino_lib::LandmarksDetection::fetchResults() } found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) + { + results_.clear(); + } return true; } @@ -91,8 +98,7 @@ int dynamic_vino_lib::LandmarksDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -102,24 +108,25 @@ const 
std::string dynamic_vino_lib::LandmarksDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::LandmarksDetection::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::LandmarksDetection::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } - -const std::vector dynamic_vino_lib::LandmarksDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::LandmarksDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Landmarks detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Landmarks detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp index 4528a182..76e43e72 100644 --- a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp +++ b/dynamic_vino_lib/src/inferences/license_plate_detection.cpp @@ -25,17 +25,18 @@ #include "dynamic_vino_lib/slog.h" // LicensePlateDetectionResult -dynamic_vino_lib::LicensePlateDetectionResult::LicensePlateDetectionResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::LicensePlateDetectionResult::LicensePlateDetectionResult(const cv::Rect& location) : Result(location) +{ +} // LicensePlateDetection -dynamic_vino_lib::LicensePlateDetection::LicensePlateDetection() -: dynamic_vino_lib::BaseInference() {} +dynamic_vino_lib::LicensePlateDetection::LicensePlateDetection() : dynamic_vino_lib::BaseInference() +{ +} 
dynamic_vino_lib::LicensePlateDetection::~LicensePlateDetection() = default; void dynamic_vino_lib::LicensePlateDetection::loadNetwork( - const std::shared_ptr network) + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); @@ -43,24 +44,22 @@ void dynamic_vino_lib::LicensePlateDetection::loadNetwork( void dynamic_vino_lib::LicensePlateDetection::fillSeqBlob() { - InferenceEngine::Blob::Ptr seq_blob = getEngine()->getRequest()->GetBlob( - valid_model_->getSeqInputName()); + InferenceEngine::Blob::Ptr seq_blob = getEngine()->getRequest()->GetBlob(valid_model_->getSeqInputName()); int max_sequence_size = seq_blob->getTensorDesc().getDims()[0]; // second input is sequence, which is some relic from the training // it should have the leading 0.0f and rest 1.0f - float * blob_data = seq_blob->buffer().as(); + float* blob_data = seq_blob->buffer().as(); blob_data[0] = 0.0f; std::fill(blob_data + 1, blob_data + max_sequence_size, 1.0f); } -bool dynamic_vino_lib::LicensePlateDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::LicensePlateDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -78,16 +77,22 @@ bool dynamic_vino_lib::LicensePlateDetection::submitRequest() bool dynamic_vino_lib::LicensePlateDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * 
output_values = request->GetBlob(output)->buffer().as(); - for (int i = 0; i < getResultsLength(); i++) { + const float* output_values = request->GetBlob(output)->buffer().as(); + for (int i = 0; i < getResultsLength(); i++) + { std::string license = ""; int max_size = valid_model_->getMaxSequenceSize(); - for (int j = 0; j < max_size; j++) { - if (output_values[i * max_size + j] == -1) { + for (int j = 0; j < max_size; j++) + { + if (output_values[i * max_size + j] == -1) + { break; } license += licenses_[output_values[i * max_size + j]]; @@ -95,7 +100,10 @@ bool dynamic_vino_lib::LicensePlateDetection::fetchResults() results_[i].license_ = license; found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) + { + results_.clear(); + } return true; } @@ -104,8 +112,7 @@ int dynamic_vino_lib::LicensePlateDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -115,23 +122,25 @@ const std::string dynamic_vino_lib::LicensePlateDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::LicensePlateDetection::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::LicensePlateDetection::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::LicensePlateDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::LicensePlateDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "License plate detection does not support filtering now! 
" << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "License plate detection does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/dynamic_vino_lib/src/inferences/object_detection.cpp index b45c65bd..46f0559d 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection.cpp @@ -27,17 +27,15 @@ #include "dynamic_vino_lib/outputs/base_output.h" #include "dynamic_vino_lib/slog.h" // ObjectDetectionResult -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) -: Result(location) +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect& location) : Result(location) { } // ObjectDetection -dynamic_vino_lib::ObjectDetection::ObjectDetection( - bool enable_roi_constraint, - double show_output_thresh) -: show_output_thresh_(show_output_thresh), - enable_roi_constraint_(enable_roi_constraint), dynamic_vino_lib::BaseInference() +dynamic_vino_lib::ObjectDetection::ObjectDetection(bool enable_roi_constraint, double show_output_thresh) + : show_output_thresh_(show_output_thresh) + , enable_roi_constraint_(enable_roi_constraint) + , dynamic_vino_lib::BaseInference() { result_filter_ = std::make_shared(); result_filter_->init(); @@ -45,28 +43,28 @@ dynamic_vino_lib::ObjectDetection::ObjectDetection( dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; -void dynamic_vino_lib::ObjectDetection::loadNetwork( - std::shared_ptr network) +void dynamic_vino_lib::ObjectDetection::loadNetwork(std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool 
dynamic_vino_lib::ObjectDetection::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::ObjectDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (valid_model_ == nullptr || getEngine() == nullptr) { + if (valid_model_ == nullptr || getEngine() == nullptr) + { return false; } - if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { - slog::warn << "Number of " << getName() << "input more than maximum(" << - max_batch_size_ << ") processed by inference" << slog::endl; + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) + { + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ + << ") processed by inference" << slog::endl; return false; } - if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) + { return false; } @@ -81,14 +79,15 @@ bool dynamic_vino_lib::ObjectDetection::enqueue( bool dynamic_vino_lib::ObjectDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) { + if (!can_fetch) + { return false; } results_.clear(); - return (valid_model_ != nullptr) && valid_model_->fetchResults( - getEngine(), results_, show_output_thresh_, enable_roi_constraint_); + return (valid_model_ != nullptr) && + valid_model_->fetchResults(getEngine(), results_, show_output_thresh_, enable_roi_constraint_); } int dynamic_vino_lib::ObjectDetection::getResultsLength() const @@ -96,8 +95,7 @@ int dynamic_vino_lib::ObjectDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::ObjectDetection::Result * -dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const +const dynamic_vino_lib::ObjectDetection::Result* dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -107,20 +105,22 @@ const std::string dynamic_vino_lib::ObjectDetection::getName() 
const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::ObjectDetection::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::ObjectDetection::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::ObjectDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!result_filter_->isValidFilterConditions(filter_conditions)) { + if (!result_filter_->isValidFilterConditions(filter_conditions)) + { std::vector filtered_rois; - for (auto result : results_) { + for (auto result : results_) + { filtered_rois.push_back(result.getLocation()); } return filtered_rois; @@ -131,7 +131,9 @@ const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( } // ObjectDetectionResultFilter -dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} +dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() +{ +} void dynamic_vino_lib::ObjectDetectionResultFilter::init() { @@ -139,45 +141,42 @@ void dynamic_vino_lib::ObjectDetectionResultFilter::init() key_to_function_.insert(std::make_pair("confidence", isValidConfidence)); } -void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults( - const std::vector & results) +void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults(const std::vector& results) { results_ = results; } -std::vector -dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() +std::vector dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() { std::vector locations; - for (auto result : results_) { - if (isValidResult(result)) { + for (auto result : results_) + { + if (isValidResult(result)) + { locations.push_back(result.getLocation()); } } return locations; } -bool 
dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel( - const Result & result, const std::string & op, const std::string & target) +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel(const Result& result, const std::string& op, + const std::string& target) { return stringCompare(result.getLabel(), op, target); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidConfidence( - const Result & result, const std::string & op, const std::string & target) +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidConfidence(const Result& result, const std::string& op, + const std::string& target) { return floatCompare(result.getConfidence(), op, stringToFloat(target)); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult( - const Result & result) +bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult(const Result& result) { ISVALIDRESULT(key_to_function_, result); } -double dynamic_vino_lib::ObjectDetection::calcIoU( - const cv::Rect & box_1, - const cv::Rect & box_2) +double dynamic_vino_lib::ObjectDetection::calcIoU(const cv::Rect& box_1, const cv::Rect& box_2) { cv::Rect i = box_1 & box_2; cv::Rect u = box_1 | box_2; diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index f28b7e2f..0dab5d42 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -28,21 +28,19 @@ #include "dynamic_vino_lib/slog.h" // ObjectSegmentationResult -dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect & location) -: Result(location) +dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect& location) : Result(location) { } // ObjectSegmentation dynamic_vino_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) -: show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() + : 
show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() { } dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; -void dynamic_vino_lib::ObjectSegmentation::loadNetwork( - const std::shared_ptr network) +void dynamic_vino_lib::ObjectSegmentation::loadNetwork(const std::shared_ptr network) { slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; valid_model_ = network; @@ -53,16 +51,14 @@ void dynamic_vino_lib::ObjectSegmentation::loadNetwork( * Deprecated! * This function only support OpenVINO version <=2018R5 */ -bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (width_ == 0 && height_ == 0) { + if (width_ == 0 && height_ == 0) + { width_ = frame.cols; height_ = frame.rows; } - if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, - valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -72,27 +68,29 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( return true; } -bool dynamic_vino_lib::ObjectSegmentation::enqueue( - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::ObjectSegmentation::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (width_ == 0 && height_ == 0) { + if (width_ == 0 && height_ == 0) + { width_ = frame.cols; height_ = frame.rows; } - if (valid_model_ == nullptr || getEngine() == nullptr) { + if (valid_model_ == nullptr || getEngine() == nullptr) + { throw std::logic_error("Model or Engine is not set correctly!"); return false; } - if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) { - slog::warn << "Number of " << getName() << "input more than maximum(" << - getMaxBatchSize()<< ") 
processed by inference" << slog::endl; + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) + { + slog::warn << "Number of " << getName() << "input more than maximum(" << getMaxBatchSize() + << ") processed by inference" << slog::endl; return false; } - if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) { + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) + { return false; } @@ -108,7 +106,8 @@ bool dynamic_vino_lib::ObjectSegmentation::submitRequest() bool dynamic_vino_lib::ObjectSegmentation::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) { + if (!can_fetch) + { return false; } bool found_result = false; @@ -119,22 +118,22 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() std::string mask_output = valid_model_->getOutputName("masks"); const InferenceEngine::Blob::Ptr do_blob = request->GetBlob(detection_output.c_str()); - const auto do_data = do_blob->buffer().as(); + const auto do_data = do_blob->buffer().as(); const auto masks_blob = request->GetBlob(mask_output.c_str()); - const auto masks_data = masks_blob->buffer().as(); + const auto masks_data = masks_blob->buffer().as(); const size_t output_w = masks_blob->getTensorDesc().getDims().at(3); const size_t output_h = masks_blob->getTensorDesc().getDims().at(2); - const size_t output_des = masks_blob-> getTensorDesc().getDims().at(1); - const size_t output_extra = masks_blob-> getTensorDesc().getDims().at(0); + const size_t output_des = masks_blob->getTensorDesc().getDims().at(1); + const size_t output_extra = masks_blob->getTensorDesc().getDims().at(0); - slog::debug << "output w " << output_w<< slog::endl; + slog::debug << "output w " << output_w << slog::endl; slog::debug << "output h " << output_h << slog::endl; slog::debug << "output description " << output_des << slog::endl; slog::debug << "output extra " << output_extra << slog::endl; - const float * detections = 
request->GetBlob(detection_output)->buffer().as(); - std::vector &labels = valid_model_->getLabels(); - slog::debug << "label size " <GetBlob(detection_output)->buffer().as(); + std::vector& labels = valid_model_->getLabels(); + slog::debug << "label size " << labels.size() << slog::endl; cv::Mat inImg, resImg, maskImg(output_h, output_w, CV_8UC3); cv::Mat colored_mask(output_h, output_w, CV_8UC3); @@ -146,17 +145,21 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() { std::size_t classId = 0; float maxProb = -1.0f; - if (output_des < 2) { // assume the output is already ArgMax'ed + if (output_des < 2) + { // assume the output is already ArgMax'ed classId = static_cast(detections[rowId * output_w + colId]); - for (int ch = 0; ch < colored_mask.channels();++ch){ + for (int ch = 0; ch < colored_mask.channels(); ++ch) + { colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; } - //classId = static_cast(predictions[rowId * output_w + colId]); - } else { + // classId = static_cast(predictions[rowId * output_w + colId]); + } + else + { for (int chId = 0; chId < output_des; ++chId) { - float prob = detections[chId * output_h * output_w + rowId * output_w+ colId]; - //float prob = predictions[chId * output_h * output_w + rowId * output_w+ colId]; + float prob = detections[chId * output_h * output_w + rowId * output_w + colId]; + // float prob = predictions[chId * output_h * output_w + rowId * output_w+ colId]; if (prob > maxProb) { classId = chId; @@ -170,11 +173,13 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() cv::Vec3b color(distr(rng), distr(rng), distr(rng)); colors_.push_back(color); } - if(maxProb > 0.5){ - for (int ch = 0; ch < colored_mask.channels();++ch){ - colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + if (maxProb > 0.5) + { + for (int ch = 0; ch < colored_mask.channels(); ++ch) + { + colored_mask.at(rowId, colId)[ch] = colors_[classId][ch]; + } } - } } } } @@ -191,8 +196,7 @@ int 
dynamic_vino_lib::ObjectSegmentation::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const { return &(results_[idx]); } @@ -202,23 +206,25 @@ const std::string dynamic_vino_lib::ObjectSegmentation::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::ObjectSegmentation::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::ObjectSegmentation::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::ObjectSegmentation::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Object segmentation does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Object segmentation does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp index 9db8a3c8..24200cb9 100644 --- a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp @@ -25,30 +25,32 @@ #include "dynamic_vino_lib/slog.h" // PersonAttribsDetectionResult -dynamic_vino_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult(const cv::Rect& location) + : Result(location) +{ +} // PersonAttribsDetection dynamic_vino_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) -: attribs_confidence_(attribs_confidence), dynamic_vino_lib::BaseInference() {} + : attribs_confidence_(attribs_confidence), dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; void dynamic_vino_lib::PersonAttribsDetection::loadNetwork( - const std::shared_ptr network) + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::PersonAttribsDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::PersonAttribsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -88,7 
+90,10 @@ bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); slog::debug << "Analyzing Attributes Detection results..." << slog::endl; @@ -108,22 +113,26 @@ bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() auto bottom_values = bottomBlob->buffer().as(); int net_attrib_length = net_attributes_.size(); - for (int i = 0; i < getResultsLength(); i++) { + for (int i = 0; i < getResultsLength(); i++) + { results_[i].male_probability_ = attri_values[i * net_attrib_length]; results_[i].top_point_.x = top_values[i]; - results_[i].top_point_.y = top_values[i+1]; + results_[i].top_point_.y = top_values[i + 1]; results_[i].bottom_point_.x = bottom_values[i]; - results_[i].bottom_point_.y = bottom_values[i+1]; + results_[i].bottom_point_.y = bottom_values[i + 1]; std::string attrib = ""; - for (int j = 1; j < net_attrib_length; j++) { - attrib += (attri_values[i * net_attrib_length + j] > attribs_confidence_) ? - net_attributes_[j] + ", " : ""; + for (int j = 1; j < net_attrib_length; j++) + { + attrib += (attri_values[i * net_attrib_length + j] > attribs_confidence_) ? 
net_attributes_[j] + ", " : ""; } - results_[i].attributes_ = attrib; + results_[i].attributes_ = attrib; found_result = true; - } - if (!found_result) {results_.clear();} + } + if (!found_result) + { + results_.clear(); + } return true; } @@ -132,8 +141,7 @@ int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -143,23 +151,25 @@ const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::PersonAttribsDetection::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::PersonAttribsDetection::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::PersonAttribsDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::PersonAttribsDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Person attributes detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Person attributes detection does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/dynamic_vino_lib/src/inferences/person_reidentification.cpp index 9d96105d..772c63ed 100644 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ b/dynamic_vino_lib/src/inferences/person_reidentification.cpp @@ -25,33 +25,33 @@ #include "dynamic_vino_lib/slog.h" // PersonReidentificationResult -dynamic_vino_lib::PersonReidentificationResult::PersonReidentificationResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::PersonReidentificationResult::PersonReidentificationResult(const cv::Rect& location) + : Result(location) +{ +} // PersonReidentification dynamic_vino_lib::PersonReidentification::PersonReidentification(double match_thresh) -: dynamic_vino_lib::BaseInference() + : dynamic_vino_lib::BaseInference() { person_tracker_ = std::make_shared(1000, match_thresh, 0.3); } dynamic_vino_lib::PersonReidentification::~PersonReidentification() = default; void dynamic_vino_lib::PersonReidentification::loadNetwork( - const std::shared_ptr network) + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::PersonReidentification::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::PersonReidentification::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -68,20 +68,25 @@ bool 
dynamic_vino_lib::PersonReidentification::submitRequest() bool dynamic_vino_lib::PersonReidentification::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); - for (int i = 0; i < getResultsLength(); i++) { - std::vector new_person = std::vector( - output_values + 256 * i, output_values + 256 * i + 256); - std::string person_id = "No." + std::to_string( - person_tracker_->processNewTrack(new_person)); + const float* output_values = request->GetBlob(output)->buffer().as(); + for (int i = 0; i < getResultsLength(); i++) + { + std::vector new_person = std::vector(output_values + 256 * i, output_values + 256 * i + 256); + std::string person_id = "No." + std::to_string(person_tracker_->processNewTrack(new_person)); results_[i].person_id_ = person_id; found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) + { + results_.clear(); + } return true; } @@ -90,8 +95,7 @@ int dynamic_vino_lib::PersonReidentification::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::PersonReidentification::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::PersonReidentification::getLocationResult(int idx) const { return &(results_[idx]); } @@ -101,23 +105,25 @@ const std::string dynamic_vino_lib::PersonReidentification::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::PersonReidentification::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::PersonReidentification::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { 
output->accept(results_); } } -const std::vector dynamic_vino_lib::PersonReidentification::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::PersonReidentification::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Person reidentification does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Person reidentification does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp index 0c55aa80..bba84f33 100644 --- a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp +++ b/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp @@ -25,30 +25,31 @@ #include "dynamic_vino_lib/slog.h" // VehicleAttribsDetectionResult -dynamic_vino_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult( - const cv::Rect & location) -: Result(location) {} +dynamic_vino_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult(const cv::Rect& location) + : Result(location) +{ +} // VehicleAttribsDetection -dynamic_vino_lib::VehicleAttribsDetection::VehicleAttribsDetection() -: dynamic_vino_lib::BaseInference() {} +dynamic_vino_lib::VehicleAttribsDetection::VehicleAttribsDetection() : dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; void dynamic_vino_lib::VehicleAttribsDetection::loadNetwork( - const std::shared_ptr network) + const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool 
dynamic_vino_lib::VehicleAttribsDetection::enqueue( - const cv::Mat & frame, const cv::Rect & input_frame_loc) +bool dynamic_vino_lib::VehicleAttribsDetection::enqueue(const cv::Mat& frame, const cv::Rect& input_frame_loc) { - if (getEnqueuedNum() == 0) { + if (getEnqueuedNum() == 0) + { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; } @@ -65,16 +66,20 @@ bool dynamic_vino_lib::VehicleAttribsDetection::submitRequest() bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() { bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) {return false;} + if (!can_fetch) + { + return false; + } bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - //std::string color_name = valid_model_->getColorOutputName(); - //std::string type_name = valid_model_->getTypeOutputName(); + // std::string color_name = valid_model_->getColorOutputName(); + // std::string type_name = valid_model_->getTypeOutputName(); std::string color_name = valid_model_->getOutputName("color_output_"); std::string type_name = valid_model_->getOutputName("type_output_"); - const float * color_values = request->GetBlob(color_name)->buffer().as(); - const float * type_values = request->GetBlob(type_name)->buffer().as(); - for (int i = 0; i < getResultsLength(); i++) { + const float* color_values = request->GetBlob(color_name)->buffer().as(); + const float* type_values = request->GetBlob(type_name)->buffer().as(); + for (int i = 0; i < getResultsLength(); i++) + { auto color_id = std::max_element(color_values, color_values + 7) - color_values; auto type_id = std::max_element(type_values, type_values + 4) - type_values; color_values += 7; @@ -83,7 +88,10 @@ bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() 
results_[i].type_ = types_[type_id]; found_result = true; } - if (!found_result) {results_.clear();} + if (!found_result) + { + results_.clear(); + } return true; } @@ -92,8 +100,7 @@ int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const +const dynamic_vino_lib::Result* dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const { return &(results_[idx]); } @@ -103,23 +110,25 @@ const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const return valid_model_->getModelCategory(); } -void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( - const std::shared_ptr & output) +void dynamic_vino_lib::VehicleAttribsDetection::observeOutput(const std::shared_ptr& output) { - if (output != nullptr) { + if (output != nullptr) + { output->accept(results_); } } -const std::vector dynamic_vino_lib::VehicleAttribsDetection::getFilteredROIs( - const std::string filter_conditions) const +const std::vector +dynamic_vino_lib::VehicleAttribsDetection::getFilteredROIs(const std::string filter_conditions) const { - if (!filter_conditions.empty()) { - slog::err << "Vehicle attributes detection does not support filtering now! " << - "Filter conditions: " << filter_conditions << slog::endl; + if (!filter_conditions.empty()) + { + slog::err << "Vehicle attributes detection does not support filtering now! 
" + << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) { + for (auto res : results_) + { filtered_rois.push_back(res.getLocation()); } return filtered_rois; diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/dynamic_vino_lib/src/inputs/image_input.cpp index 72bc8c38..34c68153 100644 --- a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/dynamic_vino_lib/src/inputs/image_input.cpp @@ -32,11 +32,14 @@ Input::Image::Image(const std::string& file) bool Input::Image::initialize() { image_ = cv::imread(file_); - if (image_.data != NULL) { + if (image_.data != NULL) + { setInitStatus(true); setWidth((size_t)image_.cols); setHeight((size_t)image_.rows); - } else { + } + else + { setInitStatus(false); } return isInit(); @@ -53,12 +56,12 @@ bool Input::Image::read(cv::Mat* frame) return true; } -void Input::Image::config(const Input::Config & config) +void Input::Image::config(const Input::Config& config) { - if (config.path != "") { + if (config.path != "") + { file_.assign(config.path); initialize(); - slog::info << "Image Input device was reinitialized with new file:" << - config.path.c_str() << slog::endl; + slog::info << "Image Input device was reinitialized with new file:" << config.path.c_str() << slog::endl; } } diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/dynamic_vino_lib/src/inputs/image_topic.cpp index f1fddf68..8c4562cf 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/dynamic_vino_lib/src/inputs/image_topic.cpp @@ -27,9 +27,7 @@ #define INPUT_TOPIC "/camera/color/image_raw" - -Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node) -: node_(node) +Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node) : node_(node) { } @@ -37,24 +35,24 @@ bool Input::ImageTopic::initialize() { slog::debug << "before Image Topic init" << slog::endl; - if(node_ == nullptr){ + if (node_ == nullptr) + { throw std::runtime_error("Image Topic is not instancialized because of 
no parent node."); return false; } auto qos = rclcpp::QoS(rclcpp::KeepLast(1)).best_effort(); - sub_ = node_->create_subscription( - INPUT_TOPIC, qos, - std::bind(&ImageTopic::cb, this, std::placeholders::_1)); + sub_ = node_->create_subscription(INPUT_TOPIC, qos, + std::bind(&ImageTopic::cb, this, std::placeholders::_1)); return true; } - bool Input::ImageTopic::initialize(size_t width, size_t height) - { - slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)" - << " for Image Topic" << slog::endl; - return initialize(); - } +bool Input::ImageTopic::initialize(size_t width, size_t height) +{ + slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)" + << " for Image Topic" << slog::endl; + return initialize(); +} void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) { @@ -62,16 +60,17 @@ void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) setHeader(image_msg->header); image_ = cv_bridge::toCvCopy(image_msg, "bgr8")->image; - //Suppose Image Topic is sent within BGR order, so the below line would work. - //image_ = cv::Mat(image_msg->height, image_msg->width, CV_8UC3, + // Suppose Image Topic is sent within BGR order, so the below line would work. 
+ // image_ = cv::Mat(image_msg->height, image_msg->width, CV_8UC3, // const_cast(&image_msg->data[0]), image_msg->step); image_count_.increaseCounter(); } -bool Input::ImageTopic::read(cv::Mat * frame) +bool Input::ImageTopic::read(cv::Mat* frame) { - if (image_count_.get() < 0 || image_.empty()) { + if (image_count_.get() < 0 || image_.empty()) + { slog::debug << "No data received in CameraTopic instance" << slog::endl; return false; } diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/dynamic_vino_lib/src/inputs/realsense_camera.cpp index 1685da67..0b909d62 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera.cpp @@ -29,7 +29,8 @@ bool Input::RealSenseCamera::initialize() bool Input::RealSenseCamera::initialize(size_t width, size_t height) { - if (3 * width != 4 * height) { + if (3 * width != 4 * height) + { slog::err << "The aspect ratio must be 4:3 when using RealSense camera" << slog::endl; throw std::runtime_error("The aspect ratio must be 4:3 when using RealSense camera!"); return false; @@ -39,14 +40,13 @@ bool Input::RealSenseCamera::initialize(size_t width, size_t height) slog::info << "RealSense Serial number : " << devSerialNumber << slog::endl; cfg_.enable_device(devSerialNumber); - cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), static_cast(height), - RS2_FORMAT_BGR8, 30); + cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), static_cast(height), RS2_FORMAT_BGR8, 30); setInitStatus(pipe_.start(cfg_)); setWidth(width); setHeight(height); - //bypass RealSense's bug: several captured frames after HW is inited are with wrong data. + // bypass RealSense's bug: several captured frames after HW is inited are with wrong data. 
bypassFewFramesOnceInited(); return isInit(); @@ -54,19 +54,23 @@ bool Input::RealSenseCamera::initialize(size_t width, size_t height) bool Input::RealSenseCamera::read(cv::Mat* frame) { - if (!isInit()) { + if (!isInit()) + { return false; } - try { + try + { rs2::frameset data = pipe_.wait_for_frames(); // Wait for next set of frames from the camera rs2::frame color_frame; color_frame = data.get_color_frame(); cv::Mat(cv::Size(static_cast(getWidth()), static_cast(getHeight())), CV_8UC3, - const_cast(color_frame.get_data()), cv::Mat::AUTO_STEP) - .copyTo(*frame); - } catch (...) { + const_cast(color_frame.get_data()), cv::Mat::AUTO_STEP) + .copyTo(*frame); + } + catch (...) + { return false; } @@ -90,12 +94,14 @@ std::string Input::RealSenseCamera::getCameraSN() void Input::RealSenseCamera::bypassFewFramesOnceInited() { - if(!isInit() || !first_read_){ + if (!isInit() || !first_read_) + { return; } rs2::frameset frames; - for (int i = 0; i < 30; i++) { + for (int i = 0; i < 30; i++) + { frames = pipe_.wait_for_frames(); } first_read_ = false; diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index 3bee9c8a..09ad6d28 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -40,7 +40,8 @@ bool Input::StandardCamera::initialize(size_t width, size_t height) bool Input::StandardCamera::read(cv::Mat* frame) { - if (!isInit()) { + if (!isInit()) + { return false; } cap.grab(); @@ -51,24 +52,25 @@ bool Input::StandardCamera::read(cv::Mat* frame) int Input::StandardCamera::getCameraId() { // In case this function is invoked more than once. 
- if (camera_id_ >= 0){ + if (camera_id_ >= 0) + { return camera_id_; } static int STANDARD_CAMERA_COUNT = -1; - int fd; // A file descriptor to the video device + int fd; // A file descriptor to the video device struct v4l2_capability cap; char file[20]; - //if it is a realsense camera then skip it until we meet a standard camera + // if it is a realsense camera then skip it until we meet a standard camera do { - STANDARD_CAMERA_COUNT ++; - sprintf(file,"/dev/video%d",STANDARD_CAMERA_COUNT);//format filename - fd = open(file,O_RDWR); + STANDARD_CAMERA_COUNT++; + sprintf(file, "/dev/video%d", STANDARD_CAMERA_COUNT); // format filename + fd = open(file, O_RDWR); ioctl(fd, VIDIOC_QUERYCAP, &cap); close(fd); - std::cout << "!!camera: "<< cap.card << std::endl; - }while(!strcmp((char*)cap.card,"Intel(R) RealSense(TM) Depth Ca")); + std::cout << "!!camera: " << cap.card << std::endl; + } while (!strcmp((char*)cap.card, "Intel(R) RealSense(TM) Depth Ca")); camera_id_ = STANDARD_CAMERA_COUNT; return STANDARD_CAMERA_COUNT; diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/dynamic_vino_lib/src/inputs/video_input.cpp index 392f752f..ff6bf4e6 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/dynamic_vino_lib/src/inputs/video_input.cpp @@ -43,7 +43,8 @@ bool Input::Video::initialize(size_t width, size_t height) setWidth(width); setHeight(height); setInitStatus(cap.open(video_)); - if (isInit()) { + if (isInit()) + { cap.set(cv::CAP_PROP_FRAME_WIDTH, width); cap.set(cv::CAP_PROP_FRAME_HEIGHT, height); } @@ -52,7 +53,8 @@ bool Input::Video::initialize(size_t width, size_t height) bool Input::Video::read(cv::Mat* frame) { - if (!isInit()) { + if (!isInit()) + { return false; } cap.grab(); diff --git a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp index 04638728..a4847858 100644 --- a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp +++ 
b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp @@ -26,22 +26,20 @@ #include "dynamic_vino_lib/slog.h" // Validated Age Gender Classification Network -Models::AgeGenderDetectionModel::AgeGenderDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size) +Models::AgeGenderDetectionModel::AgeGenderDetectionModel(const std::string& model_loc, int input_num, int output_num, + int max_batch_size) + : BaseModel(model_loc, input_num, output_num, max_batch_size) { } -bool Models::AgeGenderDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::AgeGenderDetectionModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + if (input_info_map.size() != 1) + { slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; + << " but we got " << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; @@ -50,34 +48,33 @@ bool Models::AgeGenderDetectionModel::updateLayerProperty( addInputInfo("input", input_info_map.begin()->first); // set output property InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 2) { + if (output_info_map.size() != 2) + { // throw std::logic_error("Age/Gender Recognition network should have two output layers"); slog::warn << "This model seems not Age-gender like, which should have and only have 2" - " outputs, but we got " << std::to_string(output_info_map.size()) << "outputs" 
- << slog::endl; + " outputs, but we got " + << std::to_string(output_info_map.size()) << "outputs" << slog::endl; return false; } auto it = output_info_map.begin(); InferenceEngine::DataPtr age_output_ptr = (it++)->second; InferenceEngine::DataPtr gender_output_ptr = (it++)->second; - //Check More Configuration: - if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") { + // Check More Configuration: + if (gender_output_ptr->getCreatorLayer().lock()->type == "Convolution") + { std::swap(age_output_ptr, gender_output_ptr); } - if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") { - slog::err << "In Age Gender network, age layer (" - << age_output_ptr->getCreatorLayer().lock()->name - << ") should be a Convolution, but was: " - << age_output_ptr->getCreatorLayer().lock()->type << slog::endl; + if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") + { + slog::err << "In Age Gender network, age layer (" << age_output_ptr->getCreatorLayer().lock()->name + << ") should be a Convolution, but was: " << age_output_ptr->getCreatorLayer().lock()->type << slog::endl; return false; } - if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") { - slog::err <<"In Age Gender network, gender layer (" - << gender_output_ptr->getCreatorLayer().lock()->name - << ") should be a SoftMax, but was: " - << gender_output_ptr->getCreatorLayer().lock()->type - << slog::endl; + if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") + { + slog::err << "In Age Gender network, gender layer (" << gender_output_ptr->getCreatorLayer().lock()->name + << ") should be a SoftMax, but was: " << gender_output_ptr->getCreatorLayer().lock()->type << slog::endl; return false; } slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name << slog::endl; @@ -88,9 +85,9 @@ bool Models::AgeGenderDetectionModel::updateLayerProperty( gender_output_ptr->setPrecision(InferenceEngine::Precision::FP32); 
gender_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - //output_age_ = age_output_ptr->name; + // output_age_ = age_output_ptr->name; addOutputInfo("age", age_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; + // output_gender_ = gender_output_ptr->name; addOutputInfo("gender", gender_output_ptr->getName()); printAttribute(); diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index de805a3b..4b3c64bf 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -28,11 +28,8 @@ #include "dynamic_vino_lib/slog.h" // Validated Base Network -Models::BaseModel::BaseModel( - const std::string & model_loc, int max_batch_size) -: model_loc_(model_loc), - max_batch_size_(max_batch_size), - ModelAttribute(model_loc) +Models::BaseModel::BaseModel(const std::string& model_loc, int max_batch_size) + : model_loc_(model_loc), max_batch_size_(max_batch_size), ModelAttribute(model_loc) { if (model_loc.empty()) { @@ -87,7 +84,7 @@ bool Models::BaseModel::updateLayerProperty( } #endif -Models::ObjectDetectionModel::ObjectDetectionModel( - const std::string & model_loc, - int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} diff --git a/dynamic_vino_lib/src/models/emotion_detection_model.cpp b/dynamic_vino_lib/src/models/emotion_detection_model.cpp index 47dbbd53..75d164ac 100644 --- a/dynamic_vino_lib/src/models/emotion_detection_model.cpp +++ b/dynamic_vino_lib/src/models/emotion_detection_model.cpp @@ -24,23 +24,21 @@ #include "dynamic_vino_lib/slog.h" // Validated Emotions Detection Network -Models::EmotionDetectionModel::EmotionDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size) 
+Models::EmotionDetectionModel::EmotionDetectionModel(const std::string& model_loc, int input_num, int output_num, + int max_batch_size) + : BaseModel(model_loc, input_num, output_num, max_batch_size) { } -bool Models::EmotionDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::EmotionDetectionModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + if (input_info_map.size() != 1) + { slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; + << " but we got " << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; @@ -50,15 +48,15 @@ bool Models::EmotionDetectionModel::updateLayerProperty // set output property InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { + if (output_info_map.size() != 1) + { // throw std::logic_error("Age/Gender Recognition network should have two output layers"); - slog::warn << "This model should have and only have 1 output, but we got " - << std::to_string(output_info_map.size()) << "outputs" << slog::endl; + slog::warn << "This model should have and only have 1 output, but we got " << std::to_string(output_info_map.size()) + << "outputs" << slog::endl; return false; } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - slog::info << "Emotions layer: " << output_data_ptr->getCreatorLayer().lock()->name << - slog::endl; + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + slog::info << "Emotions 
layer: " << output_data_ptr->getCreatorLayer().lock()->name << slog::endl; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); addOutputInfo("output", output_info_map.begin()->first); @@ -67,14 +65,12 @@ bool Models::EmotionDetectionModel::updateLayerProperty return verifyOutputLayer(output_data_ptr); } -bool Models::EmotionDetectionModel::verifyOutputLayer(const InferenceEngine::DataPtr & ptr) +bool Models::EmotionDetectionModel::verifyOutputLayer(const InferenceEngine::DataPtr& ptr) { - if (ptr->getCreatorLayer().lock()->type != "SoftMax") { - slog::err <<"In Emotion network, gender layer (" - << ptr->getCreatorLayer().lock()->name - << ") should be a SoftMax, but was: " - << ptr->getCreatorLayer().lock()->type - << slog::endl; + if (ptr->getCreatorLayer().lock()->type != "SoftMax") + { + slog::err << "In Emotion network, gender layer (" << ptr->getCreatorLayer().lock()->name + << ") should be a SoftMax, but was: " << ptr->getCreatorLayer().lock()->type << slog::endl; return false; } diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/dynamic_vino_lib/src/models/face_detection_model.cpp index 4b6d3f5e..35b613b7 100644 --- a/dynamic_vino_lib/src/models/face_detection_model.cpp +++ b/dynamic_vino_lib/src/models/face_detection_model.cpp @@ -26,9 +26,8 @@ #include "dynamic_vino_lib/slog.h" // Validated Face Detection Network -Models::FaceDetectionModel::FaceDetectionModel( - const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(model_loc, max_batch_size) +Models::FaceDetectionModel::FaceDetectionModel(const std::string& model_loc, int max_batch_size) + : ObjectDetectionModel(model_loc, max_batch_size) { } diff --git a/dynamic_vino_lib/src/models/face_reidentification_model.cpp b/dynamic_vino_lib/src/models/face_reidentification_model.cpp index d131e517..add2b5fb 100644 --- a/dynamic_vino_lib/src/models/face_reidentification_model.cpp +++ 
b/dynamic_vino_lib/src/models/face_reidentification_model.cpp @@ -20,23 +20,21 @@ #include "dynamic_vino_lib/models/face_reidentification_model.h" #include "dynamic_vino_lib/slog.h" // Validated Face Reidentification Network -Models::FaceReidentificationModel::FaceReidentificationModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::FaceReidentificationModel::FaceReidentificationModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -void Models::FaceReidentificationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +void Models::FaceReidentificationModel::setLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); // set input and output layer name @@ -44,8 +42,9 @@ void Models::FaceReidentificationModel::setLayerProperty( output_ = output_info_map.begin()->first; } -void Models::FaceReidentificationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) {} +void 
Models::FaceReidentificationModel::checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr& net_reader) +{ +} const std::string Models::FaceReidentificationModel::getModelCategory() const { diff --git a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp index 2ae13e10..2761174e 100644 --- a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp +++ b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp @@ -27,22 +27,20 @@ #include "dynamic_vino_lib/slog.h" // Validated Head Pose Network -Models::HeadPoseDetectionModel::HeadPoseDetectionModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) +Models::HeadPoseDetectionModel::HeadPoseDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) { } -bool Models::HeadPoseDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::HeadPoseDetectionModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model should have only one input, but we got" - << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; + if (input_info_map.size() != 1) + { + slog::warn << "This model should have only one input, but we got" << std::to_string(input_info_map.size()) + << "inputs" << slog::endl; return false; } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; @@ -52,15 +50,20 @@ bool Models::HeadPoseDetectionModel::updateLayerProperty // set output property InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - for (auto & output : output_info_map) { + for (auto& output : output_info_map) + { 
output.second->setPrecision(InferenceEngine::Precision::FP32); output.second->setLayout(InferenceEngine::Layout::NC); } - for (const std::string& outName : {output_angle_r_, output_angle_p_, output_angle_y_}) { - if (output_info_map.find(outName) == output_info_map.end()) { + for (const std::string& outName : { output_angle_r_, output_angle_p_, output_angle_y_ }) + { + if (output_info_map.find(outName) == output_info_map.end()) + { throw std::logic_error("There is no " + outName + " output in Head Pose Estimation network"); - } else { + } + else + { addOutputInfo(outName, outName); } } diff --git a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp index 94a3b594..1d6c0781 100644 --- a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp +++ b/dynamic_vino_lib/src/models/landmarks_detection_model.cpp @@ -19,23 +19,21 @@ #include #include "dynamic_vino_lib/models/landmarks_detection_model.h" // Validated Landmarks Detection Network -Models::LandmarksDetectionModel::LandmarksDetectionModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::LandmarksDetectionModel::LandmarksDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -void Models::LandmarksDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +void Models::LandmarksDetectionModel::setLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property - 
InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); // set input and output layer name @@ -43,17 +41,16 @@ void Models::LandmarksDetectionModel::setLayerProperty( output_ = output_info_map.begin()->first; } -void Models::LandmarksDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) +void Models::LandmarksDetectionModel::checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr& net_reader) { - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) + { throw std::logic_error("Landmarks Detection topology should have only one input"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) + { throw std::logic_error("Landmarks Detection Network expects networks having one output"); } } diff --git a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp index eb0afee9..5c1e1a2f 100644 --- a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp +++ b/dynamic_vino_lib/src/models/license_plate_detection_model.cpp @@ -20,26 +20,27 @@ #include "dynamic_vino_lib/models/license_plate_detection_model.h" #include "dynamic_vino_lib/slog.h" // 
Validated Vehicle Attributes Detection Network -Models::LicensePlateDetectionModel::LicensePlateDetectionModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::LicensePlateDetectionModel::LicensePlateDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -bool Models::LicensePlateDetectionModel::updateLayerProperty( - const InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::LicensePlateDetectionModel::updateLayerProperty(const InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 2) { + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 2) + { throw std::logic_error("Vehicle Attribs topology should have only two inputs"); } auto sequence_input = (++input_info_map.begin()); - if (sequence_input->second->getTensorDesc().getDims()[0] != getMaxSequenceSize()) { + if (sequence_input->second->getTensorDesc().getDims()[0] != getMaxSequenceSize()) + { throw std::logic_error("License plate detection max sequence size dismatch"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 1) + { throw std::logic_error("Vehicle Attribs Network expects networks having one output"); } diff --git a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp index 0c281c3b..96c998d6 100644 --- a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp @@ -21,12 +21,11 @@ 
#include "dynamic_vino_lib/models/object_detection_ssd_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel( - const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(model_loc, max_batch_size) +Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel(const std::string& model_loc, int max_batch_size) + : ObjectDetectionModel(model_loc, max_batch_size) { slog::debug << "TESTING: in ObjectDetectionSSDModel" << slog::endl; - //addCandidatedAttr(std::make_shared()); + // addCandidatedAttr(std::make_shared()); } const std::string Models::ObjectDetectionSSDModel::getModelCategory() const @@ -34,12 +33,11 @@ const std::string Models::ObjectDetectionSSDModel::getModelCategory() const return "Object Detection SSD"; } -bool Models::ObjectDetectionSSDModel::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool Models::ObjectDetectionSSDModel::enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) { - if (!this->matToBlob(frame, input_frame_loc, 1, 0, engine)) { + if (!this->matToBlob(frame, input_frame_loc, 1, 0, engine)) + { return false; } @@ -47,37 +45,40 @@ bool Models::ObjectDetectionSSDModel::enqueue( return true; } -bool Models::ObjectDetectionSSDModel::matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) +bool Models::ObjectDetectionSSDModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) { - if (engine == nullptr) { + if (engine == nullptr) + { slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; return false; } std::string input_name = getInputName(); slog::debug << "add input image to blob: " << input_name << slog::endl; - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); + InferenceEngine::Blob::Ptr input_blob = engine->getRequest()->GetBlob(input_name); InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); const int width = blob_size[3]; const int height = blob_size[2]; const int channels = blob_size[1]; - u_int8_t * blob_data = input_blob->buffer().as(); + u_int8_t* blob_data = input_blob->buffer().as(); cv::Mat resized_image(orig_image); - if (width != orig_image.size().width || height != orig_image.size().height) { + if (width != orig_image.size().width || height != orig_image.size().height) + { cv::resize(orig_image, resized_image, cv::Size(width, height)); } int batchOffset = batch_index * width * height * channels; - for (int c = 0; c < channels; c++) { - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { + for (int c = 0; c < channels; c++) + { + for (int h = 0; h < height; h++) + { + for (int w = 0; w < width; w++) + { blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at(h, w)[c] * scale_factor; + resized_image.at(h, w)[c] * scale_factor; } } } @@ -86,14 +87,13 @@ bool Models::ObjectDetectionSSDModel::matToBlob( return true; } -bool Models::ObjectDetectionSSDModel::fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh, - const bool & enable_roi_constraint) +bool Models::ObjectDetectionSSDModel::fetchResults(const std::shared_ptr& engine, + std::vector& results, + const float& confidence_thresh, const bool& enable_roi_constraint) { slog::debug << "fetching Infer Resulsts from the given SSD model" << slog::endl; - if (engine == nullptr) { + if (engine == nullptr) + { slog::err << "Trying to fetch results from Engines." 
<< slog::endl; return false; } @@ -101,37 +101,42 @@ bool Models::ObjectDetectionSSDModel::fetchResults( slog::debug << "Fetching Detection Results ..." << slog::endl; InferenceEngine::InferRequest::Ptr request = engine->getRequest(); std::string output = getOutputName(); - const float * detections = request->GetBlob(output)->buffer().as(); + const float* detections = request->GetBlob(output)->buffer().as(); slog::debug << "Analyzing Detection results..." << slog::endl; auto max_proposal_count = getMaxProposalCount(); auto object_size = getObjectSize(); - slog::debug << "MaxProprosalCount=" << max_proposal_count - << ", ObjectSize=" << object_size << slog::endl; - for (int i = 0; i < max_proposal_count; i++) { + slog::debug << "MaxProprosalCount=" << max_proposal_count << ", ObjectSize=" << object_size << slog::endl; + for (int i = 0; i < max_proposal_count; i++) + { float image_id = detections[i * object_size + 0]; - if (image_id < 0) { - //slog::info << "Found objects: " << i << "|" << results.size() << slog::endl; + if (image_id < 0) + { + // slog::info << "Found objects: " << i << "|" << results.size() << slog::endl; break; } cv::Rect r; auto label_num = static_cast(detections[i * object_size + 1]); - std::vector & labels = getLabels(); + std::vector& labels = getLabels(); auto frame_size = getFrameSize(); r.x = static_cast(detections[i * object_size + 3] * frame_size.width); r.y = static_cast(detections[i * object_size + 4] * frame_size.height); r.width = static_cast(detections[i * object_size + 5] * frame_size.width - r.x); r.height = static_cast(detections[i * object_size + 6] * frame_size.height - r.y); - if (enable_roi_constraint) {r &= cv::Rect(0, 0, frame_size.width, frame_size.height);} + if (enable_roi_constraint) + { + r &= cv::Rect(0, 0, frame_size.width, frame_size.height); + } dynamic_vino_lib::ObjectDetectionResult result(r); - std::string label = label_num < labels.size() ? 
labels[label_num] : - std::string("label #") + std::to_string(label_num); + std::string label = + label_num < labels.size() ? labels[label_num] : std::string("label #") + std::to_string(label_num); result.setLabel(label); float confidence = detections[i * object_size + 2]; - if (confidence <= confidence_thresh /* || r.x == 0 */) { // why r.x needs to be checked? + if (confidence <= confidence_thresh /* || r.x == 0 */) + { // why r.x needs to be checked? continue; } result.setConfidence(confidence); @@ -142,18 +147,18 @@ bool Models::ObjectDetectionSSDModel::fetchResults( return true; } -bool Models::ObjectDetectionSSDModel::updateLayerProperty( - const InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::ObjectDetectionSSDModel::updateLayerProperty(const InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + if (input_info_map.size() != 1) + { slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got " - << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } - + InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); addInputInfo("input", input_info_map.begin()->first); @@ -164,26 +169,26 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty( slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { - slog::warn << "This model seems not SSDNet-like! We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." 
- << slog::endl; + if (output_info_map.size() != 1) + { + slog::warn << "This model seems not SSDNet-like! We got " << std::to_string(output_info_map.size()) + << "outputs, but SSDnet has only one." << slog::endl; return false; } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first - << slog::endl; + slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first << slog::endl; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); -///TODO: double check this part: BEGIN + /// TODO: double check this part: BEGIN const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; - if (output_layer->params.find("num_classes") == output_layer->params.end()) { + if (output_layer->params.find("num_classes") == output_layer->params.end()) + { slog::warn << "This model's output layer (" << output_info_map.begin()->first - << ") should have num_classes integer attribute" << slog::endl; + << ") should have num_classes integer attribute" << slog::endl; return false; } // class number should be equal to size of label vector @@ -199,7 +204,7 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty( } } #endif - ///TODO: double check this part: END + /// TODO: double check this part: END // last dimension of output layer should be 7 const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); @@ -207,17 +212,18 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty( slog::info 
<< "max proposal count is: " << getMaxProposalCount() << slog::endl; auto object_size = static_cast(output_dims[3]); - if (object_size != 7) { + if (object_size != 7) + { slog::warn << "This model is NOT SSDNet-like, whose output data for each detected object" - << "should have 7 dimensions, but was " << std::to_string(object_size) - << slog::endl; + << "should have 7 dimensions, but was " << std::to_string(object_size) << slog::endl; return false; } setObjectSize(object_size); - if (output_dims.size() != 4) { + if (output_dims.size() != 4) + { slog::warn << "This model is not SSDNet-like, output dimensions shoulld be 4, but was" - << std::to_string(output_dims.size()) << slog::endl; + << std::to_string(output_dims.size()) << slog::endl; return false; } diff --git a/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp index d24bb946..ba86a868 100644 --- a/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_yolov2voc_model.cpp @@ -21,21 +21,20 @@ #include "dynamic_vino_lib/models/object_detection_yolov2voc_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectDetectionYolov2Model::ObjectDetectionYolov2Model( - const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(model_loc, max_batch_size) +Models::ObjectDetectionYolov2Model::ObjectDetectionYolov2Model(const std::string& model_loc, int max_batch_size) + : ObjectDetectionModel(model_loc, max_batch_size) { } -bool Models::ObjectDetectionYolov2Model::updateLayerProperty( - const InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::ObjectDetectionYolov2Model::updateLayerProperty(const InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); - if 
(input_info_map.size() != 1) { + if (input_info_map.size() != 1) + { slog::warn << "This model seems not Yolo-like, which has only one input, but we got " - << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } @@ -47,37 +46,39 @@ bool Models::ObjectDetectionYolov2Model::updateLayerProperty( // set output property InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 1) { - slog::warn << "This model seems not Yolo-like! We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." - << slog::endl; + if (output_info_map.size() != 1) + { + slog::warn << "This model seems not Yolo-like! We got " << std::to_string(output_info_map.size()) + << "outputs, but SSDnet has only one." << slog::endl; return false; } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; + InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first - << slog::endl; + slog::info << "Checking Object Detection output ... 
Name=" << output_info_map.begin()->first << slog::endl; const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; - if (output_layer == nullptr || - output_layer->params.find("classes") == output_layer->params.end()) { + if (output_layer == nullptr || output_layer->params.find("classes") == output_layer->params.end()) + { slog::warn << "This model's output layer (" << output_info_map.begin()->first - << ") should have num_classes integer attribute" << slog::endl; + << ") should have num_classes integer attribute" << slog::endl; return false; } // class number should be equal to size of label vector // if network has default "background" class, fake is used const int num_classes = output_layer->GetParamAsInt("classes"); slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; - if (getLabels().size() != num_classes) { - - if (getLabels().size() == (num_classes - 1)) { + if (getLabels().size() != num_classes) + { + if (getLabels().size() == (num_classes - 1)) + { getLabels().insert(getLabels().begin(), "fake"); - } else { + } + else + { getLabels().clear(); } } @@ -88,17 +89,18 @@ bool Models::ObjectDetectionYolov2Model::updateLayerProperty( slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; auto object_size = static_cast(output_dims[3]); - if (object_size != 33) { + if (object_size != 33) + { slog::warn << "This model is NOT Yolo-like, whose output data for each detected object" - << "should have 7 dimensions, but was " << std::to_string(object_size) - << slog::endl; + << "should have 7 dimensions, but was " << std::to_string(object_size) << slog::endl; return false; } setObjectSize(object_size); - if (output_dims.size() != 2) { + if (output_dims.size() != 2) + { slog::warn << "This model is not Yolo-like, output dimensions shoulld be 2, but was" - << std::to_string(output_dims.size()) << slog::endl; + << std::to_string(output_dims.size()) << slog::endl; return false; } @@ -112,38 +114,35 @@ const std::string Models::ObjectDetectionYolov2Model::getModelCategory() const return "Object Detection Yolo v2"; } -bool Models::ObjectDetectionYolov2Model::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) +bool Models::ObjectDetectionYolov2Model::enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) { setFrameSize(frame.cols, frame.rows); - if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) { + if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) + { return false; } return true; } -bool Models::ObjectDetectionYolov2Model::matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) +bool 
Models::ObjectDetectionYolov2Model::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) { - if (engine == nullptr) { + if (engine == nullptr) + { slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; return false; } std::string input_name = getInputName(); - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); + InferenceEngine::Blob::Ptr input_blob = engine->getRequest()->GetBlob(input_name); InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); const int width = blob_size[3]; const int height = blob_size[2]; const int channels = blob_size[1]; - float * blob_data = input_blob->buffer().as(); - + float* blob_data = input_blob->buffer().as(); int dx = 0; int dy = 0; @@ -169,10 +168,13 @@ bool Models::ObjectDetectionYolov2Model::matToBlob( int new_w = imw; int new_h = imh; - if ((static_cast(IW) / imw) < (static_cast(IH) / imh)) { + if ((static_cast(IW) / imw) < (static_cast(IH) / imh)) + { new_w = IW; new_h = (imh * IW) / imw; - } else { + } + else + { new_h = IH; new_w = (imw * IW) / imh; } @@ -182,17 +184,23 @@ bool Models::ObjectDetectionYolov2Model::matToBlob( imh = image.size().height; imw = image.size().width; - for (int row = 0; row < imh; row++) { - for (int col = 0; col < imw; col++) { - for (int ch = 0; ch < 3; ch++) { + for (int row = 0; row < imh; row++) + { + for (int col = 0; col < imw; col++) + { + for (int ch = 0; ch < 3; ch++) + { resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; } } } - for (int c = 0; c < channels; c++) { - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { + for (int c = 0; c < channels; c++) + { + for (int h = 0; h < height; h++) + { + for (int w = 0; w < width; w++) + { blob_data[c * width * height + h * width + w] = resizedImg.at(h, w)[c]; } } @@ -202,14 +210,14 @@ bool Models::ObjectDetectionYolov2Model::matToBlob( return true; } -bool 
Models::ObjectDetectionYolov2Model::fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh, - const bool & enable_roi_constraint) +bool Models::ObjectDetectionYolov2Model::fetchResults(const std::shared_ptr& engine, + std::vector& results, + const float& confidence_thresh, const bool& enable_roi_constraint) { - try { - if (engine == nullptr) { + try + { + if (engine == nullptr) + { slog::err << "Trying to fetch results from Engines." << slog::endl; return false; } @@ -217,17 +225,17 @@ bool Models::ObjectDetectionYolov2Model::fetchResults( InferenceEngine::InferRequest::Ptr request = engine->getRequest(); std::string output = getOutputName(); - std::vector & labels = getLabels(); - const float * detections = - request->GetBlob(output)->buffer().as::value_type *>(); - InferenceEngine::CNNLayerPtr layer = - getNetReader()->getNetwork().getLayerByName(output.c_str()); + std::vector& labels = getLabels(); + const float* detections = request->GetBlob(output) + ->buffer() + .as::value_type*>(); + InferenceEngine::CNNLayerPtr layer = getNetReader()->getNetwork().getLayerByName(output.c_str()); int input_height = input_info_->getTensorDesc().getDims()[2]; int input_width = input_info_->getTensorDesc().getDims()[3]; // --------------------------- Validating output parameters -------------------------------- - if (layer != nullptr && layer->type != "RegionYolo") { + if (layer != nullptr && layer->type != "RegionYolo") + { throw std::runtime_error("Invalid output type: " + layer->type + ". 
RegionYolo expected"); } // --------------------------- Extracting layer parameters -------------------------------- @@ -235,47 +243,45 @@ bool Models::ObjectDetectionYolov2Model::fetchResults( const int coords = layer->GetParamAsInt("coords"); const int classes = layer->GetParamAsInt("classes"); auto blob = request->GetBlob(output); - const int out_blob_h = static_cast(blob->getTensorDesc().getDims()[2]);; - - std::vector anchors = { - 0.572730, 0.677385, - 1.874460, 2.062530, - 3.338430, 5.474340, - 7.882820, 3.527780, - 9.770520, 9.168280 - }; + const int out_blob_h = static_cast(blob->getTensorDesc().getDims()[2]); + ; + + std::vector anchors = { 0.572730, 0.677385, 1.874460, 2.062530, 3.338430, + 5.474340, 7.882820, 3.527780, 9.770520, 9.168280 }; auto side = out_blob_h; auto side_square = side * side; // --------------------------- Parsing YOLO Region output ------------------------------------- std::vector raw_results; - for (int i = 0; i < side_square; ++i) { + for (int i = 0; i < side_square; ++i) + { int row = i / side; int col = i % side; - for (int n = 0; n < num; ++n) { + for (int n = 0; n < num; ++n) + { int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); float scale = detections[obj_index]; - if (scale < confidence_thresh) { + if (scale < confidence_thresh) + { continue; } float x = (col + detections[box_index + 0 * side_square]) / side * input_width; float y = (row + detections[box_index + 1 * side_square]) / side * input_height; - float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / - side * input_height; - float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * - input_width; + float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / side * input_height; + float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side 
* input_width; - for (int j = 0; j < classes; ++j) { - int class_index = - getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); + for (int j = 0; j < classes; ++j) + { + int class_index = getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); float prob = scale * detections[class_index]; - if (prob < confidence_thresh) { + if (prob < confidence_thresh) + { continue; } @@ -291,8 +297,7 @@ bool Models::ObjectDetectionYolov2Model::fetchResults( cv::Rect r(x_min_resized, y_min_resized, width_resized, height_resized); Result result(r); // result.label_ = j; - std::string label = j < - labels.size() ? labels[j] : std::string("label #") + std::to_string(j); + std::string label = j < labels.size() ? labels[j] : std::string("label #") + std::to_string(j); result.setLabel(label); result.setConfidence(prob); @@ -302,21 +307,27 @@ bool Models::ObjectDetectionYolov2Model::fetchResults( } std::sort(raw_results.begin(), raw_results.end()); - for (unsigned int i = 0; i < raw_results.size(); ++i) { - if (raw_results[i].getConfidence() == 0) { + for (unsigned int i = 0; i < raw_results.size(); ++i) + { + if (raw_results[i].getConfidence() == 0) + { continue; } - for (unsigned int j = i + 1; j < raw_results.size(); ++j) { - auto iou = dynamic_vino_lib::ObjectDetection::calcIoU( - raw_results[i].getLocation(), raw_results[j].getLocation()); - if (iou >= 0.45) { + for (unsigned int j = i + 1; j < raw_results.size(); ++j) + { + auto iou = + dynamic_vino_lib::ObjectDetection::calcIoU(raw_results[i].getLocation(), raw_results[j].getLocation()); + if (iou >= 0.45) + { raw_results[j].setConfidence(0); } } } - for (auto & raw_result : raw_results) { - if (raw_result.getConfidence() < confidence_thresh) { + for (auto& raw_result : raw_results) + { + if (raw_result.getConfidence() < confidence_thresh) + { continue; } @@ -326,18 +337,20 @@ bool Models::ObjectDetectionYolov2Model::fetchResults( raw_results.clear(); return true; - } catch (const 
std::exception & error) { + } + catch (const std::exception& error) + { slog::err << error.what() << slog::endl; return false; - } catch (...) { + } + catch (...) + { slog::err << "Unknown/internal exception happened." << slog::endl; return false; } } -int Models::ObjectDetectionYolov2Model::getEntryIndex( - int side, int lcoords, int lclasses, - int location, int entry) +int Models::ObjectDetectionYolov2Model::getEntryIndex(int side, int lcoords, int lclasses, int location, int entry) { int n = location / (side * side); int loc = location % (side * side); diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp index 800fdd8b..343fcd69 100644 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ b/dynamic_vino_lib/src/models/object_segmentation_model.cpp @@ -21,17 +21,13 @@ #include "dynamic_vino_lib/inferences/object_segmentation.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::ObjectSegmentationModel::ObjectSegmentationModel( - const std::string &model_loc, - int max_batch_size) - : BaseModel(model_loc, max_batch_size) +Models::ObjectSegmentationModel::ObjectSegmentationModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) { } -bool Models::ObjectSegmentationModel::enqueue( - const std::shared_ptr &engine, - const cv::Mat &frame, - const cv::Rect &input_frame_loc) +bool Models::ObjectSegmentationModel::enqueue(const std::shared_ptr& engine, const cv::Mat& frame, + const cv::Rect& input_frame_loc) { if (engine == nullptr) { @@ -39,11 +35,11 @@ bool Models::ObjectSegmentationModel::enqueue( return false; } - for (const auto &inputInfoItem : input_info_) + for (const auto& inputInfoItem : input_info_) { // Fill first input tensor with images. 
First b channel, then g and r channels - slog::debug<<"first tensor"<getTensorDesc().getDims().size()<getTensorDesc().getDims().size()==4) + slog::debug << "first tensor" << inputInfoItem.second->getTensorDesc().getDims().size() << slog::endl; + if (inputInfoItem.second->getTensorDesc().getDims().size() == 4) { matToBlob(frame, input_frame_loc, 1.0, 0, engine); } @@ -52,19 +48,17 @@ bool Models::ObjectSegmentationModel::enqueue( if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) { InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); - auto data = input->buffer().as::value_type *>(); - data[0] = static_cast(frame.rows); // height + auto data = input->buffer().as::value_type*>(); + data[0] = static_cast(frame.rows); // height data[1] = static_cast(frame.cols); // width data[2] = 1; } } return true; - } -bool Models::ObjectSegmentationModel::matToBlob( - const cv::Mat &orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr &engine) +bool Models::ObjectSegmentationModel::matToBlob(const cv::Mat& orig_image, const cv::Rect&, float scale_factor, + int batch_index, const std::shared_ptr& engine) { (void)scale_factor; (void)batch_index; @@ -82,17 +76,15 @@ bool Models::ObjectSegmentationModel::matToBlob( size_t strideH = orig_image.step.buf[0]; size_t strideW = orig_image.step.buf[1]; - bool is_dense = - strideW == channels && - strideH == channels * width; + bool is_dense = strideW == channels && strideH == channels * width; - if (!is_dense){ + if (!is_dense) + { slog::err << "Doesn't support conversion from not dense cv::Mat." 
<< slog::endl; return false; } - InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, - {1, channels, height, width}, + InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, { 1, channels, height, width }, InferenceEngine::Layout::NHWC); auto shared_blob = InferenceEngine::make_shared_blob(tDesc, orig_image.data); @@ -106,61 +98,61 @@ const std::string Models::ObjectSegmentationModel::getModelCategory() const return "Object Segmentation"; } -bool Models::ObjectSegmentationModel::updateLayerProperty( - const InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::ObjectSegmentationModel::updateLayerProperty(const InferenceEngine::CNNNetReader::Ptr net_reader) { - slog::info<< "Checking INPUTS for Model" <getNetwork(); input_info_ = InferenceEngine::InputsDataMap(network.getInputsInfo()); - InferenceEngine::ICNNNetwork:: InputShapes inputShapes = network.getInputShapes(); - slog::debug<<"input size"<second; - slog::debug<<"channel size"<second; + slog::debug << "channel size" << in_size_vector[1] << "dimensional" << in_size_vector.size() << slog::endl; + if (in_size_vector.size() != 4 || in_size_vector[1] != 3) + { + // throw std::runtime_error("3-channel 4-dimensional model's input is expected"); + slog::warn << "3-channel 4-dimensional model's input is expected, but we got " << std::to_string(in_size_vector[1]) + << " channels and " << std::to_string(in_size_vector.size()) << " dimensions." 
<< slog::endl; return false; } in_size_vector[0] = 1; network.reshape(inputShapes); - InferenceEngine:: InputInfo &inputInfo = *network.getInputsInfo().begin()->second; + InferenceEngine::InputInfo& inputInfo = *network.getInputsInfo().begin()->second; inputInfo.getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); inputInfo.setLayout(InferenceEngine::Layout::NHWC); inputInfo.setPrecision(InferenceEngine::Precision::U8); - //InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - //addInputInfo("input", input_info_map.begin()->first.c_str()); + // InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; + // addInputInfo("input", input_info_map.begin()->first.c_str()); addInputInfo("input", inputShapes.begin()->first); InferenceEngine::OutputsDataMap outputsDataMap = network.getOutputsInfo(); - if (outputsDataMap.size() != 1) { - //throw std::runtime_error("Demo supports topologies only with 1 output"); + if (outputsDataMap.size() != 1) + { + // throw std::runtime_error("Demo supports topologies only with 1 output"); slog::warn << "This inference sample should have only one output, but we got" - << std::to_string(outputsDataMap.size()) << "outputs" - << slog::endl; + << std::to_string(outputsDataMap.size()) << "outputs" << slog::endl; return false; } - InferenceEngine::Data & data = *outputsDataMap.begin()->second; + InferenceEngine::Data& data = *outputsDataMap.begin()->second; data.setPrecision(InferenceEngine::Precision::FP32); const InferenceEngine::SizeVector& outSizeVector = data.getTensorDesc().getDims(); int outChannels, outHeight, outWidth; slog::debug << "output size vector " << outSizeVector.size() << slog::endl; - switch(outSizeVector.size()){ + switch (outSizeVector.size()) + { case 3: outChannels = 0; outHeight = outSizeVector[1]; @@ -173,28 +165,27 @@ bool Models::ObjectSegmentationModel::updateLayerProperty( break; default: throw std::runtime_error("Unexpected output blob 
shape. Only 4D and 3D output blobs are" - "supported."); - + "supported."); } - if(outHeight == 0 || outWidth == 0){ + if (outHeight == 0 || outWidth == 0) + { slog::err << "output_height or output_width is not set, please check the MaskOutput Info " << "is set correctly." << slog::endl; - //throw std::runtime_error("output_height or output_width is not set, please check the MaskOutputInfo"); + // throw std::runtime_error("output_height or output_width is not set, please check the MaskOutputInfo"); return false; } - slog::debug << "output width " << outWidth<< slog::endl; - slog::debug << "output hEIGHT " << outHeight<< slog::endl; - slog::debug << "output CHANNALS " << outChannels<< slog::endl; + slog::debug << "output width " << outWidth << slog::endl; + slog::debug << "output hEIGHT " << outHeight << slog::endl; + slog::debug << "output CHANNALS " << outChannels << slog::endl; addOutputInfo("masks", (outputsDataMap.begin()++)->first); addOutputInfo("detection", outputsDataMap.begin()->first); - //const InferenceEngine::CNNLayerPtr output_layer = - //network.getLayerByName(outputsDataMap.begin()->first.c_str()); - const InferenceEngine::CNNLayerPtr output_layer = - network.getLayerByName(getOutputName("detection").c_str()); - //const int num_classes = output_layer->GetParamAsInt("num_classes"); - //slog::info << "Checking Object Segmentation output ... num_classes=" << num_classes << slog::endl; + // const InferenceEngine::CNNLayerPtr output_layer = + // network.getLayerByName(outputsDataMap.begin()->first.c_str()); + const InferenceEngine::CNNLayerPtr output_layer = network.getLayerByName(getOutputName("detection").c_str()); +// const int num_classes = output_layer->GetParamAsInt("num_classes"); +// slog::info << "Checking Object Segmentation output ... 
num_classes=" << num_classes << slog::endl; #if 0 if (getLabels().size() != num_classes) @@ -209,16 +200,15 @@ bool Models::ObjectSegmentationModel::updateLayerProperty( } } #endif -/* - const InferenceEngine::SizeVector output_dims = data.getTensorDesc().getDims(); - setMaxProposalCount(static_cast(output_dims[2])); - slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - auto object_size = static_cast(output_dims[3]); - setObjectSize(object_size); - - slog::debug << "model size" << output_dims.size() << slog::endl;*/ + /* + const InferenceEngine::SizeVector output_dims = data.getTensorDesc().getDims(); + setMaxProposalCount(static_cast(output_dims[2])); + slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; + auto object_size = static_cast(output_dims[3]); + setObjectSize(object_size); + + slog::debug << "model size" << output_dims.size() << slog::endl;*/ printAttribute(); slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; return true; } - diff --git a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp index 31505820..586878a8 100644 --- a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp +++ b/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp @@ -20,17 +20,17 @@ #include "dynamic_vino_lib/models/person_attribs_detection_model.h" #include "dynamic_vino_lib/slog.h" // Validated Person Attributes Detection Network -Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -bool Models::PersonAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::PersonAttribsDetectionModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) + { throw std::logic_error("Person Attribs topology should have only one input"); } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; @@ -39,9 +39,9 @@ bool Models::PersonAttribsDetectionModel::updateLayerProperty( addInputInfo("input", input_info_map.begin()->first); slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 3) { + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + 
if (output_info_map.size() != 3) + { throw std::logic_error("Person Attribs Network expects networks having 3 output"); } input_ = input_info_map.begin()->first; @@ -51,9 +51,9 @@ bool Models::PersonAttribsDetectionModel::updateLayerProperty( InferenceEngine::DataPtr attribute_output_ptr = (output_iter++)->second; InferenceEngine::DataPtr top_output_ptr = (output_iter++)->second; InferenceEngine::DataPtr bottom_output_ptr = (output_iter++)->second; - + addOutputInfo("attributes_output_", attribute_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; + // output_gender_ = gender_output_ptr->name; addOutputInfo("top_output_", top_output_ptr->getName()); addOutputInfo("bottom_output_", bottom_output_ptr->getName()); printAttribute(); diff --git a/dynamic_vino_lib/src/models/person_reidentification_model.cpp b/dynamic_vino_lib/src/models/person_reidentification_model.cpp index 2a1beb92..e0b911ce 100644 --- a/dynamic_vino_lib/src/models/person_reidentification_model.cpp +++ b/dynamic_vino_lib/src/models/person_reidentification_model.cpp @@ -20,29 +20,28 @@ #include "dynamic_vino_lib/models/person_reidentification_model.h" #include "dynamic_vino_lib/slog.h" // Validated Object Detection Network -Models::PersonReidentificationModel::PersonReidentificationModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::PersonReidentificationModel::PersonReidentificationModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -bool Models::PersonReidentificationModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr netreader) +bool Models::PersonReidentificationModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr netreader) { slog::info << "Checking Inputs for Model" << getModelName() << slog::endl; auto network = netreader->getNetwork(); - + InferenceEngine::InputsDataMap input_info_map(network.getInputsInfo()); - + 
InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property - InferenceEngine::OutputsDataMap output_info_map( - network.getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(network.getOutputsInfo()); // set input and output layer name input_ = input_info_map.begin()->first; output_ = output_info_map.begin()->first; - + return true; } diff --git a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp index 3328eca4..825738d4 100644 --- a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp +++ b/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp @@ -20,30 +20,30 @@ #include "dynamic_vino_lib/models/vehicle_attribs_detection_model.h" #include "dynamic_vino_lib/slog.h" // Validated Vehicle Attributes Detection Network -Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel( - const std::string & model_loc, int max_batch_size) -: BaseModel(model_loc, max_batch_size) {} +Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel(const std::string& model_loc, int max_batch_size) + : BaseModel(model_loc, max_batch_size) +{ +} -bool Models::VehicleAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) +bool Models::VehicleAttribsDetectionModel::updateLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - if (input_info_map.size() != 1) { + // set input property + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); + if (input_info_map.size() != 1) + { throw std::logic_error("Vehicle Attribs topology 
should have only one input"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - if (output_info_map.size() != 2) { + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + if (output_info_map.size() != 2) + { throw std::logic_error("Vehicle Attribs Network expects networks having two outputs"); } InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - + // set input and output layer name input_ = input_info_map.begin()->first; auto output_iter = output_info_map.begin(); @@ -51,9 +51,9 @@ bool Models::VehicleAttribsDetectionModel::updateLayerProperty( // type_output_ = (output_iter++)->second->name; InferenceEngine::DataPtr color_output_ptr = (output_iter++)->second; InferenceEngine::DataPtr type_output_ptr = (output_iter++)->second; - + addOutputInfo("color_output_", color_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; + // output_gender_ = gender_output_ptr->name; addOutputInfo("type_output_", type_output_ptr->getName()); printAttribute(); @@ -64,4 +64,3 @@ const std::string Models::VehicleAttribsDetectionModel::getModelCategory() const { return "Vehicle Attributes Detection"; } - diff --git a/dynamic_vino_lib/src/outputs/base_output.cpp b/dynamic_vino_lib/src/outputs/base_output.cpp index a3248c8d..e3d333ec 100644 --- a/dynamic_vino_lib/src/outputs/base_output.cpp +++ b/dynamic_vino_lib/src/outputs/base_output.cpp @@ -19,11 +19,14 @@ void Outputs::BaseOutput::setPipeline(Pipeline* const pipeline) { - //pipeline->printPipeline(); + // pipeline->printPipeline(); pipeline_ = pipeline; } -Pipeline* Outputs::BaseOutput::getPipeline() const { return pipeline_; } +Pipeline* Outputs::BaseOutput::getPipeline() const +{ + return pipeline_; +} cv::Mat Outputs::BaseOutput::getFrame() const { diff --git 
a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 397f582c..e1286a50 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -29,13 +29,13 @@ #include "dynamic_vino_lib/outputs/image_window_output.h" #include "dynamic_vino_lib/pipeline.h" -Outputs::ImageWindowOutput::ImageWindowOutput(const std::string & output_name, int focal_length) -: BaseOutput(output_name), focal_length_(focal_length) +Outputs::ImageWindowOutput::ImageWindowOutput(const std::string& output_name, int focal_length) + : BaseOutput(output_name), focal_length_(focal_length) { cv::namedWindow(output_name_, cv::WINDOW_AUTOSIZE); } -void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) +void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) { // frame_ = frame; frame_ = frame.clone(); @@ -52,11 +52,12 @@ void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) } } -unsigned Outputs::ImageWindowOutput::findOutput( - const cv::Rect & result_rect) +unsigned Outputs::ImageWindowOutput::findOutput(const cv::Rect& result_rect) { - for (unsigned i = 0; i < outputs_.size(); i++) { - if (outputs_[i].rect == result_rect) { + for (unsigned i = 0; i < outputs_.size(); i++) + { + if (outputs_[i].rect == result_rect) + { return i; } } @@ -67,10 +68,10 @@ unsigned Outputs::ImageWindowOutput::findOutput( return outputs_.size() - 1; } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; @@ -78,27 +79,27 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) 
+void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; - outputs_[target_index].desc += - ("[" + results[i].getColor() + "," + results[i].getType() + "]"); + outputs_[target_index].desc += ("[" + results[i].getColor() + "," + results[i].getType() + "]"); } } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; auto fd_conf = results[i].getConfidence(); - if (fd_conf >= 0) { + if (fd_conf >= 0) + { std::ostringstream ostream; ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; outputs_[target_index].desc += ostream.str(); @@ -106,10 +107,10 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; @@ -119,10 +120,10 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = 
findOutput(result_rect); outputs_[target_index].rect = result_rect; @@ -131,14 +132,14 @@ void Outputs::ImageWindowOutput::accept( outputs_[target_index].desc += ostream.str(); auto male_prob = results[i].getMaleProbability(); - if (male_prob < 0.5) { + if (male_prob < 0.5) + { outputs_[target_index].scalar = cv::Scalar(0, 0, 255); } } } -cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, - double cz, cv::Point cp) +cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, double cz, cv::Point cp) { cv::Mat Axis(3, 1, CV_32F); Axis.at(0) = cx; @@ -148,34 +149,27 @@ cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, o.at(2) = camera_matrix_.at(0); Axis = r * Axis + o; cv::Point point; - point.x = static_cast( - (Axis.at(0) / Axis.at(2) * camera_matrix_.at(0)) + - cp.x); - point.y = static_cast( - (Axis.at(1) / Axis.at(2) * camera_matrix_.at(4)) + - cp.y); + point.x = static_cast((Axis.at(0) / Axis.at(2) * camera_matrix_.at(0)) + cp.x); + point.y = static_cast((Axis.at(1) / Axis.at(2) * camera_matrix_.at(4)) + cp.y); return point; } -cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, - double pitch, - double roll) +cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, double pitch, double roll) { pitch *= CV_PI / 180.0; yaw *= CV_PI / 180.0; roll *= CV_PI / 180.0; - cv::Matx33f Rx(1, 0, 0, 0, cos(pitch), -sin(pitch), 0, sin(pitch), - cos(pitch)); + cv::Matx33f Rx(1, 0, 0, 0, cos(pitch), -sin(pitch), 0, sin(pitch), cos(pitch)); cv::Matx33f Ry(cos(yaw), 0, -sin(yaw), 0, 1, 0, sin(yaw), 0, cos(yaw)); cv::Matx33f Rz(cos(roll), -sin(roll), 0, sin(roll), cos(roll), 0, 0, 0, 1); auto r = cv::Mat(Rz * Ry * Rx); return r; } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { 
auto result = results[i]; cv::Rect result_rect = result.getLocation(); unsigned target_index = findOutput(result_rect); @@ -196,15 +190,16 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector& results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; auto fd_conf = results[i].getConfidence(); - if (fd_conf >= 0) { + if (fd_conf >= 0) + { std::ostringstream ostream; ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; outputs_[target_index].desc += ostream.str(); @@ -214,8 +209,7 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::mergeMask( - const std::vector & results) +void Outputs::ImageWindowOutput::mergeMask(const std::vector& results) { /* std::map class_color; @@ -248,24 +242,27 @@ void Outputs::ImageWindowOutput::mergeMask( const float alpha = 0.5f; cv::Mat roi_img = frame_; cv::Mat colored_mask = results[0].getMask(); - cv::resize(colored_mask,colored_mask,cv::Size(frame_.size().width,frame_.size().height)); + cv::resize(colored_mask, colored_mask, cv::Size(frame_.size().width, frame_.size().height)); cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - if (outputs_.size() == 0) { + if (outputs_.size() == 0) + { initOutputs(results.size()); } - if (outputs_.size() != results.size()) { + if (outputs_.size() != results.size()) + { slog::err << "the size of Object Segmentation and Output Vector is not equal!" 
<< slog::endl; return; } - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { outputs_[i].rect = results[i].getLocation(); auto fd_conf = results[i].getConfidence(); - if (fd_conf >= 0) { + if (fd_conf >= 0) + { std::ostringstream ostream; ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; outputs_[i].desc += ostream.str(); @@ -276,34 +273,34 @@ void Outputs::ImageWindowOutput::accept( mergeMask(results); } - - -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); - if (results[i].getMaleProbability() < 0.5) { + if (results[i].getMaleProbability() < 0.5) + { outputs_[target_index].scalar = cv::Scalar(0, 0, 255); } - else{ + else + { outputs_[target_index].scalar = cv::Scalar(0, 255, 0); } - outputs_[target_index].pa_top.x = results[i].getTopLocation().x*result_rect.width + result_rect.x; - outputs_[target_index].pa_top.y = results[i].getTopLocation().y*result_rect.height + result_rect.y; - outputs_[target_index].pa_bottom.x = results[i].getBottomLocation().x*result_rect.width + result_rect.x; - outputs_[target_index].pa_bottom.y = results[i].getBottomLocation().y*result_rect.height + result_rect.y; + outputs_[target_index].pa_top.x = results[i].getTopLocation().x * result_rect.width + result_rect.x; + outputs_[target_index].pa_top.y = results[i].getTopLocation().y * result_rect.height + result_rect.y; + outputs_[target_index].pa_bottom.x = results[i].getBottomLocation().x * result_rect.width + result_rect.x; + outputs_[target_index].pa_bottom.y = results[i].getBottomLocation().y * result_rect.height + result_rect.y; outputs_[target_index].rect = result_rect; outputs_[target_index].desc += "[" + 
results[i].getAttributes() + "]"; } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; @@ -311,10 +308,10 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); outputs_[target_index].rect = result_rect; @@ -322,14 +319,15 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::accept( - const std::vector & results) +void Outputs::ImageWindowOutput::accept(const std::vector& results) { - for (unsigned i = 0; i < results.size(); i++) { + for (unsigned i = 0; i < results.size(); i++) + { cv::Rect result_rect = results[i].getLocation(); unsigned target_index = findOutput(result_rect); std::vector landmark_points = results[i].getLandmarks(); - for (int j = 0; j < landmark_points.size(); j++) { + for (int j = 0; j < landmark_points.size(); j++) + { outputs_[target_index].landmarks.push_back(landmark_points[j]); } } @@ -342,27 +340,28 @@ void Outputs::ImageWindowOutput::decorateFrame() int fps = getPipeline()->getFPS(); std::stringstream ss; ss << "FPS: " << fps; - cv::putText(frame_, ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, - 0.5, cv::Scalar(255, 0, 0)); + cv::putText(frame_, ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, cv::Scalar(255, 0, 0)); } for (auto o : outputs_) { auto new_y = std::max(15, o.rect.y - 15); - cv::putText(frame_, o.desc, 
cv::Point2f(o.rect.x, new_y), - cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, o.scalar); + cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, o.scalar); cv::rectangle(frame_, o.rect, o.scalar, 1); - if (o.hp_cp != o.hp_x){ + if (o.hp_cp != o.hp_x) + { cv::line(frame_, o.hp_cp, o.hp_x, cv::Scalar(0, 0, 255), 2); } - if (o.hp_cp != o.hp_y) { + if (o.hp_cp != o.hp_y) + { cv::line(frame_, o.hp_cp, o.hp_y, cv::Scalar(0, 255, 0), 2); } - if (o.hp_zs != o.hp_ze) { + if (o.hp_zs != o.hp_ze) + { cv::line(frame_, o.hp_zs, o.hp_ze, cv::Scalar(255, 0, 0), 2); cv::circle(frame_, o.hp_ze, 3, cv::Scalar(255, 0, 0), 2); } - + for (int i = 0; i < o.landmarks.size(); i++) { cv::circle(frame_, o.landmarks[i], 3, cv::Scalar(255, 0, 0), 2); @@ -373,7 +372,8 @@ void Outputs::ImageWindowOutput::decorateFrame() } void Outputs::ImageWindowOutput::handleOutput() { - if(frame_.cols == 0 || frame_.rows == 0){ + if (frame_.cols == 0 || frame_.rows == 0) + { return; } decorateFrame(); diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index 8302dfb5..b23dcd60 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -24,91 +24,99 @@ #include "cv_bridge/cv_bridge.h" #include -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { - if (detected_objects_topic_ != nullptr && detected_objects_topic_->objects_vector.size() > 0) { + if (detected_objects_topic_ != nullptr && detected_objects_topic_->objects_vector.size() > 0) + { object_msgs::ObjectsInBoxes objs; objs.objects_vector = detected_objects_topic_->objects_vector; response->objects.push_back(objs); - } else if (faces_topic_ != nullptr && faces_topic_ ->objects_vector.size() > 0) { - object_msgs::ObjectsInBoxes objs; + } + else if (faces_topic_ != nullptr && 
faces_topic_->objects_vector.size() > 0) + { + object_msgs::ObjectsInBoxes objs; objs.objects_vector = faces_topic_->objects_vector; response->objects.push_back(objs); } } -void Outputs::RosServiceOutput::setResponseForFace( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setResponseForFace(boost::shared_ptr response) { - if (faces_topic_ != nullptr && faces_topic_->objects_vector.size() > 0) { - object_msgs::ObjectsInBoxes objs; - objs.objects_vector = faces_topic_->objects_vector; + if (faces_topic_ != nullptr && faces_topic_->objects_vector.size() > 0) + { + object_msgs::ObjectsInBoxes objs; + objs.objects_vector = faces_topic_->objects_vector; response->objects.push_back(objs); } } -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { - if (age_gender_topic_ != nullptr) { + if (age_gender_topic_ != nullptr) + { response->age_gender.objects = age_gender_topic_->objects; } } -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { - if (emotions_topic_ != nullptr) { + if (emotions_topic_ != nullptr) + { response->emotion.emotions = emotions_topic_->emotions; } } -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { - if (headpose_topic_ != nullptr) { + if (headpose_topic_ != nullptr) + { response->headpose.headposes = headpose_topic_->headposes; } } -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) +{ + slog::info << "in ObjectsInMasks service::Response ..."; + if (segmented_objects_topic_ != nullptr) { - slog::info << "in ObjectsInMasks service::Response ..."; - if (segmented_objects_topic_ != nullptr) { - 
response->segmentation.objects_vector = segmented_objects_topic_->objects_vector; - } + response->segmentation.objects_vector = segmented_objects_topic_->objects_vector; } +} void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) + boost::shared_ptr response) +{ + slog::info << "in Reidentification service::Response ..."; + if (person_reid_topic_ != nullptr) { - slog::info << "in Reidentification service::Response ..."; - if (person_reid_topic_ != nullptr) { - response->reidentification.reidentified_vector = person_reid_msg_ptr_->reidentified_vector; - } + response->reidentification.reidentified_vector = person_reid_msg_ptr_->reidentified_vector; } +} -void Outputs::RosServiceOutput::setServiceResponse( - boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { - slog::info << "in People::Response ..."; - if (faces_topic_ != nullptr) { + slog::info << "in People::Response ..."; + if (faces_topic_ != nullptr) + { slog::info << "[FACES],"; response->persons.faces = faces_topic_->objects_vector; - } else if (detected_objects_topic_ != nullptr) { + } + else if (detected_objects_topic_ != nullptr) + { slog::info << "[FACES(objects)],"; response->persons.faces = detected_objects_topic_->objects_vector; } - if (age_gender_topic_ != nullptr) { + if (age_gender_topic_ != nullptr) + { slog::info << "[AGE_GENDER],"; response->persons.agegenders = age_gender_topic_->objects; } - if (emotions_topic_ != nullptr) { + if (emotions_topic_ != nullptr) + { slog::info << "[EMOTION],"; response->persons.emotions = emotions_topic_->emotions; } - if (headpose_topic_ != nullptr) { + if (headpose_topic_ != nullptr) + { slog::info << "[HEADPOSE],"; response->persons.headposes = headpose_topic_->headposes; } diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index 4cd96895..83590d10 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ 
b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -25,32 +25,27 @@ #include #include -Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): - pipeline_name_(pipeline_name) +Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name) : pipeline_name_(pipeline_name) { - pub_face_ = - nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/faces", 16); - pub_emotion_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/emotions", 16); - pub_age_gender_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/age_genders", 16); - pub_headpose_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/headposes", 16); - pub_object_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/detected_objects", 16); + pub_face_ = nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/faces", 16); + pub_emotion_ = nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/emotions", 16); + pub_age_gender_ = + nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/age_genders", 16); + pub_headpose_ = nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/headposes", 16); + pub_object_ = + nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/detected_objects", 16); pub_person_reid_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/reidentified_persons", 16); - pub_segmented_object_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/segmented_obejcts", 16); + pub_segmented_object_ = + nh_.advertise("/openvino_toolkit/" + pipeline_name_ + "/segmented_obejcts", 16); pub_face_reid_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/reidentified_faces", 16); pub_person_attribs_ = nh_.advertise( "/openvino_toolkit/" + pipeline_name_ + "/person_attributes", 16); pub_license_plate_ = nh_.advertise( - "/openvino_toolkit/" + pipeline_name_ + "/detected_license_plates", 16); + "/openvino_toolkit/" + pipeline_name_ + "/detected_license_plates", 16); pub_vehicle_attribs_ = nh_.advertise( - "/openvino_toolkit/" + 
pipeline_name_ + "/detected_vehicles_attribs", 16); + "/openvino_toolkit/" + pipeline_name_ + "/detected_vehicles_attribs", 16); emotions_topic_ = NULL; faces_topic_ = NULL; @@ -61,7 +56,7 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string pipeline_name): segmented_objects_topic_ = NULL; face_reid_topic_ = NULL; person_attribs_topic_ = NULL; - license_plate_topic_ = NULL; + license_plate_topic_ = NULL; vehicle_attribs_topic_ = NULL; } @@ -70,12 +65,12 @@ void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) frame_ = frame.clone(); } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { vehicle_attribs_topic_ = std::make_shared(); people_msgs::VehicleAttribs attribs; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); attribs.roi.x_offset = loc.x; @@ -88,12 +83,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { license_plate_topic_ = std::make_shared(); people_msgs::LicensePlate plate; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); plate.roi.x_offset = loc.x; @@ -105,12 +100,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { person_attribs_topic_ = std::make_shared(); people_msgs::PersonAttribute person_attrib; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); person_attrib.roi.x_offset = loc.x; @@ -122,12 +117,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { face_reid_topic_ = std::make_shared(); 
people_msgs::Reidentification face; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); face.roi.x_offset = loc.x; @@ -139,12 +134,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { person_reid_msg_ptr_ = std::make_shared(); people_msgs::Reidentification person; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); person.roi.x_offset = loc.x; @@ -156,12 +151,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { segmented_objects_topic_ = std::make_shared(); people_msgs::ObjectInMask object; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); object.roi.x_offset = loc.x; @@ -171,18 +166,18 @@ void Outputs::RosTopicOutput::accept( object.object_name = r.getLabel(); object.probability = r.getConfidence(); cv::Mat mask = r.getMask(); - for (int h = 0; h < mask.size().height; ++h) { - for (int w = 0; w < mask.size().width; ++w) { + for (int h = 0; h < mask.size().height; ++h) + { + for (int w = 0; w < mask.size().width; ++w) + { object.mask_array.push_back(mask.at(h, w)); } } - segmented_objects_topic_->objects_vector.push_back(object); + segmented_objects_topic_->objects_vector.push_back(object); } } - -void Outputs::RosTopicOutput::accept( - const std::vector& results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { faces_topic_ = std::make_shared(); @@ -200,8 +195,7 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { emotions_topic_ = std::make_shared(); @@ -218,8 +212,7 @@ void 
Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { age_gender_topic_ = std::make_shared(); @@ -247,8 +240,7 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { headpose_topic_ = std::make_shared(); @@ -267,8 +259,7 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { detected_objects_topic_ = std::make_shared(); @@ -286,12 +277,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector& results) { landmarks_topic_ = std::make_shared(); people_msgs::msg::Landmark landmark; - for (auto & r : results) { + for (auto& r : results) + { // slog::info << ">"; auto loc = r.getLocation(); landmark.roi.x_offset = loc.x; @@ -299,7 +290,8 @@ void Outputs::RosTopicOutput::accept( landmark.roi.width = loc.width; landmark.roi.height = loc.height; std::vector landmark_points = r.getLandmarks(); - for (auto pt : landmark_points) { + for (auto pt : landmark_points) + { geometry_msgs::msg::Point point; point.x = pt.x; point.y = pt.y; @@ -311,37 +303,42 @@ void Outputs::RosTopicOutput::accept( void Outputs::RosTopicOutput::handleOutput() { - //std_msgs::Header header = getHeader(); + // std_msgs::Header header = getHeader(); auto header = getPipeline()->getInputDevice()->getLockedHeader(); - if (vehicle_attribs_topic_ != nullptr) { + if (vehicle_attribs_topic_ != nullptr) + { people_msgs::VehicleAttribsStamped vehicle_attribs_msg; vehicle_attribs_msg.header = header; vehicle_attribs_msg.vehicles.swap(vehicle_attribs_topic_->vehicles); pub_vehicle_attribs_.publish(vehicle_attribs_msg); 
vehicle_attribs_topic_ = nullptr; } - if (license_plate_topic_ != nullptr) { + if (license_plate_topic_ != nullptr) + { people_msgs::LicensePlateStamped license_plate_msg; license_plate_msg.header = header; license_plate_msg.licenses.swap(license_plate_topic_->licenses); pub_license_plate_.publish(license_plate_msg); license_plate_topic_ = nullptr; } - if (person_attribs_topic_ != nullptr) { + if (person_attribs_topic_ != nullptr) + { people_msgs::PersonAttributeStamped person_attribute_msg; person_attribute_msg.header = header; person_attribute_msg.attributes.swap(person_attribs_topic_->attributes); pub_person_attribs_.publish(person_attribute_msg); person_attribs_topic_ = nullptr; } - if (person_reid_msg_ptr_ != nullptr) { + if (person_reid_msg_ptr_ != nullptr) + { people_msgs::ReidentificationStamped person_reid_msg; person_reid_msg.header = header; person_reid_msg.reidentified_vector.swap(person_reid_msg_ptr_->reidentified_vector); pub_person_reid_.publish(person_reid_msg); person_reid_msg_ptr_ = nullptr; } - if (segmented_objects_topic_ != nullptr) { + if (segmented_objects_topic_ != nullptr) + { // slog::info << "publishing faces outputs." << slog::endl; people_msgs::ObjectsInMasks segmented_msg; segmented_msg.header = header; @@ -403,17 +400,18 @@ void Outputs::RosTopicOutput::handleOutput() pub_object_.publish(object_msg); detected_objects_topic_ = nullptr; } - if (face_reid_topic_ != nullptr) { + if (face_reid_topic_ != nullptr) + { people_msgs::ReidentificationStamped face_reid_msg; face_reid_msg.header = header; face_reid_msg.reidentified_vector.swap(face_reid_topic_->reidentified_vector); - + pub_face_reid_.publish(face_reid_msg); face_reid_topic_ = nullptr; } } -#if 0 //deprecated +#if 0 // deprecated /** * Don't use this inferface to create new time stamp, it'd better use camera/topic * time stamp. 
@@ -430,4 +428,4 @@ std_msgs::Header Outputs::RosTopicOutput::getHeader() header.stamp.nsec = ns % 1000000000; return header; } -#endif //depreated +#endif // depreated diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index 1c9e8670..e587a989 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -25,64 +25,58 @@ #include "dynamic_vino_lib/pipeline.h" #include "dynamic_vino_lib/outputs/rviz_output.h" -Outputs::RvizOutput::RvizOutput(std::string pipeline_name) -: BaseOutput(pipeline_name) +Outputs::RvizOutput::RvizOutput(std::string pipeline_name) : BaseOutput(pipeline_name) { image_topic_ = nullptr; - pub_image_ = nh_.advertise("/openvino_toolkit/"+pipeline_name+"/images", 16); + pub_image_ = nh_.advertise("/openvino_toolkit/" + pipeline_name + "/images", 16); image_window_output_ = std::make_shared(pipeline_name, 950); } -void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) +void Outputs::RvizOutput::feedFrame(const cv::Mat& frame) { image_window_output_->feedFrame(frame); } -void Outputs::RvizOutput::accept( - const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - 
const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } @@ -91,11 +85,10 @@ void Outputs::RvizOutput::accept(const std::vectoraccept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector& results) { image_window_output_->accept(results); } - void Outputs::RvizOutput::handleOutput() { @@ -103,10 +96,8 @@ void Outputs::RvizOutput::handleOutput() image_window_output_->decorateFrame(); cv::Mat frame = image_window_output_->getFrame(); std_msgs::Header header = getPipeline()->getInputDevice()->getLockedHeader(); - std::shared_ptr cv_ptr = - std::make_shared(header, "bgr8", frame); - sensor_msgs::Image image_msg; + std::shared_ptr cv_ptr = std::make_shared(header, "bgr8", frame); + sensor_msgs::Image image_msg; image_topic_ = cv_ptr->toImageMsg(); pub_image_.publish(image_topic_); } - diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 16b3e505..8f4a1a1f 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -29,16 +29,17 @@ Pipeline::Pipeline(const std::string& name) { - if (!name.empty()) { + if (!name.empty()) + { params_ = std::make_shared(name); } counter_ = 0; } -bool Pipeline::add(const std::string& name, - std::shared_ptr input_device) +bool Pipeline::add(const std::string& name, std::shared_ptr input_device) { - if (name.empty()) { + if 
(name.empty()) + { slog::err << "Item name can't be empty!" << slog::endl; return false; } @@ -50,15 +51,16 @@ bool Pipeline::add(const std::string& name, return true; } -bool Pipeline::add(const std::string& parent, const std::string& name, - std::shared_ptr output) +bool Pipeline::add(const std::string& parent, const std::string& name, std::shared_ptr output) { - if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || output == nullptr) { + if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || output == nullptr) + { slog::err << "ARGuments ERROR when adding output instance!" << slog::endl; return false; } - if (add(name, output)) { + if (add(name, output)) + { addConnect(parent, name); return true; @@ -69,7 +71,8 @@ bool Pipeline::add(const std::string& parent, const std::string& name, bool Pipeline::add(const std::string& parent, const std::string& name) { - if (isLegalConnect(parent, name)) { + if (isLegalConnect(parent, name)) + { addConnect(parent, name); return true; } @@ -77,17 +80,18 @@ bool Pipeline::add(const std::string& parent, const std::string& name) return false; } -bool Pipeline::add(const std::string& name, - std::shared_ptr output) { - if (name.empty()) { +bool Pipeline::add(const std::string& name, std::shared_ptr output) +{ + if (name.empty()) + { slog::err << "Item name can't be empty!" << slog::endl; return false; } - + std::map>::iterator it = name_to_output_map_.find(name); - if (it != name_to_output_map_.end()) { - slog::warn << "inferance instance for [" << name << - "] already exists, update it with new instance." << slog::endl; + if (it != name_to_output_map_.end()) + { + slog::warn << "inferance instance for [" << name << "] already exists, update it with new instance." 
<< slog::endl; } name_to_output_map_[name] = output; output_names_.insert(name); @@ -97,34 +101,34 @@ bool Pipeline::add(const std::string& name, return true; } -void Pipeline::addConnect(const std::string & parent, const std::string & name) +void Pipeline::addConnect(const std::string& parent, const std::string& name) { - std::pair::iterator, - std::multimap::iterator> - ret; + std::pair::iterator, std::multimap::iterator> ret; ret = next_.equal_range(parent); - for (std::multimap::iterator it = ret.first; it != ret.second; ++it) { - if (it->second == name) { - slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << - slog::endl; + for (std::multimap::iterator it = ret.first; it != ret.second; ++it) + { + if (it->second == name) + { + slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << slog::endl; return; } } - slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << - slog::endl; - next_.insert({parent, name}); + slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << slog::endl; + next_.insert({ parent, name }); } bool Pipeline::add(const std::string& parent, const std::string& name, std::shared_ptr inference) { - if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) { + if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) + { slog::err << "ARGuments ERROR when adding inference instance!" << slog::endl; return false; } - if (add(name, inference)) { + if (add(name, inference)) + { addConnect(parent, name); return true; } @@ -132,19 +136,22 @@ bool Pipeline::add(const std::string& parent, const std::string& name, return false; } -bool Pipeline::add(const std::string& name, - std::shared_ptr inference) { - if (name.empty()) { +bool Pipeline::add(const std::string& name, std::shared_ptr inference) +{ + if (name.empty()) + { slog::err << "Item name can't be empty!" 
<< slog::endl; return false; } std::map>::iterator it = - name_to_detection_map_.find(name); - if (it != name_to_detection_map_.end()) { - slog::warn << "inferance instance for [" << name << - "] already exists, update it with new instance." << slog::endl; - } else { + name_to_detection_map_.find(name); + if (it != name_to_detection_map_.end()) + { + slog::warn << "inferance instance for [" << name << "] already exists, update it with new instance." << slog::endl; + } + else + { ++total_inference_; } name_to_detection_map_[name] = inference; @@ -156,9 +163,9 @@ bool Pipeline::isLegalConnect(const std::string parent, const std::string child) { int parent_order = getCatagoryOrder(parent); int child_order = getCatagoryOrder(child); - slog::info << "Checking connection into pipeline:[" << parent << "(" << parent_order << ")" << - "<-->" << child << "(" << child_order << ")" << - "]" << slog::endl; + slog::info << "Checking connection into pipeline:[" << parent << "(" << parent_order << ")" + << "<-->" << child << "(" << child_order << ")" + << "]" << slog::endl; return (parent_order != kCatagoryOrder_Unknown) && (child_order != kCatagoryOrder_Unknown) && (parent_order <= child_order); @@ -167,11 +174,16 @@ bool Pipeline::isLegalConnect(const std::string parent, const std::string child) int Pipeline::getCatagoryOrder(const std::string name) { int order = kCatagoryOrder_Unknown; - if (name == input_device_name_) { + if (name == input_device_name_) + { order = kCatagoryOrder_Input; - } else if (name_to_detection_map_.find(name) != name_to_detection_map_.end()) { + } + else if (name_to_detection_map_.find(name) != name_to_detection_map_.end()) + { order = kCatagoryOrder_Inference; - } else if (name_to_output_map_.find(name) != name_to_output_map_.end()) { + } + else if (name_to_output_map_.find(name) != name_to_output_map_.end()) + { order = kCatagoryOrder_Output; } @@ -182,17 +194,19 @@ void Pipeline::runOnce() { initInferenceCounter(); - if 
(!input_device_->read(&frame_)) { + if (!input_device_->read(&frame_)) + { // throw std::logic_error("Failed to get frame from cv::VideoCapture"); // slog::warn << "Failed to get frame from input_device." << slog::endl; - return; //do nothing if now frame read out + return; // do nothing if now frame read out } width_ = frame_.cols; height_ = frame_.rows; slog::debug << "DEBUG: in Pipeline run process..." << slog::endl; // auto t0 = std::chrono::high_resolution_clock::now(); - for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) + { std::string detection_name = pos.first->second; auto detection_ptr = name_to_detection_map_[detection_name]; detection_ptr->enqueue(frame_, cv::Rect(width_ / 2, height_ / 2, width_, height_)); @@ -200,7 +214,7 @@ void Pipeline::runOnce() detection_ptr->submitRequest(); } - for (auto &pair : name_to_output_map_) + for (auto& pair : name_to_output_map_) { pair.second->feedFrame(frame_); } @@ -208,13 +222,14 @@ void Pipeline::runOnce() slog::debug << "DEBUG: align inference process, waiting until all inferences done!" << slog::endl; std::unique_lock lock(counter_mutex_); - cv_.wait(lock, [self = this]() {return self->counter_ == 0;}); + cv_.wait(lock, [self = this]() { return self->counter_ == 0; }); - //auto t1 = std::chrono::high_resolution_clock::now(); - //typedef std::chrono::duration> ms; + // auto t1 = std::chrono::high_resolution_clock::now(); + // typedef std::chrono::duration> ms; slog::debug << "DEBUG: in Pipeline run process...handleOutput" << slog::endl; - for (auto & pair : name_to_output_map_) { + for (auto& pair : name_to_output_map_) + { // slog::info << "Handling Output ..." 
<< pair.first << slog::endl; pair.second->handleOutput(); } @@ -224,8 +239,7 @@ void Pipeline::printPipeline() { for (auto& current_node : next_) { - printf("%s --> %s\n", current_node.first.c_str(), - current_node.second.c_str()); + printf("%s --> %s\n", current_node.first.c_str(), current_node.second.c_str()); } } @@ -236,8 +250,7 @@ void Pipeline::setCallback() std::string detection_name = pair.first; std::function callb; - callb = [detection_name, this]() - { + callb = [detection_name, this]() { this->callback(detection_name); return; }; @@ -245,31 +258,38 @@ void Pipeline::setCallback() } } -void Pipeline::callback(const std::string & detection_name) +void Pipeline::callback(const std::string& detection_name) { - //slog::info<<"Hello callback ----> " << detection_name < " << detection_name <fetchResults(); // set output - for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; ++pos.first) { + for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; ++pos.first) + { std::string next_name = pos.first->second; std::string filter_conditions = findFilterConditions(detection_name, next_name); // if next is output, then print - if (output_names_.find(next_name) != output_names_.end()) { + if (output_names_.find(next_name) != output_names_.end()) + { detection_ptr->observeOutput(name_to_output_map_[next_name]); - } else { + } + else + { auto detection_ptr_iter = name_to_detection_map_.find(next_name); - if (detection_ptr_iter != name_to_detection_map_.end()) { + if (detection_ptr_iter != name_to_detection_map_.end()) + { auto next_detection_ptr = detection_ptr_iter->second; size_t batch_size = next_detection_ptr->getMaxBatchSize(); std::vector next_rois = detection_ptr->getFilteredROIs(filter_conditions); - for (size_t i = 0; i < next_rois.size(); i++) { + for (size_t i = 0; i < next_rois.size(); i++) + { auto roi = next_rois[i]; auto clippedRect = roi & cv::Rect(0, 0, width_, height_); cv::Mat next_input = frame_(clippedRect); 
next_detection_ptr->enqueue(next_input, roi); - if ((i + 1) == next_rois.size() || (i + 1) % batch_size == 0) { + if ((i + 1) == next_rois.size() || (i + 1) % batch_size == 0) + { increaseInferenceCounter(); next_detection_ptr->submitRequest(); auto request = next_detection_ptr->getEngine()->getRequest(); @@ -309,7 +329,8 @@ void Pipeline::countFPS() auto t_end = std::chrono::high_resolution_clock::now(); typedef std::chrono::duration> ms; ms secondDetection = std::chrono::duration_cast(t_end - t_start_); - if (secondDetection.count() > 1000) { + if (secondDetection.count() > 1000) + { setFPS(frame_cnt_); frame_cnt_ = 0; t_start_ = t_end; diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 8ac16fc1..7b85aedf 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -58,9 +58,10 @@ #include "dynamic_vino_lib/services/pipeline_processing_server.h" #include "dynamic_vino_lib/engines/engine_manager.h" -std::shared_ptr PipelineManager::createPipeline( - const Params::ParamManager::PipelineRawData& params) { - if (params.name == "") { +std::shared_ptr PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData& params) +{ + if (params.name == "") + { throw std::logic_error("The name of pipeline won't be empty!"); } @@ -74,35 +75,40 @@ std::shared_ptr PipelineManager::createPipeline( data.state = PipelineState_ThreadNotCreated; auto inputs = parseInputDevice(data); - if (inputs.size() != 1) { - slog::err << "currently one pipeline only supports ONE input." - << slog::endl; + if (inputs.size() != 1) + { + slog::err << "currently one pipeline only supports ONE input." 
<< slog::endl; return nullptr; } - for (auto it = inputs.begin(); it != inputs.end(); ++it) { + for (auto it = inputs.begin(); it != inputs.end(); ++it) + { pipeline->add(it->first, it->second); auto node = it->second->getHandler(); - if(node != nullptr) { - data.spin_nodes.emplace_back(node); + if (node != nullptr) + { + data.spin_nodes.emplace_back(node); } } auto outputs = parseOutput(data); - for (auto it = outputs.begin(); it != outputs.end(); ++it) { + for (auto it = outputs.begin(); it != outputs.end(); ++it) + { pipeline->add(it->first, it->second); } auto infers = parseInference(params); - for (auto it = infers.begin(); it != infers.end(); ++it) { + for (auto it = infers.begin(); it != infers.end(); ++it) + { pipeline->add(it->first, it->second); } slog::info << "Updating connections ..." << slog::endl; - for (auto it = params.connects.begin(); it != params.connects.end(); ++it) { + for (auto it = params.connects.begin(); it != params.connects.end(); ++it) + { pipeline->add(it->first, it->second); } - pipelines_.insert({params.name, data}); + pipelines_.insert({ params.name, data }); pipeline->setCallback(); slog::info << "One Pipeline Created!" 
<< slog::endl; @@ -111,37 +117,55 @@ std::shared_ptr PipelineManager::createPipeline( } std::map> -PipelineManager::parseInputDevice(const PipelineData & pdata) +PipelineManager::parseInputDevice(const PipelineData& pdata) { std::map> inputs; - for (auto & name : pdata.params.inputs) { + for (auto& name : pdata.params.inputs) + { slog::info << "Parsing InputDvice: " << name << slog::endl; std::shared_ptr device = nullptr; - if (name == kInputType_RealSenseCamera) { + if (name == kInputType_RealSenseCamera) + { device = std::make_shared(); - } else if (name == kInputType_StandardCamera) { + } + else if (name == kInputType_StandardCamera) + { device = std::make_shared(); - } else if (name == kInputType_IpCamera) { - if (pdata.params.input_meta != "") { + } + else if (name == kInputType_IpCamera) + { + if (pdata.params.input_meta != "") + { device = std::make_shared(pdata.params.input_meta); } - } else if (name == kInputType_CameraTopic || name == kInputType_ImageTopic) { + } + else if (name == kInputType_CameraTopic || name == kInputType_ImageTopic) + { device = std::make_shared(pdata.parent_node); - } else if (name == kInputType_Video) { - if (pdata.params.input_meta != "") { + } + else if (name == kInputType_Video) + { + if (pdata.params.input_meta != "") + { device = std::make_shared(pdata.params.input_meta); } - } else if (name == kInputType_Image) { - if (pdata.params.input_meta != "") { + } + else if (name == kInputType_Image) + { + if (pdata.params.input_meta != "") + { device = std::make_shared(pdata.params.input_meta); } - } else { + } + else + { slog::err << "Invalid input device name: " << name << slog::endl; } - if (device != nullptr) { + if (device != nullptr) + { device->initialize(); - inputs.insert({name, device}); + inputs.insert({ name, device }); slog::info << " ... 
Adding one Input device: " << name << slog::endl; } } @@ -149,26 +173,36 @@ PipelineManager::parseInputDevice(const PipelineData & pdata) return inputs; } -std::map> -PipelineManager::parseOutput(const PipelineData & pdata) +std::map> PipelineManager::parseOutput(const PipelineData& pdata) { std::map> outputs; - for (auto & name : pdata.params.outputs) { + for (auto& name : pdata.params.outputs) + { slog::info << "Parsing Output: " << name << slog::endl; std::shared_ptr object = nullptr; - if (name == kOutputTpye_RosTopic) { + if (name == kOutputTpye_RosTopic) + { object = std::make_shared(pdata.params.name, pdata.parent_node); - } else if (name == kOutputTpye_ImageWindow) { + } + else if (name == kOutputTpye_ImageWindow) + { object = std::make_shared(pdata.params.name); - } else if (name == kOutputTpye_RViz) { + } + else if (name == kOutputTpye_RViz) + { object = std::make_shared(pdata.params.name, pdata.parent_node); - } else if (name == kOutputTpye_RosService) { + } + else if (name == kOutputTpye_RosService) + { object = std::make_shared(pdata.params.name); - } else { + } + else + { slog::err << "Invalid output name: " << name << slog::endl; } - if (object != nullptr) { - outputs.insert({name, object}); + if (object != nullptr) + { + outputs.insert({ name, object }); slog::info << " ... 
Adding one Output: " << name << slog::endl; } } @@ -177,58 +211,74 @@ PipelineManager::parseOutput(const PipelineData & pdata) } std::map> -PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & params) +PipelineManager::parseInference(const Params::ParamManager::PipelineRawData& params) { std::map> inferences; - for (auto & infer : params.infers) { - if (infer.name.empty() || infer.model.empty()) { + for (auto& infer : params.infers) + { + if (infer.name.empty() || infer.model.empty()) + { continue; } slog::info << "Parsing Inference: " << infer.name << slog::endl; std::shared_ptr object = nullptr; - if (infer.name == kInferTpye_FaceDetection) { + if (infer.name == kInferTpye_FaceDetection) + { object = createFaceDetection(infer); - } - else if (infer.name == kInferTpye_AgeGenderRecognition) { + } + else if (infer.name == kInferTpye_AgeGenderRecognition) + { object = createAgeGenderRecognition(infer); - } - else if (infer.name == kInferTpye_EmotionRecognition) { + } + else if (infer.name == kInferTpye_EmotionRecognition) + { object = createEmotionRecognition(infer); - } - else if (infer.name == kInferTpye_HeadPoseEstimation) { + } + else if (infer.name == kInferTpye_HeadPoseEstimation) + { object = createHeadPoseEstimation(infer); - } - else if (infer.name == kInferTpye_ObjectDetection) { + } + else if (infer.name == kInferTpye_ObjectDetection) + { object = createObjectDetection(infer); } - else if (infer.name == kInferTpye_ObjectSegmentation) { + else if (infer.name == kInferTpye_ObjectSegmentation) + { object = createObjectSegmentation(infer); } - else if (infer.name == kInferTpye_PersonReidentification) { + else if (infer.name == kInferTpye_PersonReidentification) + { object = createPersonReidentification(infer); - } - else if (infer.name == kInferTpye_FaceReidentification) { + } + else if (infer.name == kInferTpye_FaceReidentification) + { object = createFaceReidentification(infer); - } - else if (infer.name == 
kInferTpye_PersonAttribsDetection) { + } + else if (infer.name == kInferTpye_PersonAttribsDetection) + { object = createPersonAttribsDetection(infer); } - else if (infer.name == kInferTpye_LandmarksDetection) { + else if (infer.name == kInferTpye_LandmarksDetection) + { object = createLandmarksDetection(infer); } - else if (infer.name == kInferTpye_VehicleAttribsDetection) { + else if (infer.name == kInferTpye_VehicleAttribsDetection) + { object = createVehicleAttribsDetection(infer); } - else if (infer.name == kInferTpye_LicensePlateDetection) { + else if (infer.name == kInferTpye_LicensePlateDetection) + { object = createLicensePlateDetection(infer); - } - else { + } + else + { slog::err << "Invalid inference name: " << infer.name << slog::endl; } - if (object != nullptr) { - inferences.insert({infer.name, object}); + if (object != nullptr) + { + inferences.insert({ infer.name, object }); slog::info << " ... Adding one Inference: " << infer.name << slog::endl; } } @@ -237,14 +287,13 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa } std::shared_ptr -PipelineManager::createFaceDetection( - const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createFaceDetection(const Params::ParamManager::InferenceRawData& infer) { return createObjectDetection(infer); } std::shared_ptr -PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & param) +PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, param.batch); model->modelInit(); @@ -257,7 +306,7 @@ PipelineManager::createAgeGenderRecognition(const Params::ParamManager::Inferenc } std::shared_ptr -PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData & param) +PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, param.batch); 
model->modelInit(); @@ -270,7 +319,7 @@ PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceR } std::shared_ptr -PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & param) +PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData& param) { auto model = std::make_shared(param.model, param.batch); model->modelInit(); @@ -283,28 +332,26 @@ PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceR } std::shared_ptr -PipelineManager::createObjectDetection( -const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createObjectDetection(const Params::ParamManager::InferenceRawData& infer) { std::shared_ptr object_detection_model; std::shared_ptr object_inference_ptr; slog::debug << "for test in createObjectDetection()" << slog::endl; - if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) { - object_detection_model = - std::make_shared(infer.model, infer.batch); + if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) + { + object_detection_model = std::make_shared(infer.model, infer.batch); } - if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2) { - object_detection_model = - std::make_shared(infer.model, infer.batch); + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2) + { + object_detection_model = std::make_shared(infer.model, infer.batch); } slog::debug << "for test in createObjectDetection(), Created SSDModel" << slog::endl; object_inference_ptr = std::make_shared( - infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration + infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration slog::debug << "for test in createObjectDetection(), before modelInit()" << slog::endl; object_detection_model->modelInit(); - auto object_detection_engine = engine_manager_.createEngine( - infer.engine, object_detection_model); + auto object_detection_engine = 
engine_manager_.createEngine(infer.engine, object_detection_model); object_inference_ptr->loadNetwork(object_detection_model); object_inference_ptr->loadEngine(object_detection_engine); @@ -312,17 +359,15 @@ const Params::ParamManager::InferenceRawData & infer) } std::shared_ptr -PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData& infer) { - auto model = - std::make_shared(infer.model, infer.batch); + auto model = std::make_shared(infer.model, infer.batch); model->modelInit(); slog::info << "Segmentation model initialized." << slog::endl; auto engine = engine_manager_.createEngine(infer.engine, model); slog::info << "Segmentation Engine initialized." << slog::endl; - auto segmentation_inference_ptr = std::make_shared( - infer.confidence_threshold); - slog::info << "Segmentation Inference instanced." << slog::endl; + auto segmentation_inference_ptr = std::make_shared(infer.confidence_threshold); + slog::info << "Segmentation Inference instanced." 
<< slog::endl; segmentation_inference_ptr->loadNetwork(model); segmentation_inference_ptr->loadEngine(engine); @@ -330,19 +375,17 @@ PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceR } std::shared_ptr -PipelineManager::createPersonReidentification( - const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createPersonReidentification(const Params::ParamManager::InferenceRawData& infer) { std::shared_ptr person_reidentification_model; std::shared_ptr reidentification_inference_ptr; - slog::debug << "for test in createPersonReidentification()"<(infer.model, infer.batch); + slog::debug << "for test in createPersonReidentification()" << slog::endl; + person_reidentification_model = std::make_shared(infer.model, infer.batch); person_reidentification_model->modelInit(); slog::info << "Reidentification model initialized" << slog::endl; auto person_reidentification_engine = engine_manager_.createEngine(infer.engine, person_reidentification_model); reidentification_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); reidentification_inference_ptr->loadNetwork(person_reidentification_model); reidentification_inference_ptr->loadEngine(person_reidentification_engine); @@ -350,15 +393,12 @@ PipelineManager::createPersonReidentification( } std::shared_ptr -PipelineManager::createVehicleAttribsDetection( - const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData& infer) { - auto model = - std::make_shared(infer.model, infer.batch); + auto model = std::make_shared(infer.model, infer.batch); model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); - auto vehicle_attribs_ptr = - std::make_shared(); + auto vehicle_attribs_ptr = std::make_shared(); vehicle_attribs_ptr->loadNetwork(model); vehicle_attribs_ptr->loadEngine(engine); @@ -366,15 +406,12 @@ 
PipelineManager::createVehicleAttribsDetection( } std::shared_ptr -PipelineManager::createLicensePlateDetection( - const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createLicensePlateDetection(const Params::ParamManager::InferenceRawData& infer) { - auto model = - std::make_shared(infer.model, infer.batch); + auto model = std::make_shared(infer.model, infer.batch); model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); - auto license_plate_ptr = - std::make_shared(); + auto license_plate_ptr = std::make_shared(); license_plate_ptr->loadNetwork(model); license_plate_ptr->loadEngine(engine); @@ -382,27 +419,25 @@ PipelineManager::createLicensePlateDetection( } std::shared_ptr -PipelineManager::createPersonAttribsDetection( - const Params::ParamManager::InferenceRawData & infer) +PipelineManager::createPersonAttribsDetection(const Params::ParamManager::InferenceRawData& infer) { - auto model = - std::make_shared(infer.model, infer.batch); - slog::debug << "for test in createPersonAttributesDetection()"<(infer.model, infer.batch); + slog::debug << "for test in createPersonAttributesDetection()" << slog::endl; model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); - auto attribs_inference_ptr = - std::make_shared(infer.confidence_threshold); + auto attribs_inference_ptr = std::make_shared(infer.confidence_threshold); attribs_inference_ptr->loadNetwork(model); attribs_inference_ptr->loadEngine(engine); return attribs_inference_ptr; } -void PipelineManager::threadPipeline(const char* name) { +void PipelineManager::threadPipeline(const char* name) +{ PipelineData& p = pipelines_[name]; - while ( p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) { - - if(p.state != PipelineState_ThreadPasued) + while (p.state != PipelineState_ThreadStopped && p.pipeline != nullptr && ros::ok()) + { + if (p.state != PipelineState_ThreadPasued) { ros::spinOnce(); 
p.pipeline->runOnce(); @@ -410,42 +445,47 @@ void PipelineManager::threadPipeline(const char* name) { std::this_thread::sleep_for(std::chrono::milliseconds(1)); } } -void PipelineManager::runAll() { - for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - if(it->second.state != PipelineState_ThreadRunning) { +void PipelineManager::runAll() +{ + for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) + { + if (it->second.state != PipelineState_ThreadRunning) + { it->second.state = PipelineState_ThreadRunning; } - if(it->second.thread == nullptr) { - it->second.thread = std::make_shared(&PipelineManager::threadPipeline, this, it->second.params.name.c_str()); + if (it->second.thread == nullptr) + { + it->second.thread = + std::make_shared(&PipelineManager::threadPipeline, this, it->second.params.name.c_str()); } } - - } void PipelineManager::stopAll() { - for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - if (it->second.state == PipelineState_ThreadRunning) { + for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) + { + if (it->second.state == PipelineState_ThreadRunning) + { it->second.state = PipelineState_ThreadStopped; } } - } -void PipelineManager::runService() +void PipelineManager::runService() { - auto node = std::make_shared>("pipeline_service"); - ros::spin();//hold the thread waiting for pipeline service - + auto node = + std::make_shared>("pipeline_service"); + ros::spin(); // hold the thread waiting for pipeline service } -void PipelineManager::joinAll() { - - auto service_thread = std::make_shared(&PipelineManager::runService,this); - service_thread->join();// pipeline service +void PipelineManager::joinAll() +{ + auto service_thread = std::make_shared(&PipelineManager::runService, this); + service_thread->join(); // pipeline service - for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - if(it->second.thread != nullptr && it->second.state == PipelineState_ThreadRunning) { + for (auto it 
= pipelines_.begin(); it != pipelines_.end(); ++it) + { + if (it->second.thread != nullptr && it->second.state == PipelineState_ThreadRunning) + { it->second.thread->join(); } } diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/dynamic_vino_lib/src/pipeline_params.cpp index 9b7b60f1..18cb1835 100644 --- a/dynamic_vino_lib/src/pipeline_params.cpp +++ b/dynamic_vino_lib/src/pipeline_params.cpp @@ -26,16 +26,17 @@ #include #include "dynamic_vino_lib/pipeline_params.h" -PipelineParams::PipelineParams(const std::string& name) { params_.name = name; } +PipelineParams::PipelineParams(const std::string& name) +{ + params_.name = name; +} -PipelineParams::PipelineParams( - const Params::ParamManager::PipelineRawData& params) +PipelineParams::PipelineParams(const Params::ParamManager::PipelineRawData& params) { params_ = params; } -PipelineParams& PipelineParams::operator=( - const Params::ParamManager::PipelineRawData& params) +PipelineParams& PipelineParams::operator=(const Params::ParamManager::PipelineRawData& params) { params_.name = params.name; params_.infers = params.infers; @@ -46,8 +47,7 @@ PipelineParams& PipelineParams::operator=( return *this; } -Params::ParamManager::PipelineRawData PipelineParams::getPipeline( - const std::string& name) +Params::ParamManager::PipelineRawData PipelineParams::getPipeline(const std::string& name) { return Params::ParamManager::getInstance().getPipeline(name); } @@ -60,15 +60,14 @@ void PipelineParams::update() } } -void PipelineParams::update( - const Params::ParamManager::PipelineRawData& params) { +void PipelineParams::update(const Params::ParamManager::PipelineRawData& params) +{ params_ = params; } bool PipelineParams::isOutputTo(std::string& output) { - if (std::find(params_.outputs.begin(), params_.outputs.end(), output) != - params_.outputs.end()) + if (std::find(params_.outputs.begin(), params_.outputs.end(), output) != params_.outputs.end()) { return true; } @@ -79,18 +78,19 @@ bool 
PipelineParams::isOutputTo(std::string& output) bool PipelineParams::isGetFps() { /**< Only "Image" input can't computing FPS >**/ - if (params_.inputs.size() == 0) { + if (params_.inputs.size() == 0) + { return false; } - return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == - params_.inputs.end(); + return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == params_.inputs.end(); } -std::string PipelineParams::findFilterConditions( - const std::string & input, const std::string & output) +std::string PipelineParams::findFilterConditions(const std::string& input, const std::string& output) { - for (auto filter : params_.filters) { - if (!input.compare(filter.input) && !output.compare(filter.output)) { + for (auto filter : params_.filters) + { + if (!input.compare(filter.input) && !output.compare(filter.output)) + { return filter.filter_conditions; } } diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 095f9885..08a1aca5 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -35,51 +35,47 @@ namespace vino_service { -template -FrameProcessingServer::FrameProcessingServer( - const std::string & service_name, - const std::string & config_path): - service_name_(service_name), - config_path_(config_path) +template +FrameProcessingServer::FrameProcessingServer(const std::string& service_name, const std::string& config_path) + : service_name_(service_name), config_path_(config_path) { - nh_=std::make_shared(service_name_); + nh_ = std::make_shared(service_name_); initService(); } -template +template void FrameProcessingServer::initService() { std::cout << "!!!!" 
<< config_path_ << std::endl; Params::ParamManager::getInstance().parse(config_path_); Params::ParamManager::getInstance().print(); - + auto pcommon = Params::ParamManager::getInstance().getCommon(); auto pipelines = Params::ParamManager::getInstance().getPipelines(); - if (pipelines.size() != 1) { + if (pipelines.size() != 1) + { throw std::logic_error("1 and only 1 pipeline can be set to FrameProcessServer!"); } - for (auto & p : pipelines) { + for (auto& p : pipelines) + { PipelineManager::getInstance().createPipeline(p); } - - ros::ServiceServer srv = nh_->advertiseService >("/openvino_toolkit/service",std::bind(&FrameProcessingServer::cbService,this,std::placeholders::_1)); - service_ = std::make_shared(srv); - + ros::ServiceServer srv = nh_->advertiseService >( + "/openvino_toolkit/service", std::bind(&FrameProcessingServer::cbService, this, std::placeholders::_1)); + service_ = std::make_shared(srv); } - -template -bool FrameProcessingServer::cbService( - ros::ServiceEvent& event) +template +bool FrameProcessingServer::cbService(ros::ServiceEvent& event) { boost::shared_ptr res = boost::make_shared(); - std::map pipelines_ = - PipelineManager::getInstance().getPipelines(); - for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; + std::map pipelines_ = PipelineManager::getInstance().getPipelines(); + for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) + { + PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; auto input = p.pipeline->getInputDevice(); Input::Config config; config.path = event.getRequest().image_path; @@ -87,8 +83,10 @@ bool FrameProcessingServer::cbService( p.pipeline->runOnce(); auto output_handle = p.pipeline->getOutputHandle(); - for (auto & pair : output_handle) { - if (!pair.first.compare(kOutputTpye_RosService)) { + for (auto& pair : output_handle) + { + if (!pair.first.compare(kOutputTpye_RosService)) + 
{ pair.second->setServiceResponse(res); event.getResponse() = *res; pair.second->clearData(); @@ -100,8 +98,6 @@ bool FrameProcessingServer::cbService( return false; } - - template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp index 28554164..97d04322 100644 --- a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp +++ b/dynamic_vino_lib/src/services/pipeline_processing_server.cpp @@ -36,75 +36,78 @@ namespace vino_service { -template -PipelineProcessingServer::PipelineProcessingServer( - const std::string & service_name): - service_name_(service_name) +template +PipelineProcessingServer::PipelineProcessingServer(const std::string& service_name) : service_name_(service_name) { - nh_=std::make_shared(); + nh_ = std::make_shared(); pipelines_ = PipelineManager::getInstance().getPipelinesPtr(); initPipelineService(); } -template +template void PipelineProcessingServer::initPipelineService() { - ros::ServiceServer srv= nh_->advertiseService >("/openvino_toolkit/pipeline/service",std::bind(&PipelineProcessingServer::cbService,this,std::placeholders::_1)); - service_ = std::make_shared(srv); + ros::ServiceServer srv = nh_->advertiseService< + ros::ServiceEvent >( + "/openvino_toolkit/pipeline/service", + std::bind(&PipelineProcessingServer::cbService, this, std::placeholders::_1)); + service_ = std::make_shared(srv); } -template void PipelineProcessingServer::setResponse( - ros::ServiceEvent& event) +template +void PipelineProcessingServer::setResponse(ros::ServiceEvent& event) { - for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) { - pipeline_srv_msgs::Pipelines pipeline_msg; - pipeline_msg.name = it->first; - pipeline_msg.running_status = std::to_string(it->second.state); - - auto connection_map = it->second.pipeline->getPipelineDetail(); - for (auto& 
current_pipe : connection_map) - { - pipeline_srv_msgs::Pipeline connection; - connection.input = current_pipe.first.c_str(); - connection.output = current_pipe.second.c_str(); - pipeline_msg.connections.push_back(connection); - } - event.getResponse().pipelines.push_back(pipeline_msg); - } + for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) + { + pipeline_srv_msgs::Pipelines pipeline_msg; + pipeline_msg.name = it->first; + pipeline_msg.running_status = std::to_string(it->second.state); + + auto connection_map = it->second.pipeline->getPipelineDetail(); + for (auto& current_pipe : connection_map) + { + pipeline_srv_msgs::Pipeline connection; + connection.input = current_pipe.first.c_str(); + connection.output = current_pipe.second.c_str(); + pipeline_msg.connections.push_back(connection); + } + event.getResponse().pipelines.push_back(pipeline_msg); + } } -template -void PipelineProcessingServer::setPipelineByRequest(std::string pipeline_name, - PipelineManager::PipelineState state){ - for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) - { - if(pipeline_name == it->first) - { - it->second.state = state; - } +template +void PipelineProcessingServer::setPipelineByRequest(std::string pipeline_name, PipelineManager::PipelineState state) +{ + for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) + { + if (pipeline_name == it->first) + { + it->second.state = state; } + } } -template -bool PipelineProcessingServer::cbService( - ros::ServiceEvent& event) +template +bool PipelineProcessingServer::cbService(ros::ServiceEvent& event) { std::string req_cmd = event.getRequest().pipeline_request.cmd; std::string req_val = event.getRequest().pipeline_request.value; - slog::info <<"[PipelineProcessingServer] Pipeline Service get request cmd: "<< req_cmd << - " val:"<< req_val<< slog::endl ; + slog::info << "[PipelineProcessingServer] Pipeline Service get request cmd: " << req_cmd << " val:" << req_val + << slog::endl; 
PipelineManager::PipelineState state; - if( req_cmd != "GET_PIPELINE")//not only get pipeline but also set pipeline by request + if (req_cmd != "GET_PIPELINE") // not only get pipeline but also set pipeline by request { - if(req_cmd == "STOP_PIPELINE") state = PipelineManager::PipelineState_ThreadStopped; - else if(req_cmd == "RUN_PIPELINE") state = PipelineManager::PipelineState_ThreadRunning; - else if(req_cmd == "PAUSE_PIPELINE") state = PipelineManager::PipelineState_ThreadPasued; + if (req_cmd == "STOP_PIPELINE") + state = PipelineManager::PipelineState_ThreadStopped; + else if (req_cmd == "RUN_PIPELINE") + state = PipelineManager::PipelineState_ThreadRunning; + else if (req_cmd == "PAUSE_PIPELINE") + state = PipelineManager::PipelineState_ThreadPasued; - setPipelineByRequest(req_val,state); + setPipelineByRequest(req_val, state); } - else //fill in pipeline status into service response + else // fill in pipeline status into service response { setResponse(event); } diff --git a/sample/include/sample/utility.hpp b/sample/include/sample/utility.hpp index f29217d0..b1e052e5 100644 --- a/sample/include/sample/utility.hpp +++ b/sample/include/sample/utility.hpp @@ -32,84 +32,73 @@ static const char help_message[] = "Print a usage message."; /// @brief message for images argument -static const char input_choice[] = - "Optional. Input choice (RealSenseCamera, StandardCamera, Video, Image). " \ - "Default value is StandardCamera."; +static const char input_choice[] = "Optional. Input choice (RealSenseCamera, StandardCamera, Video, Image). " + "Default value is StandardCamera."; /// @brief message for model argument -static const char face_detection_model_message[] = - "Required. Path to an .xml file with a trained face detection model."; -static const char age_gender_model_message[] = - "Optional. Path to an .xml file with a trained age gender model."; -static const char head_pose_model_message[] = - "Optional. 
Path to an .xml file with a trained head pose model."; -static const char emotions_model_message[] = - "Optional. Path to an .xml file with a trained emotions model."; +static const char face_detection_model_message[] = "Required. Path to an .xml file with a trained face detection " + "model."; +static const char age_gender_model_message[] = "Optional. Path to an .xml file with a trained age gender model."; +static const char head_pose_model_message[] = "Optional. Path to an .xml file with a trained head pose model."; +static const char emotions_model_message[] = "Optional. Path to an .xml file with a trained emotions model."; static const char object_model_message[] = "Required. Path to an .xml file with a trained model."; /// @brief message for plugin argument -static const char plugin_message[] = - "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \ -"the sample will look for this plugin only."; +static const char plugin_message[] = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " + "the sample will look for this plugin only."; /// @brief message for assigning face detection calculation to device -static const char target_device_message[] = - "Specify the target device for Face Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; +static const char target_device_message[] = "Specify the target device for Face Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; /// @brief message for assigning age gender calculation to device static const char target_device_message_ag[] = - "Specify the target device for Age Gender Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Age Gender Detection (CPU, GPU, FPGA, or MYRIAD). 
" + "Sample will look for a suitable plugin for device specified."; /// @brief message for assigning age gender calculation to device static const char target_device_message_hp[] = - "Specify the target device for Head Pose Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Head Pose Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; static const char target_device_message_em[] = - "Specify the target device for Emotions Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Emotions Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_ag_message[] = - "Specify number of maximum simultaneously processed faces for Age Gender Detection" \ + "Specify number of maximum simultaneously processed faces for Age Gender Detection" "(default is 16)."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_hp_message[] = - "Specify number of maximum simultaneously processed faces for Head Pose Detection" \ + "Specify number of maximum simultaneously processed faces for Head Pose Detection" "(default is 16)."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_em_message[] = - "Specify number of maximum simultaneously processed faces for Emotions Detection" \ + "Specify number of maximum simultaneously processed faces for Emotions Detection" "(default is 16)."; /// @brief message for performance counters -static const char - performance_counter_message[] = "Enables per-layer performance report."; +static const char performance_counter_message[] = 
"Enables per-layer performance report."; /// @brief message for clDNN custom kernels desc -static const char - custom_cldnn_message[] = "Required for clDNN (GPU)-targeted custom kernels."\ -"Absolute path to the xml file with the kernels desc."; +static const char custom_cldnn_message[] = "Required for clDNN (GPU)-targeted custom kernels." + "Absolute path to the xml file with the kernels desc."; /// @brief message for user library argument -static const char custom_cpu_library_message[] = - "Required for MKLDNN (CPU)-targeted custom layers." \ -"Absolute path to a shared library with the kernels impl."; +static const char custom_cpu_library_message[] = "Required for MKLDNN (CPU)-targeted custom layers." + "Absolute path to a shared library with the kernels impl."; /// @brief message for probability threshold argument -static const char - thresh_output_message[] = "Probability threshold for detections."; +static const char thresh_output_message[] = "Probability threshold for detections."; /// @brief message raw output flag static const char raw_output_message[] = "Inference results as raw values."; /// @brief message no wait for keypress after input stream completed -static const char - no_wait_for_keypress_message[] = "No wait for key press in the end."; +static const char no_wait_for_keypress_message[] = "No wait for key press in the end."; /// @brief message no show processed video static const char no_show_processed_video[] = "No show processed video."; @@ -199,13 +188,12 @@ DEFINE_string(i_path, "", input_file_path); /// It is a optional parameter DEFINE_string(config, "", parameter_file_message); -static void showUsageForParam() { +static void showUsageForParam() +{ std::cout << std::endl; std::cout << "vino_param_sample [OPTION]" << std::endl; std::cout << "Options:" << std::endl; std::cout << std::endl; std::cout << " -h " << help_message << std::endl; - std::cout << " -config \"\" " << parameter_file_message - << std::endl; + std::cout << " -config \"\" " 
<< parameter_file_message << std::endl; } - diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 5245f3e1..c3a09957 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -26,47 +26,44 @@ #include - - - -int main(int argc, char ** argv) +int main(int argc, char** argv) { - ros::init(argc, argv, "image_object_client"); + ros::init(argc, argv, "image_object_client"); ros::NodeHandle n; - if (argc != 2) { + if (argc != 2) + { ROS_INFO("Usage: rosrun dynamic_vino_sample image_object_client "); - //You can find a sample image in /opt/openvino_toolkit/ros_openvino_toolkit/data/images/car.png + // You can find a sample image in /opt/openvino_toolkit/ros_openvino_toolkit/data/images/car.png return -1; } std::string image_path = argv[1]; ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); - + object_msgs::DetectObjectSrv srv; srv.request.image_path = image_path; if (client.call(srv)) { - ROS_INFO("Request service success!"); + ROS_INFO("Request service success!"); cv::Mat image = cv::imread(image_path); int width = image.cols; int height = image.rows; - for (unsigned int i = 0; i < srv.response.objects[0].objects_vector.size(); i++) { + for (unsigned int i = 0; i < srv.response.objects[0].objects_vector.size(); i++) + { std::stringstream ss; - ss << srv.response.objects[0].objects_vector[i].object.object_name << ": " << - srv.response.objects[0].objects_vector[i].object.probability * 100 << "%"; - ROS_INFO("%d: object: %s", i, - srv.response.objects[0].objects_vector[i].object.object_name.c_str()); - ROS_INFO( "prob: %f", - srv.response.objects[0].objects_vector[i].object.probability); - ROS_INFO( - "location: (%d, %d, %d, %d)", - srv.response.objects[0].objects_vector[i].roi.x_offset, srv.response.objects[0].objects_vector[i].roi.y_offset, - srv.response.objects[0].objects_vector[i].roi.width, srv.response.objects[0].objects_vector[i].roi.height); + ss << 
srv.response.objects[0].objects_vector[i].object.object_name << ": " + << srv.response.objects[0].objects_vector[i].object.probability * 100 << "%"; + ROS_INFO("%d: object: %s", i, srv.response.objects[0].objects_vector[i].object.object_name.c_str()); + ROS_INFO("prob: %f", srv.response.objects[0].objects_vector[i].object.probability); + ROS_INFO("location: (%d, %d, %d, %d)", srv.response.objects[0].objects_vector[i].roi.x_offset, + srv.response.objects[0].objects_vector[i].roi.y_offset, + srv.response.objects[0].objects_vector[i].roi.width, + srv.response.objects[0].objects_vector[i].roi.height); int xmin = srv.response.objects[0].objects_vector[i].roi.x_offset; int ymin = srv.response.objects[0].objects_vector[i].roi.y_offset; @@ -79,24 +76,15 @@ int main(int argc, char ** argv) cv::Point left_top = cv::Point(xmin, ymin); cv::Point right_bottom = cv::Point(xmax, ymax); cv::rectangle(image, left_top, right_bottom, cv::Scalar(0, 255, 0), 1, 8, 0); - cv::rectangle(image, cv::Point(xmin, ymin), cv::Point(xmax, ymin + 20), cv::Scalar(0, 255, 0), - -1); - cv::putText(image, ss.str(), cv::Point(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 1, - cv::Scalar(0, 0, 255), 1); + cv::rectangle(image, cv::Point(xmin, ymin), cv::Point(xmax, ymin + 20), cv::Scalar(0, 255, 0), -1); + cv::putText(image, ss.str(), cv::Point(xmin + 5, ymin + 20), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 0, 255), 1); } cv::imshow("image_detection", image); cv::waitKey(0); - - - } else + } + else { - ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); return -1; + ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); + return -1; } - - - - } - - - diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index 2469eb1a..feca1288 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -74,8 +74,9 @@ bool parseAndCheckCommandLine(int argc, char** argv) int main(int argc, char** argv) { ros::init(argc, argv, 
"image_object_servier"); - - if (!parseAndCheckCommandLine(argc, argv)) return 0; + + if (!parseAndCheckCommandLine(argc, argv)) + return 0; ros::param::param("~param_file", FLAGS_config, "/param/pipeline_people.yaml"); @@ -83,14 +84,12 @@ int main(int argc, char** argv) std::string service_name = "/openvino_toolkit/service"; slog::info << "service name=" << service_name << slog::endl; - // ----- Parsing and validation of input args----------------------- + // ----- Parsing and validation of input args----------------------- + auto node = + std::make_shared>(service_name, FLAGS_config); - auto node = std::make_shared>(service_name, FLAGS_config); - slog::info << "Waiting for service request..." << slog::endl; ros::spin(); slog::info << "--------------End of Excution--------------" << FLAGS_config << slog::endl; - } diff --git a/sample/src/image_people_client.cpp b/sample/src/image_people_client.cpp index ecb25636..85d310a7 100644 --- a/sample/src/image_people_client.cpp +++ b/sample/src/image_people_client.cpp @@ -25,20 +25,18 @@ #include - - -int main(int argc, char ** argv) +int main(int argc, char** argv) { - ros::init(argc, argv, "image_people_client"); + ros::init(argc, argv, "image_people_client"); ros::NodeHandle n; - if (argc != 2) { - ROS_INFO( "Usage: rosrun dynamic_vino_sample image_people_client" - ""); + if (argc != 2) + { + ROS_INFO("Usage: rosrun dynamic_vino_sample image_people_client" + ""); return -1; } - ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); std::string image_path = argv[1]; @@ -48,39 +46,33 @@ int main(int argc, char ** argv) if (client.call(srv)) { - ROS_INFO("Request service success!"); + ROS_INFO("Request service success!"); if (srv.response.persons.emotions.size() == 0 && srv.response.persons.agegenders.size() == 0 && srv.response.persons.headposes.size() == 0) { - ROS_INFO( "Get response, but no any person found."); + ROS_INFO("Get response, but no any person found."); return 0; } - ROS_INFO( "Found 
persons..."); + ROS_INFO("Found persons..."); - for (unsigned int i = 0; i < srv.response.persons.faces.size(); i++) { - ROS_INFO( "%d: object: %s", i, - srv.response.persons.faces[i].object.object_name.c_str()); - ROS_INFO( "prob: %f", - srv.response.persons.faces[i].object.probability); - ROS_INFO( "location: (%d, %d, %d, %d)", - srv.response.persons.faces[i].roi.x_offset, srv.response.persons.faces[i].roi.y_offset, - srv.response.persons.faces[i].roi.width, srv.response.persons.faces[i].roi.height); - ROS_INFO( "Emotions: %s", - srv.response.persons.emotions[i].emotion.c_str()); - ROS_INFO( "Age: %f, Gender: %s", - srv.response.persons.agegenders[i].age, srv.response.persons.agegenders[i].gender.c_str()); - ROS_INFO( "Yaw, Pitch and Roll for head pose is: (%f, %f, %f),", - srv.response.persons.headposes[i].yaw, srv.response.persons.headposes[i].pitch, - srv.response.persons.headposes[i].roll); - } + for (unsigned int i = 0; i < srv.response.persons.faces.size(); i++) + { + ROS_INFO("%d: object: %s", i, srv.response.persons.faces[i].object.object_name.c_str()); + ROS_INFO("prob: %f", srv.response.persons.faces[i].object.probability); + ROS_INFO("location: (%d, %d, %d, %d)", srv.response.persons.faces[i].roi.x_offset, + srv.response.persons.faces[i].roi.y_offset, srv.response.persons.faces[i].roi.width, + srv.response.persons.faces[i].roi.height); + ROS_INFO("Emotions: %s", srv.response.persons.emotions[i].emotion.c_str()); + ROS_INFO("Age: %f, Gender: %s", srv.response.persons.agegenders[i].age, + srv.response.persons.agegenders[i].gender.c_str()); + ROS_INFO("Yaw, Pitch and Roll for head pose is: (%f, %f, %f),", srv.response.persons.headposes[i].yaw, + srv.response.persons.headposes[i].pitch, srv.response.persons.headposes[i].roll); + } } - else + else { - ROS_ERROR("Failed to request service \"/openvino_toolkit/service\" "); - return -1; + ROS_ERROR("Failed to request service \"/openvino_toolkit/service\" "); + return -1; } } - - - diff --git 
a/sample/src/image_people_server.cpp b/sample/src/image_people_server.cpp index 00a91985..f32de3ee 100644 --- a/sample/src/image_people_server.cpp +++ b/sample/src/image_people_server.cpp @@ -58,7 +58,6 @@ #include "opencv2/opencv.hpp" #include "sample/utility.hpp" - bool parseAndCheckCommandLine(int argc, char** argv) { // -----Parsing and validation of input args--------------------------- @@ -75,9 +74,9 @@ bool parseAndCheckCommandLine(int argc, char** argv) int main(int argc, char** argv) { ros::init(argc, argv, "sample_image_people_client"); - - if (!parseAndCheckCommandLine(argc, argv)) return 0; + if (!parseAndCheckCommandLine(argc, argv)) + return 0; ros::param::param("~param_file", FLAGS_config, "/param/pipeline_people.yaml"); @@ -87,13 +86,11 @@ int main(int argc, char** argv) slog::info << "service name=" << service_name << slog::endl; - auto node = std::make_shared>(service_name, FLAGS_config); - + auto node = std::make_shared>(service_name, FLAGS_config); + slog::info << "Waiting for service request..." 
<< slog::endl; - + ros::spin(); - - slog::info << "--------------End of Excution--------------" << FLAGS_config << slog::endl; + slog::info << "--------------End of Excution--------------" << FLAGS_config << slog::endl; } diff --git a/sample/src/image_reidentification_client.cpp b/sample/src/image_reidentification_client.cpp index 70a94991..ab7a8e31 100644 --- a/sample/src/image_reidentification_client.cpp +++ b/sample/src/image_reidentification_client.cpp @@ -26,46 +26,35 @@ #include - - -int main(int argc, char ** argv) +int main(int argc, char** argv) { - ros::init(argc, argv, "image_reidentification_client"); + ros::init(argc, argv, "image_reidentification_client"); ros::NodeHandle n; ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); - people_msgs::ReidentificationSrv srv; if (client.call(srv)) { - ROS_INFO("Request service success!"); + ROS_INFO("Request service success!"); - for (unsigned int i = 0; i < srv.response.reidentification.reidentified_vector.size(); i++) { + for (unsigned int i = 0; i < srv.response.reidentification.reidentified_vector.size(); i++) + { std::stringstream ss; - ss << srv.response.reidentification.reidentified_vector[i].identity ; - ROS_INFO("%d: object: %s", i, - srv.response.reidentification.reidentified_vector[i].identity.c_str()); - - ROS_INFO( - "location: (%d, %d, %d, %d)", - srv.response.reidentification.reidentified_vector[i].roi.x_offset, srv.response.reidentification.reidentified_vector[i].roi.y_offset, - srv.response.reidentification.reidentified_vector[i].roi.width,srv.response.reidentification.reidentified_vector[i].roi.height); + ss << srv.response.reidentification.reidentified_vector[i].identity; + ROS_INFO("%d: object: %s", i, srv.response.reidentification.reidentified_vector[i].identity.c_str()); + + ROS_INFO("location: (%d, %d, %d, %d)", srv.response.reidentification.reidentified_vector[i].roi.x_offset, + srv.response.reidentification.reidentified_vector[i].roi.y_offset, + 
srv.response.reidentification.reidentified_vector[i].roi.width, + srv.response.reidentification.reidentified_vector[i].roi.height); } - - } - else + else { - ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); return -1; + ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); + return -1; } - - - - } - - - \ No newline at end of file diff --git a/sample/src/image_reidentification_server.cpp b/sample/src/image_reidentification_server.cpp index 0300cb9f..0eaba1aa 100644 --- a/sample/src/image_reidentification_server.cpp +++ b/sample/src/image_reidentification_server.cpp @@ -59,7 +59,6 @@ #include "sample/utility.hpp" #include - bool parseAndCheckCommandLine(int argc, char** argv) { // -----Parsing and validation of input args--------------------------- @@ -76,8 +75,9 @@ bool parseAndCheckCommandLine(int argc, char** argv) int main(int argc, char** argv) { ros::init(argc, argv, "image_segmentation_servier"); - - if (!parseAndCheckCommandLine(argc, argv)) return 0; + + if (!parseAndCheckCommandLine(argc, argv)) + return 0; ros::param::param("~param_file", FLAGS_config, "/param/image_segmentation_server.yaml"); @@ -85,14 +85,12 @@ int main(int argc, char** argv) std::string service_name = "/openvino_toolkit/service"; slog::info << "service name=" << service_name << slog::endl; - // ----- Parsing and validation of input args----------------------- + // ----- Parsing and validation of input args----------------------- + auto node = std::make_shared>(service_name, + FLAGS_config); - auto node = std::make_shared>(service_name, FLAGS_config); - slog::info << "Waiting for reid service request..." 
<< slog::endl; ros::spin(); slog::info << "--------------End of Excution--------------" << FLAGS_config << slog::endl; - } diff --git a/sample/src/image_segmentation_client.cpp b/sample/src/image_segmentation_client.cpp index be98d04a..60b0505b 100644 --- a/sample/src/image_segmentation_client.cpp +++ b/sample/src/image_segmentation_client.cpp @@ -26,47 +26,36 @@ #include - - -int main(int argc, char ** argv) +int main(int argc, char** argv) { - ros::init(argc, argv, "image_segmentation_client"); + ros::init(argc, argv, "image_segmentation_client"); ros::NodeHandle n; ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); - people_msgs::ObjectsInMasksSrv srv; if (client.call(srv)) { - ROS_INFO("Request service success!"); - - for (unsigned int i = 0; i < srv.response.segmentation.objects_vector.size(); i++) { + ROS_INFO("Request service success!"); + + for (unsigned int i = 0; i < srv.response.segmentation.objects_vector.size(); i++) + { std::stringstream ss; - ss << srv.response.segmentation.objects_vector[i].object_name << ": " << - srv.response.segmentation.objects_vector[i].probability * 100 << "%"; - ROS_INFO("%d: object: %s", i, - srv.response.segmentation.objects_vector[i].object_name.c_str()); - ROS_INFO( "prob: %f", - srv.response.segmentation.objects_vector[i].probability); - ROS_INFO( - "location: (%d, %d, %d, %d)", - srv.response.segmentation.objects_vector[i].roi.x_offset, srv.response.segmentation.objects_vector[i].roi.y_offset, - srv.response.segmentation.objects_vector[i].roi.width,srv.response.segmentation.objects_vector[i].roi.height); + ss << srv.response.segmentation.objects_vector[i].object_name << ": " + << srv.response.segmentation.objects_vector[i].probability * 100 << "%"; + ROS_INFO("%d: object: %s", i, srv.response.segmentation.objects_vector[i].object_name.c_str()); + ROS_INFO("prob: %f", srv.response.segmentation.objects_vector[i].probability); + ROS_INFO("location: (%d, %d, %d, %d)", 
srv.response.segmentation.objects_vector[i].roi.x_offset, + srv.response.segmentation.objects_vector[i].roi.y_offset, + srv.response.segmentation.objects_vector[i].roi.width, + srv.response.segmentation.objects_vector[i].roi.height); } - } - else + else { - ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); return -1; + ROS_ERROR("Failed to request service \"openvino_toolkit/service\" "); + return -1; } - - - - } - - - \ No newline at end of file diff --git a/sample/src/image_segmentation_server.cpp b/sample/src/image_segmentation_server.cpp index 130b2899..7a827609 100644 --- a/sample/src/image_segmentation_server.cpp +++ b/sample/src/image_segmentation_server.cpp @@ -58,8 +58,6 @@ #include "opencv2/opencv.hpp" #include "sample/utility.hpp" - - bool parseAndCheckCommandLine(int argc, char** argv) { // -----Parsing and validation of input args--------------------------- @@ -76,8 +74,9 @@ bool parseAndCheckCommandLine(int argc, char** argv) int main(int argc, char** argv) { ros::init(argc, argv, "image_segmentation_servier"); - - if (!parseAndCheckCommandLine(argc, argv)) return 0; + + if (!parseAndCheckCommandLine(argc, argv)) + return 0; ros::param::param("~param_file", FLAGS_config, "/param/image_segmentation_server.yaml"); @@ -85,14 +84,12 @@ int main(int argc, char** argv) std::string service_name = "/openvino_toolkit/service"; slog::info << "service name=" << service_name << slog::endl; - // ----- Parsing and validation of input args----------------------- + // ----- Parsing and validation of input args----------------------- + auto node = + std::make_shared>(service_name, FLAGS_config); - auto node = std::make_shared>(service_name, FLAGS_config); - slog::info << "Waiting for seg service request..." 
<< slog::endl; ros::spin(); slog::info << "--------------End of Excution--------------" << FLAGS_config << slog::endl; - } diff --git a/sample/src/pipeline_service_client.cpp b/sample/src/pipeline_service_client.cpp index fb657a8b..108c68d5 100644 --- a/sample/src/pipeline_service_client.cpp +++ b/sample/src/pipeline_service_client.cpp @@ -27,92 +27,92 @@ #include #include - -pipeline_srv_msgs::PipelineSrv getRequestMsg(std::string cmd, std::string value){ +pipeline_srv_msgs::PipelineSrv getRequestMsg(std::string cmd, std::string value) +{ pipeline_srv_msgs::PipelineSrv srv; srv.request.pipeline_request.cmd = cmd; srv.request.pipeline_request.value = value; return srv; } -bool stopPipelineByName(ros::ServiceClient& client,std::string name){ - - auto srv = getRequestMsg("STOP_PIPELINE",name);//object_pipeline1 +bool stopPipelineByName(ros::ServiceClient& client, std::string name) +{ + auto srv = getRequestMsg("STOP_PIPELINE", name); // object_pipeline1 bool result = client.call(srv) ? true : false; return result; } -bool pausePipelineByName(ros::ServiceClient& client,std::string name){ - - auto srv = getRequestMsg("PAUSE_PIPELINE",name);//object_pipeline1 +bool pausePipelineByName(ros::ServiceClient& client, std::string name) +{ + auto srv = getRequestMsg("PAUSE_PIPELINE", name); // object_pipeline1 bool result = client.call(srv) ? true : false; return result; } -bool runPipelineByName(ros::ServiceClient& client,std::string name){ - - auto srv = getRequestMsg("RUN_PIPELINE",name);//object_pipeline1 +bool runPipelineByName(ros::ServiceClient& client, std::string name) +{ + auto srv = getRequestMsg("RUN_PIPELINE", name); // object_pipeline1 bool result = client.call(srv) ? 
true : false; return result; } -bool request(ros::ServiceClient& client,std::string cmd_name, std::string cmd_value){ - - auto srv = getRequestMsg(cmd_name, cmd_value);//object_pipeline1 +bool request(ros::ServiceClient& client, std::string cmd_name, std::string cmd_value) +{ + auto srv = getRequestMsg(cmd_name, cmd_value); // object_pipeline1 bool result = client.call(srv) ? true : false; return result; } -bool getAllPipelines(ros::ServiceClient& client, std::vector & pipelines){ - auto srv = getRequestMsg("GET_PIPELINE",""); +bool getAllPipelines(ros::ServiceClient& client, std::vector& pipelines) +{ + auto srv = getRequestMsg("GET_PIPELINE", ""); bool success = client.call(srv) ? true : false; - if(success) + if (success) { for (auto it = srv.response.pipelines.begin(); it != srv.response.pipelines.end(); ++it) { - pipelines.push_back(*it); - std::cout<name<<" status:"<running_status << std::endl; - for( auto connect = it->connections.begin(); connect!= it->connections.end(); ++connect) - { - printf("%s --> %s\n", connect->input.c_str(), - connect->output.c_str()); - } + pipelines.push_back(*it); + std::cout << it->name << " status:" << it->running_status << std::endl; + for (auto connect = it->connections.begin(); connect != it->connections.end(); ++connect) + { + printf("%s --> %s\n", connect->input.c_str(), connect->output.c_str()); + } } } return success; } - -int main(int argc, char ** argv) +int main(int argc, char** argv) { - if (argc != 3) { + if (argc != 3) + { ROS_INFO("Usage: rosrun dynamic_vino_sample pipeline_service_client "); return -1; } - std::string cmd_name = argv[1]; + std::string cmd_name = argv[1]; std::string cmd_value = argv[2]; - - ros::init(argc, argv, "pipeline_service_client"); + ros::init(argc, argv, "pipeline_service_client"); ros::NodeHandle n; ros::ServiceClient client = n.serviceClient("/openvino_toolkit/pipeline/service"); std::vector pipelines; - - auto success = getAllPipelines(client,pipelines) ? 
true : false; - - if(!success) { ROS_ERROR("Failed to request service."); return -1;} - - success = request(client,cmd_name, cmd_value); - if(!success) { ROS_ERROR("Failed to request service."); return -1;} - //stopPipelineByName(client,"object_pipeline1"); - + auto success = getAllPipelines(client, pipelines) ? true : false; + if (!success) + { + ROS_ERROR("Failed to request service."); + return -1; + } + success = request(client, cmd_name, cmd_value); + if (!success) + { + ROS_ERROR("Failed to request service."); + return -1; + } + // stopPipelineByName(client,"object_pipeline1"); } - - - diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp index ed0e1dcb..8bce1606 100644 --- a/sample/src/pipeline_with_params.cpp +++ b/sample/src/pipeline_with_params.cpp @@ -82,9 +82,8 @@ bool parseAndCheckCommandLine(int argc, char** argv) int main(int argc, char** argv) { + ros::init(argc, argv, "sample_with_params"); - ros::init(argc, argv, "sample_with_params"); - // register signal SIGINT and signal handler signal(SIGINT, signalHandler); @@ -94,8 +93,7 @@ int main(int argc, char** argv) try { - std::cout << "InferenceEngine: " - << InferenceEngine::GetInferenceEngineVersion() << std::endl; + std::cout << "InferenceEngine: " << InferenceEngine::GetInferenceEngineVersion() << std::endl; // ----- Parsing and validation of input args----------------------- if (!parseAndCheckCommandLine(argc, argv)) @@ -113,15 +111,15 @@ int main(int argc, char** argv) throw std::logic_error("Pipeline parameters should be set!"); } - for (auto & p : pipelines) { + for (auto& p : pipelines) + { PipelineManager::getInstance().createPipeline(p); } - + PipelineManager::getInstance().runAll(); PipelineManager::getInstance().joinAll(); slog::info << "Execution successful" << slog::endl; - } catch (const std::exception& error) { diff --git a/vino_param_lib/include/vino_param_lib/param_manager.h b/vino_param_lib/include/vino_param_lib/param_manager.h index 
8144a21c..74c29c3d 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.h +++ b/vino_param_lib/include/vino_param_lib/param_manager.h @@ -42,7 +42,7 @@ namespace Params */ class ParamManager // singleton { - public: +public: /** * @brief Get the singleton instance of ParamManager class. * The instance will be created when first call. @@ -72,7 +72,8 @@ class ParamManager // singleton bool enable_roi_constraint = false; }; - struct FilterRawData { + struct FilterRawData + { std::string input; std::string output; std::string filter_conditions; @@ -88,7 +89,7 @@ class ParamManager // singleton std::string input_meta; std::vector filters; }; - + struct CommonRawData { std::string custom_cpu_library; @@ -137,7 +138,7 @@ class ParamManager // singleton return common_; } - private: +private: ParamManager() { } @@ -149,4 +150,4 @@ class ParamManager // singleton }; } // namespace Params -#endif // VINO_PARAM_LIB_PARAM_MANAGER_H \ No newline at end of file +#endif // VINO_PARAM_LIB_PARAM_MANAGER_H \ No newline at end of file diff --git a/vino_param_lib/include/vino_param_lib/slog.h b/vino_param_lib/include/vino_param_lib/slog.h index 0b2de083..3838e6d8 100644 --- a/vino_param_lib/include/vino_param_lib/slog.h +++ b/vino_param_lib/include/vino_param_lib/slog.h @@ -49,13 +49,12 @@ class LogStream std::ostream* _log_stream; bool _new_line; - public: +public: /** * @brief A constructor. 
Creates an LogStream object * @param prefix The prefix to print */ - LogStream(const std::string& prefix, std::ostream& log_stream) - : _prefix(prefix), _new_line(true) + LogStream(const std::string& prefix, std::ostream& log_stream) : _prefix(prefix), _new_line(true) { _log_stream = &log_stream; } diff --git a/vino_param_lib/src/param_manager.cpp b/vino_param_lib/src/param_manager.cpp index e2d4195c..d9695477 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/vino_param_lib/src/param_manager.cpp @@ -23,34 +23,32 @@ namespace Params { - -void operator>>(const YAML::Node & node, ParamManager::PipelineRawData & pipeline); -void operator>>(const YAML::Node & node, std::vector & list); -void operator>>(const YAML::Node & node, ParamManager::InferenceRawData & infer); +void operator>>(const YAML::Node& node, ParamManager::PipelineRawData& pipeline); +void operator>>(const YAML::Node& node, std::vector& list); +void operator>>(const YAML::Node& node, ParamManager::InferenceRawData& infer); void operator>>(const YAML::Node& node, std::vector& list); void operator>>(const YAML::Node& node, std::multimap& connect); -void operator>>(const YAML::Node & node, std::vector & filters); +void operator>>(const YAML::Node& node, std::vector& filters); void operator>>(const YAML::Node& node, std::string& str); void operator>>(const YAML::Node& node, bool& val); void operator>>(const YAML::Node& node, int& val); void operator>>(const YAML::Node& node, float& val); void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common); -#define YAML_PARSE(node, key, val) \ - try \ - { \ - node[key] >> val; \ - } \ - catch (YAML::Exception e) \ - { \ - slog::warn << e.msg << slog::endl; \ - } \ - catch (...) \ - { \ - slog::warn << "Exception occurs when parsing string." << slog::endl; \ +#define YAML_PARSE(node, key, val) \ + try \ + { \ + node[key] >> val; \ + } \ + catch (YAML::Exception e) \ + { \ + slog::warn << e.msg << slog::endl; \ + } \ + catch (...) 
\ + { \ + slog::warn << "Exception occurs when parsing string." << slog::endl; \ } -void operator>>(const YAML::Node& node, - std::vector& list) +void operator>>(const YAML::Node& node, std::vector& list) { slog::info << "Pipeline size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) @@ -69,8 +67,7 @@ void operator>>(const YAML::Node& node, ParamManager::CommonRawData& common) YAML_PARSE(node, "enable_performance_count", common.enable_performance_count) } -void operator>>(const YAML::Node& node, - ParamManager::PipelineRawData& pipeline) +void operator>>(const YAML::Node& node, ParamManager::PipelineRawData& pipeline) { YAML_PARSE(node, "name", pipeline.name) YAML_PARSE(node, "inputs", pipeline.inputs) @@ -82,8 +79,7 @@ void operator>>(const YAML::Node& node, slog::info << "Pipeline Params:name=" << pipeline.name << slog::endl; } -void operator>>(const YAML::Node& node, - std::vector& list) +void operator>>(const YAML::Node& node, std::vector& list) { slog::info << "Inferences size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) @@ -117,35 +113,40 @@ void operator>>(const YAML::Node& node, std::vector& list) } } -void operator>>(const YAML::Node& node, - std::multimap& connect) +void operator>>(const YAML::Node& node, std::multimap& connect) { for (unsigned i = 0; i < node.size(); i++) { std::string left; node[i]["left"] >> left; YAML::Node rights = node[i]["right"]; - for (unsigned i = 0; i < rights.size(); i++) { + for (unsigned i = 0; i < rights.size(); i++) + { std::string right; - if (rights[i].Type() == YAML::NodeType::Map) { + if (rights[i].Type() == YAML::NodeType::Map) + { rights[i].begin()->first >> right; } - else { + else + { rights[i] >> right; } - connect.insert({left, right}); + connect.insert({ left, right }); } } } -void operator>>(const YAML::Node & node, std::vector & filters) +void operator>>(const YAML::Node& node, std::vector& filters) { - for (unsigned i = 0; i < node.size(); i++) { + for 
(unsigned i = 0; i < node.size(); i++) + { std::string left; node[i]["left"] >> left; YAML::Node rights = node[i]["right"]; - for (unsigned i = 0; i < rights.size(); i++) { - if (rights[i].Type() == YAML::NodeType::Map) { + for (unsigned i = 0; i < rights.size(); i++) + { + if (rights[i].Type() == YAML::NodeType::Map) + { ParamManager::FilterRawData filter; filter.input = left; rights[i].begin()->first >> filter.output; @@ -166,12 +167,12 @@ void operator>>(const YAML::Node& node, bool& val) val = node.as(); } -void operator>>(const YAML::Node & node, int & val) +void operator>>(const YAML::Node& node, int& val) { val = node.as(); } -void operator>>(const YAML::Node & node, float & val) +void operator>>(const YAML::Node& node, float& val) { val = node.as(); } @@ -218,12 +219,9 @@ void ParamManager::print() const // Pring Common Info slog::info << "Common:" << slog::endl; slog::info << "\tcamera_topic: " << common_.camera_topic << slog::endl; - slog::info << "\tcustom_cpu_library: " << common_.custom_cpu_library - << slog::endl; - slog::info << "\tcustom_cldnn_library: " << common_.custom_cldnn_library - << slog::endl; - slog::info << "\tenable_performance_count: " - << common_.enable_performance_count << slog::endl; + slog::info << "\tcustom_cpu_library: " << common_.custom_cpu_library << slog::endl; + slog::info << "\tcustom_cldnn_library: " << common_.custom_cldnn_library << slog::endl; + slog::info << "\tenable_performance_count: " << common_.enable_performance_count << slog::endl; } void ParamManager::parse(std::string path) @@ -251,8 +249,7 @@ std::vector ParamManager::getPipelineNames() const return names; } -ParamManager::PipelineRawData ParamManager::getPipeline( - const std::string& name) const +ParamManager::PipelineRawData ParamManager::getPipeline(const std::string& name) const { for (auto& p : pipelines_) { @@ -263,4 +260,4 @@ ParamManager::PipelineRawData ParamManager::getPipeline( } throw std::logic_error("No parameters found for pipeline [" + name + 
"]"); } -} // namespace Params \ No newline at end of file +} // namespace Params \ No newline at end of file From 7f175e35f2d1d1ed5427bf482a412ff656c215cf Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Mon, 22 Feb 2021 22:15:58 +0800 Subject: [PATCH 41/63] Fix some bugs --- README.md | 4 ++-- .../dynamic_vino_lib/inferences/face_detection.h | 1 + .../inferences/person_reidentification.h | 1 + .../include/dynamic_vino_lib/inputs/image_topic.h | 10 +++++----- .../dynamic_vino_lib/inputs/realsense_camera_topic.h | 1 + .../include/dynamic_vino_lib/outputs/base_output.h | 4 ++-- .../dynamic_vino_lib/outputs/ros_service_output.h | 5 +++-- dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h | 1 - dynamic_vino_lib/src/inputs/image_topic.cpp | 2 +- dynamic_vino_lib/src/outputs/ros_service_output.cpp | 4 ++-- .../src/services/frame_processing_server.cpp | 3 ++- sample/src/image_object_client.cpp | 6 +++--- sample/src/image_object_server.cpp | 2 +- 13 files changed, 24 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index e6a40aba..e0056607 100644 --- a/README.md +++ b/README.md @@ -89,9 +89,9 @@ Currently, the inference feature list is supported: ### Service - Object Detection Service: -```/detect_object``` ([object_msgs::DetectObjectSrv](https://github.com/intel/object_msgs/blob/master/srv/DetectObjectSrv.srv)) +```/detect_object``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) - Face Detection Service: -```/detect_face``` ([object_msgs::DetectObjectSrv](https://github.com/intel/object_msgs/blob/master/srv/DetectObjectSrv.srv)) +```/detect_face``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) - Age & Gender Detection Service: ```/detect_age_gender``` ([people_msgs::AgeGender](https://github.com/intel/ros_openvino_toolkit/blob/master/people_msgs/srv/AgeGenderSrv.srv)) - Headpose Detection Service: diff --git 
a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h index b63ee76a..c24a64db 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.h @@ -32,6 +32,7 @@ #include "dynamic_vino_lib/engines/engine.h" #include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/object_detection.h" #include "dynamic_vino_lib/models/face_detection_model.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h index 051bd4f0..1caa9990 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.h @@ -24,6 +24,7 @@ #include "dynamic_vino_lib/models/person_reidentification_model.h" #include "dynamic_vino_lib/engines/engine.h" #include "dynamic_vino_lib/inferences/base_inference.h" +#include "dynamic_vino_lib/inferences/base_reidentification.h" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h index 28d4e405..a09cee76 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h @@ -20,12 +20,12 @@ #ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ #define DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ -#include +#include #include -#include +#include #include #include "dynamic_vino_lib/utils/mutex_counter.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "dynamic_vino_lib/inputs/base_input.h" namespace Input { @@ -36,7 
+36,7 @@ namespace Input class ImageTopic : public BaseInputDevice { public: - ImageTopic(rclcpp::Node::SharedPtr node = nullptr); + ImageTopic(boost::shared_ptr node = nullptr); bool initialize() override; bool initialize(size_t width, size_t height) override; bool read(cv::Mat* frame) override; @@ -46,7 +46,7 @@ class ImageTopic : public BaseInputDevice image_transport::Subscriber sub_; cv::Mat image_; MutexCounter image_count_; - rclcpp::Node::SharedPtr node_ = nullptr; + boost::shared_ptr node_ = nullptr; void cb(const sensor_msgs::ImageConstPtr& image_msg); }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h index e5d8ad0d..98d5ea6e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.h @@ -31,6 +31,7 @@ #include #include "dynamic_vino_lib/inputs/base_input.h" +#include "dynamic_vino_lib/inputs/image_topic.h" namespace Input { diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index ab5f0d2a..8b5e2304 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -141,10 +141,10 @@ class BaseOutput virtual void handleOutput() = 0; void setPipeline(Pipeline* const pipeline); - virtual void setServiceResponse(boost::shared_ptr response) + virtual void setServiceResponse(boost::shared_ptr response) { } - virtual void setServiceResponseForFace(boost::shared_ptr response) + virtual void setServiceResponseForFace(boost::shared_ptr response) { } virtual void setServiceResponse(boost::shared_ptr response) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h index 
d65e9e06..cb0c0de7 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -73,8 +74,8 @@ class RosServiceOutput : public RosTopicOutput } void clearData(); - void setServiceResponse(boost::shared_ptr response); - void setResponseForFace(boost::shared_ptr response); + void setServiceResponse(boost::shared_ptr response); + void setResponseForFace(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); void setServiceResponse(boost::shared_ptr response); diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h index 0120e7c6..f9597d6a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.h @@ -172,7 +172,6 @@ class Pipeline std::multimap next_; std::map> name_to_detection_map_; std::map> name_to_output_map_; - int total_inference_ = 0; std::set output_names_; int width_ = 0; int height_ = 0; diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/dynamic_vino_lib/src/inputs/image_topic.cpp index 8c4562cf..4ad2515e 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/dynamic_vino_lib/src/inputs/image_topic.cpp @@ -27,7 +27,7 @@ #define INPUT_TOPIC "/camera/color/image_raw" -Input::ImageTopic::ImageTopic(rclcpp::Node::SharedPtr node) : node_(node) +Input::ImageTopic::ImageTopic(boost::shared_ptr node) : node_(node) { } diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index b23dcd60..414b8c8d 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -24,7 +24,7 @@ #include "cv_bridge/cv_bridge.h" #include -void 
Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) +void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) { if (detected_objects_topic_ != nullptr && detected_objects_topic_->objects_vector.size() > 0) { @@ -40,7 +40,7 @@ void Outputs::RosServiceOutput::setServiceResponse(boost::shared_ptr response) +void Outputs::RosServiceOutput::setResponseForFace(boost::shared_ptr response) { if (faces_topic_ != nullptr && faces_topic_->objects_vector.size() > 0) { diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 08a1aca5..857d4ce6 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -98,7 +99,7 @@ bool FrameProcessingServer::cbService(ros::ServiceEvent; +template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; template class FrameProcessingServer; diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index c3a09957..2653620d 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -24,7 +24,7 @@ #include #include "opencv2/opencv.hpp" -#include +#include int main(int argc, char** argv) { @@ -40,9 +40,9 @@ int main(int argc, char** argv) } std::string image_path = argv[1]; - ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); + ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); - object_msgs::DetectObjectSrv srv; + object_msgs::DetectObject srv; srv.request.image_path = image_path; if (client.call(srv)) diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index feca1288..ed31b2b7 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -87,7 +87,7 @@ 
int main(int argc, char** argv) // ----- Parsing and validation of input args----------------------- auto node = - std::make_shared>(service_name, FLAGS_config); + std::make_shared>(service_name, FLAGS_config); slog::info << "Waiting for service request..." << slog::endl; ros::spin(); From 1c13e228376f48f457f71cf442cd824a6cddfd2b Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Tue, 23 Feb 2021 00:51:18 +0800 Subject: [PATCH 42/63] Add missing header mutex_counter.hpp --- .../dynamic_vino_lib/utils/mutex_counter.hpp | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp new file mode 100644 index 00000000..5409b754 --- /dev/null +++ b/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp @@ -0,0 +1,57 @@ +// Copyright (c) 2018-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// @brief a utility class for mutex counter (Thread Safe). 
+// @file mutex_counter.hpp +// + +#ifndef DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#define DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ + +#include + +class MutexCounter +{ +public: + explicit MutexCounter(int init_counter = 0) + { + std::lock_guard lk(counter_mutex_); + counter_ = init_counter; + } + + void increaseCounter() + { + std::lock_guard lk(counter_mutex_); + ++counter_; + } + + void decreaseCounter() + { + std::lock_guard lk(counter_mutex_); + --counter_; + } + + int get() + { + return counter_; + } + +private: + std::atomic counter_; + std::mutex counter_mutex_; + std::condition_variable cv_; +}; + +#endif // DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ \ No newline at end of file From a74667a8bda54f1c0ccdb5445caf3f64e9c12946 Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Thu, 25 Feb 2021 13:47:50 +0800 Subject: [PATCH 43/63] Modified image_topic to fix ROS 1 --- dynamic_vino_lib/CMakeLists.txt | 10 ++++---- .../dynamic_vino_lib/inputs/image_topic.h | 13 +++++----- .../dynamic_vino_lib/utils/mutex_counter.hpp | 1 + dynamic_vino_lib/src/inputs/image_topic.cpp | 25 ++++++------------- 4 files changed, 21 insertions(+), 28 deletions(-) diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index f0acaec8..f9c17ae4 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -188,11 +188,11 @@ add_library(${PROJECT_NAME} SHARED src/outputs/ros_service_output.cpp ) -#add_dependencies(${PROJECT_NAME} -# ${${PROJECT_NAME}_EXPORTED_TARGETS} -# ${catkin_EXPORTED_TARGETS} -# "object_msgs" -#) +add_dependencies(${PROJECT_NAME} + ${${PROJECT_NAME}_EXPORTED_TARGETS} + ${catkin_EXPORTED_TARGETS} + "object_msgs" +) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h index a09cee76..97e15a54 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.h @@ -20,12 +20,15 @@ #ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ #define DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ -#include +#include "dynamic_vino_lib/inputs/base_input.h" +#include "dynamic_vino_lib/utils/mutex_counter.hpp" +#include +#include +#include +#include #include +#include #include -#include -#include "dynamic_vino_lib/utils/mutex_counter.hpp" -#include "dynamic_vino_lib/inputs/base_input.h" namespace Input { @@ -36,7 +39,6 @@ namespace Input class ImageTopic : public BaseInputDevice { public: - ImageTopic(boost::shared_ptr node = nullptr); bool initialize() override; bool initialize(size_t width, size_t height) override; bool read(cv::Mat* frame) override; @@ -46,7 +48,6 @@ class ImageTopic : public BaseInputDevice image_transport::Subscriber sub_; cv::Mat image_; MutexCounter image_count_; - boost::shared_ptr node_ = nullptr; void cb(const sensor_msgs::ImageConstPtr& image_msg); }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp index 5409b754..e1735576 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp @@ -20,6 +20,7 @@ #ifndef DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ #define DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#include #include class MutexCounter diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/dynamic_vino_lib/src/inputs/image_topic.cpp index 4ad2515e..d6d02613 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/dynamic_vino_lib/src/inputs/image_topic.cpp @@ -20,36 +20,26 @@ */ #include "dynamic_vino_lib/inputs/realsense_camera_topic.h" -//#include #include "dynamic_vino_lib/slog.h" #include +#include #include #define INPUT_TOPIC "/camera/color/image_raw" 
-Input::ImageTopic::ImageTopic(boost::shared_ptr node) : node_(node) -{ -} - bool Input::ImageTopic::initialize() { - slog::debug << "before Image Topic init" << slog::endl; - - if (node_ == nullptr) - { - throw std::runtime_error("Image Topic is not instancialized because of no parent node."); - return false; - } - auto qos = rclcpp::QoS(rclcpp::KeepLast(1)).best_effort(); - sub_ = node_->create_subscription(INPUT_TOPIC, qos, - std::bind(&ImageTopic::cb, this, std::placeholders::_1)); + slog::info << "before Image Topic init" << slog::endl; + std::shared_ptr it = std::make_shared(nh_); + sub_ = it->subscribe((INPUT_TOPIC, 1, &ImageTopic::cb, this)); return true; } bool Input::ImageTopic::initialize(size_t width, size_t height) { - slog::warn << "BE CAREFUL: nothing for resolution is done when calling initialize(width, height)" + slog::warn << "BE CAREFUL: nothing for resolution is done when calling " + "initialize(width, height)" << " for Image Topic" << slog::endl; return initialize(); } @@ -69,6 +59,7 @@ void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) bool Input::ImageTopic::read(cv::Mat* frame) { + ros::spinOnce(); if (image_count_.get() < 0 || image_.empty()) { slog::debug << "No data received in CameraTopic instance" << slog::endl; @@ -76,7 +67,7 @@ bool Input::ImageTopic::read(cv::Mat* frame) } *frame = image_; - lockHeader(); + // lockHeader(); image_count_.decreaseCounter(); return true; } From ee95b9c6810cc4e22768d96fb97d71054acfa19c Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Thu, 25 Feb 2021 16:20:18 +0800 Subject: [PATCH 44/63] Fixed some bugs --- .../include/dynamic_vino_lib/outputs/image_window_output.h | 2 -- dynamic_vino_lib/src/services/frame_processing_server.cpp | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h index 
e8819d9d..f90dfc9d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.h @@ -148,8 +148,6 @@ class ImageWindowOutput : public BaseOutput */ cv::Mat getRotationTransform(double yaw, double pitch, double roll); - void mergeMask(const std::vector&); - struct OutputData { std::string desc; diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 857d4ce6..cad10c4f 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "dynamic_vino_lib/services/frame_processing_server.h" #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +32,7 @@ #include "dynamic_vino_lib/pipeline.h" #include "dynamic_vino_lib/inputs/base_input.h" #include "dynamic_vino_lib/inputs/image_input.h" +#include "dynamic_vino_lib/services/frame_processing_server.h" #include "dynamic_vino_lib/slog.h" namespace vino_service From a11dda3171ddf0d9050a24f900aa0156c416bc64 Mon Sep 17 00:00:00 2001 From: caohuiyan <77640335@qq.com> Date: Thu, 25 Feb 2021 19:07:36 +0800 Subject: [PATCH 45/63] BugFix --- .../include/dynamic_vino_lib/outputs/base_output.h | 12 ++++++++++-- .../src/services/frame_processing_server.cpp | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 8b5e2304..499d5917 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -40,6 +40,14 @@ #include 
"dynamic_vino_lib/services/frame_processing_server.h" #include "opencv2/opencv.hpp" +#include +#include +#include +#include +#include +#include +#include + class Pipeline; namespace Outputs { @@ -141,10 +149,10 @@ class BaseOutput virtual void handleOutput() = 0; void setPipeline(Pipeline* const pipeline); - virtual void setServiceResponse(boost::shared_ptr response) + virtual void setServiceResponse(boost::shared_ptr response) { } - virtual void setServiceResponseForFace(boost::shared_ptr response) + virtual void setServiceResponseForFace(boost::shared_ptr response) { } virtual void setServiceResponse(boost::shared_ptr response) diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 857d4ce6..8f354716 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -44,7 +44,7 @@ FrameProcessingServer::FrameProcessingServer(const std::string& service_name, initService(); } -template +template void FrameProcessingServer::initService() { std::cout << "!!!!" 
<< config_path_ << std::endl; From 2374622b2c5387812099fff97eed558f5dff816f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E6=98=BE=E6=A3=AE?= <1494089474@qq.com> Date: Fri, 26 Feb 2021 13:45:35 +0800 Subject: [PATCH 46/63] Update image_topic.cpp --- dynamic_vino_lib/src/inputs/image_topic.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/dynamic_vino_lib/src/inputs/image_topic.cpp index d6d02613..f24bdc0f 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/dynamic_vino_lib/src/inputs/image_topic.cpp @@ -59,7 +59,6 @@ void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) bool Input::ImageTopic::read(cv::Mat* frame) { - ros::spinOnce(); if (image_count_.get() < 0 || image_.empty()) { slog::debug << "No data received in CameraTopic instance" << slog::endl; From e20504be7288cec29788938e919e77c559985786 Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Fri, 26 Feb 2021 13:54:04 +0800 Subject: [PATCH 47/63] Fixed some bugs --- .../include/dynamic_vino_lib/outputs/base_output.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h index 8b5e2304..d1c8e63a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.h @@ -52,6 +52,9 @@ namespace Outputs class BaseOutput { public: + explicit BaseOutput() + { + } explicit BaseOutput(std::string output_name) : output_name_(output_name) { } From e128781c85ae1e43710713258877e5f1ccfbf974 Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Sat, 27 Feb 2021 17:09:19 +0800 Subject: [PATCH 48/63] Modified image_object_client.cpp: image_path --> image_paths --- sample/src/image_object_client.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sample/src/image_object_client.cpp 
b/sample/src/image_object_client.cpp index 2653620d..3b408dbe 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -43,7 +43,7 @@ int main(int argc, char** argv) ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); object_msgs::DetectObject srv; - srv.request.image_path = image_path; + srv.request.image_paths = image_path; if (client.call(srv)) { @@ -60,7 +60,8 @@ int main(int argc, char** argv) << srv.response.objects[0].objects_vector[i].object.probability * 100 << "%"; ROS_INFO("%d: object: %s", i, srv.response.objects[0].objects_vector[i].object.object_name.c_str()); ROS_INFO("prob: %f", srv.response.objects[0].objects_vector[i].object.probability); - ROS_INFO("location: (%d, %d, %d, %d)", srv.response.objects[0].objects_vector[i].roi.x_offset, + ROS_INFO("location: (%d, %d, %d, %d)", + srv.response.objects[0].objects_vector[i].roi.x_offset, srv.response.objects[0].objects_vector[i].roi.y_offset, srv.response.objects[0].objects_vector[i].roi.width, srv.response.objects[0].objects_vector[i].roi.height); From 52fb7ca51e1316a520b90a519156e8c33b8ba117 Mon Sep 17 00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Sun, 28 Feb 2021 15:10:57 +0800 Subject: [PATCH 49/63] Dependency optimized --- .../services/frame_processing_server.h | 1 + .../src/services/frame_processing_server.cpp | 12 ++++-------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h index 6924ebec..2a76a0dd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp 
b/dynamic_vino_lib/src/services/frame_processing_server.cpp index 010aa3ad..c8e2bfc9 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -12,13 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include -#include -#include -#include -#include -#include #include #include #include @@ -28,11 +21,12 @@ #include #include +#include "dynamic_vino_lib/services/frame_processing_server.h" + #include "dynamic_vino_lib/pipeline_manager.h" #include "dynamic_vino_lib/pipeline.h" #include "dynamic_vino_lib/inputs/base_input.h" #include "dynamic_vino_lib/inputs/image_input.h" -#include "dynamic_vino_lib/services/frame_processing_server.h" #include "dynamic_vino_lib/slog.h" namespace vino_service @@ -79,9 +73,11 @@ bool FrameProcessingServer::cbService(ros::ServiceEventsecond.params.name.c_str()]; auto input = p.pipeline->getInputDevice(); + Input::Config config; config.path = event.getRequest().image_path; input->config(config); + p.pipeline->runOnce(); auto output_handle = p.pipeline->getOutputHandle(); From 6d41656a5a7b23d493f1647767065e999ecf4a07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E6=98=BE=E6=A3=AE?= <1494089474@qq.com> Date: Sun, 28 Feb 2021 15:31:21 +0800 Subject: [PATCH 50/63] Update image_object_client.cpp --- sample/src/image_object_client.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 3b408dbe..37e4817c 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -43,7 +43,7 @@ int main(int argc, char** argv) ros::ServiceClient client = n.serviceClient("/openvino_toolkit/service"); object_msgs::DetectObject srv; - srv.request.image_paths = image_path; + srv.request.image_path = image_path; if (client.call(srv)) { From 7ddcd0d8f561c5e0c4b0bca026d8074afcf048cc Mon Sep 17 
00:00:00 2001 From: Corsair-cxs <1494089474@qq.com> Date: Mon, 1 Mar 2021 21:26:54 +0800 Subject: [PATCH 51/63] add frame_processing_object_server.cpp --- dynamic_vino_lib/Doxyfile | 2427 ----------------- .../include/dynamic_vino_lib/common.h | 28 +- .../frame_processing_object_server.cpp | 103 + .../src/services/frame_processing_server.cpp | 1 - sample/src/image_object_server.cpp | 2 +- 5 files changed, 118 insertions(+), 2443 deletions(-) delete mode 100644 dynamic_vino_lib/Doxyfile create mode 100644 dynamic_vino_lib/src/services/frame_processing_object_server.cpp diff --git a/dynamic_vino_lib/Doxyfile b/dynamic_vino_lib/Doxyfile deleted file mode 100644 index 032afa3b..00000000 --- a/dynamic_vino_lib/Doxyfile +++ /dev/null @@ -1,2427 +0,0 @@ -# Doxyfile 1.8.11 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. 
- -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "My Project" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. 
- -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. 
Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. 
- -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. 
- -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. 
For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. 
-# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. 
If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. 
If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. 
Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. 
- -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. 
- -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. 
Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. 
- -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. 
- -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. 
See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. 
- -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. 
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f, *.for, *.tcl, -# *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = ./thirdparty/** - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. 
-# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = YES - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. 
If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
- -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = YES - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. 
Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. 
- -VERBATIM_HEADERS = YES - -# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the -# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the -# cost of reduced performance. This can be particularly helpful with template -# rich C++ code for which doxygen's built-in parser lacks the necessary type -# information. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse-libclang=ON option for CMake. -# The default value is: NO. - -CLANG_ASSISTED_PARSING = NO - -# If clang assisted parsing is enabled you can provide the compiler with command -# line options that you would normally use when invoking the compiler. Note that -# the include paths will already be set by doxygen for the files and directories -# specified with INPUT and INCLUDE_PATH. -# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. - -CLANG_OPTIONS = - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. 
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. 
Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. 
The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
- -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. 
- -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. 
For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use + S -# (what the is depends on the OS and browser, but it is typically -# , /