diff --git a/README.md b/README.md
index 9058b1e..b8ebb7c 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,14 @@ The `inference_pkg_launch.py`, included in this package, provides an example dem
 |`load_model`|`LoadModelSrv`|Service that is responsible for setting pre-processing algorithm and inference tasks for the specific type of model loaded.|
 |`inference_state`|`InferenceStateSrv`|Service that is responsible for starting and stopping inference tasks.|
+
+### Parameters
+
+| Parameter name | Description |
+| ---------------- | ----------- |
+| `device` | String that selects the compute device used for inference: `CPU`, `GPU` or `MYRIAD`. Default is `CPU`. `MYRIAD` targets the Intel Neural Compute Stick 2. |
+
+
 
 ## Resources
 
 * [Getting started with AWS DeepRacer OpenSource](https://github.com/aws-deepracer/aws-deepracer-launcher/blob/main/getting-started.md)
diff --git a/inference_pkg/include/inference_pkg/inference_base.hpp b/inference_pkg/include/inference_pkg/inference_base.hpp
index 0dd4208..6cddcb1 100644
--- a/inference_pkg/include/inference_pkg/inference_base.hpp
+++ b/inference_pkg/include/inference_pkg/inference_base.hpp
@@ -32,8 +32,10 @@ namespace InferTask {
         /// @returns True if model loaded successfully, false otherwise
         /// @param artifactPath Path to the model artifact.
         /// @param imgProcess Pointer to the image processing algorithm
+        /// @param device String naming the compute device (CPU, GPU or MYRIAD)
         virtual bool loadModel(const char* artifactPath,
-                               std::shared_ptr<ImgProcessBase> imgProcess) = 0;
+                               std::shared_ptr<ImgProcessBase> imgProcess,
+                               std::string device) = 0;
         /// Starts the inference task until stopped.
         virtual void startInference() = 0;
         /// Stops the inference task if running.
diff --git a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
index 47738f0..4295950 100644
--- a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
+++ b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
@@ -34,7 +34,8 @@ namespace IntelInferenceEngine {
         RLInferenceModel(std::shared_ptr<rclcpp::Node> inferenceNodePtr, const std::string &sensorSubName);
         virtual ~RLInferenceModel();
         virtual bool loadModel(const char* artifactPath,
-                               std::shared_ptr<InferTask::ImgProcessBase> imgProcess) override;
+                               std::shared_ptr<InferTask::ImgProcessBase> imgProcess,
+                               std::string device) override;
         virtual void startInference() override;
         virtual void stopInference() override;
         /// Callback method to retrieve sensor data.
diff --git a/inference_pkg/src/inference_node.cpp b/inference_pkg/src/inference_node.cpp
index 56e71a5..12628fd 100644
--- a/inference_pkg/src/inference_node.cpp
+++ b/inference_pkg/src/inference_node.cpp
@@ -41,11 +41,18 @@ namespace InferTask {
     /// Class that will manage the inference task. In particular it will start and stop the
     /// inference tasks and feed the inference task the sensor data.
     /// @param nodeName Reference to the string containing name of the node.
+    /// The compute device (CPU, GPU or MYRIAD) is read from the ROS parameter `device`.
     public:
         InferenceNodeMgr(const std::string & nodeName)
-        : Node(nodeName)
+        : Node(nodeName),
+          deviceName_("CPU")
         {
             RCLCPP_INFO(this->get_logger(), "%s started", nodeName.c_str());
+
+            this->declare_parameter("device", deviceName_);
+            // Device name; OpenVINO supports CPU, GPU and MYRIAD
+            deviceName_ = this->get_parameter("device").as_string();
+
             loadModelServiceCbGrp_ = this->create_callback_group(rclcpp::callback_group::CallbackGroupType::MutuallyExclusive);
             loadModelService_ = this->create_service<deepracer_interfaces_pkg::srv::LoadModelSrv>("load_model",
                                                                 std::bind(&InferTask::InferenceNodeMgr::LoadModelHdl,
@@ -129,7 +136,7 @@ namespace InferTask {
                 RCLCPP_ERROR(this->get_logger(), "Unknown inference task");
                 return;
             }
-            itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second);
+            itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second, deviceName_);
             res->error = 0;
         }
     }
@@ -149,6 +156,9 @@ namespace InferTask {
         /// List of available pre-processing algorithms.
         std::unordered_map> preProcessList_;
         /// Reference to the node handler.
+
+        /// Compute device type.
+        std::string deviceName_;
     };
 }
 
diff --git a/inference_pkg/src/intel_inference_eng.cpp b/inference_pkg/src/intel_inference_eng.cpp
index e9e2c40..6c65277 100644
--- a/inference_pkg/src/intel_inference_eng.cpp
+++ b/inference_pkg/src/intel_inference_eng.cpp
@@ -201,7 +201,8 @@ namespace IntelInferenceEngine {
     }
     bool RLInferenceModel::loadModel(const char* artifactPath,
-                                     std::shared_ptr<InferTask::ImgProcessBase> imgProcess) {
+                                     std::shared_ptr<InferTask::ImgProcessBase> imgProcess,
+                                     std::string device) {
         if (doInference_) {
             RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model");
             return false;
         }
@@ -214,7 +215,7 @@ namespace IntelInferenceEngine {
         imgProcess_ = imgProcess;
         // Load the model
         try {
-            inferRequest_ = setMultiHeadModel(artifactPath, "CPU", core_, inputNamesArr_,
+            inferRequest_ = setMultiHeadModel(artifactPath, device, core_, inputNamesArr_,
                                               outputName_, InferenceEngine::Precision::FP32,
                                               InferenceEngine::Precision::FP32, inferenceNode);
             for(size_t i = 0; i != inputNamesArr_.size(); ++i) {
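For reviewers trying out this change, here is a minimal launch sketch showing how the new `device` parameter could be set. The package and executable names (`inference_pkg`, `inference_node`) are assumptions based on this repository's `inference_pkg_launch.py`; check the actual launch file for the real names.

```python
# Hypothetical launch sketch: start the inference node with the new "device"
# parameter set to MYRIAD (Intel Neural Compute Stick 2).
# The executable name "inference_node" is assumed; see inference_pkg_launch.py.
from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    return LaunchDescription([
        Node(
            package="inference_pkg",
            executable="inference_node",
            parameters=[{"device": "MYRIAD"}],  # CPU (default), GPU or MYRIAD
        )
    ])
```

Equivalently, the parameter can be passed on the command line, e.g. `ros2 run inference_pkg inference_node --ros-args -p device:=MYRIAD` (again assuming that executable name). If the parameter is not set, the node falls back to `CPU`, as declared in the constructor above.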