Exposing device selection as parameter #3

Status: Open. Wants to merge 1 commit into base: main.
8 changes: 8 additions & 0 deletions README.md
@@ -134,6 +134,14 @@ The `inference_pkg_launch.py`, included in this package, provides an example dem
|`load_model`|`LoadModelSrv`|Service that is responsible for setting pre-processing algorithm and inference tasks for the specific type of model loaded.|
|`inference_state`|`InferenceStateSrv`|Service that is responsible for starting and stopping inference tasks.|


### Parameters

| Parameter name | Description |
| ---------------- | ----------- |
| `device` | Compute device used by the OpenVINO Inference Engine: `CPU`, `GPU`, or `MYRIAD`. Default is `CPU`. `MYRIAD` selects the Intel Neural Compute Stick 2. See the usage example below. |
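
The `device` value can be passed with the standard ROS 2 parameter syntax when the node is started. A minimal sketch, assuming the executable is named `inference_node` (check `inference_pkg_launch.py` for the exact executable name and any launch-time wiring in your setup):

```bash
ros2 run inference_pkg inference_node --ros-args -p device:=MYRIAD
```

When launching through the package launch file, the same value can instead be supplied via the `parameters` list of the corresponding launch `Node` action.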


## Resources

* [Getting started with AWS DeepRacer OpenSource](https://github.com/aws-deepracer/aws-deepracer-launcher/blob/main/getting-started.md)
4 changes: 3 additions & 1 deletion inference_pkg/include/inference_pkg/inference_base.hpp
@@ -32,8 +32,10 @@ namespace InferTask {
/// @returns True if model loaded successfully, false otherwise
/// @param artifactPath Path to the model artifact.
/// @param imgProcess Pointer to the image processing algorithm
/// @param device Name of the compute device (CPU, GPU or MYRIAD)
virtual bool loadModel(const char* artifactPath,
std::shared_ptr<ImgProcessBase> imgProcess) = 0;
std::shared_ptr<ImgProcessBase> imgProcess,
std::string device) = 0;
/// Starts the inference task until stopped.
virtual void startInference() = 0;
/// Stops the inference task if running.
3 changes: 2 additions & 1 deletion inference_pkg/include/inference_pkg/intel_inference_eng.hpp
@@ -34,7 +34,8 @@ namespace IntelInferenceEngine {
RLInferenceModel(std::shared_ptr<rclcpp::Node> inferenceNodePtr, const std::string &sensorSubName);
virtual ~RLInferenceModel();
virtual bool loadModel(const char* artifactPath,
std::shared_ptr<InferTask::ImgProcessBase> imgProcess) override;
std::shared_ptr<InferTask::ImgProcessBase> imgProcess,
std::string device) override;
virtual void startInference() override;
virtual void stopInference() override;
/// Callback method to retrieve sensor data.
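For clarity, the snippet below sketches a hypothetical call site for the new three-argument `loadModel()`. Only the `RLInferenceModel` signatures come from the headers above; the wrapper function, topic name, and model path are placeholders that do not appear in this PR.

```cpp
// Hypothetical usage sketch of the revised loadModel() signature.
#include <memory>
#include <string>
#include "rclcpp/rclcpp.hpp"
#include "inference_pkg/intel_inference_eng.hpp"

void loadWithDevice(std::shared_ptr<rclcpp::Node> node,
                    std::shared_ptr<InferTask::ImgProcessBase> imgProcess)
{
    // Constructor signature as declared in intel_inference_eng.hpp; the topic name is a placeholder.
    auto model = std::make_shared<IntelInferenceEngine::RLInferenceModel>(node, "camera_topic");
    // The compute device is now passed explicitly instead of being hard-coded to "CPU".
    if (model->loadModel("/path/to/model.xml", imgProcess, "MYRIAD")) {
        model->startInference();
    }
}
```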
14 changes: 12 additions & 2 deletions inference_pkg/src/inference_node.cpp
@@ -41,11 +41,18 @@ namespace InferTask {
/// Class that will manage the inference task. In particular it will start and stop the
/// inference tasks and feed the inference task the sensor data.
/// @param nodeName Reference to the string containing name of the node.
/// The compute device (CPU, GPU or MYRIAD) is read from the "device" ROS parameter.
public:
InferenceNodeMgr(const std::string & nodeName)
: Node(nodeName)
: Node(nodeName),
deviceName_("CPU")
{
RCLCPP_INFO(this->get_logger(), "%s started", nodeName.c_str());

this->declare_parameter<std::string>("device", deviceName_);
// Device name; OpenVINO supports CPU, GPU and MYRIAD
deviceName_ = this->get_parameter("device").as_string();

loadModelServiceCbGrp_ = this->create_callback_group(rclcpp::callback_group::CallbackGroupType::MutuallyExclusive);
loadModelService_ = this->create_service<deepracer_interfaces_pkg::srv::LoadModelSrv>("load_model",
std::bind(&InferTask::InferenceNodeMgr::LoadModelHdl,
@@ -129,7 +136,7 @@ namespace InferTask {
RCLCPP_ERROR(this->get_logger(), "Unknown inference task");
return;
}
itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second);
itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second, deviceName_);
res->error = 0;
}
}
@@ -149,6 +156,9 @@
/// List of available pre-processing algorithms.
std::unordered_map<int, std::shared_ptr<ImgProcessBase>> preProcessList_;
/// Reference to the node handler.

/// Compute device type.
std::string deviceName_;
};
}

5 changes: 3 additions & 2 deletions inference_pkg/src/intel_inference_eng.cpp
@@ -201,7 +201,8 @@ namespace IntelInferenceEngine {
}

bool RLInferenceModel::loadModel(const char* artifactPath,
std::shared_ptr<InferTask::ImgProcessBase> imgProcess) {
std::shared_ptr<InferTask::ImgProcessBase> imgProcess,
std::string device) {
if (doInference_) {
RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model");
return false;
@@ -214,7 +215,7 @@
imgProcess_ = imgProcess;
// Load the model
try {
inferRequest_ = setMultiHeadModel(artifactPath, "CPU", core_, inputNamesArr_,
inferRequest_ = setMultiHeadModel(artifactPath, device, core_, inputNamesArr_,
outputName_, InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP32, inferenceNode);
for(size_t i = 0; i != inputNamesArr_.size(); ++i) {
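For context, the device string is ultimately handed to the OpenVINO Inference Engine, which picks the matching plugin when the network is loaded. The body of `setMultiHeadModel()` is not part of this diff, so the following is only a minimal sketch of the underlying calls using the classic `InferenceEngine::Core` API, not the helper's actual implementation.

```cpp
// Minimal sketch: load a model IR onto the device named by the ROS parameter.
// The function name and artifact path handling are placeholders, not code from this PR.
#include <string>
#include <inference_engine.hpp>

InferenceEngine::InferRequest loadOnDevice(const std::string &artifactPath,
                                           const std::string &device)
{
    InferenceEngine::Core core;
    // Read the model IR (.xml with its .bin alongside).
    InferenceEngine::CNNNetwork network = core.ReadNetwork(artifactPath);
    // "CPU", "GPU" or "MYRIAD" selects the corresponding OpenVINO plugin here.
    InferenceEngine::ExecutableNetwork execNetwork = core.LoadNetwork(network, device);
    return execNetwork.CreateInferRequest();
}
```

Passing the device through to `LoadNetwork()` is what lets the new `GPU` and `MYRIAD` parameter values take effect instead of the previously hard-coded `CPU`.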