diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc b/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
index 8a2f5a63b7..d98218d647 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
@@ -305,6 +305,7 @@ ArmNNCore::makeNetwork (const GstTensorFilterProperties *prop)
 {
   std::vector<std::string> output_vec;
   std::map<std::string, armnn::TensorShape> input_map;
+  GstTensorInfo *_info;
 
   if (g_str_has_suffix (model_path, ".tflite")) {
     return makeTfLiteNetwork ();
@@ -314,33 +315,35 @@ ArmNNCore::makeNetwork (const GstTensorFilterProperties *prop)
   if (prop->output_meta.num_tensors != 0) {
     output_vec.reserve (prop->output_meta.num_tensors);
     for (unsigned int i = 0; i < prop->output_meta.num_tensors; i++) {
-      if (prop->output_meta.info[i].name == NULL) {
+      _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->output_meta, i);
+
+      if (_info->name == NULL) {
         /** clear output vec in case of error */
         output_vec.clear ();
         output_vec.shrink_to_fit ();
         break;
       }
-      output_vec.push_back (prop->output_meta.info[i].name);
+      output_vec.push_back (_info->name);
     }
   }
 
   /** Create input map with name and data shape */
   for (unsigned int i = 0; i < prop->input_meta.num_tensors; i++) {
-    if (prop->input_meta.info[i].name == NULL) {
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);
+
+    if (_info->name == NULL) {
       /** clear input map in case of error */
       input_map.clear ();
       break;
     }
 
     /** Set dimension only if valid */
-    if (gst_tensor_dimension_is_valid (prop->input_meta.info[i].dimension)) {
+    if (gst_tensor_dimension_is_valid (_info->dimension)) {
       unsigned int rev_dim[NNS_TENSOR_RANK_LIMIT];
-      std::reverse_copy (prop->input_meta.info[i].dimension,
-          prop->input_meta.info[i].dimension + NNS_TENSOR_RANK_LIMIT, rev_dim);
-      input_map[prop->input_meta.info[i].name]
-          = armnn::TensorShape (NNS_TENSOR_RANK_LIMIT, rev_dim);
+      std::reverse_copy (_info->dimension, _info->dimension + NNS_TENSOR_RANK_LIMIT, rev_dim);
+      input_map[_info->name] = armnn::TensorShape (NNS_TENSOR_RANK_LIMIT, rev_dim);
     } else {
-      input_map[prop->input_meta.info[i].name] = armnn::TensorShape ();
+      input_map[_info->name] = armnn::TensorShape ();
     }
   }
 
@@ -507,7 +510,7 @@ ArmNNCore::setTensorProp (const std::vector<armnn::BindingPointInfo> &bindings,
   for (unsigned int idx = 0; idx < bindings.size (); ++idx) {
     armnn::TensorInfo arm_info = bindings[idx].second;
     armnn::TensorShape arm_shape;
-    GstTensorInfo *gst_info = &tensorMeta->info[idx];
+    GstTensorInfo *gst_info = gst_tensors_info_get_nth_info (tensorMeta, idx);
 
     /* Use binding id as a name, if no name already exists */
     if (gst_info->name == NULL) {
@@ -541,7 +544,7 @@ ArmNNCore::setTensorProp (const std::vector<armnn::BindingPointInfo> &bindings,
     }
 
     for (int i = NNS_TENSOR_RANK_LIMIT - 1; i >= num_dim; i--) {
-      gst_info->dimension[i] = 1;
+      gst_info->dimension[i] = 0;
     }
   }
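Note: the armnn hunks above establish the pattern applied throughout this patch: direct info->info[i] indexing is replaced with the gst_tensors_info_get_nth_info () accessor, which keeps working when a GstTensorsInfo carries more tensors than its statically sized info[] array. A minimal sketch of the iteration idiom, assuming the accessor is declared in nnstreamer_plugin_api.h (the NULL guard is an illustrative assumption, not part of this patch):

#include <nnstreamer_plugin_api.h> /* assumed to declare gst_tensors_info_get_nth_info () */

/* Sketch: walk tensor metadata through the accessor instead of info->info[i]. */
static void
print_tensor_names (GstTensorsInfo *meta)
{
  for (guint i = 0; i < meta->num_tensors; i++) {
    GstTensorInfo *nth = gst_tensors_info_get_nth_info (meta, i);

    if (nth == NULL || nth->name == NULL) /* defensive guard (assumption) */
      continue;
    g_print ("tensor[%u]: %s\n", i, nth->name);
  }
}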
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_caffe2.cc b/ext/nnstreamer/tensor_filter/tensor_filter_caffe2.cc
index 03b05432a9..fe3323c2b1 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_caffe2.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_caffe2.cc
@@ -150,12 +150,10 @@ Caffe2Core::init (const GstTensorFilterProperties *prop)
   return 0;
 }
 
-#define initializeTensor(type) \
-  do { \
-    ReinitializeTensor (inputTensor, \
-        { inputTensorMeta.info[i].dimension[3], inputTensorMeta.info[i].dimension[2], \
-            inputTensorMeta.info[i].dimension[1], inputTensorMeta.info[i].dimension[0] }, \
-        at::dtype<type> ().device (CPU)); \
+#define initializeTensor(type) \
+  do { \
+    ReinitializeTensor ( \
+        inputTensor, at::IntArrayRef (dims), at::dtype<type> ().device (CPU)); \
   } while (0);
 
 /**
@@ -164,14 +162,21 @@ Caffe2Core::init (const GstTensorFilterProperties *prop)
 int
 Caffe2Core::initInputTensor ()
 {
-  guint i;
+  GstTensorInfo *_info;
+  guint i, j, rank;
 
   inputTensorMap.clear ();
   for (i = 0; i < inputTensorMeta.num_tensors; i++) {
-    Tensor *inputTensor
-        = workSpace.CreateBlob (inputTensorMeta.info[i].name)->GetMutable<Tensor> ();
+    _info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
+    rank = gst_tensor_info_get_rank (_info);
 
-    switch (inputTensorMeta.info[i].type) {
+    Tensor *inputTensor = workSpace.CreateBlob (_info->name)->GetMutable<Tensor> ();
+    std::vector<int64_t> dims (rank);
+
+    for (j = 0; j < rank; j++)
+      dims[j] = (long int) _info->dimension[rank - j - 1];
+
+    switch (_info->type) {
       case _NNS_INT32:
         initializeTensor (int32_t);
         break;
@@ -207,7 +212,7 @@ Caffe2Core::initInputTensor ()
       return -1;
     }
 
-    inputTensorMap.insert (std::make_pair (inputTensorMeta.info[i].name, inputTensor));
+    inputTensorMap.insert (std::make_pair (_info->name, inputTensor));
   }
   return 0;
 }
@@ -310,15 +315,18 @@ Caffe2Core::getOutputTensorDim (GstTensorsInfo *info)
 int
 Caffe2Core::run (const GstTensorMemory *input, GstTensorMemory *output)
 {
+  GstTensorInfo *_info;
   unsigned int i;
 #if (DBG)
   gint64 start_time = g_get_real_time ();
 #endif
 
   for (i = 0; i < inputTensorMeta.num_tensors; i++) {
-    Tensor *inputTensor = inputTensorMap.find (inputTensorMeta.info[i].name)->second;
+    _info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
 
-    switch (inputTensorMeta.info[i].type) {
+    Tensor *inputTensor = inputTensorMap.find (_info->name)->second;
+
+    switch (_info->type) {
       case _NNS_INT32:
         inputTensor->ShareExternalPointer ((int32_t *) input[i].data);
         break;
@@ -378,9 +386,11 @@ Caffe2Core::run (const GstTensorMemory *input, GstTensorMemory *output)
   }
 
   for (i = 0; i < outputTensorMeta.num_tensors; i++) {
-    const auto &out = workSpace.GetBlob (outputTensorMeta.info[i].name)->Get<Tensor> ();
+    _info = gst_tensors_info_get_nth_info (&outputTensorMeta, i);
+
+    const auto &out = workSpace.GetBlob (_info->name)->Get<Tensor> ();
 
-    switch (outputTensorMeta.info[i].type) {
+    switch (_info->type) {
       case _NNS_INT32:
         output[i].data = out.data<int32_t> ();
         break;
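Note: beyond the accessor switch, the caffe2 hunks above replace the hard-coded 4-D initializer with a dims vector sized by gst_tensor_info_get_rank () and filled in reverse, since NNStreamer stores the innermost dimension first while Caffe2 expects the outermost first. The same conversion in isolation (header name assumed as above):

#include <nnstreamer_plugin_api.h>
#include <cstdint>
#include <vector>

/* Sketch: build a framework-ordered (outermost-first) shape from
 * NNStreamer's innermost-first tensor_dim. */
static std::vector<int64_t>
to_framework_shape (GstTensorInfo *info)
{
  guint rank = gst_tensor_info_get_rank (info);
  std::vector<int64_t> dims (rank);

  for (guint j = 0; j < rank; j++)
    dims[j] = (int64_t) info->dimension[rank - j - 1]; /* reverse the order */
  return dims;
}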
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_deepview_rt.cc b/ext/nnstreamer/tensor_filter/tensor_filter_deepview_rt.cc
index c4067ded2f..3983822a33 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_deepview_rt.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_deepview_rt.cc
@@ -301,9 +301,9 @@ dvrt_subplugin::getTensorDim (gsize index, tensor_dim dim)
   for (i = 0; i < dims; i++)
     dim[dims - i - 1] = shape[i];
 
-  /* fill remaining entries with 1 */
+  /* fill remaining entries with 0 */
   for (i = dims; i < NNS_TENSOR_RANK_LIMIT; ++i) {
-    dim[i] = 1;
+    dim[i] = 0;
   }
 
   return 0;
@@ -317,6 +317,7 @@ int
 dvrt_subplugin::setTensorProp (gint isInput)
 {
   GstTensorsInfo *tensorMeta;
+  GstTensorInfo *_info;
   const guint32 *indices;
   gsize num;
   std::vector<NNTensor *> *tensors;
@@ -343,20 +344,22 @@ dvrt_subplugin::setTensorProp (gint isInput)
   for (size_t i = 0; i < num; i++) {
     gsize index = indices[i];
     NNTensor *tensor = nn_context_tensor_index (context, index);
+
+    _info = gst_tensors_info_get_nth_info (tensorMeta, (guint) i);
     tensors->push_back (tensor);
 
     const gchar *name = nn_model_layer_name (model, index);
-    tensorMeta->info[i].name = g_strdup (name);
-    if (getTensorDim (index, tensorMeta->info[i].dimension))
+    _info->name = g_strdup (name);
+    if (getTensorDim (index, _info->dimension))
       return -EINVAL;
-    if (getTensorType (index, &tensorMeta->info[i].type))
+    if (getTensorType (index, &_info->type))
       return -EINVAL;
 
     gchar *dim;
-    dim = gst_tensor_get_dimension_string (tensorMeta->info[i].dimension);
-    ml_logd ("tensorMeta[%zu] >> name[%s], type[%d], dim[%s]", i,
-        tensorMeta->info[i].name, tensorMeta->info[i].type, dim);
+    dim = gst_tensor_get_dimension_string (_info->dimension);
+    ml_logd ("tensorMeta[%zu] >> name[%s], type[%d], dim[%s]", i, _info->name,
+        _info->type, dim);
     g_free (dim);
   }
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_edgetpu.cc b/ext/nnstreamer/tensor_filter/tensor_filter_edgetpu.cc
index 20fa9dc43f..376096df7a 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_edgetpu.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_edgetpu.cc
@@ -489,9 +489,9 @@ edgetpu_subplugin::getTensorDim (tflite::Interpreter *interpreter, int tensor_id
   /* the order of dimension is reversed at CAPS negotiation */
   std::reverse_copy (tensor_dims->data, tensor_dims->data + len, dim);
 
-  /* fill the remnants with 1 */
+  /* fill the remnants with 0 */
   for (int i = len; i < NNS_TENSOR_RANK_LIMIT; ++i) {
-    dim[i] = 1;
+    dim[i] = 0;
   }
 
   return 0;
@@ -535,19 +535,25 @@ void
 edgetpu_subplugin::setTensorProp (tflite::Interpreter *interpreter,
     const std::vector<int> &tensor_idx_list, GstTensorsInfo &tensorMeta)
 {
-  tensorMeta.num_tensors = tensor_idx_list.size ();
-  if (tensorMeta.num_tensors > NNS_TENSOR_SIZE_LIMIT)
+  GstTensorInfo *_info;
+  unsigned int num;
+
+  num = tensor_idx_list.size ();
+  if (num > NNS_TENSOR_SIZE_LIMIT)
     throw std::invalid_argument (
         "The number of tensors required by the given model exceeds the nnstreamer tensor limit (16 by default).");
 
-  for (unsigned int i = 0; i < tensorMeta.num_tensors; ++i) {
-    if (getTensorDim (interpreter, tensor_idx_list[i], tensorMeta.info[i].dimension)) {
+  tensorMeta.num_tensors = num;
+
+  for (unsigned int i = 0; i < num; ++i) {
+    _info = gst_tensors_info_get_nth_info (std::addressof (tensorMeta), i);
+
+    if (getTensorDim (interpreter, tensor_idx_list[i], _info->dimension)) {
       std::cerr << "failed to get the dimension of tensors" << std::endl;
       throw std::invalid_argument ("Cannot get the dimensions of given tensors at the tensor ");
     }
-    tensorMeta.info[i].type
-        = getTensorType (interpreter->tensor (tensor_idx_list[i])->type);
-    tensorMeta.info[i].name = nullptr; /** @todo tensor name is not retrieved */
+    _info->type = getTensorType (interpreter->tensor (tensor_idx_list[i])->type);
+    _info->name = nullptr; /** @todo tensor name is not retrieved */
   }
 }
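Note: the deepview and edgetpu hunks above, and several hunks below, change the padding of unused dimension slots from 1 to 0. A trailing 1 is indistinguishable from a real dimension of size 1, whereas 0 acts as a terminator from which the effective rank can be recovered; this is what gst_tensor_info_get_rank () in the caffe2 hunk relies on. A sketch of the fill and the recovery it enables (rank_of is illustrative only, not an NNStreamer API):

#include <nnstreamer_plugin_api.h>

/* Sketch: reversed copy plus zero-fill, mirroring getTensorDim () above. */
static void
fill_dim (tensor_dim dim, const unsigned int *shape, unsigned int len)
{
  for (unsigned int i = 0; i < len; i++)
    dim[len - i - 1] = shape[i]; /* order reversed at CAPS negotiation */
  for (unsigned int i = len; i < NNS_TENSOR_RANK_LIMIT; i++)
    dim[i] = 0; /* 0 terminates the dimension list */
}

/* Illustrative helper: the rank is where the first 0 appears. */
static unsigned int
rank_of (const tensor_dim dim)
{
  unsigned int r = 0;

  while (r < NNS_TENSOR_RANK_LIMIT && dim[r] != 0)
    r++;
  return r;
}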
"Failed to parse `type`. Possible types are " GST_TENSOR_TYPE_ALL); lua_pop (L, 1); @@ -400,6 +402,7 @@ lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info) lua_gettable (L, -2); if (lua_istable (L, -1)) { for (uint j = 1; j <= tensors_info.num_tensors; ++j) { + _info = gst_tensors_info_get_nth_info (std::addressof (tensors_info), (j - 1)); lua_pushinteger (L, j); lua_gettable (L, -2); if (lua_istable (L, -1)) { @@ -408,14 +411,14 @@ lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info) lua_pushinteger (L, i); lua_gettable (L, -2); if (lua_isnumber (L, -1)) { - tensors_info.info[j - 1].dimension[i - 1] = lua_tointeger (L, -1); + _info->dimension[i - 1] = lua_tointeger (L, -1); } else { throw std::invalid_argument ("Failed to parse `dim`. Please check the script"); } lua_pop (L, 1); } for (uint i = len + 1; i <= NNS_TENSOR_RANK_LIMIT; i++) { - tensors_info.info[j - 1].dimension[i - 1] = 1; + _info->dimension[i - 1] = 0; } } else { throw std::invalid_argument ("Failed to parse `dim`. Please check the script"); @@ -487,19 +490,25 @@ lua_subplugin::configure_instance (const GstTensorFilterProperties *prop) void lua_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *output) { + GstTensorInfo *_info; + if (!input) throw std::runtime_error ("Invalid input buffer, it is NULL."); if (!output) throw std::runtime_error ("Invalid output buffer, it is NULL."); for (uint i = 0; i < inputInfo.num_tensors; ++i) { - input_lua_tensors[i].type = inputInfo.info[i].type; + _info = gst_tensors_info_get_nth_info (std::addressof (inputInfo), i); + + input_lua_tensors[i].type = _info->type; input_lua_tensors[i].data = input[i].data; input_lua_tensors[i].size = input[i].size; } for (uint i = 0; i < outputInfo.num_tensors; ++i) { - output_lua_tensors[i].type = outputInfo.info[i].type; + _info = gst_tensors_info_get_nth_info (std::addressof (outputInfo), i); + + output_lua_tensors[i].type = _info->type; output_lua_tensors[i].data = output[i].data; output_lua_tensors[i].size = output[i].size; } diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc b/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc index cdfe920d31..5839fa7646 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc @@ -112,11 +112,14 @@ mediapipe_subplugin::mediapipe_subplugin () mediapipe_subplugin::~mediapipe_subplugin () { mediapipe::Status status; + GstTensorInfo *_info; g_free (config_path); for (unsigned int i = 0; i < inputInfo.num_tensors; i++) { - status = graph.CloseInputStream (inputInfo.info[i].name); + _info = gst_tensors_info_get_nth_info (&inputInfo, i); + + status = graph.CloseInputStream (_info->name); if (!status.ok ()) { std::cerr << "Failed to close input stream" << std::endl; } @@ -240,15 +243,17 @@ mediapipe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *outp #if (DBG) gint64 start_time = g_get_real_time (); #endif - int input_width = inputInfo.info[0].dimension[1]; - int input_height = inputInfo.info[0].dimension[2]; - int input_channels = inputInfo.info[0].dimension[0]; + GstTensorInfo *in_info = gst_tensors_info_get_nth_info (&inputInfo, 0U); + GstTensorInfo *out_info = gst_tensors_info_get_nth_info (&outputInfo, 0U); + int input_width = in_info->dimension[1]; + int input_height = in_info->dimension[2]; + int input_channels = in_info->dimension[0]; int input_widthStep = input_width * input_channels; mediapipe::Status status; /* TODO to make it better, start the graph at init or 
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc b/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc
index cdfe920d31..5839fa7646 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc
@@ -112,11 +112,14 @@ mediapipe_subplugin::mediapipe_subplugin ()
 mediapipe_subplugin::~mediapipe_subplugin ()
 {
   mediapipe::Status status;
+  GstTensorInfo *_info;
 
   g_free (config_path);
 
   for (unsigned int i = 0; i < inputInfo.num_tensors; i++) {
-    status = graph.CloseInputStream (inputInfo.info[i].name);
+    _info = gst_tensors_info_get_nth_info (&inputInfo, i);
+
+    status = graph.CloseInputStream (_info->name);
     if (!status.ok ()) {
       std::cerr << "Failed to close input stream" << std::endl;
     }
@@ -240,15 +243,17 @@ mediapipe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *outp
 #if (DBG)
   gint64 start_time = g_get_real_time ();
 #endif
-  int input_width = inputInfo.info[0].dimension[1];
-  int input_height = inputInfo.info[0].dimension[2];
-  int input_channels = inputInfo.info[0].dimension[0];
+  GstTensorInfo *in_info = gst_tensors_info_get_nth_info (&inputInfo, 0U);
+  GstTensorInfo *out_info = gst_tensors_info_get_nth_info (&outputInfo, 0U);
+  int input_width = in_info->dimension[1];
+  int input_height = in_info->dimension[2];
+  int input_channels = in_info->dimension[0];
   int input_widthStep = input_width * input_channels;
   mediapipe::Status status;
 
   /* TODO to make it better, start the graph at init or previous step */
   mediapipe::OutputStreamPoller poller
-      = graph.AddOutputStreamPoller (outputInfo.info[0].name).ValueOrDie ();
+      = graph.AddOutputStreamPoller (out_info->name).ValueOrDie ();
   status = graph.StartRun ({});
   if (!status.ok ()) {
     std::cerr << "Fail to start mediapipe graph" << std::endl;
@@ -262,7 +267,7 @@ mediapipe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *outp
   );
 
   // Send image packet
-  status = graph.AddPacketToInputStream (inputInfo.info[0].name,
+  status = graph.AddPacketToInputStream (in_info->name,
       mediapipe::Adopt (input_frame.release ()).At (mediapipe::Timestamp (frame_timestamp++)));
   if (!status.ok ()) {
     std::cerr << "Failed to add input packet" << std::endl;
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_movidius_ncsdk2.c b/ext/nnstreamer/tensor_filter/tensor_filter_movidius_ncsdk2.c
index 4a8ecefe5a..cb024f1975 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_movidius_ncsdk2.c
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_movidius_ncsdk2.c
@@ -36,6 +36,7 @@
 #define NO_ANONYMOUS_NESTED_STRUCT
 #include <mvnc2/mvnc.h>
 #undef NO_ANONYMOUS_NESTED_STRUCT
+#include <nnstreamer_plugin_api.h>
 #include <nnstreamer_plugin_api_filter.h>
 #include <glib.h>
 
@@ -373,8 +374,9 @@ _mvncsdk2_getInputDim (const GstTensorFilterProperties * prop,
   /** MVNCSDK only supports one tensor at a time */
   info->num_tensors = NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED;
-  nns_input_tensor_info =
-      &(info->info[NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED - 1]);
+  nns_input_tensor_info = gst_tensors_info_get_nth_info (info,
+      NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED - 1);
+
   /**
    * MVNCSDK only supports data types of FP32 and FP16. If the data type of
    * input tensor is set to FP32, NCSDK automatically convert it to FP16 as
@@ -407,7 +409,9 @@ _mvncsdk2_getOutputDim (const GstTensorFilterProperties * prop,
   /** MVNCSDK only supports one tensor at a time */
   info->num_tensors = NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED;
-  nns_output_info = &(info->info[NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED - 1]);
+  nns_output_info = gst_tensors_info_get_nth_info (info,
+      NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED - 1);
+
   /**
    * MVNCSDK only supports data types of FP32 and FP16. If the data type of
    * input tensor is set to FP32, NCSDK automatically convert it to FP16 as
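Note: gst_tensors_info_get_nth_info () takes a non-const GstTensorsInfo *, so the read-only metadata embedded in GstTensorFilterProperties is cast at the call sites throughout this patch. A hedged sketch of that call shape (header names assumed):

#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h> /* assumed to declare GstTensorFilterProperties */

/* Sketch: byte size of the i-th input tensor described by the filter properties. */
static gsize
nth_input_size (const GstTensorFilterProperties *prop, guint i)
{
  GstTensorInfo *nth
      = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);

  return nth ? gst_tensor_info_get_size (nth) : 0;
}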
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_mxnet.cc b/ext/nnstreamer/tensor_filter/tensor_filter_mxnet.cc
index 1c6bd74397..6f8b1f1ba0 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_mxnet.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_mxnet.cc
@@ -133,7 +133,7 @@ class TensorFilterMXNet final : public tensor_filter_subplugin
   static const std::string ext_params; /**< extension of model parameters (.params) */
 
   private:
-  Shape tensorInfoToShape (GstTensorInfo &tensorinfo, int rank);
+  Shape tensorDimensionToShape (tensor_dim dimension, int rank);
   MXDType tensorTypeToMXNet (tensor_type type);
   void parseCustomProperties (const GstTensorFilterProperties *prop);
   void splitParamMap (const std::map<std::string, NDArray> &paramMap,
@@ -214,6 +214,8 @@ TensorFilterMXNet::getEmptyInstance ()
 void
 TensorFilterMXNet::configure_instance (const GstTensorFilterProperties *prop)
 {
+  GstTensorInfo *_info;
+
   if (prop->num_models != 1) {
     throw std::invalid_argument ("Multiple models is not supported.");
   }
@@ -287,10 +289,10 @@ TensorFilterMXNet::configure_instance (const GstTensorFilterProperties *prop)
 
   /* Set ndarrays for the input layers. */
   for (unsigned int i = 0; i < inputs_info_.num_tensors; i++) {
-    auto &input_tensor = inputs_info_.info[i];
-    args_map_[input_tensor.name]
-        = NDArray (tensorInfoToShape (input_tensor, input_ranks_[i]), ctx_,
-            false, tensorTypeToMXNet (input_tensor.type));
+    _info = gst_tensors_info_get_nth_info (&inputs_info_, i);
+    args_map_[_info->name]
+        = NDArray (tensorDimensionToShape (_info->dimension, input_ranks_[i]),
+            ctx_, false, tensorTypeToMXNet (_info->type));
   }
 
   /* These are ndarrays where the execution engine runs. */
@@ -328,8 +330,8 @@ TensorFilterMXNet::invoke (const GstTensorMemory *input, GstTensorMemory *output
 
   /* Copy input. */
   for (unsigned int i = 0; i < inputs_info_.num_tensors; i++) {
-    auto &input_info = inputs_info_.info[i];
-    auto &input_ndarray = args_map_[input_info.name];
+    GstTensorInfo *input_info = gst_tensors_info_get_nth_info (&inputs_info_, i);
+    auto &input_ndarray = args_map_[input_info->name];
 
     assert ((input_ndarray.Size () * sizeof (mx_float)) == input[i].size);
     input_ndarray.SyncCopyFromCPU (
@@ -343,14 +345,14 @@ TensorFilterMXNet::invoke (const GstTensorMemory *input, GstTensorMemory *output
 
   /* Copy output. */
   for (unsigned int i = 0; i < outputs_info_.num_tensors; i++) {
-    auto &output_info = outputs_info_.info[i];
+    GstTensorInfo *output_info = gst_tensors_info_get_nth_info (&outputs_info_, i);
     NDArray result;
 
     /**
      * Warning: It will cause segfault if the operator name (output name) is different from expected.
     * The user should know the name of the operator name.
      */
-    Operator (output_info.name) (executor_->outputs[0]).Invoke (result);
+    Operator (output_info->name) (executor_->outputs[0]).Invoke (result);
     NDArray::WaitAll ();
 
     assert ((result.Size () * sizeof (mx_float)) == output[i].size);
@@ -399,12 +401,12 @@ TensorFilterMXNet::eventHandler (event_ops ops, GstTensorFilterFrameworkEventDat
 }
 
 /**
- * @brief Convert GstTensorInfo to MXNet Shape
+ * @brief Convert tensor_dim to MXNet Shape
 */
 Shape
-TensorFilterMXNet::tensorInfoToShape (GstTensorInfo &tensorinfo, int rank)
+TensorFilterMXNet::tensorDimensionToShape (tensor_dim dimension, int rank)
{
-  return Shape (std::vector<index_t> (tensorinfo.dimension, tensorinfo.dimension + rank));
+  return Shape (std::vector<index_t> (dimension, dimension + rank));
 }
 
 /**
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c b/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c
index d34fc74d67..120eb4ffba 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c
@@ -558,6 +558,7 @@ nnfw_invoke_dummy (const nnfw_pdata * pdata, const nnfw_tinfo_s * in_info,
     const nnfw_tinfo_s * out_info)
 {
   GstTensorsInfo gst_in_info, gst_out_info;
+  GstTensorInfo *_info;
   GstTensorMemory input[NNS_TENSOR_SIZE_LIMIT] = { {0} };
   GstTensorMemory output[NNS_TENSOR_SIZE_LIMIT] = { {0} };
   gboolean failed = FALSE;
@@ -571,13 +572,17 @@ nnfw_invoke_dummy (const nnfw_pdata * pdata, const nnfw_tinfo_s * in_info,
   }
 
   for (i = 0; i < gst_in_info.num_tensors; ++i) {
-    input[i].size = gst_tensor_info_get_size (&gst_in_info.info[i]);
+    _info = gst_tensors_info_get_nth_info (&gst_in_info, i);
+
+    input[i].size = gst_tensor_info_get_size (_info);
     input[i].data = g_malloc0 (input[i].size);
   }
 
   /* The output shape would be changed, set enough size for output buffer. */
   for (i = 0; i < gst_out_info.num_tensors; ++i) {
-    output[i].size = gst_tensor_info_get_size (&gst_out_info.info[i]) * 2;
+    _info = gst_tensors_info_get_nth_info (&gst_out_info, i);
+
+    output[i].size = gst_tensor_info_get_size (_info) * 2;
     output[i].data = g_malloc0 (output[i].size);
   }
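Note: the nnfw hunk above sizes its dummy invoke buffers with gst_tensor_info_get_size () on the per-tensor info returned by the accessor; the snap validate () hunk further below does the same. A condensed sketch of the allocation loop (error handling omitted, header name assumed):

#include <nnstreamer_plugin_api.h>

/* Sketch: allocate one zero-filled buffer per tensor described in meta. */
static void
alloc_dummy_buffers (GstTensorsInfo *meta, GstTensorMemory *mem)
{
  for (guint i = 0; i < meta->num_tensors; i++) {
    GstTensorInfo *nth = gst_tensors_info_get_nth_info (meta, i);

    mem[i].size = gst_tensor_info_get_size (nth); /* bytes for one frame */
    mem[i].data = g_malloc0 (mem[i].size);
  }
}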
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
index f3a33f493c..ac9fdc3618 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc
@@ -252,6 +252,7 @@ TensorFilterOpenvino::getInputTensorDim (GstTensorsInfo *info)
 {
   InferenceEngine::InputsDataMap *inputsDataMap = &(this->_inputsDataMap);
   InferenceEngine::InputsDataMap::iterator inputDataMapIter;
+  GstTensorInfo *_info;
   int ret, i, j;
 
   gst_tensors_info_init (info);
@@ -285,12 +286,11 @@ TensorFilterOpenvino::getInputTensorDim (GstTensorsInfo *info)
       goto failed;
     }
 
+    _info = gst_tensors_info_get_nth_info (info, i);
+
     for (sizeVecRIter = dimsSizeVec.rbegin (), j = 0;
         sizeVecRIter != dimsSizeVec.rend (); ++sizeVecRIter, ++j) {
-      info->info[i].dimension[j] = (*sizeVecRIter != 0 ? *sizeVecRIter : 1);
-    }
-    for (int k = j; k < NNS_TENSOR_RANK_LIMIT; ++k) {
-      info->info[i].dimension[k] = 1;
+      _info->dimension[j] = (*sizeVecRIter != 0 ? *sizeVecRIter : 1);
     }
 
     ieTensorTypeStr = eachInputInfo->getPrecision ().name ();
@@ -303,8 +303,8 @@ TensorFilterOpenvino::getInputTensorDim (GstTensorsInfo *info)
       goto failed;
     }
 
-    info->info[i].type = nnsTensorType;
-    info->info[i].name = g_strdup (eachInputInfo->name ().c_str ());
+    _info->type = nnsTensorType;
+    _info->name = g_strdup (eachInputInfo->name ().c_str ());
     this->_inputTensorDescs[i] = eachInputTensorDesc;
   }
 
@@ -326,6 +326,7 @@ TensorFilterOpenvino::getOutputTensorDim (GstTensorsInfo *info)
 {
   InferenceEngine::OutputsDataMap *outputsDataMap = &(this->_outputsDataMap);
   InferenceEngine::OutputsDataMap::iterator outputDataMapIter;
+  GstTensorInfo *_info;
   int ret, i, j;
 
   gst_tensors_info_init (info);
@@ -359,12 +360,11 @@ TensorFilterOpenvino::getOutputTensorDim (GstTensorsInfo *info)
       goto failed;
     }
 
+    _info = gst_tensors_info_get_nth_info (info, i);
+
     for (sizeVecRIter = dimsSizeVec.rbegin (), j = 0;
         sizeVecRIter != dimsSizeVec.rend (); ++sizeVecRIter, ++j) {
-      info->info[i].dimension[j] = (*sizeVecRIter != 0 ? *sizeVecRIter : 1);
-    }
-    for (int k = j; k < NNS_TENSOR_RANK_LIMIT; ++k) {
-      info->info[i].dimension[k] = 1;
+      _info->dimension[j] = (*sizeVecRIter != 0 ? *sizeVecRIter : 1);
     }
 
     ieTensorTypeStr = eachOutputInfo->getPrecision ().name ();
@@ -377,8 +377,8 @@ TensorFilterOpenvino::getOutputTensorDim (GstTensorsInfo *info)
       goto failed;
     }
 
-    info->info[i].type = nnsTensorType;
-    info->info[i].name = g_strdup (eachOutputInfo->getName ().c_str ());
+    _info->type = nnsTensorType;
+    _info->name = g_strdup (eachOutputInfo->getName ().c_str ());
     this->_outputTensorDescs[i] = eachOutputTensorDesc;
   }
 
@@ -403,14 +403,16 @@ TensorFilterOpenvino::invoke (const GstTensorFilterProperties *prop,
 {
   InferenceEngine::BlobMap inBlobMap;
   InferenceEngine::BlobMap outBlobMap;
+  GstTensorInfo *info;
   guint num_tensors;
   guint i;
 
   num_tensors = (prop->input_meta).num_tensors;
   for (i = 0; i < num_tensors; ++i) {
-    const GstTensorInfo *info = &((prop->input_meta).info[i]);
+    info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);
+
     InferenceEngine::Blob::Ptr blob = convertGstTensorMemoryToBlobPtr (
-        this->_inputTensorDescs[i], &(input[i]), prop->input_meta.info[i].type);
+        this->_inputTensorDescs[i], &(input[i]), info->type);
     if (blob == nullptr) {
       ml_loge ("Failed to create a blob for the input tensor: %u", i);
       return RetEInval;
@@ -421,9 +423,10 @@ TensorFilterOpenvino::invoke (const GstTensorFilterProperties *prop,
 
   num_tensors = (prop->output_meta).num_tensors;
   for (i = 0; i < num_tensors; ++i) {
-    const GstTensorInfo *info = &((prop->output_meta).info[i]);
+    info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->output_meta, i);
+
     InferenceEngine::Blob::Ptr blob = convertGstTensorMemoryToBlobPtr (
-        this->_outputTensorDescs[i], &(output[i]), prop->output_meta.info[i].type);
+        this->_outputTensorDescs[i], &(output[i]), info->type);
     outBlobMap.insert (make_pair (std::string (info->name), blob));
     if (blob == nullptr) {
       ml_loge ("Failed to create a blob for the output tensor: %u", i);
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_python3.cc b/ext/nnstreamer/tensor_filter/tensor_filter_python3.cc
index c146906259..d27d7b2e9d 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_python3.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_python3.cc
@@ -452,6 +452,7 @@ PYCore::getOutputTensorDim (GstTensorsInfo *info)
 int
 PYCore::setInputTensorDim (const GstTensorsInfo *in_info, GstTensorsInfo *out_info)
 {
+  GstTensorInfo *_info;
   int res = 0;
 
   if (nullptr == in_info || nullptr == out_info)
@@ -465,7 +466,10 @@
     throw std::runtime_error ("PyList_New(); has failed.");
 
   for (unsigned int i = 0; i < in_info->num_tensors; i++) {
-    PyObject *shape = PyTensorShape_New (shape_cls, &in_info->info[i]);
+    PyObject *shape;
+
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) in_info, i);
+    shape = PyTensorShape_New (shape_cls, _info);
     if (nullptr == shape)
       throw std::runtime_error ("PyTensorShape_New(); has failed.");
 
@@ -525,6 +529,7 @@ PYCore::freeOutputTensors (void *data)
 int
 PYCore::run (const GstTensorMemory *input, GstTensorMemory *output)
 {
+  GstTensorInfo *_info;
   int res = 0;
   PyObject *result;
 
@@ -539,8 +544,10 @@ PYCore::run (const GstTensorMemory *input, GstTensorMemory *output)
 
   PyObject *param = PyList_New (inputTensorMeta.num_tensors);
   for (unsigned int i = 0; i < inputTensorMeta.num_tensors; i++) {
+    _info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
+
     /** create a Numpy array wrapper (1-D) for NNS tensor data */
-    tensor_type nns_type = inputTensorMeta.info[i].type;
+    tensor_type nns_type = _info->type;
     npy_intp input_dims[]
         = { (npy_intp) (input[i].size / gst_tensor_get_element_size (nns_type)) };
     PyObject *input_array = PyArray_SimpleNewFromData (
@@ -562,8 +569,11 @@ PYCore::run (const GstTensorMemory *input, GstTensorMemory *output)
   for (unsigned int i = 0; i < outputTensorMeta.num_tensors; i++) {
     PyArrayObject *output_array
         = (PyArrayObject *) PyList_GetItem (result, (Py_ssize_t) i);
+
+    _info = gst_tensors_info_get_nth_info (&outputTensorMeta, i);
+
     /** type/size checking */
-    if (checkTensorType (outputTensorMeta.info[i].type, PyArray_TYPE (output_array))
+    if (checkTensorType (_info->type, PyArray_TYPE (output_array))
         && checkTensorSize (&output[i], output_array)) {
       /** obtain the pointer to the buffer for the output array */
       output[i].data = PyArray_DATA (output_array);
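Note: the python3 hunk above computes the length of each 1-D NumPy wrapper as the buffer's byte size divided by the element size of its tensor type. The same arithmetic in isolation (header name assumed):

#include <nnstreamer_plugin_api.h>

/* Sketch: element count of a flat buffer, as used for input_dims above. */
static gsize
element_count (gsize byte_size, tensor_type type)
{
  gsize esize = gst_tensor_get_element_size (type);

  return (esize > 0) ? (byte_size / esize) : 0;
}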
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
index bf1be04854..3c9e547004 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
@@ -333,6 +333,7 @@ TorchCore::validateOutputTensor (const at::Tensor output, unsigned int idx)
   gsize num_gst_tensor, num_torch_tensor;
   at::Tensor sliced_output = output.slice (0);
   c10::IntArrayRef sliced_output_sizes = sliced_output.sizes ();
+  GstTensorInfo *_info;
 
   /** if idx is in bounds */
   if (outputTensorMeta.num_tensors <= idx) {
@@ -341,10 +342,12 @@ TorchCore::validateOutputTensor (const at::Tensor output, unsigned int idx)
     return -1;
   }
 
+  _info = gst_tensors_info_get_nth_info (&outputTensorMeta, idx);
+
   /** when output is a scalar */
   if (tensor_shape[0] == 0) {
     otype = getTensorTypeFromTorch (output.scalar_type ());
-    if (outputTensorMeta.info[idx].type != otype) {
+    if (_info->type != otype) {
       ml_loge ("Invalid output meta: different type at index %u. Update the type of tensor at index %u to %d tensor_type",
           idx, idx, otype);
       return -2;
@@ -353,13 +356,13 @@ TorchCore::validateOutputTensor (const at::Tensor output, unsigned int idx)
   }
 
   otype = getTensorTypeFromTorch (sliced_output.scalar_type ());
-  if (outputTensorMeta.info[idx].type != otype) {
+  if (_info->type != otype) {
     ml_loge ("Invalid output meta: different type at index %u. Update the type of tensor at index %u to %d tensor_type",
         idx, idx, otype);
     return -2;
   }
 
-  num_gst_tensor = gst_tensor_get_element_count (outputTensorMeta.info[idx].dimension);
+  num_gst_tensor = gst_tensor_get_element_count (_info->dimension);
   num_torch_tensor = 1;
   for (int j = 0; j < sliced_output.ndimension (); j++) {
     num_torch_tensor *= sliced_output_sizes[j];
@@ -511,7 +514,7 @@ TorchCore::invoke (const GstTensorFilterProperties *prop,
 #if (DBG)
   gint64 start_time = g_get_real_time ();
 #endif
-
+  GstTensorInfo *_info;
   std::vector<torch::jit::IValue> input_feeds;
   torch::jit::IValue output_value;
   torch::Dtype type;
@@ -520,11 +523,13 @@ TorchCore::invoke (const GstTensorFilterProperties *prop,
   /** @todo Support other input types other than at::Tensor */
   for (uint i = 0; i < inputTensorMeta.num_tensors; ++i) {
     std::vector<int64_t> input_shape;
-    input_shape.assign (&inputTensorMeta.info[i].dimension[0],
-        &inputTensorMeta.info[i].dimension[0] + NNS_TENSOR_RANK_LIMIT);
 
-    if (!getTensorTypeToTorch (inputTensorMeta.info[i].type, &type)) {
-      ml_loge ("This data type is not valid: %d", inputTensorMeta.info[i].type);
+    _info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
+
+    input_shape.assign (&_info->dimension[0], &_info->dimension[0] + NNS_TENSOR_RANK_LIMIT);
+
+    if (!getTensorTypeToTorch (_info->type, &type)) {
+      ml_loge ("This data type is not valid: %d", _info->type);
       return -1;
     }
     at::TensorOptions options = torch::TensorOptions ().dtype (type);
@@ -591,9 +596,9 @@ TorchCore::fillTensorDim (torch::autograd::Variable tensor_meta, tensor_dim dim)
   /** the order of dimension is reversed at CAPS negotiation */
   std::reverse_copy (tensor_meta.sizes ().begin (), tensor_meta.sizes ().end (), dim);
 
-  /** fill the remnants with 1 */
+  /** fill the remnants with 0 */
   for (int idx = num_dim; idx < NNS_TENSOR_RANK_LIMIT; ++idx) {
-    dim[idx] = 1;
+    dim[idx] = 0;
   }
 
   return 0;
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_snap.cc b/ext/nnstreamer/tensor_filter/tensor_filter_snap.cc
index 253f03af1d..1e5a2f279a 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_snap.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_snap.cc
@@ -428,6 +428,7 @@ tensor_filter_snap::close ()
 bool
 tensor_filter_snap::validate (const GstTensorFilterProperties *prop, snap_option_s &snap_option)
 {
+  GstTensorInfo *_info;
   GstTensorMemory in_tensors[NNS_TENSOR_SIZE_LIMIT] = {
     0,
   };
@@ -442,7 +443,9 @@ tensor_filter_snap::validate (const GstTensorFilterProperties *prop, snap_option
 
   /* Invoke with dummy data to validate output meta. */
   for (i = 0; i < prop->input_meta.num_tensors; i++) {
-    in_tensors[i].size = gst_tensor_info_get_size (&prop->input_meta.info[i]);
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);
+
+    in_tensors[i].size = gst_tensor_info_get_size (_info);
     in_tensors[i].data = g_malloc0 (in_tensors[i].size);
   }
 
@@ -706,6 +709,7 @@ bool
 tensor_filter_snap::configure_input_meta (
     const GstTensorFilterProperties *prop, snap_option_s &snap_option)
 {
+  GstTensorInfo *_info;
   snap_sdk::ErrCode status;
   guint i, j;
   gulong rank;
@@ -719,9 +723,11 @@ tensor_filter_snap::configure_input_meta (
     snap_data_info_s snap_info;
     snap_sdk::SnapData in_data;
 
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);
+
     snap_info.format = snap_option.input_format[i];
 
-    if (!convert_nns_type (prop->input_meta.info[i].type, snap_info.type)) {
+    if (!convert_nns_type (_info->type, snap_info.type)) {
       snap_logw ("Failed to convert input type.");
       return false;
     }
@@ -743,7 +749,7 @@
      */
     rank = NNS_TENSOR_RANK_LIMIT;
     for (j = 0; j < rank; j++) {
-      int s = (int) prop->input_meta.info[i].dimension[rank - j - 1];
+      int s = (int) _info->dimension[rank - j - 1];
       snap_info.shape.push_back (s);
     }
   } else {
@@ -931,9 +937,9 @@ tensor_filter_snap::parse_dimension (const std::vector<int> &shape, tensor_dim d
     dim[rank - i - 1] = (unsigned int) shape[i];
   }
 
-  /* fill the remnants with 1 */
+  /* fill the remnants with 0 */
   for (i = rank; i < NNS_TENSOR_RANK_LIMIT; i++) {
-    dim[i] = 1;
+    dim[i] = 0;
  }
 
   return true;
@@ -945,16 +951,19 @@ bool
 tensor_filter_snap::convert_names (const GstTensorsInfo *info, std::vector<std::string> &names)
 {
+  GstTensorInfo *_info;
   guint i;
 
   for (i = 0; i < info->num_tensors; ++i) {
-    if (info->info[i].name == nullptr) {
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) info, i);
+
+    if (_info->name == nullptr) {
       /* failed */
       snap_loge ("Given tensor name with index %d is invalid.", i);
       return false;
     }
 
-    names.push_back (std::string (info->info[i].name));
+    names.push_back (std::string (_info->name));
   }
 
   return true;
@@ -984,6 +993,7 @@ tensor_filter_snap::compare_meta (const GstTensorFilterProperties *prop,
   }
 
   for (i = 0; i < nns_info->num_tensors; ++i) {
+    GstTensorInfo *_info;
     GstTensorInfo snap_info;
     tensor_layout snap_layout = _NNS_LAYOUT_NONE;
 
@@ -999,7 +1009,8 @@ tensor_filter_snap::compare_meta (const GstTensorFilterProperties *prop,
       return false;
     }
 
-    if (!gst_tensor_info_is_equal (&nns_info->info[i], &snap_info)) {
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) nns_info, i);
+    if (!gst_tensor_info_is_equal (_info, &snap_info)) {
      snap_logw ("Given tensor info is not equal.");
      return false;
    }
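Note: the snap convert_names () hunk above and the snpe set_output_tensor_names () hunk below both refuse unnamed tensors before handing names to the SDK, since those engines bind buffers by name. A combined sketch of that guard (illustrative helper, not part of the patch):

#include <nnstreamer_plugin_api.h>
#include <string>
#include <vector>

static bool
collect_names (GstTensorsInfo *meta, std::vector<std::string> &names)
{
  for (guint i = 0; i < meta->num_tensors; ++i) {
    GstTensorInfo *nth = gst_tensors_info_get_nth_info (meta, i);

    if (nth == NULL || nth->name == NULL || nth->name[0] == '\0')
      return false; /* unnamed tensor cannot be bound by name */
    names.push_back (std::string (nth->name));
  }
  return true;
}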
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_snpe.cc b/ext/nnstreamer/tensor_filter/tensor_filter_snpe.cc
index 242907ab80..484a7357e1 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_snpe.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_snpe.cc
@@ -231,16 +231,20 @@ snpe_subplugin::runtimeToString (zdl::DlSystem::Runtime_t runtime)
 bool
 snpe_subplugin::set_output_tensor_names (const GstTensorsInfo *info)
 {
+  GstTensorInfo *_info;
+
   if (output_tensor_names_list.size () > 0) {
     output_tensor_names_list = zdl::DlSystem::StringList ();
   }
 
   for (unsigned int i = 0; i < info->num_tensors; ++i) {
-    if (info->info[i].name == nullptr || info->info[i].name[0] == '\0') {
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) info, i);
+
+    if (_info->name == nullptr || _info->name[0] == '\0') {
       /* failed */
       nns_loge ("Given output tensor name with index %u is invalid, it is null.", i);
       return false;
     }
-    output_tensor_names_list.append (info->info[i].name);
+    output_tensor_names_list.append (_info->name);
   }
   return true;
 }
@@ -487,16 +491,17 @@ snpe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *output)
 #if (DBG)
   gint64 start_time = g_get_real_time ();
 #endif
+  GstTensorInfo *_info;
 
   if (use_user_buffer) {
     for (unsigned int i = 0; i < inputInfo.num_tensors; ++i) {
-      input_buffer_map.getUserBuffer (inputInfo.info[i].name)
-          ->setBufferAddress (input[i].data);
+      _info = gst_tensors_info_get_nth_info (std::addressof (inputInfo), i);
+      input_buffer_map.getUserBuffer (_info->name)->setBufferAddress (input[i].data);
     }
 
     for (unsigned int i = 0; i < outputInfo.num_tensors; ++i) {
-      output_buffer_map.getUserBuffer (outputInfo.info[i].name)
-          ->setBufferAddress (output[i].data);
+      _info = gst_tensors_info_get_nth_info (std::addressof (outputInfo), i);
+      output_buffer_map.getUserBuffer (_info->name)->setBufferAddress (output[i].data);
    }
 
     snpe->execute (input_buffer_map, output_buffer_map);
@@ -651,12 +656,15 @@ void
 snpe_subplugin::setTensorProp (GstTensorsInfo &tensor_meta,
     const zdl::DlSystem::StringList strList, tensor_type data_type)
 {
+  GstTensorInfo *_info;
   unsigned int idx = 0;
 
   tensor_meta.num_tensors = strList.size ();
   for (const char *name : strList) {
-    tensor_meta.info[idx].type = data_type;
-    tensor_meta.info[idx].name = g_strdup (name);
+    _info = gst_tensors_info_get_nth_info (std::addressof (tensor_meta), idx);
+
+    _info->type = data_type;
+    _info->name = g_strdup (name);
 
     auto bufferAttributesOpt = snpe->getInputOutputBufferAttributes (name);
     const zdl::DlSystem::TensorShape &bufferShape = (*bufferAttributesOpt)->getDims ();
     for (size_t j = 0; j < bufferShape.rank (); ++j) {
@@ -668,10 +676,10 @@ snpe_subplugin::setTensorProp (GstTensorsInfo &tensor_meta,
         }
         bufferShape[bufferShape.rank () - j - 1] = max_resizable_dim;
       }
-      tensor_meta.info[idx].dimension[j] = bufferShape[bufferShape.rank () - j - 1];
+      _info->dimension[j] = bufferShape[bufferShape.rank () - j - 1];
     }
     for (size_t j = bufferShape.rank (); j < NNS_TENSOR_RANK_LIMIT; ++j) {
-      tensor_meta.info[idx].dimension[j] = 1;
+      _info->dimension[j] = 0;
     }
     idx++;
   }
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.cc
index f95dd85f6d..eb850d2e22 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.cc
@@ -378,9 +378,13 @@ TFCore::getTensorTypeToTF (tensor_type tType)
 int
 TFCore::validateTensor (const GstTensorsInfo *tensorInfo, int is_input)
 {
+  GstTensorInfo *_info;
+
   for (unsigned int i = 0; i < tensorInfo->num_tensors; i++) {
+    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) tensorInfo, i);
+
     /* set the name of tensor */
-    TF_Operation *op = TF_GraphOperationByName (graph, tensorInfo->info[i].name);
+    TF_Operation *op = TF_GraphOperationByName (graph, _info->name);
 
     g_assert (op != nullptr);
 
@@ -401,7 +405,7 @@ TFCore::validateTensor (const GstTensorsInfo *tensorInfo, int is_input)
     }
 
     if (type != TF_STRING) {
-      g_assert (tensorInfo->info[i].type == getTensorTypeFromTF (type));
+      g_assert (_info->type == getTensorTypeFromTF (type));
     }
     info_s.type = type;
 
@@ -423,12 +427,11 @@ TFCore::validateTensor (const GstTensorsInfo *tensorInfo, int is_input)
       /* check the validity of dimension */
       for (int d = 0; d < num_dims; ++d) {
-        info_s.dims.push_back (
-            static_cast<int64_t> (tensorInfo->info[i].dimension[num_dims - d - 1]));
+        info_s.dims.push_back (static_cast<int64_t> (_info->dimension[num_dims - d - 1]));
         if (dims[d] < 0) {
           continue;
         }
-        g_assert (tensorInfo->info[i].dimension[num_dims - d - 1] == dims[d]);
+        g_assert (_info->dimension[num_dims - d - 1] == dims[d]);
       }
     }
     if (is_input) {
@@ -497,6 +500,7 @@ TFCore::run (const GstTensorMemory *input, GstTensorMemory *output)
 #if (DBG)
   gint64 start_time = g_get_real_time ();
 #endif
+  GstTensorInfo *_info;
   std::vector<TF_Output> input_ops;
   std::vector<TF_Tensor *> input_tensors;
   std::vector<TF_Output> output_ops;
 
@@ -506,9 +510,10 @@ TFCore::run (const GstTensorMemory *input, GstTensorMemory *output)
 
   /* create input tensor for the graph from `input` */
   for (unsigned int i = 0; i < inputTensorMeta.num_tensors; i++) {
+    _info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
+
     TF_Tensor *in_tensor = nullptr;
-    TF_Output input_op
-        = { TF_GraphOperationByName (graph, inputTensorMeta.info[i].name), 0 };
+    TF_Output input_op = { TF_GraphOperationByName (graph, _info->name), 0 };
 
     g_assert (input_op.oper != nullptr);
     input_ops.push_back (input_op);
@@ -550,8 +555,9 @@ TFCore::run (const GstTensorMemory *input, GstTensorMemory *output)
 
   /* create output tensor for the graph from `output` */
   for (unsigned int i = 0; i < outputTensorMeta.num_tensors; i++) {
-    TF_Output output_op
-        = { TF_GraphOperationByName (graph, outputTensorMeta.info[i].name), 0 };
+    _info = gst_tensors_info_get_nth_info (&outputTensorMeta, i);
+
+    TF_Output output_op = { TF_GraphOperationByName (graph, _info->name), 0 };
 
     g_assert (output_op.oper != nullptr);
     output_ops.push_back (output_op);
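Note: the TensorRT hunks that follow rebuild nvinfer1::Dims2/3/4 case by case from NNStreamer's innermost-first dimensions. The underlying reversal, written once as an illustrative helper (not part of the patch; header name assumed):

#include <nnstreamer_plugin_api.h>

/* Sketch: expand an innermost-first tensor_dim into a framework-ordered
 * (outermost-first) int array of the given rank. */
static void
reverse_dims (const GstTensorInfo *info, guint rank, int *out)
{
  for (guint j = 0; j < rank; j++)
    out[j] = (int) info->dimension[rank - j - 1];
}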
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorrt.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorrt.cc
index 2200694c5f..b652165fa8 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorrt.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorrt.cc
@@ -173,6 +173,8 @@ tensorrt_subplugin::getEmptyInstance ()
 void
 tensorrt_subplugin::configure_instance (const GstTensorFilterProperties *prop)
 {
+  GstTensorInfo *_info;
+
   /* Set model path */
   if (prop->num_models != 1 || !prop->model_files[0]) {
     ml_loge ("TensorRT filter requires one UFF model file.");
@@ -191,7 +193,8 @@ tensorrt_subplugin::configure_instance (const GstTensorFilterProperties *prop)
   }
 
   /* Get the datatype of input tensor */
-  if (setTensorType (_inputTensorMeta.info[0].type) != 0) {
+  _info = gst_tensors_info_get_nth_info (&_inputTensorMeta, 0U);
+  if (setTensorType (_info->type) != 0) {
     ml_loge ("TensorRT filter does not support the input data type.");
     throw std::invalid_argument ("TensorRT filter does not support the input data type.");
   }
@@ -294,6 +297,8 @@ tensorrt_subplugin::eventHandler (event_ops ops, GstTensorFilterFrameworkEventDa
 int
 tensorrt_subplugin::loadModel (const GstTensorFilterProperties *prop)
 {
+  GstTensorInfo *_info;
+
   UNUSED (prop);
 
   if (checkUnifiedMemory () != 0) {
@@ -327,9 +332,11 @@ tensorrt_subplugin::loadModel (const GstTensorFilterProperties *prop)
   }
 
   /* Register tensor input & output */
-  parser->registerInput (_inputTensorMeta.info[0].name, _InputDims,
-      nvuffparser::UffInputOrder::kNCHW);
-  parser->registerOutput (_outputTensorMeta.info[0].name);
+  _info = gst_tensors_info_get_nth_info (&_inputTensorMeta, 0U);
+  parser->registerInput (_info->name, _InputDims, nvuffparser::UffInputOrder::kNCHW);
+
+  _info = gst_tensors_info_get_nth_info (&_outputTensorMeta, 0U);
+  parser->registerOutput (_info->name);
 
   /* Parse the imported model */
   parser->parse (_uff_path, *network, _DataType);
@@ -438,23 +445,24 @@ tensorrt_subplugin::setTensorType (tensor_type t)
 int
 tensorrt_subplugin::setInputDims (guint input_rank)
 {
+  GstTensorInfo *_info;
+
+  _info = gst_tensors_info_get_nth_info (&_inputTensorMeta, 0U);
+
   switch (input_rank) {
     case 2:
-      _InputDims = nvinfer1::Dims2 ((int) _inputTensorMeta.info[0].dimension[1],
-          (int) _inputTensorMeta.info[0].dimension[0]);
+      _InputDims
+          = nvinfer1::Dims2 ((int) _info->dimension[1], (int) _info->dimension[0]);
       break;
 
     case 3:
-      _InputDims = nvinfer1::Dims3 ((int) _inputTensorMeta.info[0].dimension[2],
-          (int) _inputTensorMeta.info[0].dimension[1],
-          (int) _inputTensorMeta.info[0].dimension[0]);
+      _InputDims = nvinfer1::Dims3 ((int) _info->dimension[2],
+          (int) _info->dimension[1], (int) _info->dimension[0]);
       break;
 
     case 4:
-      _InputDims = nvinfer1::Dims4 ((int) _inputTensorMeta.info[0].dimension[3],
-          (int) _inputTensorMeta.info[0].dimension[2],
-          (int) _inputTensorMeta.info[0].dimension[1],
-          (int) _inputTensorMeta.info[0].dimension[0]);
+      _InputDims = nvinfer1::Dims4 ((int) _info->dimension[3], (int) _info->dimension[2],
+          (int) _info->dimension[1], (int) _info->dimension[0]);
      break;
 
     default:
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc b/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc
index c9288cf81e..9f497d7ec0 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc
@@ -123,7 +123,7 @@ TensorFilterTRIxEngine::configure_instance (const GstTensorFilterProperties *pro
         _info->dimension[j] = model_meta_->input_seg_dims[i][rank_limit - j - 1];
 
       for (; j < NNS_TENSOR_RANK_LIMIT; j++)
-        _info->dimension[j] = 1;
+        _info->dimension[j] = 0;
     }
   } else {
     gst_tensors_info_copy (&nns_in_info_, &prop->input_meta);
@@ -140,7 +140,7 @@ TensorFilterTRIxEngine::configure_instance (const GstTensorFilterProperties *pro
         _info->dimension[j] = model_meta_->output_seg_dims[i][rank_limit - j - 1];
 
       for (; j < NNS_TENSOR_RANK_LIMIT; j++)
-        _info->dimension[j] = 1;
+        _info->dimension[j] = 0;
     }
   } else {
     gst_tensors_info_copy (&nns_out_info_, &prop->output_meta);
@@ -203,21 +203,26 @@ TensorFilterTRIxEngine::convert_data_type (const tensor_type &type)
 void
 TensorFilterTRIxEngine::set_data_info (const GstTensorFilterProperties *prop)
 {
+  GstTensorInfo *_info;
   const tensor_layout *input_layout = &(prop->input_layout[0]);
   const tensor_layout *output_layout = &(prop->output_layout[0]);
 
   trix_in_info_.num_info = model_meta_->input_seg_num;
 
   for (uint32_t idx = 0; idx < trix_in_info_.num_info; ++idx) {
+    _info = gst_tensors_info_get_nth_info (&nns_in_info_, idx);
+
     trix_in_info_.info[idx].layout = convert_data_layout (input_layout[idx]);
-    trix_in_info_.info[idx].type = convert_data_type (nns_in_info_.info[idx].type);
+    trix_in_info_.info[idx].type = convert_data_type (_info->type);
   }
 
   trix_out_info_.num_info = model_meta_->output_seg_num;
 
   for (uint32_t idx = 0; idx < trix_out_info_.num_info; ++idx) {
+    _info = gst_tensors_info_get_nth_info (&nns_out_info_, idx);
+
     trix_out_info_.info[idx].layout = convert_data_layout (output_layout[idx]);
-    trix_out_info_.info[idx].type = convert_data_type (nns_out_info_.info[idx].type);
+    trix_out_info_.info[idx].type = convert_data_type (_info->type);
   }
 }
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tvm.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tvm.cc
index bd186dde2f..7432b5c170 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tvm.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tvm.cc
@@ -198,6 +198,8 @@ tvm_subplugin::configure_instance (const GstTensorFilterProperties *prop)
 {
   unsigned int i;
   int idx;
+  GstTensorInfo *_info;
+
   if (!parse_custom_prop (prop->custom_properties)) {
     nns_loge ("Failed to parse custom property.");
     cleanup ();
@@ -251,16 +253,18 @@ tvm_subplugin::configure_instance (const GstTensorFilterProperties *prop)
     dt = arr.operator-> ();
     input_tensor_list.push_back (*dt);
 
-    if (!convert_dtype (inputInfo.info[i].type, dt->dtype)) {
+    _info = gst_tensors_info_get_nth_info (&inputInfo, i);
+
+    if (!convert_dtype (_info->type, dt->dtype)) {
       cleanup ();
       throw std::invalid_argument ("Failed to convert DLPack data type");
     }
 
     for (idx = 0; idx < dt->ndim; ++idx)
-      inputInfo.info[i].dimension[idx] = dt->shape[dt->ndim - idx - 1];
+      _info->dimension[idx] = dt->shape[dt->ndim - idx - 1];
     for (; idx < NNS_TENSOR_RANK_LIMIT; ++idx)
-      inputInfo.info[i].dimension[idx] = 1;
-    inputInfo.info[i].name = nullptr;
+      _info->dimension[idx] = 0;
+    _info->name = nullptr;
   }
 
   for (i = 0; i < outputInfo.num_tensors; ++i) {
@@ -268,16 +272,18 @@ tvm_subplugin::configure_instance (const GstTensorFilterProperties *prop)
     dt = arr.operator-> ();
     output_tensor_list.push_back (*dt);
 
-    if (!convert_dtype (outputInfo.info[i].type, dt->dtype)) {
+    _info = gst_tensors_info_get_nth_info (&outputInfo, i);
+
+    if (!convert_dtype (_info->type, dt->dtype)) {
       cleanup ();
       throw std::invalid_argument ("Failed to convert DLPack data type");
     }
 
     for (idx = 0; idx < dt->ndim; ++idx)
-      outputInfo.info[i].dimension[idx] = dt->shape[dt->ndim - idx - 1];
+      _info->dimension[idx] = dt->shape[dt->ndim - idx - 1];
     for (; idx < NNS_TENSOR_RANK_LIMIT; ++idx)
-      outputInfo.info[i].dimension[idx] = 1;
-    outputInfo.info[i].name = nullptr;
+      _info->dimension[idx] = 0;
+    _info->name = nullptr;
   }
   empty_model = false;
 }