Skip to content

Commit

Permalink
[Filter/Sub] fill 0 dim at nth info
Browse files Browse the repository at this point in the history
Utility function to get the nth gst-info and fill the remaining dimensions with 0.

TODO:
I did not build and test all the subplugins; they need to be verified later.

Signed-off-by: Jaeyun Jung <[email protected]>
  • Loading branch information
jaeyun-jung authored and again4you committed Sep 7, 2023
1 parent 78e434f commit 0a05b79
Show file tree
Hide file tree
Showing 18 changed files with 262 additions and 153 deletions.
25 changes: 14 additions & 11 deletions ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,7 @@ ArmNNCore::makeNetwork (const GstTensorFilterProperties *prop)
{
std::vector<std::string> output_vec;
std::map<std::string, armnn::TensorShape> input_map;
GstTensorInfo *_info;

if (g_str_has_suffix (model_path, ".tflite")) {
return makeTfLiteNetwork ();
Expand All @@ -314,33 +315,35 @@ ArmNNCore::makeNetwork (const GstTensorFilterProperties *prop)
if (prop->output_meta.num_tensors != 0) {
output_vec.reserve (prop->output_meta.num_tensors);
for (unsigned int i = 0; i < prop->output_meta.num_tensors; i++) {
if (prop->output_meta.info[i].name == NULL) {
_info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->output_meta, i);

if (_info->name == NULL) {
/** clear output vec in case of error */
output_vec.clear ();
output_vec.shrink_to_fit ();
break;
}
output_vec.push_back (prop->output_meta.info[i].name);
output_vec.push_back (_info->name);
}
}

/** Create input map with name and data shape */
for (unsigned int i = 0; i < prop->input_meta.num_tensors; i++) {
if (prop->input_meta.info[i].name == NULL) {
_info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &prop->input_meta, i);

if (_info->name == NULL) {
/** clear input map in case of error */
input_map.clear ();
break;
}

/** Set dimension only if valid */
if (gst_tensor_dimension_is_valid (prop->input_meta.info[i].dimension)) {
if (gst_tensor_dimension_is_valid (_info->dimension)) {
unsigned int rev_dim[NNS_TENSOR_RANK_LIMIT];
std::reverse_copy (prop->input_meta.info[i].dimension,
prop->input_meta.info[i].dimension + NNS_TENSOR_RANK_LIMIT, rev_dim);
input_map[prop->input_meta.info[i].name]
= armnn::TensorShape (NNS_TENSOR_RANK_LIMIT, rev_dim);
std::reverse_copy (_info->dimension, _info->dimension + NNS_TENSOR_RANK_LIMIT, rev_dim);
input_map[_info->name] = armnn::TensorShape (NNS_TENSOR_RANK_LIMIT, rev_dim);
} else {
input_map[prop->input_meta.info[i].name] = armnn::TensorShape ();
input_map[_info->name] = armnn::TensorShape ();
}
}

Expand Down Expand Up @@ -507,7 +510,7 @@ ArmNNCore::setTensorProp (const std::vector<armnn::BindingPointInfo> &bindings,
for (unsigned int idx = 0; idx < bindings.size (); ++idx) {
armnn::TensorInfo arm_info = bindings[idx].second;
armnn::TensorShape arm_shape;
GstTensorInfo *gst_info = &tensorMeta->info[idx];
GstTensorInfo *gst_info = gst_tensors_info_get_nth_info (tensorMeta, idx);

/* Use binding id as a name, if no name already exists */
if (gst_info->name == NULL) {
Expand Down Expand Up @@ -541,7 +544,7 @@ ArmNNCore::setTensorProp (const std::vector<armnn::BindingPointInfo> &bindings,
}

for (int i = NNS_TENSOR_RANK_LIMIT - 1; i >= num_dim; i--) {
gst_info->dimension[i] = 1;
gst_info->dimension[i] = 0;
}
}

Expand Down
40 changes: 25 additions & 15 deletions ext/nnstreamer/tensor_filter/tensor_filter_caffe2.cc
Original file line number Diff line number Diff line change
Expand Up @@ -150,12 +150,10 @@ Caffe2Core::init (const GstTensorFilterProperties *prop)
return 0;
}

#define initializeTensor(type) \
do { \
ReinitializeTensor (inputTensor, \
{ inputTensorMeta.info[i].dimension[3], inputTensorMeta.info[i].dimension[2], \
inputTensorMeta.info[i].dimension[1], inputTensorMeta.info[i].dimension[0] }, \
at::dtype<type> ().device (CPU)); \
#define initializeTensor(type) \
do { \
ReinitializeTensor ( \
inputTensor, at::IntArrayRef (dims), at::dtype<type> ().device (CPU)); \
} while (0);

/**
Expand All @@ -164,14 +162,21 @@ Caffe2Core::init (const GstTensorFilterProperties *prop)
int
Caffe2Core::initInputTensor ()
{
guint i;
GstTensorInfo *_info;
guint i, j, rank;

inputTensorMap.clear ();
for (i = 0; i < inputTensorMeta.num_tensors; i++) {
Tensor *inputTensor
= workSpace.CreateBlob (inputTensorMeta.info[i].name)->GetMutable<Tensor> ();
_info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);
rank = gst_tensor_info_get_rank (_info);

switch (inputTensorMeta.info[i].type) {
Tensor *inputTensor = workSpace.CreateBlob (_info->name)->GetMutable<Tensor> ();
std::vector<long int> dims (rank);

for (j = 0; j < rank; j++)
dims[j] = (long int) _info->dimension[rank - j - 1];

switch (_info->type) {
case _NNS_INT32:
initializeTensor (int32_t);
break;
Expand Down Expand Up @@ -207,7 +212,7 @@ Caffe2Core::initInputTensor ()
return -1;
}

inputTensorMap.insert (std::make_pair (inputTensorMeta.info[i].name, inputTensor));
inputTensorMap.insert (std::make_pair (_info->name, inputTensor));
}
return 0;
}
Expand Down Expand Up @@ -310,15 +315,18 @@ Caffe2Core::getOutputTensorDim (GstTensorsInfo *info)
int
Caffe2Core::run (const GstTensorMemory *input, GstTensorMemory *output)
{
GstTensorInfo *_info;
unsigned int i;
#if (DBG)
gint64 start_time = g_get_real_time ();
#endif

for (i = 0; i < inputTensorMeta.num_tensors; i++) {
Tensor *inputTensor = inputTensorMap.find (inputTensorMeta.info[i].name)->second;
_info = gst_tensors_info_get_nth_info (&inputTensorMeta, i);

switch (inputTensorMeta.info[i].type) {
Tensor *inputTensor = inputTensorMap.find (_info->name)->second;

switch (_info->type) {
case _NNS_INT32:
inputTensor->ShareExternalPointer ((int32_t *) input[i].data);
break;
Expand Down Expand Up @@ -378,9 +386,11 @@ Caffe2Core::run (const GstTensorMemory *input, GstTensorMemory *output)
}

for (i = 0; i < outputTensorMeta.num_tensors; i++) {
const auto &out = workSpace.GetBlob (outputTensorMeta.info[i].name)->Get<Tensor> ();
_info = gst_tensors_info_get_nth_info (&outputTensorMeta, i);

const auto &out = workSpace.GetBlob (_info->name)->Get<Tensor> ();

switch (outputTensorMeta.info[i].type) {
switch (_info->type) {
case _NNS_INT32:
output[i].data = out.data<int32_t> ();
break;
Expand Down
19 changes: 11 additions & 8 deletions ext/nnstreamer/tensor_filter/tensor_filter_deepview_rt.cc
Original file line number Diff line number Diff line change
Expand Up @@ -301,9 +301,9 @@ dvrt_subplugin::getTensorDim (gsize index, tensor_dim dim)
for (i = 0; i < dims; i++)
dim[dims - i - 1] = shape[i];

/* fill remaining entries with 1 */
/* fill remaining entries with 0 */
for (i = dims; i < NNS_TENSOR_RANK_LIMIT; ++i) {
dim[i] = 1;
dim[i] = 0;
}

return 0;
Expand All @@ -317,6 +317,7 @@ int
dvrt_subplugin::setTensorProp (gint isInput)
{
GstTensorsInfo *tensorMeta;
GstTensorInfo *_info;
const guint32 *indices;
gsize num;
vector<NNTensor *> *tensors;
Expand All @@ -343,20 +344,22 @@ dvrt_subplugin::setTensorProp (gint isInput)
for (size_t i = 0; i < num; i++) {
gsize index = indices[i];
NNTensor *tensor = nn_context_tensor_index (context, index);

_info = gst_tensors_info_get_nth_info (tensorMeta, (guint) i);
tensors->push_back (tensor);

const gchar *name = nn_model_layer_name (model, index);
tensorMeta->info[i].name = g_strdup (name);
if (getTensorDim (index, tensorMeta->info[i].dimension))
_info->name = g_strdup (name);
if (getTensorDim (index, _info->dimension))
return -EINVAL;

if (getTensorType (index, &tensorMeta->info[i].type))
if (getTensorType (index, &_info->type))
return -EINVAL;

gchar *dim;
dim = gst_tensor_get_dimension_string (tensorMeta->info[i].dimension);
ml_logd ("tensorMeta[%zu] >> name[%s], type[%d], dim[%s]", i,
tensorMeta->info[i].name, tensorMeta->info[i].type, dim);
dim = gst_tensor_get_dimension_string (_info->dimension);
ml_logd ("tensorMeta[%zu] >> name[%s], type[%d], dim[%s]", i, _info->name,
_info->type, dim);
g_free (dim);
}

Expand Down
24 changes: 15 additions & 9 deletions ext/nnstreamer/tensor_filter/tensor_filter_edgetpu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -489,9 +489,9 @@ edgetpu_subplugin::getTensorDim (tflite::Interpreter *interpreter, int tensor_id
/* the order of dimension is reversed at CAPS negotiation */
std::reverse_copy (tensor_dims->data, tensor_dims->data + len, dim);

/* fill the remnants with 1 */
/* fill the remnants with 0 */
for (int i = len; i < NNS_TENSOR_RANK_LIMIT; ++i) {
dim[i] = 1;
dim[i] = 0;
}

return 0;
Expand Down Expand Up @@ -535,19 +535,25 @@ void
edgetpu_subplugin::setTensorProp (tflite::Interpreter *interpreter,
const std::vector<int> &tensor_idx_list, GstTensorsInfo &tensorMeta)
{
tensorMeta.num_tensors = tensor_idx_list.size ();
if (tensorMeta.num_tensors > NNS_TENSOR_SIZE_LIMIT)
GstTensorInfo *_info;
unsigned int num;

num = tensor_idx_list.size ();
if (num > NNS_TENSOR_SIZE_LIMIT)
throw std::invalid_argument (
"The number of tensors required by the given model exceeds the nnstreamer tensor limit (16 by default).");

for (unsigned int i = 0; i < tensorMeta.num_tensors; ++i) {
if (getTensorDim (interpreter, tensor_idx_list[i], tensorMeta.info[i].dimension)) {
tensorMeta.num_tensors = num;

for (unsigned int i = 0; i < num; ++i) {
_info = gst_tensors_info_get_nth_info (std::addressof (tensorMeta), i);

if (getTensorDim (interpreter, tensor_idx_list[i], _info->dimension)) {
std::cerr << "failed to get the dimension of tensors" << std::endl;
throw std::invalid_argument ("Cannot get the dimensions of given tensors at the tensor ");
}
tensorMeta.info[i].type
= getTensorType (interpreter->tensor (tensor_idx_list[i])->type);
tensorMeta.info[i].name = nullptr; /** @todo tensor name is not retrieved */
_info->type = getTensorType (interpreter->tensor (tensor_idx_list[i])->type);
_info->name = nullptr; /** @todo tensor name is not retrieved */
}
}

Expand Down
21 changes: 15 additions & 6 deletions ext/nnstreamer/tensor_filter/tensor_filter_lua.cc
Original file line number Diff line number Diff line change
Expand Up @@ -363,6 +363,7 @@ lua_subplugin::getEmptyInstance ()
void
lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info)
{
GstTensorInfo *_info;

if (lua_istable (L, -1)) {
lua_pushstring (L, "num");
Expand All @@ -383,10 +384,11 @@ lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info)
lua_gettable (L, -2);
if (lua_istable (L, -1)) {
for (uint j = 1; j <= tensors_info.num_tensors; ++j) {
_info = gst_tensors_info_get_nth_info (std::addressof (tensors_info), (j - 1));
lua_pushinteger (L, j);
lua_gettable (L, -2);
tensors_info.info[j - 1].type = gst_tensor_get_type (lua_tostring (L, -1));
if (tensors_info.info[j - 1].type == _NNS_END)
_info->type = gst_tensor_get_type (lua_tostring (L, -1));
if (_info->type == _NNS_END)
throw std::invalid_argument (
"Failed to parse `type`. Possible types are " GST_TENSOR_TYPE_ALL);
lua_pop (L, 1);
Expand All @@ -400,6 +402,7 @@ lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info)
lua_gettable (L, -2);
if (lua_istable (L, -1)) {
for (uint j = 1; j <= tensors_info.num_tensors; ++j) {
_info = gst_tensors_info_get_nth_info (std::addressof (tensors_info), (j - 1));
lua_pushinteger (L, j);
lua_gettable (L, -2);
if (lua_istable (L, -1)) {
Expand All @@ -408,14 +411,14 @@ lua_subplugin::setTensorsInfo (GstTensorsInfo &tensors_info)
lua_pushinteger (L, i);
lua_gettable (L, -2);
if (lua_isnumber (L, -1)) {
tensors_info.info[j - 1].dimension[i - 1] = lua_tointeger (L, -1);
_info->dimension[i - 1] = lua_tointeger (L, -1);
} else {
throw std::invalid_argument ("Failed to parse `dim`. Please check the script");
}
lua_pop (L, 1);
}
for (uint i = len + 1; i <= NNS_TENSOR_RANK_LIMIT; i++) {
tensors_info.info[j - 1].dimension[i - 1] = 1;
_info->dimension[i - 1] = 0;
}
} else {
throw std::invalid_argument ("Failed to parse `dim`. Please check the script");
Expand Down Expand Up @@ -487,19 +490,25 @@ lua_subplugin::configure_instance (const GstTensorFilterProperties *prop)
void
lua_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *output)
{
GstTensorInfo *_info;

if (!input)
throw std::runtime_error ("Invalid input buffer, it is NULL.");
if (!output)
throw std::runtime_error ("Invalid output buffer, it is NULL.");

for (uint i = 0; i < inputInfo.num_tensors; ++i) {
input_lua_tensors[i].type = inputInfo.info[i].type;
_info = gst_tensors_info_get_nth_info (std::addressof (inputInfo), i);

input_lua_tensors[i].type = _info->type;
input_lua_tensors[i].data = input[i].data;
input_lua_tensors[i].size = input[i].size;
}

for (uint i = 0; i < outputInfo.num_tensors; ++i) {
output_lua_tensors[i].type = outputInfo.info[i].type;
_info = gst_tensors_info_get_nth_info (std::addressof (outputInfo), i);

output_lua_tensors[i].type = _info->type;
output_lua_tensors[i].data = output[i].data;
output_lua_tensors[i].size = output[i].size;
}
Expand Down
17 changes: 11 additions & 6 deletions ext/nnstreamer/tensor_filter/tensor_filter_mediapipe.cc
Original file line number Diff line number Diff line change
Expand Up @@ -112,11 +112,14 @@ mediapipe_subplugin::mediapipe_subplugin ()
mediapipe_subplugin::~mediapipe_subplugin ()
{
mediapipe::Status status;
GstTensorInfo *_info;

g_free (config_path);

for (unsigned int i = 0; i < inputInfo.num_tensors; i++) {
status = graph.CloseInputStream (inputInfo.info[i].name);
_info = gst_tensors_info_get_nth_info (&inputInfo, i);

status = graph.CloseInputStream (_info->name);
if (!status.ok ()) {
std::cerr << "Failed to close input stream" << std::endl;
}
Expand Down Expand Up @@ -240,15 +243,17 @@ mediapipe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *outp
#if (DBG)
gint64 start_time = g_get_real_time ();
#endif
int input_width = inputInfo.info[0].dimension[1];
int input_height = inputInfo.info[0].dimension[2];
int input_channels = inputInfo.info[0].dimension[0];
GstTensorInfo *in_info = gst_tensors_info_get_nth_info (&inputInfo, 0U);
GstTensorInfo *out_info = gst_tensors_info_get_nth_info (&outputInfo, 0U);
int input_width = in_info->dimension[1];
int input_height = in_info->dimension[2];
int input_channels = in_info->dimension[0];
int input_widthStep = input_width * input_channels;
mediapipe::Status status;

/* TODO to make it better, start the graph at init or previous step */
mediapipe::OutputStreamPoller poller
= graph.AddOutputStreamPoller (outputInfo.info[0].name).ValueOrDie ();
= graph.AddOutputStreamPoller (out_info->name).ValueOrDie ();
status = graph.StartRun ({});
if (!status.ok ()) {
std::cerr << "Fail to start mediapipe graph" << std::endl;
Expand All @@ -262,7 +267,7 @@ mediapipe_subplugin::invoke (const GstTensorMemory *input, GstTensorMemory *outp
);

// Send image packet
status = graph.AddPacketToInputStream (inputInfo.info[0].name,
status = graph.AddPacketToInputStream (in_info->name,
mediapipe::Adopt (input_frame.release ()).At (mediapipe::Timestamp (frame_timestamp++)));
if (!status.ok ()) {
std::cerr << "Failed to add input packet" << std::endl;
Expand Down
Loading

0 comments on commit 0a05b79

Please sign in to comment.