diff --git a/.gitignore b/.gitignore index 84425f9751..363ea042fb 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ # vi files *.h~ *.c~ +*.cc~ *.swp # binary files diff --git a/debian/rules b/debian/rules index 016bea16b7..85dc28fedd 100755 --- a/debian/rules +++ b/debian/rules @@ -16,3 +16,5 @@ %: dh $@ --buildsystem=cmake --builddirectory=build +override_dh_auto_configure: + dh_auto_configure -- -DDISABLE_TENSORFLOW_LITE=ON diff --git a/gst/tensor_filter/CMakeLists.txt b/gst/tensor_filter/CMakeLists.txt index c0e9fc2434..829a3cd7b2 100644 --- a/gst/tensor_filter/CMakeLists.txt +++ b/gst/tensor_filter/CMakeLists.txt @@ -1,5 +1,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +OPTION(DISABLE_TENSORFLOW_LITE "Disable tensorflow-lite support" OFF) + ADD_LIBRARY(tensor_filter SHARED tensor_filter.c tensor_filter_tensorflow_lite.c @@ -10,8 +12,27 @@ TARGET_LINK_LIBRARIES(tensor_filter dl ${pkgs_LIBRARIES}) TARGET_INCLUDE_DIRECTORIES(tensor_filter PUBLIC ${pkgs_INCLUDE_DIRS}) TARGET_COMPILE_OPTIONS(tensor_filter PUBLIC ${pkgs_CFLAGS_OTHER}) -INSTALL(TARGETS tensor_filter - RUNTIME DESTINATION ${EXEC_PREFIX} - LIBRARY DESTINATION ${LIB_INSTALL_DIR} - ARCHIVE DESTINATION ${LIB_INSTALL_DIR} - ) +# check whether TENSORFLOW_LITE is available. 
+# DISABLE_TENSORFLOW_LITE is defined at /debian/rules according to the build environment +IF(DISABLE_TENSORFLOW_LITE) # NOT AVAILABLE + INSTALL(TARGETS tensor_filter + RUNTIME DESTINATION ${EXEC_PREFIX} + LIBRARY DESTINATION ${LIB_INSTALL_DIR} + ARCHIVE DESTINATION ${LIB_INSTALL_DIR} + ) +ELSE(DISABLE_TENSORFLOW_LITE) # AVAILABLE + ADD_LIBRARY(tensor_filter_tflitecore SHARED + tensor_filter_tensorflow_lite_core.cc + ) + + TARGET_LINK_LIBRARIES(tensor_filter_tflitecore tensor_filter ${pkgs_LIBRARIES} tensorflow-lite) + TARGET_INCLUDE_DIRECTORIES(tensor_filter_tflitecore PUBLIC ${pkgs_INCLUDE_DIRS}) + TARGET_COMPILE_OPTIONS(tensor_filter_tflitecore PUBLIC ${pkgs_CFLAGS_OTHER}) + + INSTALL(TARGETS tensor_filter tensor_filter_tflitecore + RUNTIME DESTINATION ${EXEC_PREFIX} + LIBRARY DESTINATION ${LIB_INSTALL_DIR} + ARCHIVE DESTINATION ${LIB_INSTALL_DIR} + ) +ENDIF(DISABLE_TENSORFLOW_LITE) + diff --git a/gst/tensor_filter/tensor_filter.c b/gst/tensor_filter/tensor_filter.c index 4c4899cf13..02eb026373 100644 --- a/gst/tensor_filter/tensor_filter.c +++ b/gst/tensor_filter/tensor_filter.c @@ -86,7 +86,12 @@ GstTensor_Filter_Framework *tensor_filter_supported[] = { [_T_F_UNDEFINED] = NULL, [_T_F_CUSTOM] = &NNS_support_custom, + +#ifdef DISABLE_TENSORFLOW_LITE + [_T_F_TENSORFLOW_LITE] = NULL, +#else [_T_F_TENSORFLOW_LITE] = &NNS_support_tensorflow_lite, +#endif [_T_F_TENSORFLOW] = NULL, [_T_F_CAFFE2] = NULL, diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite.c b/gst/tensor_filter/tensor_filter_tensorflow_lite.c index 703cdcb8b0..93d2f32945 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_lite.c +++ b/gst/tensor_filter/tensor_filter_tensorflow_lite.c @@ -53,6 +53,7 @@ */ #include "tensor_filter.h" +#include "tensor_filter_tensorflow_lite_core.h" #include /** diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc new file mode 100644 index 0000000000..571bf6179b --- /dev/null +++ 
b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc @@ -0,0 +1,112 @@ +/** + * Copyright (C) 2017 - 2018 Samsung Electronics Co., Ltd. All rights reserved. + * + * PROPRIETARY/CONFIDENTIAL + * + * This software is the confidential and proprietary information of + * SAMSUNG ELECTRONICS ("Confidential Information"). You shall not + * disclose such Confidential Information and shall use it only in + * accordance with the terms of the license agreement you entered + * into with SAMSUNG ELECTRONICS. SAMSUNG make no representations + * or warranties about the suitability of the software, either + * express or implied, including but not limited to the implied + * warranties of merchantability, fitness for a particular purpose, + * or non-infringement. SAMSUNG shall not be liable for any damages + * suffered by licensee as a result of using, modifying or distributing + * this software or its derivatives. + */ + +/** + * @file tensor_filter_tensorflow_lite_core.cc + * @author HyoungJoo Ahn + * @date 7/5/2018 + * @brief connection with tflite libraries. + * + * @bug No know bugs. + * @todo Invoke() should be implemented. + * @todo If it is required, class will be implemented as a singleton. + */ +#include "tensor_filter_tensorflow_lite_core.h" + +/** + * @brief call the creator of TFLiteCore class. + * @param _model_path : the logical path to '{model_name}.tffile' file + * @return TFLiteCore class + */ +extern void * +tflite_core_new (char *_model_path) +{ + return new TFLiteCore (_model_path); +} + +/** + * @brief delete the TFLiteCore class. 
+ * @param _tflite : the class object + * @return Nothing + */ +extern void +tflite_core_delete (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + delete c; +} + +/** + * @brief get model path + * @param _tflite : the class object + * @return model path + */ +extern char * +tflite_core_getModelPath (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + return c->getModelPath (); +} + +/** + * @brief get the Dimension of Input Tensor of model + * @param _tflite : the class object + * @return the input dimension + */ +int * +tflite_core_getInputDim (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + return c->getInputTensorDim (); +} + +/** + * @brief get the Dimension of Output Tensor of model + * @param _tflite : the class object + * @return the output dimension + */ +int * +tflite_core_getOutputDim (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + return c->getOutputTensorDim (); +} + +/** + * @brief get the size of Input Tensor of model + * @param _tflite : the class object + * @return how many input tensors are + */ +int +tflite_core_getInputSize (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + return c->getInputTensorSize (); +} + +/** + * @brief get the size of Output Tensor of model + * @param _tflite : the class object + * @return how many output tensors are + */ +int +tflite_core_getOutputSize (void *tflite) +{ + TFLiteCore *c = (TFLiteCore *) tflite; + return c->getOutputTensorSize (); +} diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h new file mode 100644 index 0000000000..1a7e41b75e --- /dev/null +++ b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h @@ -0,0 +1,182 @@ +/** + * Copyright (C) 2017 - 2018 Samsung Electronics Co., Ltd. All rights reserved. + * + * PROPRIETARY/CONFIDENTIAL + * + * This software is the confidential and proprietary information of + * SAMSUNG ELECTRONICS ("Confidential Information"). 
You shall not + * disclose such Confidential Information and shall use it only in + * accordance with the terms of the license agreement you entered + * into with SAMSUNG ELECTRONICS. SAMSUNG make no representations + * or warranties about the suitability of the software, either + * express or implied, including but not limited to the implied + * warranties of merchantability, fitness for a particular purpose, + * or non-infringement. SAMSUNG shall not be liable for any damages + * suffered by licensee as a result of using, modifying or distributing + * this software or its derivatives. + */ + +/** + * @file tensor_filter_tensorflow_lite_core.h + * @author HyoungJoo Ahn + * @date 7/5/2018 + * @brief connection with tflite libraries. + * + * @bug No know bugs. + * @todo Invoke() should be implemented. + * @todo If it is required, class will be implemented as a singleton. + */ +#ifndef TENSOR_FILTER_TENSORFLOW_LITE_H +#define TENSOR_FILTER_TENSORFLOW_LITE_H + +#ifdef __cplusplus +#include + +#include "tensorflow/contrib/lite/model.h" +#include "tensorflow/contrib/lite/optional_debug_tools.h" +#include "tensorflow/contrib/lite/string_util.h" +#include "tensorflow/contrib/lite/kernels/register.h" + +/** + * @brief ring cache structure + */ +class TFLiteCore +{ +public: + /** + * member functions. + */ + TFLiteCore (char *_model_path); + char *getModelPath () + { + return model_path; + } + int loadModel (); + const char *getInputTensorName (); + const char *getOutputTensorName (); + int getInputTensorSize () + { + return input_size; + } + int getOutputTensorSize () + { + return output_size; + } + int *getInputTensorDim (); + int *getOutputTensorDim (); + +private: + /** + * member variables. 
+ */ + char *model_path; + int tensor_size; + int node_size; + int input_size; + int output_size; + const char *input_name; + const char *output_name; + int input_idx; + int output_idx; + std::unique_ptr < tflite::Interpreter > interpreter; + std::unique_ptr < tflite::FlatBufferModel > model; +}; + +/** + * @brief TFLiteCore creator + * @param _model_path : the logical path to '{model_name}.tffile' file + * @note the model of _model_path will be loaded simultaneously + * @return Nothing + */ +TFLiteCore::TFLiteCore (char *_model_path) +{ + model_path = _model_path; + loadModel (); +} + +/** + * @brief load the tflite model + * @note the model will be loaded + * @return Nothing + */ +int +TFLiteCore::loadModel () +{ + if (!interpreter) { + model = + std::unique_ptr < tflite::FlatBufferModel > + (tflite::FlatBufferModel::BuildFromFile (model_path)); + if (!model) { + std::cout << "Failed to mmap model" << std::endl; + return -1; + } + model->error_reporter (); + std::cout << "model loaded" << std::endl; + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder (*model, resolver) (&interpreter); + if (!interpreter) { + std::cout << "Failed to construct interpreter" << std::endl; + return -2; + } + } + // fill class parameters + tensor_size = interpreter->tensors_size (); + node_size = interpreter->nodes_size (); + input_size = interpreter->inputs ().size (); + input_name = interpreter->GetInputName (0); + output_size = interpreter->outputs ().size (); + output_name = interpreter->GetOutputName (0); + + int t_size = interpreter->tensors_size (); + for (int i = 0; i < t_size; i++) { + if (strcmp (interpreter->tensor (i)->name, + interpreter->GetInputName (0)) == 0) + input_idx = i; + if (strcmp (interpreter->tensor (i)->name, + interpreter->GetOutputName (0)) == 0) + output_idx = i; + } + return 1; +} + +/** + * @brief return the Dimension of Input Tensor. + * @return the array of integer. 
+ */ +int * +TFLiteCore::getInputTensorDim () +{ + return interpreter->tensor (input_idx)->dims->data; +} + +/** + * @brief return the Dimension of Output Tensor. + * @return the array of integer. + */ +int * +TFLiteCore::getOutputTensorDim () +{ + return interpreter->tensor (output_idx)->dims->data; +} + +/** + * @brief the definition of functions to be used at C files. + */ +extern "C" +{ +#endif + + extern void *tflite_core_new (char *_model_path); + extern void tflite_core_delete (void *tflite); + extern char *tflite_core_getModelPath (void *tflite); + extern int *tflite_core_getInputDim (void *tflite); + extern int *tflite_core_getOutputDim (void *tflite); + extern int tflite_core_getInputSize (void *tflite); + extern int tflite_core_getOutputSize (void *tflite); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/packaging/nnstreamer.spec b/packaging/nnstreamer.spec index 87589e0f9b..7c991af903 100644 --- a/packaging/nnstreamer.spec +++ b/packaging/nnstreamer.spec @@ -30,6 +30,8 @@ BuildRequires: gst-plugins-base BuildRequires: gtest-devel # a few test cases uses python BuildRequires: python +# for tensorflow-lite +BuildRequires: tensorflow-lite-devel %if 0%{?testcoverage} BuildRequires: taos-ci-unittest-coverage-assessment