From 8d3ddf6431dd11c2df07021f6da77ba9eb8ebbd1 Mon Sep 17 00:00:00 2001 From: Michael Nosov Date: Fri, 28 May 2021 12:41:25 +0300 Subject: [PATCH] Squashed merge of pdpd_frontend and static_protobuf Removed unnecessary files (TF frontend, pdpd_poc) NGRAPH_PDPD_FRONTEND_ENABLE=OFF by default --- CMakeLists.txt | 2 + model-optimizer/automation/package_BOM.txt | 6 + model-optimizer/mo/front_ng/__init__.py | 2 + model-optimizer/mo/front_ng/extractor.py | 157 ++++++ .../mo/front_ng/frontendmanager_wrapper.py | 23 + model-optimizer/mo/front_ng/pipeline.py | 97 ++++ model-optimizer/mo/front_ng/serialize.py | 17 + model-optimizer/mo/main.py | 50 +- model-optimizer/mo/main_caffe.py | 2 +- model-optimizer/mo/main_kaldi.py | 2 +- model-optimizer/mo/main_mxnet.py | 2 +- model-optimizer/mo/main_onnx.py | 2 +- model-optimizer/mo/main_pdpd.py | 12 + model-optimizer/mo/main_tf.py | 2 +- model-optimizer/mo/middle/passes/infer.py | 31 +- model-optimizer/mo/utils/cli_parser.py | 22 +- model-optimizer/mo_pdpd.py | 8 + model-optimizer/unit_tests/mo/main_test.py | 2 +- ngraph/CMakeLists.txt | 36 +- ngraph/cmake/external_npy.cmake | 35 ++ ngraph/cmake/external_protobuf.cmake | 201 ++++++- ngraph/frontend/CMakeLists.txt | 53 ++ .../cmake_static_protobuf/CMakeLists.txt | 23 + .../frontend/frontend_manager/CMakeLists.txt | 53 ++ .../frontend_manager/frontend_exceptions.hpp | 122 ++++ .../frontend_manager/frontend_manager.hpp | 521 ++++++++++++++++++ .../frontend_manager_defs.hpp | 17 + .../frontend_manager/src/frontend_manager.cpp | 384 +++++++++++++ .../frontend_manager/src/plugin_loader.cpp | 110 ++++ .../frontend_manager/src/plugin_loader.hpp | 68 +++ ngraph/frontend/paddlepaddle/CMakeLists.txt | 65 +++ .../paddlepaddle_frontend/exceptions.hpp | 45 ++ .../paddlepaddle_frontend/frontend.hpp | 58 ++ .../include/paddlepaddle_frontend/model.hpp | 44 ++ .../include/paddlepaddle_frontend/place.hpp | 206 +++++++ .../include/paddlepaddle_frontend/utility.hpp | 31 ++ ngraph/frontend/paddlepaddle/src/decoder.cpp | 171 ++++++ ngraph/frontend/paddlepaddle/src/decoder.hpp | 65 +++ .../frontend/paddlepaddle/src/exceptions.cpp | 23 + ngraph/frontend/paddlepaddle/src/frontend.cpp | 221 ++++++++ ngraph/frontend/paddlepaddle/src/model.cpp | 447 +++++++++++++++ .../paddlepaddle/src/node_context.hpp | 192 +++++++ .../frontend/paddlepaddle/src/op/argmax.cpp | 57 ++ .../frontend/paddlepaddle/src/op/argmax.hpp | 21 + .../paddlepaddle/src/op/assign_value.cpp | 66 +++ .../paddlepaddle/src/op/assign_value.hpp | 22 + .../paddlepaddle/src/op/batch_norm.cpp | 64 +++ .../paddlepaddle/src/op/batch_norm.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/cast.cpp | 28 + ngraph/frontend/paddlepaddle/src/op/cast.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/clip.cpp | 31 ++ ngraph/frontend/paddlepaddle/src/op/clip.hpp | 21 + .../frontend/paddlepaddle/src/op/concat.cpp | 27 + .../frontend/paddlepaddle/src/op/concat.hpp | 21 + .../frontend/paddlepaddle/src/op/conv2d.cpp | 25 + .../frontend/paddlepaddle/src/op/conv2d.hpp | 21 + .../paddlepaddle/src/op/conv2d_transpose.cpp | 26 + .../paddlepaddle/src/op/conv2d_transpose.hpp | 21 + .../paddlepaddle/src/op/conv2d_utils.cpp | 114 ++++ .../paddlepaddle/src/op/conv2d_utils.hpp | 71 +++ .../frontend/paddlepaddle/src/op/dropout.cpp | 41 ++ .../frontend/paddlepaddle/src/op/dropout.hpp | 21 + .../paddlepaddle/src/op/elementwise_ops.cpp | 102 ++++ .../paddlepaddle/src/op/elementwise_ops.hpp | 26 + .../paddlepaddle/src/op/expand_v2.cpp | 48 ++ .../paddlepaddle/src/op/expand_v2.hpp | 21 + 
.../paddlepaddle/src/op/fill_constant.cpp | 51 ++ .../paddlepaddle/src/op/fill_constant.hpp | 22 + .../src/op/fill_constant_batch_size_like.cpp | 47 ++ .../src/op/fill_constant_batch_size_like.hpp | 21 + .../src/op/flatten_contiguous_range.cpp | 56 ++ .../src/op/flatten_contiguous_range.hpp | 21 + .../frontend/paddlepaddle/src/op/interp.cpp | 182 ++++++ .../frontend/paddlepaddle/src/op/interp.hpp | 23 + .../paddlepaddle/src/op/leakyrelu.cpp | 28 + .../paddlepaddle/src/op/leakyrelu.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/lstm.cpp | 238 ++++++++ ngraph/frontend/paddlepaddle/src/op/lstm.hpp | 22 + .../frontend/paddlepaddle/src/op/matmul.cpp | 34 ++ .../frontend/paddlepaddle/src/op/matmul.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/mul.cpp | 67 +++ ngraph/frontend/paddlepaddle/src/op/mul.hpp | 21 + .../paddlepaddle/src/op/multiclass_nms.cpp | 46 ++ .../paddlepaddle/src/op/multiclass_nms.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/pad3d.cpp | 117 ++++ ngraph/frontend/paddlepaddle/src/op/pad3d.hpp | 21 + .../frontend/paddlepaddle/src/op/pool2d.cpp | 256 +++++++++ .../frontend/paddlepaddle/src/op/pool2d.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/range.cpp | 41 ++ ngraph/frontend/paddlepaddle/src/op/range.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/relu.cpp | 25 + ngraph/frontend/paddlepaddle/src/op/relu.hpp | 21 + .../frontend/paddlepaddle/src/op/reshape2.cpp | 36 ++ .../frontend/paddlepaddle/src/op/reshape2.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/rnn.cpp | 28 + ngraph/frontend/paddlepaddle/src/op/rnn.hpp | 22 + ngraph/frontend/paddlepaddle/src/op/scale.cpp | 43 ++ ngraph/frontend/paddlepaddle/src/op/scale.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/shape.cpp | 26 + ngraph/frontend/paddlepaddle/src/op/shape.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/slice.cpp | 59 ++ ngraph/frontend/paddlepaddle/src/op/slice.hpp | 21 + .../frontend/paddlepaddle/src/op/softmax.cpp | 34 ++ .../frontend/paddlepaddle/src/op/softmax.hpp | 21 + ngraph/frontend/paddlepaddle/src/op/split.cpp | 46 ++ ngraph/frontend/paddlepaddle/src/op/split.hpp | 20 + .../frontend/paddlepaddle/src/op/squeeze.cpp | 34 ++ .../frontend/paddlepaddle/src/op/squeeze.hpp | 21 + .../paddlepaddle/src/op/transpose2.cpp | 40 ++ .../paddlepaddle/src/op/transpose2.hpp | 21 + .../paddlepaddle/src/op/unsqueeze.cpp | 30 + .../paddlepaddle/src/op/unsqueeze.hpp | 21 + .../frontend/paddlepaddle/src/op/yolo_box.cpp | 357 ++++++++++++ .../frontend/paddlepaddle/src/op/yolo_box.hpp | 21 + ngraph/frontend/paddlepaddle/src/op_table.cpp | 96 ++++ ngraph/frontend/paddlepaddle/src/op_table.hpp | 27 + ngraph/frontend/paddlepaddle/src/place.cpp | 115 ++++ .../paddlepaddle/src/proto/framework.proto | 205 +++++++ ngraph/python/CMakeLists.txt | 30 +- .../mock_py_ngraph_frontend/CMakeLists.txt | 28 + .../mock_py_frontend.cpp | 25 + .../mock_py_frontend.hpp | 484 ++++++++++++++++ .../pyngraph_fe_mock_api/CMakeLists.txt | 19 + .../pyngraph_mock_frontend_api.cpp | 136 +++++ ngraph/python/src/ngraph/__init__.py | 14 + ngraph/python/src/ngraph/frontend/__init__.py | 23 + ngraph/python/src/ngraph/impl/__init__.py | 1 + .../python/src/pyngraph/frontend_manager.cpp | 282 ++++++++++ .../python/src/pyngraph/frontend_manager.hpp | 21 + ngraph/python/src/pyngraph/partial_shape.cpp | 19 + ngraph/python/src/pyngraph/pyngraph.cpp | 11 + .../tests/test_ngraph/test_frontendmanager.py | 504 +++++++++++++++++ ngraph/test/CMakeLists.txt | 72 +++ .../gen_scripts/generate_2in_2out.py | 39 ++ .../gen_scripts/generate_2in_2out_dynbatch.py | 39 ++ 
.../gen_scripts/generate_argmax.py | 60 ++ .../gen_scripts/generate_assign_value.py | 58 ++ .../gen_scripts/generate_batch_norm.py | 89 +++ .../paddlepaddle/gen_scripts/generate_clip.py | 39 ++ .../gen_scripts/generate_conv2d.py | 22 + .../generate_conv2d_combinations.py | 145 +++++ .../gen_scripts/generate_conv2d_relu.py | 25 + .../gen_scripts/generate_conv2d_s.py | 22 + .../gen_scripts/generate_conv2d_transpose.py | 148 +++++ .../gen_scripts/generate_dropout.py | 47 ++ .../gen_scripts/generate_expand_v2.py | 63 +++ .../gen_scripts/generate_fill_constant.py | 37 ++ .../generate_fill_constant_batch_size_like.py | 39 ++ .../generate_flatten_contiguous_range.py | 38 ++ .../gen_scripts/generate_interpolate.py | 197 +++++++ .../generate_multi_tensor_split.py | 53 ++ .../gen_scripts/generate_pad3d.py | 70 +++ .../gen_scripts/generate_pool2d.py | 260 +++++++++ .../gen_scripts/generate_range.py | 45 ++ .../paddlepaddle/gen_scripts/generate_relu.py | 22 + .../gen_scripts/generate_rnn_lstm.py | 67 +++ .../gen_scripts/generate_shape.py | 38 ++ .../gen_scripts/generate_slice.py | 39 ++ .../gen_scripts/generate_split.py | 52 ++ .../gen_scripts/generate_squeeze.py | 38 ++ .../gen_scripts/generate_unsqueeze.py | 37 ++ .../gen_scripts/generate_yolo_box.py | 78 +++ .../paddlepaddle/gen_scripts/save_model.py | 80 +++ ngraph/test/files/paddlepaddle/gen_wrapper.py | 20 + .../test/files/paddlepaddle/models/models.csv | 84 +++ ngraph/test/frontend/CMakeLists.txt | 16 + ngraph/test/frontend/frontend_manager.cpp | 151 +++++ ngraph/test/frontend/mock_frontend.cpp | 34 ++ .../test/frontend/paddlepaddle/basic_api.cpp | 30 + .../paddlepaddle/cut_specific_model.cpp | 31 ++ .../test/frontend/paddlepaddle/exceptions.cpp | 38 ++ .../test/frontend/paddlepaddle/load_from.cpp | 27 + ngraph/test/frontend/paddlepaddle/op.cpp | 202 +++++++ .../frontend/paddlepaddle/partial_shape.cpp | 70 +++ .../paddlepaddle/set_element_type.cpp | 24 + .../frontend/shared/include/basic_api.hpp | 33 ++ .../shared/include/cut_specific_model.hpp | 45 ++ .../frontend/shared/include/load_from.hpp | 32 ++ ngraph/test/frontend/shared/include/op.hpp | 45 ++ .../frontend/shared/include/partial_shape.hpp | 51 ++ .../shared/include/set_element_type.hpp | 30 + ngraph/test/frontend/shared/include/utils.hpp | 43 ++ ngraph/test/frontend/shared/src/basic_api.cpp | 192 +++++++ .../shared/src/cut_specific_model.cpp | 279 ++++++++++ ngraph/test/frontend/shared/src/load_from.cpp | 101 ++++ ngraph/test/frontend/shared/src/op.cpp | 61 ++ .../frontend/shared/src/partial_shape.cpp | 86 +++ .../frontend/shared/src/set_element_type.cpp | 60 ++ 188 files changed, 12824 insertions(+), 55 deletions(-) create mode 100644 model-optimizer/mo/front_ng/__init__.py create mode 100644 model-optimizer/mo/front_ng/extractor.py create mode 100644 model-optimizer/mo/front_ng/frontendmanager_wrapper.py create mode 100644 model-optimizer/mo/front_ng/pipeline.py create mode 100644 model-optimizer/mo/front_ng/serialize.py create mode 100644 model-optimizer/mo/main_pdpd.py create mode 100755 model-optimizer/mo_pdpd.py create mode 100644 ngraph/cmake/external_npy.cmake create mode 100644 ngraph/frontend/cmake_static_protobuf/CMakeLists.txt create mode 100644 ngraph/frontend/frontend_manager/CMakeLists.txt create mode 100644 ngraph/frontend/frontend_manager/include/frontend_manager/frontend_exceptions.hpp create mode 100644 ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager.hpp create mode 100644 
ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager_defs.hpp create mode 100644 ngraph/frontend/frontend_manager/src/frontend_manager.cpp create mode 100644 ngraph/frontend/frontend_manager/src/plugin_loader.cpp create mode 100644 ngraph/frontend/frontend_manager/src/plugin_loader.hpp create mode 100644 ngraph/frontend/paddlepaddle/CMakeLists.txt create mode 100644 ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp create mode 100644 ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp create mode 100644 ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp create mode 100644 ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp create mode 100644 ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/decoder.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/decoder.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/exceptions.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/frontend.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/model.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/node_context.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/argmax.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/argmax.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/assign_value.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/assign_value.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/cast.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/cast.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/clip.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/clip.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/concat.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/concat.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d_utils.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/conv2d_utils.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/dropout.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/dropout.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/expand_v2.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/interp.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/interp.hpp create mode 100644 
ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/lstm.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/lstm.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/matmul.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/matmul.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/mul.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/mul.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/pad3d.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/pad3d.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/pool2d.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/pool2d.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/range.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/range.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/relu.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/relu.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/reshape2.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/reshape2.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/rnn.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/rnn.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/scale.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/scale.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/shape.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/shape.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/slice.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/slice.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/softmax.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/softmax.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/split.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/split.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/squeeze.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/squeeze.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/transpose2.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/transpose2.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/op_table.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/op_table.hpp create mode 100644 ngraph/frontend/paddlepaddle/src/place.cpp create mode 100644 ngraph/frontend/paddlepaddle/src/proto/framework.proto create mode 100644 ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/CMakeLists.txt create mode 100644 ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.cpp create mode 100644 ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.hpp create mode 100644 ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/CMakeLists.txt create mode 100644 ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp create mode 100644 ngraph/python/src/ngraph/frontend/__init__.py create mode 100644 ngraph/python/src/pyngraph/frontend_manager.cpp create mode 100644 
ngraph/python/src/pyngraph/frontend_manager.hpp create mode 100644 ngraph/python/tests/test_ngraph/test_frontendmanager.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out_dynbatch.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_argmax.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_assign_value.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_batch_norm.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_clip.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_combinations.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_relu.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_s.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_transpose.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_dropout.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_expand_v2.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant_batch_size_like.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_flatten_contiguous_range.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_interpolate.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_multi_tensor_split.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_pad3d.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_pool2d.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_range.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_relu.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_rnn_lstm.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_shape.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_slice.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_split.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_squeeze.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_unsqueeze.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/generate_yolo_box.py create mode 100644 ngraph/test/files/paddlepaddle/gen_scripts/save_model.py create mode 100644 ngraph/test/files/paddlepaddle/gen_wrapper.py create mode 100644 ngraph/test/files/paddlepaddle/models/models.csv create mode 100644 ngraph/test/frontend/CMakeLists.txt create mode 100644 ngraph/test/frontend/frontend_manager.cpp create mode 100644 ngraph/test/frontend/mock_frontend.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/basic_api.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/exceptions.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/load_from.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/op.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/partial_shape.cpp create mode 100644 ngraph/test/frontend/paddlepaddle/set_element_type.cpp create mode 100644 ngraph/test/frontend/shared/include/basic_api.hpp create mode 100644 
ngraph/test/frontend/shared/include/cut_specific_model.hpp create mode 100644 ngraph/test/frontend/shared/include/load_from.hpp create mode 100644 ngraph/test/frontend/shared/include/op.hpp create mode 100644 ngraph/test/frontend/shared/include/partial_shape.hpp create mode 100644 ngraph/test/frontend/shared/include/set_element_type.hpp create mode 100644 ngraph/test/frontend/shared/include/utils.hpp create mode 100644 ngraph/test/frontend/shared/src/basic_api.cpp create mode 100644 ngraph/test/frontend/shared/src/cut_specific_model.cpp create mode 100644 ngraph/test/frontend/shared/src/load_from.cpp create mode 100644 ngraph/test/frontend/shared/src/op.cpp create mode 100644 ngraph/test/frontend/shared/src/partial_shape.cpp create mode 100644 ngraph/test/frontend/shared/src/set_element_type.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 7788252f207582..79e0d2b71dc998 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -66,6 +66,7 @@ function(build_ngraph) ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF) endif() ngraph_set(NGRAPH_INTERPRETER_ENABLE ON) + ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE OFF) if(TREAT_WARNING_AS_ERROR) ngraph_set(NGRAPH_WARNINGS_AS_ERRORS ON) @@ -108,6 +109,7 @@ function(build_ngraph) set(SDL_cmake_included ON) add_subdirectory(ngraph) set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE) + set(FRONTEND_LIBRARIES frontend_manager PARENT_SCOPE) set(NGRAPH_REF_LIBRARIES ngraph_reference PARENT_SCOPE) endfunction() diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index eb216c0719980b..6ee2fc8a2561c1 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -933,6 +933,11 @@ mo/front/tf/partial_infer/__init__.py mo/front/tf/partial_infer/tf.py mo/front/tf/register_custom_ops.py mo/front/tf/replacement.py +mo/front_ng/__init__.py +mo/front_ng/extractor.py +mo/front_ng/frontendmanager_wrapper.py +mo/front_ng/pipeline.py +mo/front_ng/serialize.py mo/graph/__init__.py mo/graph/connection.py mo/graph/graph.py @@ -943,6 +948,7 @@ mo/main_caffe.py mo/main_kaldi.py mo/main_mxnet.py mo/main_onnx.py +mo/main_pdpd.py mo/main_tf.py mo/middle/__init__.py mo/middle/passes/__init__.py diff --git a/model-optimizer/mo/front_ng/__init__.py b/model-optimizer/mo/front_ng/__init__.py new file mode 100644 index 00000000000000..dc8ba3c4598ddc --- /dev/null +++ b/model-optimizer/mo/front_ng/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/model-optimizer/mo/front_ng/extractor.py b/model-optimizer/mo/front_ng/extractor.py new file mode 100644 index 00000000000000..a1b0e8ff94f464 --- /dev/null +++ b/model-optimizer/mo/front_ng/extractor.py @@ -0,0 +1,157 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import re +from collections import defaultdict +from copy import copy + +import numpy as np + +from mo.utils.error import Error + + +def fe_decodeNameWithPort (inputModel, node_name: str): + """ + Decode name with optional port specification w/o traversing all the nodes in the graph + :param inputModel: Input Model + :param node_name: + :return: decoded place in the graph + """ + # Check exact match with one of the names in the graph first + node = inputModel.get_place_by_tensor_name(node_name) + if node: + return node + # TODO: not tested for available frontends + regexpPost = r'(.*)(:(\d+))' + matchPost = re.search(regexpPost, node_name) + nodePost = 
inputModel.get_place_by_tensor_name(matchPost.group(1)) if matchPost else None
+    regexpPre = r'((\d+):)(.*)'
+    matchPre = re.search(regexpPre, node_name)
+    nodePre = inputModel.get_place_by_tensor_name(matchPre.group(3)) if matchPre else None
+    if nodePost and nodePre:
+        raise Error('Name collision for {}'.format(node_name))
+    if nodePost:
+        return nodePost.get_output_port(int(matchPost.group(3)))
+    if nodePre:
+        return nodePre.get_input_port(int(matchPre.group(2)))
+    raise Error('There is no node with name {}'.format(node_name))
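For reference, the decoder above accepts three forms of a cut request: a plain tensor name, 'name:N' (output port N of the producing node), and 'N:name' (input port N). A small, self-contained sketch of the same matching rules, using hypothetical tensor names and no real InputModel:

    import re

    for name in ('conv1', 'conv1:2', '0:conv1'):
        match_post = re.search(r'(.*)(:(\d+))', name)  # 'name:N' -> output port N
        match_pre = re.search(r'((\d+):)(.*)', name)   # 'N:name' -> input port N
        if match_post:
            print(name, '-> node', match_post.group(1), 'output port', int(match_post.group(3)))
        elif match_pre:
            print(name, '-> node', match_pre.group(3), 'input port', int(match_pre.group(2)))
        else:
            print(name, '-> plain tensor name, exact lookup only')

A name such as '0:conv1:2' matches both patterns; that is the name-collision case the function rejects.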
+
+
+def fe_input_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.ndarray],
+                              freeze_placeholder: dict, input_user_data_types=dict()):
+    """
+    Restructures user input cutting request. Splits ports out of node names.
+    Transforms node names to node ids.
+    :param inputModel: Input Model to operate on
+    :param input_user_shapes: data structure representing user input cutting request. It may be:
+    # None value if user provided neither --input nor --input_shape keys
+    # list instance which contains input layer names with or without ports if user provided only --input key
+    # dict instance which contains input layer names with or without ports as keys and shapes as values if user
+    provided both --input and --input_shape
+    # np.ndarray if user provided only --input_shape key
+    :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
+    :param input_user_data_types: dictionary with input nodes and their data types
+    :return: restructured input shapes and freeze placeholder shapes information
+    Example of input dictionary:
+    _input_shapes =
+    {
+        'node_ID':
+            [
+                {'shape': None, 'in': 0},
+                {'shape': None, 'in': 1},
+            ],
+        'node_1_ID':
+            [
+                {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32}
+            ],
+        'node_2_ID':
+            [
+                {'shape': None, 'out': 3}
+            ]
+    }
+    Example of freeze placeholder dictionary:
+    _freeze_placeholder =
+    {
+        'phase_train': False
+    }
+    """
+    _input_shapes = []
+    # New version of FrontEnd is activated
+    log.debug('Inside fe_input_user_data_repack')
+    if isinstance(input_user_shapes, list) or isinstance(input_user_shapes, dict):
+        for input_name in input_user_shapes:
+            node = fe_decodeNameWithPort(inputModel, input_name)
+            if node is None:
+                raise Error('Cannot find location {} in the input model'.format(input_name))
+            shape = None if isinstance(input_user_shapes, list) else input_user_shapes[input_name]
+            if input_name in input_user_data_types and input_user_data_types[input_name] is not None:
+                data_type = input_user_data_types[input_name]
+                _input_shapes.append({'node': node, 'shape': shape, 'data_type': data_type})
+            else:
+                _input_shapes.append({'node': node, 'shape': shape})
+    elif isinstance(input_user_shapes, np.ndarray):
+        model_inputs = inputModel.get_inputs()
+        assert len(model_inputs) == 1
+        _input_shapes.append({'node': model_inputs[0], 'shape': input_user_shapes})
+    else:
+        assert input_user_shapes is None
+    # TODO: add logic for freeze_placeholder
+    return _input_shapes, dict()
+
+
+def fe_output_user_data_repack(inputModel, outputs: list):
+    """
+    :param inputModel: Input Model to operate on
+    :param outputs: list of node names provided by user
+    :return: dictionary with node IDs as keys and list of port dictionaries as values
+    Example of outputs dictionary:
+    _outputs =
+    {
+        'node_ID':
+            [
+                {'out': 0},
+                {'out': 1},
+            ],
+        'node_1_ID':
+            [
+                {'port': None}
+            ],
+        'node_2_ID':
+            [
+                {'in': 3}
+            ]
+    }
+    """
+    _outputs = []
+    # New version of FrontEnd is activated
+    log.debug('Inside fe_output_user_data_repack')
+    if outputs is not None and len(outputs) > 0:
+        for output in outputs:
+            node = fe_decodeNameWithPort(inputModel, output)
+            if node is None:
+                raise Error('Cannot find location {} in the graph'.format(output))
+            _outputs.append({'node': node})
+    return _outputs
+
+
+def fe_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.ndarray],
+                        input_user_data_types: dict, outputs: list, freeze_placeholder: dict):
+    """
+    :param inputModel: Input Model to operate on
+    :param input_user_shapes: data structure representing user input cutting request
+    :param input_user_data_types: dictionary with input nodes and their data types
+    :param outputs: list of node names to treat as outputs
+    :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
+    :return: restructured input, output and freeze placeholder dictionaries or None values
+    """
+    _input_shapes, _freeze_placeholder = fe_input_user_data_repack(inputModel, input_user_shapes, freeze_placeholder,
+                                                                   input_user_data_types=input_user_data_types)
+    _outputs = fe_output_user_data_repack(inputModel, outputs)
+
+    log.debug('---------- Inputs/outputs/freeze placeholder -----------')
+    log.debug(_input_shapes)
+    log.debug(_outputs)
+    log.debug(freeze_placeholder)
+    log.debug('------------------------------------')
+
+    return _input_shapes, _outputs, _freeze_placeholder
diff --git a/model-optimizer/mo/front_ng/frontendmanager_wrapper.py b/model-optimizer/mo/front_ng/frontendmanager_wrapper.py
new file mode 100644
index 00000000000000..cb1e0786017ad1
--- /dev/null
+++ b/model-optimizer/mo/front_ng/frontendmanager_wrapper.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+
+
+def create_fem():
+    fem = None
+    try:
+        from ngraph.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error
+        fem = FrontEndManager()
+    except Exception:
+        print("nGraph FrontEndManager is not initialized")
+        pass
+    return fem
+
+
+if __name__ == "__main__":
+    if not create_fem():
+        exit(1)
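create_fem() deliberately returns None when the nGraph Python bindings are not available, so callers can fall back to the legacy conversion pipeline. A sketch of the intended call pattern (the fallback message is illustrative):

    from mo.front_ng.frontendmanager_wrapper import create_fem

    fem = create_fem()
    if fem is None:
        print('nGraph bindings not available, using legacy frontend')  # what prepare_ir effectively does
    else:
        print('Available frontends:', fem.get_available_front_ends())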
diff --git a/model-optimizer/mo/front_ng/pipeline.py b/model-optimizer/mo/front_ng/pipeline.py
new file mode 100644
index 00000000000000..f27c2186b2b270
--- /dev/null
+++ b/model-optimizer/mo/front_ng/pipeline.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+
+import logging as log
+from mo.front_ng.extractor import fe_user_data_repack
+from mo.middle.passes.infer import validate_batch_in_shape
+
+
+def moc_pipeline(argv: argparse.Namespace):
+    from ngraph import Dimension, PartialShape  # pylint: disable=no-name-in-module,import-error
+    from ngraph.utils.types import get_element_type  # pylint: disable=no-name-in-module,import-error
+    log.info('New MOC pipeline')
+    fem = argv.feManager
+    log.info(f'fem.availableFrontEnds: {str(fem.get_available_front_ends())}')
+    log.info(f'Initializing new FE for framework {argv.framework}')
+    fe = fem.load_by_framework(argv.framework)
+    inputModel = fe.load_from_file(argv.input_model)
+
+    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
+        inputModel, argv.placeholder_shapes, argv.placeholder_data_types,
+        argv.output, argv.freeze_placeholder_with_value)
+
+    def compare_nodes(old, new):
+        eq = len(old) == len(new)
+        if eq:
+            for item in old:
+                found = [x for x in new if x['node'].is_equal(item)]
+                if not found:
+                    eq = False
+                    break
+        return eq
+
+    inputsEqual = True
+    if user_shapes:
+        inputsEqual = compare_nodes(inputModel.get_inputs(), user_shapes)
+
+    outputsEqual = True
+    if outputs:
+        outputsEqual = compare_nodes(inputModel.get_outputs(), outputs)
+    log.debug(f"Inputs are same: {inputsEqual}, outputs are same: {outputsEqual}")
+
+    if not inputsEqual and not outputsEqual:
+        # Use ExtractSubgraph
+        newInputPlaces = [x['node'] for x in user_shapes]
+        newOutputPlaces = [x['node'] for x in outputs]
+        log.debug("Using extract subgraph")
+        log.debug(f"Inputs: {newInputPlaces}")
+        log.debug(f"Outputs: {newOutputPlaces}")
+        inputModel.extract_subgraph(newInputPlaces, newOutputPlaces)
+    elif not inputsEqual:
+        newInputPlaces = [x['node'] for x in user_shapes]
+        log.debug("Using override_all_inputs")
+        log.debug(f"Inputs: {newInputPlaces}")
+        inputModel.override_all_inputs(newInputPlaces)
+    elif not outputsEqual:
+        newOutputPlaces = [x['node'] for x in outputs]
+        log.debug("Using override_all_outputs")
+        log.debug(f"Outputs: {newOutputPlaces}")
+        inputModel.override_all_outputs(newOutputPlaces)
+
+    if user_shapes:
+        for user_shape in user_shapes:
+            if 'shape' in user_shape and user_shape['shape'] is not None:
+                inputModel.set_partial_shape(user_shape['node'], PartialShape(user_shape['shape']))
+            if 'data_type' in user_shape and user_shape['data_type'] is not None:
+                data_type = get_element_type(user_shape['data_type'])
+                log.debug(f"Set data type: {data_type}")
+                inputModel.set_element_type(user_shape['node'], data_type)
+
+    # Set batch size
+    if argv.batch is not None and argv.batch > 0:
+        log.debug(f"Setting batch size to {argv.batch}")
+        for place in inputModel.get_inputs():
+            oldPartShape = inputModel.get_partial_shape(place)
+            newshape = []
+            oldshape_converted = []
+            joinedName = ' '.join(place.get_names())
+            if oldPartShape.rank.is_static:
+                for i in range(oldPartShape.rank.get_length()):
+                    # Assume batch size is always the first dimension in the shape
+                    # Keep other dimensions unchanged
+                    newshape.append(Dimension(argv.batch) if i == 0 else oldPartShape.get_dimension(i))
+                    oldshape_converted.append(oldPartShape.get_dimension(i))
+
+                validate_batch_in_shape(oldshape_converted, joinedName)
+            else:
+                # In case of fully dynamic shape raise the same error as for invalid batch dimension
+                validate_batch_in_shape(oldshape_converted, joinedName)
+
+            newPartShape = PartialShape(newshape)
+            log.debug(f"Input: {joinedName}, Old shape: {oldshape_converted}, New shape: {newshape}")
+            inputModel.set_partial_shape(place, newPartShape)
+
+    nGraphFunction = fe.convert(inputModel)
+    return nGraphFunction
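Stripped of the cutting and reshaping logic, moc_pipeline reduces to four calls against the new FrontEnd API. A minimal sketch; the 'pdpd' framework name and the model path are illustrative, and the nGraph Python bindings are assumed to be built:

    from ngraph.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error

    fem = FrontEndManager()
    print('Available frontends:', fem.get_available_front_ends())
    fe = fem.load_by_framework('pdpd')               # illustrative framework name
    model = fe.load_from_file('path/to/__model__')   # illustrative model path
    function = fe.convert(model)                     # nGraph Function
    # ngraph_emit_ir() below serializes this Function to IR (.xml/.bin)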
diff --git a/model-optimizer/mo/front_ng/serialize.py b/model-optimizer/mo/front_ng/serialize.py
new file mode 100644
index 00000000000000..1c19a4b6ef9022
--- /dev/null
+++ b/model-optimizer/mo/front_ng/serialize.py
@@ -0,0 +1,17 @@
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import os
+
+
+def ngraph_emit_ir(nGraphFunction, argv: argparse.Namespace):
+    output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
+
+    from ngraph import function_to_cnn  # pylint: disable=no-name-in-module,import-error
+    network = function_to_cnn(nGraphFunction)
+
+    orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))
+    network.serialize(orig_model_name + ".xml", orig_model_name + ".bin")
+    print('[ SUCCESS ] Converted with nGraph Serializer')
+    return 0
diff --git a/model-optimizer/mo/main.py b/model-optimizer/mo/main.py
index aa0eb628ad8095..f8fa6abf8ff950 100644
--- a/model-optimizer/mo/main.py
+++ b/model-optimizer/mo/main.py
@@ -94,9 +94,17 @@ def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet:
 def prepare_ir(argv: argparse.Namespace):
     is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(argv)
 
+    fem = argv.feManager
+    new_front_ends = []
+    if 'use_legacy_frontend' in argv and not argv.use_legacy_frontend and fem is not None:
+        new_front_ends = fem.get_available_front_ends()
+
     if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
-        raise Error('Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
-                    'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)
+        frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
+        frameworks = list(set(frameworks + new_front_ends))
+        if argv.framework not in frameworks:
+            raise Error('Framework {} is not a valid target. Please use --framework with one from the list: {}. ' +
+                        refer_to_faq_msg(15), argv.framework, frameworks)
 
     if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
         raise Error('Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
@@ -162,9 +170,11 @@ def prepare_ir(argv: argparse.Namespace):
     if argv.legacy_ir_generation and len(argv.transform) != 0:
         raise Error("--legacy_ir_generation and --transform keys can not be used at the same time.")
 
-    ret_code = check_requirements(framework=argv.framework)
-    if ret_code:
-        raise Error('check_requirements exit with return code {}'.format(ret_code))
+    if not new_front_ends or argv.framework not in new_front_ends:
+        ret_code = check_requirements(framework=argv.framework)
+        if ret_code:
+            raise Error('check_requirements exited with return code {}'.format(ret_code))
+    # TODO: should we check some 'generic' requirements if 'framework' belongs to FrontEndManager?
if is_tf and argv.tensorflow_use_custom_operations_config is not None: argv.transformations_config = argv.tensorflow_use_custom_operations_config @@ -245,12 +255,19 @@ def prepare_ir(argv: argparse.Namespace): send_framework_info('kaldi') from mo.front.kaldi.register_custom_ops import get_front_classes import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - elif is_onnx: + elif is_onnx and ('onnx' not in new_front_ends or argv.use_legacy_frontend): send_framework_info('onnx') from mo.front.onnx.register_custom_ops import get_front_classes import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - graph = unified_pipeline(argv) - return graph + + graph = None + ngraphFunction = None + if argv.feManager is None or argv.framework not in new_front_ends or argv.use_legacy_frontend: + graph = unified_pipeline(argv) + else: + from mo.front_ng.pipeline import moc_pipeline + ngraphFunction = moc_pipeline(argv) + return graph, ngraphFunction def emit_ir(graph: Graph, argv: argparse.Namespace): @@ -258,6 +275,9 @@ def emit_ir(graph: Graph, argv: argparse.Namespace): for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern) for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern) + if 'feManager' in argv: + del argv.feManager + mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else [] @@ -353,7 +373,12 @@ def driver(argv: argparse.Namespace): start_time = datetime.datetime.now() - ret_res = emit_ir(prepare_ir(argv), argv) + graph, nGraphFunction = prepare_ir(argv) + if graph is not None: + ret_res = emit_ir(graph, argv) + else: + from mo.front_ng.serialize import ngraph_emit_ir + ret_res = ngraph_emit_ir(nGraphFunction, argv) if ret_res != 0: return ret_res @@ -373,7 +398,7 @@ def driver(argv: argparse.Namespace): return ret_res -def main(cli_parser: argparse.ArgumentParser, framework: str): +def main(cli_parser: argparse.ArgumentParser, fem, framework: str): telemetry = tm.Telemetry(app_name='Model Optimizer', app_version=get_simplified_mo_version()) telemetry.start_session('mo') telemetry.send_event('mo', 'version', get_simplified_mo_version()) @@ -387,6 +412,7 @@ def main(cli_parser: argparse.ArgumentParser, framework: str): if framework: argv.framework = framework + argv.feManager = fem ov_update_message = None if not hasattr(argv, 'silent') or not argv.silent: @@ -429,4 +455,6 @@ def main(cli_parser: argparse.ArgumentParser, framework: str): if __name__ == "__main__": from mo.utils.cli_parser import get_all_cli_parser - sys.exit(main(get_all_cli_parser(), None)) + from mo.front_ng.frontendmanager_wrapper import create_fem + fem = create_fem() + sys.exit(main(get_all_cli_parser(fem), fem, None)) diff --git a/model-optimizer/mo/main_caffe.py b/model-optimizer/mo/main_caffe.py index bcba5c8d611735..bd2f4b62bf4d98 100644 --- a/model-optimizer/mo/main_caffe.py +++ b/model-optimizer/mo/main_caffe.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_caffe_cli_parser(), 'caffe')) + sys.exit(main(get_caffe_cli_parser(), None, 'caffe')) diff --git a/model-optimizer/mo/main_kaldi.py b/model-optimizer/mo/main_kaldi.py index 15233333203adb..e2105e32e5dec6 100644 --- a/model-optimizer/mo/main_kaldi.py +++ b/model-optimizer/mo/main_kaldi.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_kaldi_cli_parser(), 
'kaldi')) + sys.exit(main(get_kaldi_cli_parser(), None, 'kaldi')) diff --git a/model-optimizer/mo/main_mxnet.py b/model-optimizer/mo/main_mxnet.py index 91cb19531592e5..b22a277231b0f4 100644 --- a/model-optimizer/mo/main_mxnet.py +++ b/model-optimizer/mo/main_mxnet.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_mxnet_cli_parser(), 'mxnet')) + sys.exit(main(get_mxnet_cli_parser(), None, 'mxnet')) diff --git a/model-optimizer/mo/main_onnx.py b/model-optimizer/mo/main_onnx.py index 3bf882d65e9ed0..e0569f4c1694a0 100644 --- a/model-optimizer/mo/main_onnx.py +++ b/model-optimizer/mo/main_onnx.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_onnx_cli_parser(), 'onnx')) + sys.exit(main(get_onnx_cli_parser(), None, 'onnx')) diff --git a/model-optimizer/mo/main_pdpd.py b/model-optimizer/mo/main_pdpd.py new file mode 100644 index 00000000000000..c2836cd425d383 --- /dev/null +++ b/model-optimizer/mo/main_pdpd.py @@ -0,0 +1,12 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_all_cli_parser + +if __name__ == "__main__": + from mo.main import main + from mo.front_ng.frontendmanager_wrapper import create_fem + fem = create_fem() + sys.exit(main(get_all_cli_parser(fem), fem, 'pdpd')) diff --git a/model-optimizer/mo/main_tf.py b/model-optimizer/mo/main_tf.py index 3c55e4ac0e2d05..5464b114082a6d 100644 --- a/model-optimizer/mo/main_tf.py +++ b/model-optimizer/mo/main_tf.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_tf_cli_parser(), 'tf')) + sys.exit(main(get_tf_cli_parser(), None, 'tf')) diff --git a/model-optimizer/mo/middle/passes/infer.py b/model-optimizer/mo/middle/passes/infer.py index 3b642ccd09b494..bc6ff64acb4f0a 100644 --- a/model-optimizer/mo/middle/passes/infer.py +++ b/model-optimizer/mo/middle/passes/infer.py @@ -206,19 +206,30 @@ def override_batch(graph: Graph, batch: int): if batch is not None: for node_id, data in graph.nodes(data=True): if 'op' in data and data['op'] == 'Parameter' and not data.get('fixed_batch', False): - if len(data['shape']) == 0 or data['shape'][0] not in (-1, 0, 1): - raise Error(('The input layer {} has a shape {} defined in the model. \n\n' + - 'When you use -b (--batch) option, Model Optimizer applies its value to the first ' + - 'element of the shape if it is equal to -1, 0 or 1. Otherwise, this is the ambiguous ' + - 'situation - Model Optimizer can not know in advance whether the layer has the batch ' + - 'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' + - 'for the input layer "data" with shape (10,34). Although you can not use --batch, ' + - 'you should pass --input_shape (100,34) instead of --batch 100. \n\n' + - refer_to_faq_msg(39)) - .format(data['name'], data['shape'])) + validate_batch_in_shape(data['shape'], data['name']) data['shape'][0] = batch +def validate_batch_in_shape(shape, layer_name: str): + """ + Raises Error #39 if shape is not valid for setting batch size + Parameters + ---------- + shape: current shape of layer under validation + layer_name: name of layer under validation + """ + if len(shape) == 0 or shape[0] not in (-1, 0, 1): + raise Error(('The input layer {} has a shape {} defined in the model. \n\n' + + 'When you use -b (--batch) option, Model Optimizer applies its value to the first ' + + 'element of the shape if it is equal to -1, 0 or 1. 
Otherwise, this is the ambiguous ' +
+                     'situation - Model Optimizer can not know in advance whether the layer has the batch ' +
+                     'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' +
+                     'for the input layer "data" with shape (10,34). Although you can not use --batch, ' +
+                     'you should pass --input_shape (100,34) instead of --batch 100. \n\n' +
+                     refer_to_faq_msg(39))
+                    .format(layer_name, shape))
+
+
 def override_placeholder_shapes(graph: Graph, user_shapes: dict, batch=None):
     """
     This function overrides shapes for nodes with 'op' param set to 'Parameter' with shapes defined by users (only
diff --git a/model-optimizer/mo/utils/cli_parser.py b/model-optimizer/mo/utils/cli_parser.py
index 21d2873b54ed2c..ac8ac186227304 100644
--- a/model-optimizer/mo/utils/cli_parser.py
+++ b/model-optimizer/mo/utils/cli_parser.py
@@ -18,7 +18,6 @@
 from mo.utils.utils import refer_to_faq_msg
 from mo.utils.version import get_version
 
-
 class DeprecatedStoreTrue(argparse.Action):
     def __init__(self, nargs=0, **kw):
         super().__init__(nargs=nargs, **kw)
@@ -87,12 +86,12 @@ def __call__(self, parser, namespace, values, option_string=None):
 
 def readable_file(path: str):
     """
-    Check that specified path is a readable file.
+    Check that the specified path is a readable file or directory.
     :param path: path to check
-    :return: path if the file is readable
+    :return: path if the file/directory is readable
    """
-    if not os.path.isfile(path):
-        raise Error('The "{}" is not existing file'.format(path))
+    if not os.path.exists(path):
+        raise Error('The "{}" is not an existing file or directory'.format(path))
     elif not os.access(path, os.R_OK):
         raise Error('The "{}" is not readable'.format(path))
     else:
         return path
@@ -623,10 +622,16 @@ def get_onnx_cli_parser(parser: argparse.ArgumentParser = None):
 
     onnx_group = parser.add_argument_group('ONNX*-specific parameters')
 
+    onnx_group.add_argument("--use_legacy_frontend",
+                            help="Switch back to the original (legacy) frontend for ONNX model conversion. 
" + + "By default, ONNX Importer is used as a converter.", + default=False, + action='store_true') + return parser -def get_all_cli_parser(): +def get_all_cli_parser(frontEndManager=None): """ Specifies cli arguments for Model Optimizer @@ -636,10 +641,13 @@ def get_all_cli_parser(): """ parser = argparse.ArgumentParser(usage='%(prog)s [options]') + frameworks = list(set(['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'] + + (frontEndManager.get_available_front_ends() if frontEndManager else []))) + parser.add_argument('--framework', help='Name of the framework used to train the input model.', type=str, - choices=['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']) + choices=frameworks) get_common_cli_parser(parser=parser) diff --git a/model-optimizer/mo_pdpd.py b/model-optimizer/mo_pdpd.py new file mode 100755 index 00000000000000..abd0d2dd15c3b6 --- /dev/null +++ b/model-optimizer/mo_pdpd.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 + +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +if __name__ == "__main__": + from mo.subprocess_main import subprocess_main + subprocess_main(framework='pdpd') diff --git a/model-optimizer/unit_tests/mo/main_test.py b/model-optimizer/unit_tests/mo/main_test.py index 7afa4e6e6e2f3c..ecb2d0baaa4f14 100644 --- a/model-optimizer/unit_tests/mo/main_test.py +++ b/model-optimizer/unit_tests/mo/main_test.py @@ -14,5 +14,5 @@ class TestMainErrors(unittest.TestCase): @patch('mo.main.driver', side_effect=FrameworkError('FW ERROR MESSAGE')) def test_FrameworkError(self, mock_argparse, mock_driver): with self.assertLogs() as logger: - main(argparse.ArgumentParser(), 'framework_string') + main(argparse.ArgumentParser(), None, 'framework_string') self.assertEqual(logger.output, ['ERROR:root:FW ERROR MESSAGE']) diff --git a/ngraph/CMakeLists.txt b/ngraph/CMakeLists.txt index 8d5d41ffc48512..07148b5e0d92c8 100644 --- a/ngraph/CMakeLists.txt +++ b/ngraph/CMakeLists.txt @@ -26,6 +26,15 @@ set(NGRAPH_INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/core/include ) +set(FRONTEND_INCLUDE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/frontend/frontend_manager/include +) + +# Will be used by frontends to construct frontend-specific source location paths +set(FRONTEND_BASE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/frontend +) + if (APPLE) # Enable MACOS_RPATH by default. 
cmake_policy(SET CMP0042 NEW) @@ -79,6 +88,7 @@ option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backen option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF) option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" OFF) option(NGRAPH_ONNX_EDITOR_ENABLE "Enable ONNX Editor" OFF) +option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" OFF) option(NGRAPH_LIB_VERSIONING_ENABLE "Enable shared library versioning" OFF) option(NGRAPH_PYTHON_BUILD_ENABLE "Enable build nGraph python package wheel" OFF) option(NGRAPH_DYNAMIC_COMPONENTS_ENABLE "Enable dynamic loading of components" ON) @@ -88,7 +98,7 @@ option(NGRAPH_THREAD_SANITIZER_ENABLE "Compiles and links with Thread Sanitizer" option(NGRAPH_UB_SANITIZER_ENABLE "Compiles and links with Undefined Behavior Sanitizer" OFF) option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF) -if (NGRAPH_ONNX_IMPORT_ENABLE) +if (NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE) option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system provided Protobuf shared object" OFF) endif() if(NGRAPH_ONNX_EDITOR_ENABLE AND NOT NGRAPH_ONNX_IMPORT_ENABLE) @@ -103,6 +113,7 @@ message(STATUS "NGRAPH_INTERPRETER_ENABLE: ${NGRAPH_INTERPRETER_ENABL message(STATUS "NGRAPH_LIB_VERSIONING_ENABLE: ${NGRAPH_LIB_VERSIONING_ENABLE}") message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}") message(STATUS "NGRAPH_ONNX_EDITOR_ENABLE: ${NGRAPH_ONNX_EDITOR_ENABLE}") +message(STATUS "NGRAPH_PDPD_FRONTEND_ENABLE: ${NGRAPH_PDPD_FRONTEND_ENABLE}") message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}") message(STATUS "NGRAPH_THREAD_SANITIZER_ENABLE: ${NGRAPH_THREAD_SANITIZER_ENABLE}") message(STATUS "NGRAPH_UB_SANITIZER_ENABLE: ${NGRAPH_UB_SANITIZER_ENABLE}") @@ -282,7 +293,12 @@ if (NGRAPH_EXPORT_TARGETS_ENABLE) COMPONENT ngraph_dev) endif() -if (NGRAPH_ONNX_IMPORT_ENABLE) +set(USE_STATIC_PROTOBUF OFF) +if (NGRAPH_PDPD_FRONTEND_ENABLE) # add more frontends here + set(USE_STATIC_PROTOBUF ON) +endif() + +if (NGRAPH_ONNX_IMPORT_ENABLE OR USE_STATIC_PROTOBUF) if (MSVC) # When we build dll libraries. These flags make sure onnx and protobuf build with /MD, not /MT. # These two options can't be mixed, because they requires link two imcompatiable runtime. 
@@ -298,6 +314,7 @@ if (NGRAPH_ONNX_IMPORT_ENABLE) set(BEFORE_ONNX_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}) set(BUILD_SHARED_LIBS ON) + set(BUILD_STANDALONE_STATIC OFF) if (NOT NGRAPH_USE_SYSTEM_PROTOBUF) include(cmake/external_protobuf.cmake) @@ -305,21 +322,28 @@ if (NGRAPH_ONNX_IMPORT_ENABLE) find_package(Protobuf 2.6.1 REQUIRED) endif() - # target onnx_proto will be shared lib, onnx static - include(cmake/external_onnx.cmake) - if (TARGET ext_protobuf) - add_dependencies(onnx ext_protobuf) + if (NGRAPH_ONNX_IMPORT_ENABLE) + # target onnx_proto will be shared lib, onnx static + include(cmake/external_onnx.cmake) + if (TARGET ext_protobuf) + add_dependencies(onnx ext_protobuf) + endif() endif() + set(BUILD_SHARED_LIBS ${BEFORE_ONNX_BUILD_SHARED_LIBS}) unset(BEFORE_ONNX_BUILD_SHARED_LIBS) endif() + add_subdirectory(frontend) if(NGRAPH_UNIT_TEST_ENABLE) set(BEFORE_GTEST_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}) set(BUILD_SHARED_LIBS OFF) include(cmake/external_gtest.cmake) + if (NGRAPH_PDPD_FRONTEND_ENABLE) + include(cmake/external_npy.cmake) + endif () set(BUILD_SHARED_LIBS ${BEFORE_GTEST_BUILD_SHARED_LIBS}) unset(BEFORE_GTEST_BUILD_SHARED_LIBS) endif() diff --git a/ngraph/cmake/external_npy.cmake b/ngraph/cmake/external_npy.cmake new file mode 100644 index 00000000000000..e79148aad71c2c --- /dev/null +++ b/ngraph/cmake/external_npy.cmake @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# Enable ExternalProject CMake module +include(ExternalProject) +# TODO REMOVE This Dependency when switch to hard-coded test. +#------------------------------------------------------------------------------ +# Download and install NPY ... +#------------------------------------------------------------------------------ + +SET(NPY_GIT_REPO_URL https://github.com/llohse/libnpy.git) + +# Build for ninja + +ExternalProject_Add( + ext_libnpy + PREFIX libnpy + GIT_REPOSITORY ${NPY_GIT_REPO_URL} + GIT_TAG "master" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + UPDATE_COMMAND "" + BUILD_COMMAND "" +) + +#------------------------------------------------------------------------------ + +ExternalProject_Get_Property(ext_libnpy SOURCE_DIR) +message("******${SOURCE_DIR}") + +add_library(libnpy INTERFACE) +add_dependencies(libnpy ext_libnpy) +target_include_directories(libnpy SYSTEM INTERFACE + ${SOURCE_DIR}) diff --git a/ngraph/cmake/external_protobuf.cmake b/ngraph/cmake/external_protobuf.cmake index 502a23d572f42f..72604fc77e0801 100644 --- a/ngraph/cmake/external_protobuf.cmake +++ b/ngraph/cmake/external_protobuf.cmake @@ -64,25 +64,45 @@ if(PROTOC_VERSION VERSION_LESS "3.9" AND NGRAPH_USE_PROTOBUF_LITE) message(FATAL_ERROR "Minimum supported version of protobuf-lite library is 3.9.0") else() if(PROTOC_VERSION VERSION_GREATER_EQUAL "3.0") - FetchContent_Declare( - ext_protobuf - GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} - GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG} - GIT_SHALLOW TRUE - ) - - FetchContent_GetProperties(ext_protobuf) - if(NOT ext_protobuf_POPULATED) - FetchContent_Populate(ext_protobuf) - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") - set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") - add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL) + if (NOT BUILD_STANDALONE_STATIC) + FetchContent_Declare( + ext_protobuf + GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} + GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG} + GIT_SHALLOW TRUE + ) + FetchContent_GetProperties(ext_protobuf) + if(NOT 
ext_protobuf_POPULATED) + FetchContent_Populate(ext_protobuf) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") + set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") + add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + endif() + if (USE_STATIC_PROTOBUF) + FetchContent_Declare( + ext_protobuf_static + GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} + GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG} + GIT_SHALLOW TRUE + ) + FetchContent_GetProperties(ext_protobuf_static) + if(NOT ext_protobuf_static_POPULATED AND BUILD_STANDALONE_STATIC) + FetchContent_Populate(ext_protobuf_static) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") + set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") + add_subdirectory(${ext_protobuf_static_SOURCE_DIR}/cmake ${ext_protobuf_static_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() endif() else() message(FATAL_ERROR "Minimum supported version of protobuf library is 3.0.0") endif() - set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src) + if (BUILD_STANDALONE_STATIC) + set(Protobuf_INCLUDE_DIRS ${ext_protobuf_static_SOURCE_DIR}/src) + else() + set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src) + endif() if(NGRAPH_USE_PROTOBUF_LITE) set(Protobuf_LIBRARIES libprotobuf-lite) else() @@ -117,6 +137,7 @@ endif() # Now make sure we restore the original flags set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE "${PUSH_CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE}") +message("NGRAPH_INSTALL_LIB = ${NGRAPH_INSTALL_LIB}") install(TARGETS ${Protobuf_LIBRARIES} RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph @@ -124,3 +145,155 @@ install(TARGETS ${Protobuf_LIBRARIES} if (NGRAPH_EXPORT_TARGETS_ENABLE) export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") endif() + +#TODO: ---Find out the way to reuse these function from Protobuf modules --- + +function(protobuf_generate) + include(CMakeParseArguments) + + set(_options APPEND_PATH) + set(_singleargs LANGUAGE OUT_VAR EXPORT_MACRO PROTOC_OUT_DIR) + if(COMMAND target_sources) + list(APPEND _singleargs TARGET) + endif() + set(_multiargs PROTOS IMPORT_DIRS GENERATE_EXTENSIONS) + + cmake_parse_arguments(protobuf_generate "${_options}" "${_singleargs}" "${_multiargs}" "${ARGN}") + + if(NOT protobuf_generate_PROTOS AND NOT protobuf_generate_TARGET) + message(SEND_ERROR "Error: protobuf_generate called without any targets or source files") + return() + endif() + + if(NOT protobuf_generate_OUT_VAR AND NOT protobuf_generate_TARGET) + message(SEND_ERROR "Error: protobuf_generate called without a target or output variable") + return() + endif() + + if(NOT protobuf_generate_LANGUAGE) + set(protobuf_generate_LANGUAGE cpp) + endif() + string(TOLOWER ${protobuf_generate_LANGUAGE} protobuf_generate_LANGUAGE) + + if(NOT protobuf_generate_PROTOC_OUT_DIR) + set(protobuf_generate_PROTOC_OUT_DIR ${CMAKE_CURRENT_BINARY_DIR}) + endif() + + if(protobuf_generate_EXPORT_MACRO AND protobuf_generate_LANGUAGE STREQUAL cpp) + set(_dll_export_decl "dllexport_decl=${protobuf_generate_EXPORT_MACRO}:") + endif() + + if(NOT protobuf_generate_GENERATE_EXTENSIONS) + if(protobuf_generate_LANGUAGE STREQUAL cpp) + set(protobuf_generate_GENERATE_EXTENSIONS .pb.h .pb.cc) + elseif(protobuf_generate_LANGUAGE STREQUAL python) + set(protobuf_generate_GENERATE_EXTENSIONS _pb2.py) + else() + message(SEND_ERROR "Error: protobuf_generate given unknown Language ${LANGUAGE}, please provide a value 
for GENERATE_EXTENSIONS") + return() + endif() + endif() + + if(protobuf_generate_TARGET) + get_target_property(_source_list ${protobuf_generate_TARGET} SOURCES) + foreach(_file ${_source_list}) + if(_file MATCHES "proto$") + list(APPEND protobuf_generate_PROTOS ${_file}) + endif() + endforeach() + endif() + + if(NOT protobuf_generate_PROTOS) + message(SEND_ERROR "Error: protobuf_generate could not find any .proto files") + return() + endif() + + if(protobuf_generate_APPEND_PATH) + # Create an include path for each file specified + foreach(_file ${protobuf_generate_PROTOS}) + get_filename_component(_abs_file ${_file} ABSOLUTE) + get_filename_component(_abs_path ${_abs_file} PATH) + list(FIND _protobuf_include_path ${_abs_path} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${_abs_path}) + endif() + endforeach() + else() + set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR}) + endif() + + foreach(DIR ${protobuf_generate_IMPORT_DIRS}) + get_filename_component(ABS_PATH ${DIR} ABSOLUTE) + list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) + if(${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${ABS_PATH}) + endif() + endforeach() + + set(_generated_srcs_all) + foreach(_proto ${protobuf_generate_PROTOS}) + get_filename_component(_abs_file ${_proto} ABSOLUTE) + get_filename_component(_abs_dir ${_abs_file} DIRECTORY) + get_filename_component(_basename ${_proto} NAME_WE) + file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir}) + + set(_generated_srcs) + message(${_rel_dir}) + + foreach(_ext ${protobuf_generate_GENERATE_EXTENSIONS}) + list(APPEND _generated_srcs "${protobuf_generate_PROTOC_OUT_DIR}/${_basename}${_ext}") + endforeach() + list(APPEND _generated_srcs_all ${_generated_srcs}) + + add_custom_command( + OUTPUT ${_generated_srcs} + COMMAND protobuf::protoc + ARGS --${protobuf_generate_LANGUAGE}_out ${_dll_export_decl}${protobuf_generate_PROTOC_OUT_DIR} ${_protobuf_include_path} ${_abs_file} + DEPENDS ${_abs_file} protobuf::protoc + COMMENT "Running ${protobuf_generate_LANGUAGE} protocol buffer compiler on ${_proto}" + VERBATIM ) + endforeach() + + set_source_files_properties(${_generated_srcs_all} PROPERTIES GENERATED TRUE) + if(protobuf_generate_OUT_VAR) + set(${protobuf_generate_OUT_VAR} ${_generated_srcs_all} PARENT_SCOPE) + endif() + if(protobuf_generate_TARGET) + target_sources(${protobuf_generate_TARGET} PRIVATE ${_generated_srcs_all}) + endif() + +endfunction() + +function(PROTOBUF_GENERATE_CPP SRCS HDRS) + cmake_parse_arguments(protobuf_generate_cpp "" "EXPORT_MACRO" "" ${ARGN}) + + set(_proto_files "${protobuf_generate_cpp_UNPARSED_ARGUMENTS}") + if(NOT _proto_files) + message(SEND_ERROR "Error: PROTOBUF_GENERATE_CPP() called without any proto files") + return() + endif() + + if(PROTOBUF_GENERATE_CPP_APPEND_PATH) + set(_append_arg APPEND_PATH) + endif() + + if(DEFINED Protobuf_IMPORT_DIRS) + set(_import_arg IMPORT_DIRS ${Protobuf_IMPORT_DIRS}) + endif() + + set(_outvar) + protobuf_generate(${_append_arg} LANGUAGE cpp EXPORT_MACRO ${protobuf_generate_cpp_EXPORT_MACRO} OUT_VAR _outvar ${_import_arg} PROTOS ${_proto_files}) + + set(${SRCS}) + set(${HDRS}) + message(${_outvar}) + foreach(_file ${_outvar}) + if(_file MATCHES "cc$") + list(APPEND ${SRCS} ${_file}) + else() + list(APPEND ${HDRS} ${_file}) + endif() + endforeach() + set(${SRCS} ${${SRCS}} PARENT_SCOPE) + set(${HDRS} ${${HDRS}} PARENT_SCOPE) +endfunction() \ No newline at end of file diff --git 
a/ngraph/frontend/CMakeLists.txt b/ngraph/frontend/CMakeLists.txt index 3e21b4b50171ec..fe6d34404840ef 100644 --- a/ngraph/frontend/CMakeLists.txt +++ b/ngraph/frontend/CMakeLists.txt @@ -2,6 +2,59 @@ # SPDX-License-Identifier: Apache-2.0 # +message(${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf) +message(BINARY ${CMAKE_CURRENT_BINARY_DIR}) + +## DEBUG - print all variables +# get_cmake_property(_variableNames VARIABLES) +# set(ALL_VARS "") +# foreach (_variableName ${_variableNames}) +# set(ALL_VARS ${ALL_VARS} -D${_variableName}=${${_variableName}}\ ) +# endforeach() +# message(---------------------ALL VARS: ${ALL_VARS}-------) + +# There seems no suitable other way to identify exact output binary name +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/libprotobufd.a) +else(CMAKE_BUILD_TYPE STREQUAL "Debug") + set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/libprotobuf.a) +endif() + +message("Static protobuf lib: ${PROTOBUF_STATIC_LIB_OUTPUT}") +add_custom_command( + OUTPUT + ${PROTOBUF_STATIC_LIB_OUTPUT} + COMMAND ${CMAKE_COMMAND} ${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf + -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} + -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} + -DCMAKE_COMPILE_PDB_OUTPUT_DIRECTORY=${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY} + -DCMAKE_PDB_OUTPUT_DIRECTORY=${CMAKE_PDB_OUTPUT_DIRECTORY} + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_CXX_VISIBILITY_PRESET=${CMAKE_CXX_VISIBILITY_PRESET} + -DNGRAPH_INSTALL_LIB=${NGRAPH_INSTALL_LIB} + ${NGRAPH_FORWARD_CMAKE_ARGS} + COMMAND ${CMAKE_COMMAND} --build . --target libprotobuf + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Build Protobuf Static Library" + # TODO: add DEPENDS +) + +# Import targets + +add_custom_target(libprotobuf_static + DEPENDS + ${PROTOBUF_STATIC_LIB_OUTPUT} + ) + + +add_subdirectory(frontend_manager) + +if (NGRAPH_PDPD_FRONTEND_ENABLE) + add_subdirectory(paddlepaddle) +endif() + if (NGRAPH_ONNX_IMPORT_ENABLE) add_subdirectory(onnx_common) add_subdirectory(onnx_import) diff --git a/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt b/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt new file mode 100644 index 00000000000000..6df263046380e5 --- /dev/null +++ b/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt @@ -0,0 +1,23 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +cmake_minimum_required(VERSION 3.13) + +project(libprotobuf_static) + +# DEBUG - print all defined variables +# get_cmake_property(_variableNames VARIABLES) +# set(ALL_VARS "") +# foreach (_variableName ${_variableNames}) +# set(ALL_VARS ${ALL_VARS} -D${_variableName}=${${_variableName}}\n ) +# endforeach() +# message("---------------------${ALL_VARS}-------") + +message("Add PROTOBUF dependency - static") + +set(BUILD_SHARED_LIBS OFF) +set(BUILD_STANDALONE_STATIC ON) +set(USE_STATIC_PROTOBUF ON) + +include(../../cmake/external_protobuf.cmake) diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt new file mode 100644 index 00000000000000..90443585181e80 --- /dev/null +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -0,0 +1,53 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME "frontend_manager") + +file(GLOB_RECURSE LIBRARY_SRC 
${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) +file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) + +set(FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) + +source_group("src" FILES ${LIBRARY_SRC}) +source_group("include" FILES ${LIBRARY_HEADERS}) +source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) + +# Create shared library +add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) +add_library(ngraph::${TARGET_NAME} ALIAS ${TARGET_NAME}) + +target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS} ngraph) + +add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) + +if(COMMAND ie_add_vs_version_file) + ie_add_vs_version_file(NAME ${TARGET_NAME} + FILEDESCRIPTION "Manager of OpenVINO nGraph Frontends") +endif() + +set(FRONTEND_INSTALL_INCLUDE "${NGRAPH_INSTALL_INCLUDE}/ngraph/frontend/frontend_manager") +target_include_directories(${TARGET_NAME} PUBLIC $ + $) +target_include_directories(${TARGET_NAME} PRIVATE ${NGRAPH_INCLUDE_PATH} ${FRONTEND_INCLUDE_DIR}) + +target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) + +# Installation rules +install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + +install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/frontend_manager + DESTINATION ${FRONTEND_INSTALL_INCLUDE} + COMPONENT ngraph + FILES_MATCHING + PATTERN "*.hpp" + PATTERN "*.h" +) + +if (NGRAPH_EXPORT_TARGETS_ENABLE) + export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +endif() diff --git a/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_exceptions.hpp b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_exceptions.hpp new file mode 100644 index 00000000000000..8581900a05aa58 --- /dev/null +++ b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_exceptions.hpp @@ -0,0 +1,122 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include "ngraph/check.hpp" + +namespace ngraph +{ + namespace frontend + { + class GeneralFailure : public CheckFailure + { + public: + GeneralFailure(const CheckLocInfo& check_loc_info, + const std::string& context, + const std::string& explanation) + : CheckFailure(check_loc_info, + "FrontEnd API failed with GeneralFailure: " + context, + explanation) + { + } + }; + + class InitializationFailure : public CheckFailure + { + public: + InitializationFailure(const CheckLocInfo& check_loc_info, + const std::string& context, + const std::string& explanation) + : CheckFailure(check_loc_info, + "FrontEnd API failed with InitializationFailure: " + context, + explanation) + { + } + }; + + class OpValidationFailure : public CheckFailure + { + public: + OpValidationFailure(const CheckLocInfo& check_loc_info, + const std::string& context, + const std::string& explanation) + : CheckFailure(check_loc_info, + "FrontEnd API failed with OpValidationFailure: " + context, + explanation) + { + } + }; + + class OpConversionFailure : public CheckFailure + { + public: + OpConversionFailure(const CheckLocInfo& check_loc_info, + const std::string& context, + const std::string& explanation) + : CheckFailure(check_loc_info, + "FrontEnd API failed with 
OpConversionFailure: " + context, + explanation) + { + } + }; + + class NotImplementedFailure : public CheckFailure + { + public: + NotImplementedFailure(const CheckLocInfo& check_loc_info, + const std::string& context, + const std::string& explanation) + : CheckFailure(check_loc_info, + "FrontEnd API failed with NotImplementedFailure: " + context, + explanation) + { + } + }; + +/// \brief Macro to check whether a boolean condition holds. +/// \param cond Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evalutes to `false`. +/// \throws ::ngraph::frontend::GeneralFailure if `cond` is false. +#define FRONT_END_GENERAL_CHECK(...) \ + NGRAPH_CHECK_HELPER(::ngraph::frontend::GeneralFailure, "", __VA_ARGS__) + +/// \brief Macro to check whether a boolean condition holds. +/// \param cond Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evalutes to `false`. +/// \throws ::ngraph::frontend::InitializationFailure if `cond` is false. +#define FRONT_END_INITIALIZATION_CHECK(...) \ + NGRAPH_CHECK_HELPER(::ngraph::frontend::InitializationFailure, "", __VA_ARGS__) + +/// \brief Macro to check whether a boolean condition holds. +/// \param cond Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evalutes to `false`. +/// \throws ::ngraph::frontend::OpConversionFailure if `cond` is false. +#define FRONT_END_OP_CONVERSION_CHECK(...) \ + NGRAPH_CHECK_HELPER(::ngraph::frontend::OpConversionFailure, "", __VA_ARGS__) + +/// \brief Assert macro. +/// \param NAME Name of the function that is not implemented +/// \throws ::ngraph::frontend::NotImplementedFailure +#define FRONT_END_NOT_IMPLEMENTED(NAME) \ + NGRAPH_CHECK_HELPER(::ngraph::frontend::NotImplementedFailure, \ + "", \ + false, \ + #NAME " is not implemented for this FrontEnd class") + +/// \brief Assert macro. +/// \param MSG Error message +/// \throws ::ngraph::frontend::GeneralFailure +#define FRONT_END_THROW(MSG) FRONT_END_GENERAL_CHECK(false, MSG) + + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager.hpp b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager.hpp new file mode 100644 index 00000000000000..3f6d83e2f5d1fb --- /dev/null +++ b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager.hpp @@ -0,0 +1,521 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include "frontend_manager_defs.hpp" +#include "ngraph/function.hpp" + +namespace ngraph +{ + namespace frontend + { + /// \brief An interface for identifying a place in a graph and iterate over it; can refer to + /// an operation node, tensor, port etc. + /// + /// \note Each front end implementation provides specialization of this interface to + /// represent a place in a model graph. 
Various methods in the front end classes accept and + /// retrieve instances of Place to point to a particular node part which should be modified or + /// which satisfies some criteria. For example, this class is used to report model inputs + /// and outputs, for searching operations and tensors by name, for setting shape etc. + /// + /// Place can refer to Tensor, Input Edge, Input Port, Operation, Output Port, Output Edge + /// + /// [Tensor A] + /// | + /// | [Input Edge] + /// | + /// V + /// ------------------- + /// [ [Input Port 0] ] + /// [ ] + /// [ Operation A ] + /// [ ] + /// [ [Output Port 0] ] + /// ------------------- + /// | + /// | [Output Edge] + /// | + /// V + /// [Tensor B] + /// | + /// | [Input Edge] + /// | + /// V + /// ------------------- + /// [ [Input Port 0] ] + /// [ ] + /// [ Operation B ] + /// [ ] + /// [ [Output Port 0] ] + /// ------------------- + /// | + /// | [Output Edge] + /// | + /// V + /// [Tensor C] + /// + class FRONTEND_API Place + { + public: + typedef std::shared_ptr<Place> Ptr; + + virtual ~Place() = default; + + /// \brief All associated names (synonyms) that identify this place in the graph in a + /// framework specific way + /// + /// \return A vector of strings each representing a name that identifies this place in + /// the graph. Can be empty if there are no names associated with this place or a name + /// cannot be attached. + virtual std::vector<std::string> get_names() const; + + /// \brief Returns references to all operation nodes that consume data from this place + /// \note It can be called for any kind of graph place searching for the first consuming + /// operations. + /// + /// \param outputPortIndex If place is an operational node it specifies which output + /// port should be considered. It is optional if place has only one output port + /// + /// \return A vector with all operation node references that consume data from this + /// place + virtual std::vector<Ptr> get_consuming_operations(int outputPortIndex = -1) const; + + /// \brief Returns a tensor place that gets data from this place; applicable for + /// operations, output ports and output edges + /// + /// \param outputPortIndex Output port index if the current place is an operation node + /// and has multiple output ports. It is optional if place has only one output port + /// + /// \return A tensor place which holds the resulting value for this place + virtual Ptr get_target_tensor(int outputPortIndex = -1) const; + + /// \brief Returns a tensor place that supplies data for this place; applicable for + /// operations, input ports and input edges + /// + /// \param inputPortIndex Input port index for operational nodes. It is optional if + /// place has only one input port + /// \return A tensor place which supplies data for this place + virtual Ptr get_source_tensor(int inputPortIndex = -1) const; + + /// \brief Get an operation node place that immediately produces data for this place + /// + /// \param inputPortIndex If a given place is itself an operation node, this specifies a + /// port index. It is optional if place has only one input port + /// + /// \return An operation place that produces data for this place + virtual Ptr get_producing_operation(int inputPortIndex = -1) const; + + /// Returns a port that produces data for this place + virtual Ptr get_producing_port() const; + + /// For operation node returns reference to an input port with specified index + /// \param inputPortIndex Input port index.
It is optional if place has only one input + /// port + virtual Ptr get_input_port(int inputPortIndex = -1) const; + + /// For operation node returns reference to an input port with specified name and index + /// \param inputName Name of port group, each group can have multiple ports + /// \param inputPortIndex Input port index. It is optional if port group has only one + /// input port + virtual Ptr get_input_port(const std::string& inputName, int inputPortIndex = -1) const; + + /// For operation node returns reference to an output port with specified index + /// \param outputPortIndex Output port index. It is optional if place has only one + /// output port + virtual Ptr get_output_port(int outputPortIndex = -1) const; + + /// For operation node returns reference to an output port with specified name and index + /// \param outputName Name of output port group, each group can have multiple ports + /// \param outputPortIndex Output port index. It is optional if port group has only one + /// output port + virtual Ptr get_output_port(const std::string& outputName, + int outputPortIndex = -1) const; + + /// Returns all input ports that consume the data that flows through this place + virtual std::vector<Ptr> get_consuming_ports() const; + + /// Returns true if this place is an input for a model. + virtual bool is_input() const; + + /// Returns true if this place is an output for a model. + virtual bool is_output() const; + + /// Returns true if another place is the same as this place. + /// \param another Another place object + virtual bool is_equal(Ptr another) const; + + /// \brief Returns true if another place points to the same data. + /// \note The same data means all places on path: output port -> output edge -> tensor + /// -> input edge -> input port. + /// \param another Another place object + virtual bool is_equal_data(Ptr another) const; + }; + + /// \brief InputModel class represents an original, not yet converted model graph in a + /// framework format, giving the ability to find places of interest in the graph and to + /// specialize/edit the model before conversion. + /// + /// \note Class methods are divided into several groups: searching for places, naming and + /// annotation, topology editing, setting tensor properties. + /// + /// Editing requests may affect the ability to convert the original model to an nGraph + /// function. The aim of providing these editing capabilities is to unlock conversion for + /// models that are not natively supported "as-is" because of undefined shapes, types or + /// operations. + /// + /// A specific front-end implementation is supposed to implement all methods lazily, + /// not doing a complete load of a model without an explicit method call. + /// For example, the list of all inputs is not pre-fetched when an InputModel derived + /// class instance is created, but only when the get_inputs method is called. But this is not + /// an obligation; the most convenient way should be chosen depending on the framework + /// model representation. + /// + /// All editing requests affect the model representation that is held behind the scenes, so + /// successive method calls observe the new graph structure. + class FRONTEND_API InputModel + { + public: + typedef std::shared_ptr<InputModel> Ptr; + + virtual ~InputModel() = default; + + ///// Searching for places ///// + + /// \brief Returns all inputs for a model + /// An input is a place in a graph where data is supposed to flow into the graph from + /// outside. It can be a tensor, port, operation; which kind of place can be an input + /// is FW dependent.
Usually framework models have a dedicated artifact to code a model + /// input; it can be a tensor without a producer that writes to it (as in ONNX), or a special + /// operation like Placeholder in TensorFlow. + /// + /// \return A vector of input place references + virtual std::vector<Place::Ptr> get_inputs() const; + + /// \brief Returns all outputs for a model + /// An output is a terminal place in a graph where data escapes the flow. It can be a + /// tensor, port, operation; which kind of place can be an output is FW dependent. In + /// comparison to a graph input, an output is a less formally defined thing, and + /// determining the initial list of outputs may include some conventions defined by the + /// frontend itself, not the framework. For example, all output ports without consumers + /// may be considered as outputs. + /// + /// \return A vector of output place references + virtual std::vector<Place::Ptr> get_outputs() const; + + /// \brief Returns a tensor place by a tensor name following framework conventions, or + /// nullptr if a tensor with this name doesn't exist. + /// \param tensorName Name of tensor + /// \return Tensor place corresponding to specified tensor name + virtual Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const; + + /// \brief Returns an operation place by an operation name following framework + /// conventions, or nullptr if an operation with this name doesn't exist. + /// \param operationName Name of operation + /// \return Place representing operation + virtual Place::Ptr get_place_by_operation_name(const std::string& operationName); + + /// \brief Returns an input port place by operation name and appropriate port index + /// \param operationName Name of operation + /// \param inputPortIndex Index of input port for this operation + /// \return Place representing input port of operation + virtual Place::Ptr + get_place_by_operation_and_input_port(const std::string& operationName, + int inputPortIndex); + + /// \brief Returns an output port place by operation name and appropriate port index + /// \param operationName Name of operation + /// \param outputPortIndex Index of output port for this operation + /// \return Place representing output port of operation + virtual Place::Ptr + get_place_by_operation_and_output_port(const std::string& operationName, + int outputPortIndex); + + ///// Naming and annotation ///// + + /// \brief Sets name for tensor. Overwrites existing names of this place + /// \param tensor Tensor place + /// \param newName New name for this tensor + virtual void set_name_for_tensor(Place::Ptr tensor, const std::string& newName); + + /// \brief Adds new name for tensor + /// \param tensor Tensor place + /// \param newName New name to be added to this place + virtual void add_name_for_tensor(Place::Ptr tensor, const std::string& newName); + + /// \brief Sets name for operation. Overwrites existing names of this place + /// \param operation Operation place + /// \param newName New name for this operation + virtual void set_name_for_operation(Place::Ptr operation, const std::string& newName); + + /// \brief Unassigns specified name from tensor place(s) + /// \param name Name of tensor + virtual void free_name_for_tensor(const std::string& name); + + /// \brief Unassigns specified name from operation place(s) + /// \param name Name of operation + virtual void free_name_for_operation(const std::string& name); + + /// \brief Set name for a particular dimension of a place (e.g.
batch dimension) + /// \param place Model's place + /// \param shapeDimIndex Dimension index + /// \param dimName Name to assign to this dimension + virtual void set_name_for_dimension(Place::Ptr place, + size_t shapeDimIndex, + const std::string& dimName); + + ///// Topology Editing ///// + + /// \brief Cut immediately before this place and assign this place as new input; prune + /// all nodes that don't contribute to any output. + /// \param place New place to be assigned as input + /// \param newNameOptional Optional new name assigned to this input place + virtual void cut_and_add_new_input(Place::Ptr place, + const std::string& newNameOptional = ""); + + /// \brief Cut immediately after this place and assign this place as new output; prune + /// all nodes that don't contribute to any output. + /// \param place New place to be assigned as output + /// \param newNameOptional Optional new name assigned to this output place + virtual void cut_and_add_new_output(Place::Ptr place, + const std::string& newNameOptional = ""); + + /// \brief Assign this place as new output or add necessary nodes to represent a new + /// output. + /// + /// \param place Anchor point to add an output + /// \return new output place, may be the same as a given place + virtual Place::Ptr add_output(Place::Ptr place); + + /// \brief Removes any sinks directly attached to this place with all inbound data flow + /// if it is not required by any other output. + /// \param place Model place + virtual void remove_output(Place::Ptr place); + + /// \brief Replaces all existing outputs with new ones removing all data flow that is + /// not required for new outputs. + /// + /// \param outputs Vector with places that will become new outputs; may intersect + /// existing outputs. + virtual void override_all_outputs(const std::vector<Place::Ptr>& outputs); + + /// \brief Modifies the graph to use new inputs instead of existing ones. New inputs + /// should completely satisfy all existing outputs. + /// \param inputs Array of new input places + virtual void override_all_inputs(const std::vector<Place::Ptr>& inputs); + + /// \brief Leaves only the subgraph that is defined by new inputs and new outputs. + /// \param inputs Array of new input places + /// \param outputs Array of new output places + virtual void extract_subgraph(const std::vector<Place::Ptr>& inputs, + const std::vector<Place::Ptr>& outputs); + + ///// Setting tensor properties ///// + + /// \brief Defines all possible shapes that may be used for this place; the place should + /// uniquely refer to some data. This partial shape will be converted to the corresponding + /// shape of the resulting nGraph nodes and will define shape inference when the model is + /// converted to nGraph.
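+ /// A hypothetical example (the `model` and `place` names below are illustrative, not part + /// of this API): + /// model->set_partial_shape(place, ngraph::PartialShape{ngraph::Dimension::dynamic(), 3, 224, 224}); + ///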
+ /// \param place Model place + /// \param shape Partial shape for this place + virtual void set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape); + + /// \brief Returns current partial shape used for this place + /// \param place Model place + /// \return Partial shape for this place + virtual ngraph::PartialShape get_partial_shape(Place::Ptr place) const; + + /// \brief Sets new element type for a place + /// \param place Model place + /// \param type New element type + virtual void set_element_type(Place::Ptr place, const ngraph::element::Type& type); + + /// \brief Freezes a tensor with a statically defined value or replaces the existing value of + /// an already constant node or tensor + /// \param place Tensor place + /// \param value Value for tensor place representing a memory buffer + virtual void set_tensor_value(Place::Ptr place, const void* value); + + /// \brief Defines partial value (lower bound and upper bound) for a tensor place + /// TODO: more details for minValue and maxValue format; who defines shape? + /// \param place Tensor place + /// \param minValue Lower bound of partial value for tensor place + /// \param maxValue Upper bound of partial value for tensor place + virtual void set_tensor_partial_value(Place::Ptr place, + const void* minValue, + const void* maxValue); + }; + + /// \brief An interface for identifying a frontend for a particular framework. + /// Provides the ability to load and convert an input model + class FRONTEND_API FrontEnd + { + public: + typedef std::shared_ptr<FrontEnd> Ptr; + + FrontEnd(); + + virtual ~FrontEnd(); + + /// \brief Loads an input model by a specified model file path + /// If a model is stored in several files (e.g. model topology and model weights), the + /// frontend implementation is responsible for handling this case; generally, the frontend + /// may retrieve other file names from the main file + /// \param path Main model file path + /// \return Loaded input model + virtual InputModel::Ptr load_from_file(const std::string& path) const; + + /// \brief Loads an input model by a specified set of model files + /// This shall be used when the client knows all model files (model, weights, etc) + /// \param paths Array of model files + /// \return Loaded input model + virtual InputModel::Ptr load_from_files(const std::vector<std::string>& paths) const; + + /// \brief Loads an input model from an already loaded memory buffer + /// Memory structure is frontend-defined and is not specified in generic API + /// \param model Model memory buffer + /// \return Loaded input model + virtual InputModel::Ptr load_from_memory(const void* model) const; + + /// \brief Loads an input model from a set of memory buffers + /// Memory structure is frontend-defined and is not specified in generic API + /// \param modelParts Array of model memory buffers + /// \return Loaded input model + virtual InputModel::Ptr + load_from_memory_fragments(const std::vector<const void*>& modelParts) const; + + /// \brief Loads an input model by an input stream representing the main model file + /// \param stream Input stream of main model + /// \return Loaded input model + virtual InputModel::Ptr load_from_stream(std::istream& stream) const; + + /// \brief Loads an input model by input streams representing all model files + /// \param streams Array of input streams for model + /// \return Loaded input model + virtual InputModel::Ptr + load_from_streams(const std::vector<std::istream*>& streams) const; + + /// \brief Completely convert and normalize the entire function; throws if it is not + /// possible + /// \param model Input model +
/// \return fully converted nGraph function + virtual std::shared_ptr<ngraph::Function> convert(InputModel::Ptr model) const; + + /// \brief Completely convert the remaining, not yet converted part of a function. + /// \param partiallyConverted partially converted nGraph function + /// \return fully converted nGraph function + virtual std::shared_ptr<ngraph::Function> + convert(std::shared_ptr<ngraph::Function> partiallyConverted) const; + + /// \brief Convert only those parts of the model that can be converted, leaving others + /// as-is. Converted parts are not normalized by additional transformations; the normalize + /// function or another form of the convert function should be called to finalize the + /// conversion process. + /// \param model Input model + /// \return partially converted nGraph function + virtual std::shared_ptr<ngraph::Function> + convert_partially(InputModel::Ptr model) const; + + /// \brief Convert operations with one-to-one mapping to decoding nodes. + /// Each decoding node is an nGraph node representing a single FW operation node with + /// all attributes represented in an FW-independent way. + /// \param model Input model + /// \return nGraph function after decoding + virtual std::shared_ptr<ngraph::Function> decode(InputModel::Ptr model) const; + + /// \brief Runs normalization passes on a function that was loaded with partial conversion + /// \param function partially converted nGraph function + virtual void normalize(std::shared_ptr<ngraph::Function> function) const; + }; + + /// Capabilities for requested FrontEnd + /// In general, a frontend implementation may be divided into several libraries by capability + /// level. This allows faster loading of a frontend when only limited usage is expected by the + /// client application, and the binary size can be minimized by removing parts that are not + /// needed from the application's package + namespace FrontEndCapabilities + { + /// \brief Just reading and conversion, w/o any modifications; intended to be used in + /// Reader + static const int FEC_DEFAULT = 0; + + /// \brief Topology cutting capability + static const int FEC_CUT = 1; + + /// \brief Query entities by names, renaming and adding new names for operations and + /// tensors + static const int FEC_NAMES = 2; + + /// \brief Partial model conversion and decoding capability + static const int FEC_WILDCARDS = 4; + }; // namespace FrontEndCapabilities + + // -------------- FrontEndManager ----------------- + using FrontEndCapFlags = int; + using FrontEndFactory = std::function<FrontEnd::Ptr(FrontEndCapFlags fec)>; + + /// \brief Frontend management class; loads available frontend plugins on construction. + /// Allows loading a frontend for a particular framework, registering new frontends and + /// listing available frontends. This is the main frontend entry point for client applications + class FRONTEND_API FrontEndManager final + { + public: + FrontEndManager(); + + FrontEndManager(FrontEndManager&&); + FrontEndManager& operator=(FrontEndManager&&); + + ~FrontEndManager(); + + /// \brief Loads frontend by name of framework and capabilities + /// \param framework Framework name. Throws an exception if the name is not in the list of + /// available frontends + /// \param fec Frontend capabilities. It is recommended to use only + /// those capabilities which are needed to minimize load time + /// \return Frontend interface for further loading of models + FrontEnd::Ptr + load_by_framework(const std::string& framework, + FrontEndCapFlags fec = FrontEndCapabilities::FEC_DEFAULT); + + /// \brief Loads frontend by model file path. Selects and loads appropriate frontend + /// depending on model file extension and other file info (header) + /// \param path Path to a model file.
An exception is thrown if no suitable frontend is found + /// \param fec Frontend capabilities. It is recommended to use only those capabilities + /// which are needed to minimize load time + /// \return Frontend interface for further loading of the model + FrontEnd::Ptr load_by_model(const std::string& path, + FrontEndCapFlags fec = FrontEndCapabilities::FEC_DEFAULT); + + /// \brief Gets list of registered frontends + std::vector<std::string> get_available_front_ends() const; + + /// \brief Register frontend with name and factory creation method + void register_front_end(const std::string& name, FrontEndFactory creator); + + private: + class Impl; + + std::unique_ptr<Impl> m_impl; + }; + + // --------- Plugin exporting information -------------- + + /// \brief Each frontend plugin is responsible for exporting a GetAPIVersion function returning + /// the version of the frontend API used for this plugin + /// If the version does not match OV_FRONTEND_API_VERSION, the plugin will not be loaded by + /// FrontEndManager + using FrontEndVersion = uint64_t; + + /// \brief Each frontend plugin is responsible for exporting a GetFrontEndData function returning + /// a heap-allocated pointer to this structure. It will be used by FrontEndManager during loading + /// of plugins + struct FrontEndPluginInfo + { + std::string m_name; + FrontEndFactory m_creator; + }; + + } // namespace frontend + +} // namespace ngraph diff --git a/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager_defs.hpp b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager_defs.hpp new file mode 100644 index 00000000000000..f7c1f3de86419d --- /dev/null +++ b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend_manager_defs.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "ngraph/visibility.hpp" + +// Increment each time the FrontEnd/InputModel/Place interface is changed +#define OV_FRONTEND_API_VERSION 1 + +// Defined if cmake is building the frontend_manager DLL (instead of using it) +#ifdef frontend_manager_EXPORTS +#define FRONTEND_API NGRAPH_HELPER_DLL_EXPORT +#else +#define FRONTEND_API NGRAPH_HELPER_DLL_IMPORT +#endif // frontend_manager_EXPORTS diff --git a/ngraph/frontend/frontend_manager/src/frontend_manager.cpp b/ngraph/frontend/frontend_manager/src/frontend_manager.cpp new file mode 100644 index 00000000000000..863873794afdd4 --- /dev/null +++ b/ngraph/frontend/frontend_manager/src/frontend_manager.cpp @@ -0,0 +1,384 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "plugin_loader.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +//----------- FrontEndManager --------------------------- +class FrontEndManager::Impl +{ + std::vector<PluginHandle> m_loadedLibs; // must be the first class member (destroyed last) + std::map<std::string, FrontEndFactory> m_factories; + +public: + Impl() { registerPlugins(); } + + ~Impl() = default; + + FrontEnd::Ptr loadByFramework(const std::string& framework, FrontEndCapFlags fec) + { + FRONT_END_INITIALIZATION_CHECK( + m_factories.count(framework), "FrontEnd for Framework ", framework, " is not found"); + return m_factories[framework](fec); + } + + std::vector<std::string> availableFrontEnds() const + { + std::vector<std::string> keys; + + std::transform( + m_factories.begin(), + m_factories.end(), + std::back_inserter(keys), + [](const std::pair<std::string, FrontEndFactory>& item) {
return item.first; }); + return keys; + } + + FrontEnd::Ptr loadByModel(const std::string& path, FrontEndCapFlags fec) + { + FRONT_END_NOT_IMPLEMENTED(loadByModel); + } + + void registerFrontEnd(const std::string& name, FrontEndFactory creator) + { + m_factories.insert({name, creator}); + } + +private: + void registerPlugins() + { + auto registerFromDir = [&](const std::string& dir) { + if (!dir.empty()) + { + auto plugins = loadPlugins(dir); + for (auto& plugin : plugins) + { + registerFrontEnd(plugin.m_pluginInfo.m_name, plugin.m_pluginInfo.m_creator); + m_loadedLibs.push_back(std::move(plugin.m_libHandle)); + } + } + }; + std::string envPath = ngraph::getenv_string("OV_FRONTEND_PATH"); + if (!envPath.empty()) + { + auto start = 0u; + auto sepPos = envPath.find(PathSeparator, start); + while (sepPos != std::string::npos) + { + registerFromDir(envPath.substr(start, sepPos - start)); + start = sepPos + 1; + sepPos = envPath.find(PathSeparator, start); + } + registerFromDir(envPath.substr(start, sepPos)); + } + else + { + registerFromDir("."); + } + } +}; + +FrontEndManager::FrontEndManager() + : m_impl(new Impl()) +{ +} + +FrontEndManager::FrontEndManager(FrontEndManager&&) = default; +FrontEndManager& FrontEndManager::operator=(FrontEndManager&&) = default; + +FrontEndManager::~FrontEndManager() = default; + +FrontEnd::Ptr FrontEndManager::load_by_framework(const std::string& framework, FrontEndCapFlags fec) +{ + return m_impl->loadByFramework(framework, fec); +} + +FrontEnd::Ptr FrontEndManager::load_by_model(const std::string& path, FrontEndCapFlags fec) +{ + return m_impl->loadByModel(path, fec); +} + +std::vector<std::string> FrontEndManager::get_available_front_ends() const +{ + return m_impl->availableFrontEnds(); +} + +void FrontEndManager::register_front_end(const std::string& name, FrontEndFactory creator) +{ + m_impl->registerFrontEnd(name, creator); +} + +//----------- FrontEnd --------------------------- + +FrontEnd::FrontEnd() = default; + +FrontEnd::~FrontEnd() = default; + +InputModel::Ptr FrontEnd::load_from_file(const std::string& path) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_file); +} + +InputModel::Ptr FrontEnd::load_from_files(const std::vector<std::string>& paths) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_files); +} + +InputModel::Ptr FrontEnd::load_from_memory(const void* model) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_memory); +} + +InputModel::Ptr + FrontEnd::load_from_memory_fragments(const std::vector<const void*>& modelParts) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_memory_fragments); +} + +InputModel::Ptr FrontEnd::load_from_stream(std::istream& stream) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_stream); +} + +InputModel::Ptr FrontEnd::load_from_streams(const std::vector<std::istream*>& streams) const +{ + FRONT_END_NOT_IMPLEMENTED(load_from_streams); +} + +std::shared_ptr<ngraph::Function> FrontEnd::convert(InputModel::Ptr model) const +{ + FRONT_END_NOT_IMPLEMENTED(convert); +} + +std::shared_ptr<ngraph::Function> FrontEnd::convert(std::shared_ptr<ngraph::Function>) const +{ + FRONT_END_NOT_IMPLEMENTED(convert); +} + +std::shared_ptr<ngraph::Function> FrontEnd::convert_partially(InputModel::Ptr model) const +{ + FRONT_END_NOT_IMPLEMENTED(convert_partially); +} + +std::shared_ptr<ngraph::Function> FrontEnd::decode(InputModel::Ptr model) const +{ + FRONT_END_NOT_IMPLEMENTED(decode); +} + +void FrontEnd::normalize(std::shared_ptr<ngraph::Function> function) const +{ + FRONT_END_NOT_IMPLEMENTED(normalize); +} + +//----------- InputModel --------------------------- +std::vector<Place::Ptr> InputModel::get_inputs() const +{ + FRONT_END_NOT_IMPLEMENTED(get_inputs); +} + +std::vector<Place::Ptr>
InputModel::get_outputs() const +{ + FRONT_END_NOT_IMPLEMENTED(get_outputs); +} + +Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const +{ + FRONT_END_NOT_IMPLEMENTED(get_place_by_tensor_name); +} + +Place::Ptr InputModel::get_place_by_operation_name(const std::string& operationName) +{ + FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_name); +} + +Place::Ptr InputModel::get_place_by_operation_and_input_port(const std::string& operationName, + int inputPortIndex) +{ + FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_and_input_port); +} + +Place::Ptr InputModel::get_place_by_operation_and_output_port(const std::string& operationName, + int outputPortIndex) +{ + FRONT_END_NOT_IMPLEMENTED(get_place_by_operation_and_output_port); +} + +void InputModel::set_name_for_tensor(Place::Ptr tensor, const std::string& newName) +{ + FRONT_END_NOT_IMPLEMENTED(set_name_for_tensor); +} + +void InputModel::add_name_for_tensor(Place::Ptr tensor, const std::string& newName) +{ + FRONT_END_NOT_IMPLEMENTED(add_name_for_tensor); +} + +void InputModel::set_name_for_operation(Place::Ptr operation, const std::string& newName) +{ + FRONT_END_NOT_IMPLEMENTED(set_name_for_operation); +} + +void InputModel::free_name_for_tensor(const std::string& name) +{ + FRONT_END_NOT_IMPLEMENTED(free_name_for_tensor); +} + +void InputModel::free_name_for_operation(const std::string& name) +{ + FRONT_END_NOT_IMPLEMENTED(free_name_for_operation); +} + +void InputModel::set_name_for_dimension(Place::Ptr place, + size_t shapeDimIndex, + const std::string& dimName) +{ + FRONT_END_NOT_IMPLEMENTED(set_name_for_dimension); +} + +void InputModel::cut_and_add_new_input(Place::Ptr place, const std::string& newNameOptional) +{ + FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_input); +} + +void InputModel::cut_and_add_new_output(Place::Ptr place, const std::string& newNameOptional) +{ + FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_output); +} + +Place::Ptr InputModel::add_output(Place::Ptr place) +{ + FRONT_END_NOT_IMPLEMENTED(add_output); +} + +void InputModel::remove_output(Place::Ptr place) +{ + FRONT_END_NOT_IMPLEMENTED(remove_output); +} + +void InputModel::override_all_outputs(const std::vector<Place::Ptr>& outputs) +{ + FRONT_END_NOT_IMPLEMENTED(override_all_outputs); +} + +void InputModel::override_all_inputs(const std::vector<Place::Ptr>& inputs) +{ + FRONT_END_NOT_IMPLEMENTED(override_all_inputs); +} + +void InputModel::extract_subgraph(const std::vector<Place::Ptr>& inputs, + const std::vector<Place::Ptr>& outputs) +{ + FRONT_END_NOT_IMPLEMENTED(extract_subgraph); +} + +// Setting tensor properties +void InputModel::set_partial_shape(Place::Ptr place, const ngraph::PartialShape&) +{ + FRONT_END_NOT_IMPLEMENTED(set_partial_shape); +} + +ngraph::PartialShape InputModel::get_partial_shape(Place::Ptr place) const +{ + FRONT_END_NOT_IMPLEMENTED(get_partial_shape); +} + +void InputModel::set_element_type(Place::Ptr place, const ngraph::element::Type&) +{ + FRONT_END_NOT_IMPLEMENTED(set_element_type); +} + +void InputModel::set_tensor_value(Place::Ptr place, const void* value) +{ + FRONT_END_NOT_IMPLEMENTED(set_tensor_value); +} + +void InputModel::set_tensor_partial_value(Place::Ptr place, + const void* minValue, + const void* maxValue) +{ + FRONT_END_NOT_IMPLEMENTED(set_tensor_partial_value); +} + +//----------- Place --------------------------- +std::vector<std::string> Place::get_names() const +{ + FRONT_END_NOT_IMPLEMENTED(get_names); +} + +std::vector<Place::Ptr> Place::get_consuming_operations(int outputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_consuming_operations); +}
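+ +// A hypothetical end-to-end usage sketch of the API that this file implements; the framework +// name ("pdpd"), the file path and the input shape below are illustrative assumptions, not +// values defined by this patch: +// +// FrontEndManager manager; +// FrontEnd::Ptr frontend = manager.load_by_framework("pdpd"); +// InputModel::Ptr model = frontend->load_from_file("path/to/__model__"); +// Place::Ptr input = model->get_inputs().at(0); +// model->set_partial_shape(input, PartialShape{1, 3, 224, 224}); +// std::shared_ptr<ngraph::Function> function = frontend->convert(model);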
+ +Place::Ptr Place::get_target_tensor(int outputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_target_tensor); +} + +Place::Ptr Place::get_producing_operation(int inputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_producing_operation); +} + +Place::Ptr Place::get_producing_port() const +{ + FRONT_END_NOT_IMPLEMENTED(get_producing_port); +} + +Place::Ptr Place::get_input_port(int inputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_input_port); +} + +Place::Ptr Place::get_input_port(const std::string& inputName, int inputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_input_port); +} + +Place::Ptr Place::get_output_port(int outputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_output_port); +} + +Place::Ptr Place::get_output_port(const std::string& outputName, int outputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_output_port); +} + +std::vector<Place::Ptr> Place::get_consuming_ports() const +{ + FRONT_END_NOT_IMPLEMENTED(get_consuming_ports); +} + +bool Place::is_input() const +{ + FRONT_END_NOT_IMPLEMENTED(is_input); +} + +bool Place::is_output() const +{ + FRONT_END_NOT_IMPLEMENTED(is_output); +} + +bool Place::is_equal(Ptr another) const +{ + FRONT_END_NOT_IMPLEMENTED(is_equal); +} + +bool Place::is_equal_data(Ptr another) const +{ + FRONT_END_NOT_IMPLEMENTED(is_equal_data); +} + +Place::Ptr Place::get_source_tensor(int inputPortIndex) const +{ + FRONT_END_NOT_IMPLEMENTED(get_source_tensor); +} diff --git a/ngraph/frontend/frontend_manager/src/plugin_loader.cpp b/ngraph/frontend/frontend_manager/src/plugin_loader.cpp new file mode 100644 index 00000000000000..53a2957c93b976 --- /dev/null +++ b/ngraph/frontend/frontend_manager/src/plugin_loader.cpp @@ -0,0 +1,110 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifdef _WIN32 +#include +#include +#else // _WIN32 +#include +#include +#include +#endif // _WIN32 + +#include +#include +#include +#include "ngraph/file_util.hpp" + +#include "plugin_loader.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +#ifdef _WIN32 +#define DLOPEN(fileStr) LoadLibrary(TEXT(fileStr.c_str())) +#define DLSYM(obj, func) GetProcAddress(obj, func) +#define DLCLOSE(obj) FreeLibrary(obj) +#else +#define DLOPEN(fileStr) dlopen(fileStr.c_str(), RTLD_LAZY) +#define DLSYM(obj, func) dlsym(obj, func) +#define DLCLOSE(obj) dlclose(obj) +#endif + +// TODO: change to std::filesystem for C++17 +static std::vector<std::string> listFiles(const std::string& path) +{ + std::vector<std::string> res; + try + { + ngraph::file_util::iterate_files( + path, + [&res](const std::string& file, bool is_dir) { + if (!is_dir && file.find("_ngraph_frontend") != std::string::npos) + { +#ifdef _WIN32 + std::string ext = ".dll"; +#elif defined(__APPLE__) + std::string ext = ".dylib"; +#else + std::string ext = ".so"; +#endif + if (file.find(ext) != std::string::npos) + { + res.push_back(file); + } + } + }, + false, + true); + } + catch (...)
+ { + // Ignore exceptions + } + return res; +} + +std::vector ngraph::frontend::loadPlugins(const std::string& dirName) +{ + auto files = listFiles(dirName); + std::vector res; + for (const auto& file : files) + { + auto shared_object = DLOPEN(file); + if (!shared_object) + { + continue; + } + + PluginHandle guard([shared_object, file]() { + // std::cout << "Closing plugin library " << file << std::endl; + DLCLOSE(shared_object); + }); + + auto infoAddr = reinterpret_cast(DLSYM(shared_object, "GetAPIVersion")); + if (!infoAddr) + { + continue; + } + FrontEndVersion plugInfo{reinterpret_cast(infoAddr())}; + + if (plugInfo != OV_FRONTEND_API_VERSION) + { + // Plugin has incompatible API version, do not load it + continue; + } + + auto creatorAddr = reinterpret_cast(DLSYM(shared_object, "GetFrontEndData")); + if (!creatorAddr) + { + continue; + } + + std::unique_ptr fact{ + reinterpret_cast(creatorAddr())}; + + res.push_back(PluginData(std::move(guard), std::move(*fact))); + } + return res; +} diff --git a/ngraph/frontend/frontend_manager/src/plugin_loader.hpp b/ngraph/frontend/frontend_manager/src/plugin_loader.hpp new file mode 100644 index 00000000000000..1ab3fc73baa227 --- /dev/null +++ b/ngraph/frontend/frontend_manager/src/plugin_loader.hpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#ifdef _WIN32 +const char FileSeparator[] = "\\"; +const char PathSeparator[] = ";"; +#else +const char FileSeparator[] = "/"; +const char PathSeparator[] = ":"; +#endif // _WIN32 + +namespace ngraph +{ + namespace frontend + { + /// Plugin library handle wrapper. On destruction calls internal function which frees + /// library handle + class PluginHandle + { + public: + PluginHandle(std::function callOnDestruct) + : m_callOnDestruct(callOnDestruct) + { + } + + PluginHandle(const PluginHandle&) = delete; + + PluginHandle& operator=(const PluginHandle&) = delete; + + PluginHandle(PluginHandle&&) = default; + + PluginHandle& operator=(PluginHandle&&) = default; + + ~PluginHandle() + { + if (m_callOnDestruct) + { + m_callOnDestruct(); + } + } + + private: + std::function m_callOnDestruct; + }; + + struct PluginData + { + PluginData(PluginHandle&& h, FrontEndPluginInfo&& info) + : m_libHandle(std::move(h)) + , m_pluginInfo(info) + { + } + + PluginHandle + m_libHandle; // Shall be destroyed when plugin is not needed anymore to free memory + FrontEndPluginInfo m_pluginInfo; + }; + + // Searches for available plugins in a specified directory + std::vector loadPlugins(const std::string& dirName); + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt new file mode 100644 index 00000000000000..8ae6982b277182 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -0,0 +1,65 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cc) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp ${CMAKE_CURRENT_SOURCE_DIR}/src/*.h) +file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) + +if (TARGET ext_protobuf) + add_dependencies(ngraph::paddlepaddle_ngraph_frontend ext_protobuf) +endif() + +set(paddlepaddle_ngraph_frontend_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) + +# Create named folders for the sources within the .vcproj +# 
Empty name lists them directly under the .vcproj + +source_group("src" FILES ${LIBRARY_SRC}) +source_group("include" FILES ${LIBRARY_HEADERS}) +source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) + +set(PROTOBUF_GENERATE_CPP_APPEND_PATH ON) +file(GLOB proto_files ${CMAKE_CURRENT_SOURCE_DIR}/src/proto/*.proto) +protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${proto_files}) + +include_directories(${Protobuf_INCLUDE_DIRS} ${paddlepaddle_ngraph_frontend_INCLUDE_DIR}) + +# Create shared library +add_library(paddlepaddle_ngraph_frontend SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS} ${PROTO_SRCS} ${PROTO_HDRS}) +add_library(ngraph::paddlepaddle_ngraph_frontend ALIAS paddlepaddle_ngraph_frontend) + +add_dependencies(paddlepaddle_ngraph_frontend libprotobuf_static) + +target_include_directories(paddlepaddle_ngraph_frontend + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src + ${FRONTEND_INCLUDE_PATH} + ${CMAKE_CURRENT_BINARY_DIR}) + +if(COMMAND ie_add_vs_version_file) + ie_add_vs_version_file(NAME paddlepaddle_ngraph_frontend + FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format") +endif() + +target_link_libraries(paddlepaddle_ngraph_frontend PRIVATE ${PROTOBUF_STATIC_LIB_OUTPUT} PUBLIC ngraph PRIVATE ngraph::builder) +target_link_libraries(paddlepaddle_ngraph_frontend PRIVATE frontend_manager) + +add_clang_format_target(paddlepaddle_ngraph_frontend_clang FOR_TARGETS paddlepaddle_ngraph_frontend + EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS}) + +# TODO: Consider to remove the following block (inherited from onnx_import just in case). +if (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") + target_compile_options(paddlepaddle_ngraph_frontend PRIVATE -Wno-undef -Wno-reserved-id-macro -Wno-switch-enum + -Wno-invalid-offsetof -Wno-shorten-64-to-32 -Wno-unused-macros -Wno-missing-variable-declarations + -Wno-unused-private-field -Wno-shadow -Wno-deprecated PUBLIC -Wno-undefined-func-template) +endif() + +install(TARGETS paddlepaddle_ngraph_frontend EXPORT ngraphTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + +if (NGRAPH_EXPORT_TARGETS_ENABLE) + export(TARGETS paddlepaddle_ngraph_frontend NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +endif() diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp new file mode 100644 index 00000000000000..4efac13a5ddbec --- /dev/null +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + class NodeContext; + + class OpValidationFailurePDPD : public OpValidationFailure + { + public: + OpValidationFailurePDPD(const CheckLocInfo& check_loc_info, + const pdpd::NodeContext& node, + const std::string& explanation) + : OpValidationFailure( + check_loc_info, get_error_msg_prefix_pdpd(node), explanation) + { + } + + private: + static std::string get_error_msg_prefix_pdpd(const pdpd::NodeContext& node); + }; + } // namespace pdpd + } // namespace frontend + +/// \brief Macro to check whether a boolean condition holds. +/// \param node_context Object of NodeContext class +/// \param cond Condition to check +/// \param ... 
Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evaluates to `false`. +/// \throws ::ngraph::frontend::pdpd::OpValidationFailurePDPD if `cond` is false. +#define PDPD_OP_VALIDATION_CHECK(node_context, ...) \ + NGRAPH_CHECK_HELPER( \ + ::ngraph::frontend::pdpd::OpValidationFailurePDPD, (node_context), __VA_ARGS__) +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp new file mode 100644 index 00000000000000..566ea9dd910cbc --- /dev/null +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp @@ -0,0 +1,58 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "exceptions.hpp" +#include "model.hpp" + +namespace ngraph +{ + namespace frontend + { + class PDPD_API FrontEndPDPD : public FrontEnd + { + static std::shared_ptr<Function> + convert_model(const std::shared_ptr<InputModelPDPD>& model); + + public: + FrontEndPDPD() = default; + + /** + * @brief Reads model from file and deduces file names of weights + * @param path path to a folder which contains the __model__ file, or path to a .pdmodel file + * @return InputModel::Ptr + */ + InputModel::Ptr load_from_file(const std::string& path) const override; + + /** + * @brief Reads model and weights from files + * @param paths vector containing paths to the .pdmodel and .pdiparams files + * @return InputModel::Ptr + */ + InputModel::Ptr load_from_files(const std::vector<std::string>& paths) const override; + + /** + * @brief Reads model from stream + * @param model_stream stream containing a .pdmodel or __model__ file. Can only be used + * if the model has no weights + * @return InputModel::Ptr + */ + InputModel::Ptr load_from_stream(std::istream& model_stream) const override; + + /** + * @brief Reads model from streams + * @param paths vector of streams containing the .pdmodel and .pdiparams files. Can't be
Can't be + * used in case of multiple weight files + * @return InputModel::Ptr + */ + InputModel::Ptr + load_from_streams(const std::vector& paths) const override; + + std::shared_ptr convert(InputModel::Ptr model) const override; + }; + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp new file mode 100644 index 00000000000000..ddf63fd97e5630 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace ngraph +{ + namespace frontend + { + class OpPlacePDPD; + class TensorPlacePDPD; + + class PDPD_API InputModelPDPD : public InputModel + { + friend class FrontEndPDPD; + class InputModelPDPDImpl; + std::shared_ptr _impl; + + std::vector> getOpPlaces() const; + std::map> getVarPlaces() const; + std::map> getTensorValues() const; + + public: + explicit InputModelPDPD(const std::string& path); + explicit InputModelPDPD(const std::vector& streams); + std::vector get_inputs() const override; + std::vector get_outputs() const override; + Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override; + void override_all_outputs(const std::vector& outputs) override; + void override_all_inputs(const std::vector& inputs) override; + void extract_subgraph(const std::vector& inputs, + const std::vector& outputs) override; + void set_partial_shape(Place::Ptr place, const ngraph::PartialShape&) override; + ngraph::PartialShape get_partial_shape(Place::Ptr place) const override; + void set_element_type(Place::Ptr place, const ngraph::element::Type&) override; + void set_tensor_value(Place::Ptr place, const void* value) override; + }; + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp new file mode 100644 index 00000000000000..a9d041dc202896 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp @@ -0,0 +1,206 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace paddle +{ + namespace framework + { + namespace proto + { + class OpDesc; + class VarDesc; + + } // namespace proto + } // namespace framework +} // namespace paddle + +namespace ngraph +{ + namespace frontend + { + class TensorPlacePDPD; + class OpPlacePDPD; + + class PlacePDPD : public Place + { + public: + PlacePDPD(const InputModel& input_model, const std::vector& names) + : m_input_model(input_model) + , m_names(names) + { + } + + explicit PlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model, std::vector{}) + { + } + + ~PlacePDPD() override = default; + + bool is_input() const override; + + bool is_output() const override; + + bool is_equal(Ptr another) const override { return this == another.get(); } + + std::vector get_names() const override { return m_names; } + + private: + const InputModel& m_input_model; + std::vector m_names; + }; + + class InPortPlacePDPD : public PlacePDPD + { + public: + explicit InPortPlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model) + { + } + + void setOp(const std::weak_ptr& op) { m_op = op; } + + void setSourceTensor(const std::weak_ptr& source_tensor) + { + 
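+                // The port <-> tensor and port <-> op links below are weak_ptrs; the
+                // owning shared_ptrs live in the input model and the op places, so
+                // wiring the graph together here cannot create reference cycles.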
m_source_tensor = source_tensor; + } + + std::shared_ptr getSourceTensorPDPD() const; + + std::shared_ptr getOp(); + + private: + std::weak_ptr m_source_tensor; + std::weak_ptr m_op; + }; + + class OutPortPlacePDPD : public PlacePDPD + { + public: + explicit OutPortPlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model) + { + } + + void setOp(const std::weak_ptr& op) { m_op = op; } + + void setTargetTensor(const std::weak_ptr& target_tensor) + { + m_target_tensor = target_tensor; + } + + std::shared_ptr getTargetTensorPDPD() const; + + private: + std::weak_ptr m_op; + std::weak_ptr m_target_tensor; + }; + + class OpPlacePDPD : public PlacePDPD + { + public: + OpPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& op_desc); + + OpPlacePDPD(const InputModel& input_model, + const std::shared_ptr& op_desc); + + void addInPort(const std::shared_ptr& input, const std::string& name) + { + m_input_ports[name].push_back(input); + } + + void addOutPort(const std::shared_ptr& output, + const std::string& name) + { + m_output_ports[name].push_back(output); + } + + const std::map>>& + getOutputPorts() const + { + return m_output_ports; + } + + const std::map>>& + getInputPorts() const + { + return m_input_ports; + } + + std::shared_ptr getOutputPortPDPD(const std::string& name, int idx) + { + return m_output_ports[name][idx]; + } + + std::shared_ptr getInputPortPDPD(const std::string& name, int idx) + { + return m_input_ports[name][idx]; + } + + const std::shared_ptr& getDesc() const + { + return m_op_desc; + } + + private: + std::shared_ptr m_op_desc; + std::map>> m_input_ports; + std::map>> m_output_ports; + }; + + class TensorPlacePDPD : public PlacePDPD + { + public: + TensorPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& var_desc); + + TensorPlacePDPD(const InputModel& input_model, + const std::shared_ptr& var_desc); + + void addProducingPort(const std::shared_ptr& out_port) + { + m_producing_ports.push_back(out_port); + } + + void addConsumingPort(const std::shared_ptr& in_port) + { + m_consuming_ports.push_back(in_port); + } + + std::vector get_consuming_ports() const override; + + Ptr get_producing_port() const override; + + const PartialShape& getPartialShape() const { return m_pshape; } + + const element::Type& getElementType() const { return m_type; } + + void setPartialShape(const PartialShape& pshape) { m_pshape = pshape; } + + void setElementType(const element::Type& type) { m_type = type; } + + const std::shared_ptr& getDesc() const + { + return m_var_desc; + } + + private: + std::shared_ptr m_var_desc; + PartialShape m_pshape; + element::Type m_type; + + std::vector> m_producing_ports; + std::vector> m_consuming_ports; + }; + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp new file mode 100644 index 00000000000000..19dcc61d24a4cf --- /dev/null +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +// Defined if we are building the plugin DLL (instead of using it) +#ifdef paddlepaddle_ngraph_frontend_EXPORTS +#define PDPD_API NGRAPH_HELPER_DLL_EXPORT +#else +#define PDPD_API NGRAPH_HELPER_DLL_IMPORT +#endif // paddlepaddle_ngraph_frontend_EXPORTS + +namespace ngraph +{ + 
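+    // Note: the helpers below throw a plain std::runtime_error; converters that have
+    // a NodeContext at hand can use PDPD_OP_VALIDATION_CHECK (see exceptions.hpp)
+    // instead, which prefixes the message with the node being validated.
+    // Usage sketch (illustrative only):
+    //   PDPD_ASSERT(rank == 4, "expected a 4D input");
+    //   NOT_IMPLEMENTED("dynamic rank");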
namespace frontend + { + inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") + { + if (!ex) + throw std::runtime_error(msg); + } + +#define PDPD_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg) + +#define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented") + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/decoder.cpp b/ngraph/frontend/paddlepaddle/src/decoder.cpp new file mode 100644 index 00000000000000..13beaed6a20587 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/decoder.cpp @@ -0,0 +1,171 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "framework.pb.h" + +#include "decoder.hpp" + +namespace ngraph +{ + namespace frontend + { + using namespace paddle::framework; + + std::map TYPE_MAP{ + {proto::VarType_Type::VarType_Type_BOOL, ngraph::element::boolean}, + {proto::VarType_Type::VarType_Type_INT16, ngraph::element::i16}, + {proto::VarType_Type::VarType_Type_INT32, ngraph::element::i32}, + {proto::VarType_Type::VarType_Type_INT64, ngraph::element::i64}, + {proto::VarType_Type::VarType_Type_FP16, ngraph::element::f16}, + {proto::VarType_Type::VarType_Type_FP32, ngraph::element::f32}, + {proto::VarType_Type::VarType_Type_FP64, ngraph::element::f64}, + {proto::VarType_Type::VarType_Type_UINT8, ngraph::element::u8}, + {proto::VarType_Type::VarType_Type_INT8, ngraph::element::i8}, + {proto::VarType_Type::VarType_Type_BF16, ngraph::element::bf16}}; + + ngraph::element::Type DecoderPDPDProto::get_dtype(const std::string& name, + ngraph::element::Type def) const + { + auto dtype = (paddle::framework::proto::VarType_Type)get_int(name); + return TYPE_MAP[dtype]; + } + + std::vector DecoderPDPDProto::get_ints(const std::string& name, + const std::vector& def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + return std::vector(attrs[0].ints().begin(), attrs[0].ints().end()); + } + + int DecoderPDPDProto::get_int(const std::string& name, int def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + return attrs[0].i(); + } + + std::vector DecoderPDPDProto::get_floats(const std::string& name, + const std::vector& def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + + return std::vector(attrs[0].floats().begin(), attrs[0].floats().end()); + } + + float DecoderPDPDProto::get_float(const std::string& name, float def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + return attrs[0].f(); + } + + std::string DecoderPDPDProto::get_str(const std::string& name, const std::string& def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + return attrs[0].s(); + } + + bool DecoderPDPDProto::get_bool(const std::string& name, bool def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + return attrs[0].b(); + } + + std::vector DecoderPDPDProto::get_longs(const std::string& name, + const std::vector& def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + + return std::vector(attrs[0].longs().begin(), attrs[0].longs().end()); + } + + int64_t DecoderPDPDProto::get_long(const std::string& name, const int64_t& 
def) const + { + auto attrs = decode_attribute_helper(name); + if (attrs.empty()) + { + return def; + } + + return attrs[0].l(); + } + + std::vector DecoderPDPDProto::get_output_names() const + { + std::vector output_names; + for (const auto& output : op_place->getDesc()->outputs()) + { + output_names.push_back(output.parameter()); + } + return output_names; + } + + std::vector + DecoderPDPDProto::decode_attribute_helper(const std::string& name) const + { + std::vector attrs; + for (const auto& attr : op_place->getDesc()->attrs()) + { + if (attr.name() == name) + attrs.push_back(attr); + } + FRONT_END_GENERAL_CHECK(attrs.size() <= 1, + "An error occurred while parsing the ", + name, + " attribute of ", + op_place->getDesc()->type(), + "node. Unsupported number of attributes. Current number: ", + attrs.size(), + " Expected number: 0 or 1"); + return attrs; + } + + std::vector + DecoderPDPDProto::get_out_port_types(const std::string& port_name) const + { + std::vector output_types; + for (const auto& out_port : op_place->getOutputPorts().at(port_name)) + { + output_types.push_back(out_port->getTargetTensorPDPD()->getElementType()); + } + return output_types; + } + + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/decoder.hpp b/ngraph/frontend/paddlepaddle/src/decoder.hpp new file mode 100644 index 00000000000000..12a6c69b699ae5 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/decoder.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "framework.pb.h" + +#include +#include + +#include +#include + +namespace ngraph +{ + namespace frontend + { + extern std::map TYPE_MAP; + + class DecoderPDPDProto + { + public: + explicit DecoderPDPDProto(const std::shared_ptr& op) + : op_place(op) + { + } + + // TODO: Further populate get_XXX methods on demand + std::vector get_ints(const std::string& name, + const std::vector& def = {}) const; + int get_int(const std::string& name, int def = 0) const; + std::vector get_floats(const std::string& name, + const std::vector& def = {}) const; + float get_float(const std::string& name, float def = 0.) 
const; + std::string get_str(const std::string& name, const std::string& def = "") const; + bool get_bool(const std::string& name, bool def = false) const; + std::vector get_longs(const std::string& name, + const std::vector& def = {}) const; + int64_t get_long(const std::string& name, const int64_t& def = {}) const; + + ngraph::element::Type get_dtype(const std::string& name, + ngraph::element::Type def) const; + + const std::string& get_op_type() const { return op_place->getDesc()->type(); } + std::vector get_output_names() const; + std::vector get_out_port_types(const std::string& port_name) const; + + private: + std::vector + decode_attribute_helper(const std::string& name) const; + std::shared_ptr op_place; + }; + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/exceptions.cpp b/ngraph/frontend/paddlepaddle/src/exceptions.cpp new file mode 100644 index 00000000000000..e6ed8277d7fb37 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/exceptions.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "paddlepaddle_frontend/exceptions.hpp" +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + std::string + OpValidationFailurePDPD::get_error_msg_prefix_pdpd(const pdpd::NodeContext& node) + { + std::stringstream ss; + ss << "While validating node '" << node.op_type() << '\''; + return ss.str(); + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp new file mode 100644 index 00000000000000..b2c5fed2fd3af3 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -0,0 +1,221 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "framework.pb.h" + +#include +#include +#include + +#include +#include + +#include +#include "decoder.hpp" +#include "node_context.hpp" +#include "op_table.hpp" + +#include + +#include "frontend_manager/frontend_manager.hpp" + +using namespace ngraph::opset7; +using namespace ngraph; +using namespace ngraph::frontend; + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + NamedOutputs make_ng_node(std::map>& nodes, + const std::shared_ptr& op_place, + const std::map& CREATORS_MAP) + { + const auto& op = op_place->getDesc(); + std::cout << "Making node: " << op->type() << std::endl; + + FRONT_END_OP_CONVERSION_CHECK(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), + "No creator found for ", + op->type(), + " node."); + pdpd::NamedInputs named_inputs; + const auto& input_ports = op_place->getInputPorts(); + for (const auto& name_to_ports : input_ports) + { + for (const auto& port : name_to_ports.second) + { + const auto& var_desc = port->getSourceTensorPDPD()->getDesc(); + if (nodes.count(var_desc->name())) + named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name())); + else + // return empty map when not all inputs exist. 
It usually means that + // these nodes are not used because model inputs were overwritten + return NamedOutputs(); + } + } + + return CREATORS_MAP.at(op->type())( + NodeContext(DecoderPDPDProto(op_place), named_inputs)); + } + + } // namespace pdpd + + std::shared_ptr + FrontEndPDPD::convert_model(const std::shared_ptr& model) + { + std::cout << "Convert Model Start" << std::endl; + + std::map> nodes_dict(model->getTensorValues()); + ParameterVector parameter_nodes; + ResultVector result_nodes; + + std::map CREATORS_MAP = pdpd::get_supported_ops(); + for (const auto& _inp_place : model->get_inputs()) + { + const auto& inp_place = std::dynamic_pointer_cast(_inp_place); + const auto& var = inp_place->getDesc(); + const auto& shape = inp_place->getPartialShape(); + const auto& type = inp_place->getElementType(); + auto param = std::make_shared(type, shape); + param->set_friendly_name(var->name()); + nodes_dict[var->name()] = param; + parameter_nodes.push_back(param); + } + + const auto& op_places = model->getOpPlaces(); + for (const auto& op_place : op_places) + { + const auto& op_type = op_place->getDesc()->type(); + std::cerr << "Observing " << op_type << "\n"; + if (op_type == "feed" || op_type == "fetch") + { + // inputs and outputs are stored in the model already + continue; + } + else + { + const auto& named_outputs = + pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + + // set layer name by the name of first output var + if (!named_outputs.empty()) + { + const auto& first_output_var = op_place->getOutputPorts() + .begin() + ->second.at(0) + ->getTargetTensorPDPD() + ->getDesc(); + auto node = named_outputs.begin()->second[0].get_node_shared_ptr(); + node->set_friendly_name(first_output_var->name()); + std::cerr << "Named with " << node->get_friendly_name() << "\n"; + } + + const auto& out_ports = op_place->getOutputPorts(); + for (const auto& name_to_outputs : named_outputs) + { + const auto& ports = out_ports.at(name_to_outputs.first); + FRONT_END_OP_CONVERSION_CHECK( + ports.size() == name_to_outputs.second.size(), + "The number of output tensors must be equal to " + "the number of outputs of the ngraph node."); + for (size_t idx = 0; idx < ports.size(); ++idx) + { + const auto& var = ports[idx]->getTargetTensorPDPD()->getDesc(); + name_to_outputs.second[idx].get_tensor().set_names({var->name()}); + // if nodes_dict already has node mapped to this tensor name it usually + // means that it was overwritten using setTensorValue + if (!nodes_dict.count(var->name())) + nodes_dict[var->name()] = name_to_outputs.second[idx]; + } + } + } + } + + for (const auto& _outp_place : model->get_outputs()) + { + const auto& outp_place = std::dynamic_pointer_cast(_outp_place); + auto var = outp_place->getDesc(); + auto input_var_name = var->name(); + auto result = std::make_shared(nodes_dict.at(input_var_name)); + result->set_friendly_name(input_var_name + "/Result"); + result_nodes.push_back(result); + } + + return std::make_shared(result_nodes, parameter_nodes); + } + + InputModel::Ptr FrontEndPDPD::load_from_file(const std::string& path) const + { + return load_from_files({path}); + } + + InputModel::Ptr FrontEndPDPD::load_from_files(const std::vector& paths) const + { + if (paths.size() == 1) + { + // The case when folder with __model__ and weight files is provided or .pdmodel file + return std::make_shared(paths[0]); + } + else if (paths.size() == 2) + { + // The case when .pdmodel and .pdparams files are provided + std::ifstream model_stream(paths[0], std::ios::in | 
std::ifstream::binary); + FRONT_END_INITIALIZATION_CHECK(model_stream && model_stream.is_open(), + "Cannot open model file."); + std::ifstream weights_stream(paths[1], std::ios::in | std::ifstream::binary); + FRONT_END_INITIALIZATION_CHECK(weights_stream && weights_stream.is_open(), + "Cannot open weights file."); + return load_from_streams({&model_stream, &weights_stream}); + } + FRONT_END_INITIALIZATION_CHECK(false, "Model can be loaded either from 1 or 2 files"); + } + + InputModel::Ptr FrontEndPDPD::load_from_stream(std::istream& model_stream) const + { + return load_from_streams({&model_stream}); + } + + InputModel::Ptr + FrontEndPDPD::load_from_streams(const std::vector& streams) const + { + return std::make_shared(streams); + } + + std::shared_ptr FrontEndPDPD::convert(InputModel::Ptr model) const + { + std::cerr << "[ INFO ] PFrontEndPDPD::convert invoked\n"; + auto pdpd_model = std::dynamic_pointer_cast(model); + auto f = convert_model(pdpd_model); + std::cerr << "[ INFO ] Resulting nGraph function contains " << f->get_ops().size() + << "\n"; + return f; + } + + } // namespace frontend +} // namespace ngraph + +extern "C" PDPD_API FrontEndVersion GetAPIVersion() +{ + return OV_FRONTEND_API_VERSION; +} + +extern "C" PDPD_API void* GetFrontEndData() +{ + FrontEndPluginInfo* res = new FrontEndPluginInfo(); + res->m_name = "pdpd"; + res->m_creator = [](FrontEndCapFlags) { return std::make_shared(); }; + return res; +} \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/model.cpp b/ngraph/frontend/paddlepaddle/src/model.cpp new file mode 100644 index 00000000000000..c3045eee19906c --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/model.cpp @@ -0,0 +1,447 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include +#include +#include "decoder.hpp" +#include "framework.pb.h" +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + using namespace paddle::framework::proto; + + class InputModelPDPD::InputModelPDPDImpl + { + public: + InputModelPDPDImpl(const std::string& path, const InputModel& input_model); + InputModelPDPDImpl(const std::vector& streams, + const InputModel& input_model); + std::vector getInputs() const; + std::vector getOutputs() const; + Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; + void overrideAllOutputs(const std::vector& outputs); + void overrideAllInputs(const std::vector& inputs); + void extractSubgraph(const std::vector& inputs, + const std::vector& outputs); + void setDefaultShape(Place::Ptr place, const ngraph::Shape&); + void setPartialShape(Place::Ptr place, const ngraph::PartialShape&); + ngraph::PartialShape getPartialShape(Place::Ptr place) const; + void setElementType(Place::Ptr place, const ngraph::element::Type&); + void setTensorValue(Place::Ptr place, const void* value); + + std::vector readWeight(const std::string& name, int64_t len); + std::vector> getOpPlaces() const { return m_op_places; } + std::map> getVarPlaces() const + { + return m_var_places; + } + std::map> getTensorValues() const + { + return m_tensor_values; + }; + + private: + void loadPlaces(); + void loadConsts(std::string folder_with_weights, std::istream* weight_stream); + + std::vector> m_op_places; + std::map> m_var_places; + std::shared_ptr m_fw_ptr; + const InputModel& m_input_model; + std::vector m_inputs; + std::vector m_outputs; + std::map> m_tensor_values; + }; + + void InputModelPDPD::InputModelPDPDImpl::loadPlaces() + { + const int 
cnt_of_blocks = m_fw_ptr->blocks_size(); + const auto& blocks = m_fw_ptr->blocks(); + + for (int block_idx = 0; block_idx < cnt_of_blocks; block_idx++) + { + const auto& block = blocks[block_idx]; + + for (const auto& var : block.vars()) + { + m_var_places[var.name()] = std::make_shared( + m_input_model, std::make_shared(var)); + } + + for (const auto& op : block.ops()) + { + auto op_place = + std::make_shared(m_input_model, std::make_shared(op)); + m_op_places.push_back(op_place); + + for (const auto& output : op.outputs()) + { + for (const auto& var_name : output.arguments()) + { + auto out_port = std::make_shared(m_input_model); + + // connect out_port and tensor + const auto& tensor = m_var_places.at(var_name); + tensor->addProducingPort(out_port); + out_port->setTargetTensor(tensor); + + // connect out_port and op + op_place->addOutPort(out_port, output.parameter()); + out_port->setOp(op_place); + } + } + + for (const auto& input : op.inputs()) + { + for (const auto& var_name : input.arguments()) + { + auto in_port = std::make_shared(m_input_model); + + // connect in_port and tensor + const auto& tensor = m_var_places.at(var_name); + tensor->addConsumingPort(in_port); + in_port->setSourceTensor(tensor); + + // connect in_port and op + op_place->addInPort(in_port, input.parameter()); + in_port->setOp(op_place); + } + } + + // Determine outputs and inputs + if (op.type() == "feed") + { + const auto& place = op_place->getOutputPortPDPD("Out", 0); + const auto& var_place = std::dynamic_pointer_cast( + place->getTargetTensorPDPD()); + const auto& tensor_desc = + var_place->getDesc()->type().lod_tensor().tensor(); + const auto& dims = tensor_desc.dims(); + + var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]); + var_place->setPartialShape( + PartialShape(std::vector(dims.begin(), dims.end()))); + m_inputs.push_back(var_place); + } + else if (op.type() == "fetch") + { + auto place = op_place->getInputPortPDPD("X", 0); + m_outputs.push_back(place->getSourceTensorPDPD()); + } + } + } + } + + namespace pdpd + { + bool endsWith(const std::string& str, const std::string& suffix) + { + if (str.length() >= suffix.length()) + { + return (0 == + str.compare(str.length() - suffix.length(), suffix.length(), suffix)); + } + return false; + } + + void read_tensor(std::istream& is, char* data, size_t len) + { + std::vector header(16); + is.read(&header[0], 16); + uint32_t dims_len = 0; + is.read(reinterpret_cast(&dims_len), 4); + std::vector dims_struct(dims_len); + is.read(&dims_struct[0], dims_len); + is.read(data, len); + } + + } // namespace pdpd + + void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weights, + std::istream* weight_stream) + { + for (const auto& item : m_var_places) + { + const auto& var_desc = item.second->getDesc(); + const auto& name = item.first; + if (pdpd::endsWith(name, "feed") || pdpd::endsWith(name, "fetch")) + continue; + if (!var_desc->persistable()) + continue; + + FRONT_END_GENERAL_CHECK(var_desc->type().type() == + paddle::framework::proto::VarType::LOD_TENSOR); + const auto& tensor = var_desc->type().lod_tensor().tensor(); + Shape shape(tensor.dims().cbegin(), tensor.dims().cend()); + const auto& type = TYPE_MAP[tensor.data_type()]; + const auto& data_length = shape_size(shape) * type.size(); + std::vector tensor_data(data_length); + + if (weight_stream) + { + pdpd::read_tensor( + *weight_stream, reinterpret_cast(&tensor_data[0]), data_length); + } + else if (!folder_with_weights.empty()) + { + std::ifstream is(folder_with_weights + "/" 
+ name,
+                                    std::ios::in | std::ifstream::binary);
+                    FRONT_END_GENERAL_CHECK(is && is.is_open(),
+                                            "Cannot open file for constant value.");
+                    pdpd::read_tensor(is, reinterpret_cast<char*>(&tensor_data[0]), data_length);
+                }
+                else
+                {
+                    FRONT_END_GENERAL_CHECK(
+                        false, "Either folder with weights or stream must be provided.");
+                }
+
+                auto const_node = opset7::Constant::create(type, shape, &tensor_data[0]);
+                const_node->set_friendly_name(name);
+                m_tensor_values[name] = const_node;
+            }
+        }
+
+        InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& path,
+                                                               const InputModel& input_model)
+            : m_fw_ptr{std::make_shared<ProgramDesc>()}
+            , m_input_model(input_model)
+        {
+            std::string ext = ".pdmodel";
+            std::string model_file(path);
+            std::unique_ptr<std::ifstream> weights_stream;
+            if (model_file.length() >= ext.length() &&
+                (0 == model_file.compare(model_file.length() - ext.length(), ext.length(), ext)))
+            {
+                std::string weights_file(path);
+                weights_file.replace(weights_file.size() - ext.size(), ext.size(), ".pdiparams");
+                weights_stream = std::unique_ptr<std::ifstream>(
+                    new std::ifstream(weights_file, std::ios::binary));
+                // if the weights file cannot be opened, either the model has no constants
+                // or the path is wrong
+                if (!weights_stream || !weights_stream->is_open())
+                {
+                    std::cerr << "[WARNING:] Cannot open file containing weights: " << weights_file
+                              << std::endl;
+                }
+            }
+            else
+            {
+                model_file += "/__model__";
+            }
+
+            std::ifstream pb_stream(model_file, std::ios::binary);
+            FRONT_END_GENERAL_CHECK(m_fw_ptr->ParseFromIstream(&pb_stream),
+                                    "Model can't be parsed");
+
+            std::cout << "Loading places" << std::endl;
+            loadPlaces();
+            std::cout << "Loading consts" << std::endl;
+            loadConsts(weights_stream ? "" : path, weights_stream.get());
+        }
+
+        InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(
+            const std::vector<std::istream*>& streams, const InputModel& input_model)
+            : m_fw_ptr{std::make_shared<ProgramDesc>()}
+            , m_input_model(input_model)
+        {
+            if (streams.size() == 1)
+            {
+                std::cerr << "[WARNING:] Stream for weights not provided."
<< std::endl; + } + else + { + FRONT_END_GENERAL_CHECK( + streams.size() == 2, + "Two streams are needed to load a model: model and weights streams"); + } + FRONT_END_GENERAL_CHECK(m_fw_ptr->ParseFromIstream(streams[0]), + "Model can't be parsed"); + + loadPlaces(); + if (streams.size() > 1) + loadConsts("", streams[1]); + } + + std::vector InputModelPDPD::InputModelPDPDImpl::getInputs() const + { + return m_inputs; + } + + std::vector InputModelPDPD::InputModelPDPDImpl::getOutputs() const + { + return m_outputs; + } + + Place::Ptr InputModelPDPD::InputModelPDPDImpl::getPlaceByTensorName( + const std::string& tensorName) const + { + if (m_var_places.count(tensorName)) + return m_var_places.at(tensorName); + return nullptr; + } + + namespace pdpd + { + std::shared_ptr castToTensorPlace(const Place::Ptr& place) + { + if (auto var_place = std::dynamic_pointer_cast(place)) + { + return var_place; + } + else if (auto in_port_place = std::dynamic_pointer_cast(place)) + { + return in_port_place->getSourceTensorPDPD(); + } + else if (auto out_port_place = std::dynamic_pointer_cast(place)) + { + return out_port_place->getTargetTensorPDPD(); + } + FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlacePDPD."); + } + + } // namespace pdpd + + void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs( + const std::vector& inputs) + { + m_inputs.clear(); + for (const auto& inp : inputs) + { + m_inputs.push_back(pdpd::castToTensorPlace(inp)); + } + } + + void InputModelPDPD::InputModelPDPDImpl::overrideAllOutputs( + const std::vector& outputs) + { + m_outputs.clear(); + for (const auto& outp : outputs) + { + m_outputs.push_back(pdpd::castToTensorPlace(outp)); + } + } + + void InputModelPDPD::InputModelPDPDImpl::extractSubgraph( + const std::vector& inputs, const std::vector& outputs) + { + overrideAllInputs(inputs); + overrideAllOutputs(outputs); + } + + void InputModelPDPD::InputModelPDPDImpl::setDefaultShape(Place::Ptr place, + const ngraph::Shape& shape) + { + FRONT_END_NOT_IMPLEMENTED("setDefaultShape"); + } + + void + InputModelPDPD::InputModelPDPDImpl::setPartialShape(Place::Ptr place, + const ngraph::PartialShape& p_shape) + { + pdpd::castToTensorPlace(place)->setPartialShape(p_shape); + } + + ngraph::PartialShape + InputModelPDPD::InputModelPDPDImpl::getPartialShape(Place::Ptr place) const + { + return pdpd::castToTensorPlace(place)->getPartialShape(); + } + + void InputModelPDPD::InputModelPDPDImpl::setElementType(Place::Ptr place, + const ngraph::element::Type& type) + { + pdpd::castToTensorPlace(place)->setElementType(type); + } + + void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const void* value) + { + auto tensor_place = pdpd::castToTensorPlace(place); + auto p_shape = tensor_place->getPartialShape(); + auto type = tensor_place->getElementType(); + auto constant = opset7::Constant::create(type, p_shape.to_shape(), value); + auto name = tensor_place->get_names()[0]; + constant->set_friendly_name(name); + m_tensor_values[name] = constant; + } + + InputModelPDPD::InputModelPDPD(const std::string& path) + : _impl{std::make_shared(path, *this)} + { + } + + InputModelPDPD::InputModelPDPD(const std::vector& streams) + : _impl{std::make_shared(streams, *this)} + { + } + + std::vector> InputModelPDPD::getOpPlaces() const + { + return _impl->getOpPlaces(); + } + + std::map> InputModelPDPD::getVarPlaces() const + { + return _impl->getVarPlaces(); + } + + std::map> InputModelPDPD::getTensorValues() const + { + return _impl->getTensorValues(); + } + + 
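+        // The public InputModelPDPD methods below are thin wrappers that forward to the
+        // pimpl (InputModelPDPDImpl), keeping implementation details out of the header.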
std::vector InputModelPDPD::get_inputs() const { return _impl->getInputs(); } + + std::vector InputModelPDPD::get_outputs() const { return _impl->getOutputs(); } + + Place::Ptr InputModelPDPD::get_place_by_tensor_name(const std::string& tensorName) const + { + return _impl->getPlaceByTensorName(tensorName); + } + + void InputModelPDPD::override_all_outputs(const std::vector& outputs) + { + return _impl->overrideAllOutputs(outputs); + } + + void InputModelPDPD::override_all_inputs(const std::vector& inputs) + { + return _impl->overrideAllInputs(inputs); + } + + void InputModelPDPD::extract_subgraph(const std::vector& inputs, + const std::vector& outputs) + { + return _impl->extractSubgraph(inputs, outputs); + } + + void InputModelPDPD::set_partial_shape(Place::Ptr place, + const ngraph::PartialShape& p_shape) + { + return _impl->setPartialShape(place, p_shape); + } + + ngraph::PartialShape InputModelPDPD::get_partial_shape(Place::Ptr place) const + { + return _impl->getPartialShape(place); + } + + void InputModelPDPD::set_element_type(Place::Ptr place, const ngraph::element::Type& type) + { + return _impl->setElementType(place, type); + } + + void InputModelPDPD::set_tensor_value(Place::Ptr place, const void* value) + { + return _impl->setTensorValue(place, value); + } + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/node_context.hpp b/ngraph/frontend/paddlepaddle/src/node_context.hpp new file mode 100644 index 00000000000000..8fc77debfb6dc1 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/node_context.hpp @@ -0,0 +1,192 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include +#include "decoder.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + using InPortName = std::string; + using OutPortName = std::string; + using TensorName = std::string; + using NamedOutputs = std::map; + using NamedInputs = std::map; + + /// Keep necessary data for a single node in the original FW graph to facilitate + /// conversion process in the rules code. 
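+            /// It exposes the operator's attributes through the decoder and the already
+            /// converted nGraph inputs under their original PDPD port names.
+            /// Typical converter usage (illustrative only):
+            ///   auto x = node.get_ng_input("X");
+            ///   auto axis = node.get_attribute<int32_t>("axis", -1);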
+ class NodeContext + { + const DecoderPDPDProto& node; + const NamedInputs& name_map; + + public: + NodeContext(const DecoderPDPDProto& _node, NamedInputs& _name_map) + : node(_node) + , name_map(_name_map) + { + } + + /// Detects if there is at least one input attached with a given name + bool has_ng_input(const std::string& name) const + { + auto found = name_map.find(name); + if (found != name_map.end()) + return !found->second.empty(); + return false; + } + + size_t get_ng_input_size(const std::string& name) const + { + return name_map.at(name).size(); + } + + /// Returns exactly one input with a given name; throws if there is no inputs or + /// there are more than one input + Output get_ng_input(const std::string& name) const + { + FRONT_END_GENERAL_CHECK(name_map.at(name).size() == 1); + return name_map.at(name).at(0); + } + + /// Returns all inputs with a given name + OutputVector get_ng_inputs(const std::string& name) const + { + return name_map.at(name); + } + + template + T get_attribute(const std::string& name, const T& def = T()) const; + + template + bool has_attribute(const std::string& name) const + { + // TODO: Rework this hack + try + { + get_attribute(name); + return true; + } + catch (const GeneralFailure&) + { + return false; + } + } + + const std::string& op_type() const { return node.get_op_type(); } + std::vector get_output_names() const + { + return node.get_output_names(); + } + std::vector + get_out_port_types(const std::string& port_name) const + { + return node.get_out_port_types(port_name); + } + ngraph::element::Type get_out_port_type(const std::string& port_name) const; + NamedOutputs default_single_output_mapping( + const std::shared_ptr& ngraph_node, + const std::vector& required_pdpd_out_names) const; + }; + + template <> + inline int32_t NodeContext::get_attribute(const std::string& name, + const int32_t& def) const + { + return node.get_int(name, def); + } + + template <> + inline float NodeContext::get_attribute(const std::string& name, const float& def) const + { + return node.get_float(name, def); + } + + template <> + inline std::string NodeContext::get_attribute(const std::string& name, + const std::string& def) const + { + return node.get_str(name, def); + } + + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_ints(name, def); + } + + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_floats(name, def); + } + + template <> + inline bool NodeContext::get_attribute(const std::string& name, const bool& def) const + { + return node.get_bool(name, def); + } + + template <> + inline ngraph::element::Type + NodeContext::get_attribute(const std::string& name, + const ngraph::element::Type& def) const + { + return node.get_dtype(name, def); + } + + inline ngraph::element::Type + NodeContext::get_out_port_type(const std::string& port_name) const + { + auto types = get_out_port_types(port_name); + FRONT_END_GENERAL_CHECK(types.size() > 0, "Port has no tensors connected."); + FRONT_END_GENERAL_CHECK(std::equal(types.begin() + 1, types.end(), types.begin()), + "Port has tensors with different types connected."); + return types[0]; + } + + inline NamedOutputs NodeContext::default_single_output_mapping( + const std::shared_ptr& ngraph_node, + const std::vector& required_pdpd_out_names) const + { + NamedOutputs named_outputs; + const auto& ngraph_outputs = ngraph_node->outputs(); + 
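+                // The single nGraph output is mapped to every PDPD output name that the
+                // op declares and that the caller listed as required; any other declared
+                // names are left unmapped.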
const auto& pdpd_op_output_names = this->get_output_names(); + FRONT_END_GENERAL_CHECK(ngraph_outputs.size() == 1, + "nGraph node must have exactly one output"); + for (const auto& pdpd_name : pdpd_op_output_names) + { + if (std::find(required_pdpd_out_names.begin(), + required_pdpd_out_names.end(), + pdpd_name) != required_pdpd_out_names.end()) + named_outputs[pdpd_name] = {ngraph_outputs[0]}; + } + return named_outputs; + } + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_longs(name, def); + } + + template <> + inline int64_t NodeContext::get_attribute(const std::string& name, + const int64_t& def) const + { + return node.get_long(name, def); + } + + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.cpp b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp new file mode 100644 index 00000000000000..7d8c069031d07f --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "argmax.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs argmax(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + bool flatten = node.get_attribute("flatten"); + const element::Type& index_element_type = element::i64; + const Output k = + ngraph::opset6::Constant::create(ngraph::element::i64, {}, {1}); + + if (!flatten) + { + auto axis = node.get_attribute("axis"); + const auto axis_to_remove = + ngraph::opset6::Constant::create(element::u64, Shape{}, {axis}); + auto node_topk = std::make_shared( + data, k, axis, "max", "index", index_element_type); + const auto reshaped_indices = std::make_shared( + node_topk->output(1), axis_to_remove); + return node.default_single_output_mapping( + {std::make_shared(reshaped_indices, + element::i64)}, + {"Out"}); + } + else + { + int64_t axis = 0; + const Output reshape_flatten = + ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {-1}); + auto node_reshape = + std::make_shared(data, reshape_flatten, true); + auto node_topk = std::make_shared( + node_reshape, k, axis, "max", "index", index_element_type); + return node.default_single_output_mapping( + {std::make_shared(node_topk->output(1), + element::i64)}, + {"Out"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.hpp b/ngraph/frontend/paddlepaddle/src/op/argmax.hpp new file mode 100644 index 00000000000000..767e9f75770c4a --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/argmax.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs argmax(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp new file mode 100644 index 00000000000000..fb503abbba80e8 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"assign_value.hpp" +#include +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs assign_value(const NodeContext& node) + { + std::vector shape = node.get_attribute>("shape"); + auto dtype = node.get_attribute("dtype"); + std::shared_ptr const_node; + + switch (dtype) + { + case element::i32: + { + auto values = node.get_attribute>("int32_values"); + const_node = {opset6::Constant::create( + dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + case element::f32: + { + std::vector values = + node.get_attribute>("fp32_values"); + const_node = {opset6::Constant::create( + dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + case element::boolean: + { + auto values = node.get_attribute>("bool_values"); + const_node = {opset6::Constant::create( + dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + case element::i64: + { + auto values = node.get_attribute>("int64_values"); + const_node = {opset6::Constant::create( + dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + default: + { + PDPD_OP_VALIDATION_CHECK( + node, false, "assign_value only supports int32, int64, float32, bool"); + break; + } + } + + return node.default_single_output_mapping({const_node}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp new file mode 100644 index 00000000000000..eebba5c917bc05 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs assign_value(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp new file mode 100644 index 00000000000000..77e6307e57bf89 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "batch_norm.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs batch_norm(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto gamma = node.get_ng_input("Scale"); + auto beta = node.get_ng_input("Bias"); + auto mean = node.get_ng_input("Mean"); + auto variance = node.get_ng_input("Variance"); + auto data_layout = node.get_attribute("data_layout"); + + PDPD_ASSERT((data_layout == "NCHW" || data_layout == "NHWC"), + "Not supported input data layout!"); + if (data_layout == "NCHW") + { + return node.default_single_output_mapping( + {std::make_shared( + data, + gamma, + beta, + mean, + variance, + node.get_attribute("epsilon"))}, + {"Y"}); + } + else + { + auto input_order = ngraph::opset6::Constant::create( + ngraph::element::i64, {4}, {0, 3, 1, 2}); + auto data_nchw = + std::make_shared(data, input_order); + auto node_batch_norm = std::make_shared( + data_nchw, + gamma, + beta, + mean, + variance, + node.get_attribute("epsilon")); + auto output_order = ngraph::opset6::Constant::create( + ngraph::element::i64, {4}, {0, 2, 3, 1}); + return node.default_single_output_mapping( + 
{std::make_shared(node_batch_norm, + output_order)}, + {"Y"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp new file mode 100644 index 00000000000000..790ee642698e5d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs batch_norm(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.cpp b/ngraph/frontend/paddlepaddle/src/op/cast.cpp new file mode 100644 index 00000000000000..2cb181f0b24158 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/cast.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "cast.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs cast(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto out_dtype = node.get_attribute("out_dtype"); + + return node.default_single_output_mapping( + {std::make_shared(data, out_dtype)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.hpp b/ngraph/frontend/paddlepaddle/src/op/cast.hpp new file mode 100644 index 00000000000000..c9a575a3e470b2 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/cast.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs cast(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.cpp b/ngraph/frontend/paddlepaddle/src/op/clip.cpp new file mode 100644 index 00000000000000..1909e392eaf2f8 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/clip.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "clip.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs clip(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto min = node.get_attribute("min"); + auto max = node.get_attribute("max"); + PDPD_OP_VALIDATION_CHECK( + node, max >= min, "clip: max value must greater than min value!"); + + return node.default_single_output_mapping( + {std::make_shared(data, min, max)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.hpp b/ngraph/frontend/paddlepaddle/src/op/clip.hpp new file mode 100644 index 00000000000000..bfb1eb4999f039 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/clip.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include 
"node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs clip(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.cpp b/ngraph/frontend/paddlepaddle/src/op/concat.cpp new file mode 100644 index 00000000000000..a9c6fa6388d848 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/concat.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "concat.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs concat(const NodeContext& node) + { + auto data = node.get_ng_inputs("X"); + auto axis = node.get_attribute("axis"); + return node.default_single_output_mapping( + {std::make_shared(data, axis)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.hpp b/ngraph/frontend/paddlepaddle/src/op/concat.hpp new file mode 100644 index 00000000000000..5cf14fb15c6f42 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/concat.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs concat(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp new file mode 100644 index 00000000000000..294e08134f1c27 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "conv2d.hpp" +#include +#include "conv2d_utils.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d(const NodeContext& node) + { + return conv2d_base(node); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp new file mode 100644 index 00000000000000..a2368afab9e4dc --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d(const NodeContext& node_context); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp new file mode 100644 index 00000000000000..bdb63d1905be13 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "conv2d_transpose.hpp" +#include +#include "conv2d_utils.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d_transpose(const NodeContext& node) + { + 
return conv2d_base(node); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.hpp b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.hpp new file mode 100644 index 00000000000000..1de7428e6fd974 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d_transpose.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d_transpose(const NodeContext& node_context); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.cpp new file mode 100644 index 00000000000000..fa7e88ae4b9d42 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "conv2d_utils.hpp" +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + ngraph::op::PadType get_auto_pad(const NodeContext& node) + { + // Default value means use explicitly provided padding values. + ngraph::op::PadType pad_type{ngraph::op::PadType::NOTSET}; + auto padding_algorithm = node.get_attribute("padding_algorithm"); + static std::unordered_map auto_pad_values{ + {"VALID", ngraph::op::PadType::VALID}, + {"SAME", ngraph::op::PadType::SAME_UPPER}, + {"NOTSET", ngraph::op::PadType::NOTSET}, + }; + + const auto pad_val_it = auto_pad_values.find(padding_algorithm); + + if (pad_val_it == auto_pad_values.end()) + { + pad_type = ngraph::op::PadType::NOTSET; + } + else + { + pad_type = pad_val_it->second; + } + + return pad_type; + } + + std::pair get_pads(const NodeContext& node, + const size_t kernel_rank) + { + CoordinateDiff pads(kernel_rank, 0); + + auto pads_int32 = node.get_attribute>("paddings"); + pads = CoordinateDiff{std::begin(pads_int32), std::end(pads_int32)}; + CoordinateDiff pads_begin; + CoordinateDiff pads_end; + + if (pads.size() == kernel_rank * 2) + { + for (size_t i = 0; i < pads.size(); i++) + { + if (i & 0x01) + { + pads_end.push_back(pads[i]); + } + else + { + pads_begin.push_back(pads[i]); + } + } + return {pads_begin, pads_end}; + } + else + { + // No paddings provided or only one side values provided, which means same + // padding at both begin and end of axis. 
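+                        // e.g. for a 2D kernel, paddings = {1, 2} pads the height by 1
+                        // and the width by 2 on both sides of each axis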
+ return {pads, pads}; + } + } + + std::pair get_pads(const NodeContext& node) + { + const auto data_rank = node.get_ng_input("Input").get_partial_shape().rank(); + PDPD_ASSERT(data_rank.get_length() > 2, "the rank of conv input must > 2"); + const auto data_spatial_dims = data_rank.get_length() - 2; + + return get_pads(node, data_spatial_dims); + } + std::shared_ptr get_reshaped_filter(const Output& filters, + const int32_t groups) + { + auto shape_of_filters = std::make_shared(filters); + + auto num_begin = opset6::Constant::create(element::i64, Shape{1}, {0}); + auto num_end = opset6::Constant::create(element::i64, Shape{1}, {1}); + auto num_node = std::make_shared(shape_of_filters, + num_begin, + num_end, + std::vector{0}, + std::vector{0}); + + auto hw_begin = opset6::Constant::create(element::i64, Shape{1}, {1}); + auto hw_end = opset6::Constant::create(element::i64, Shape{1}, {4}); + auto filter_hw_node = + std::make_shared(shape_of_filters, + hw_begin, + hw_end, + std::vector{0}, + std::vector{0}); + + auto groups_node = opset6::Constant::create(element::i64, Shape{1}, {groups}); + auto grouped_num_node = std::make_shared(num_node, groups_node); + auto target_filter_shape = std::make_shared( + OutputVector{groups_node, grouped_num_node, filter_hw_node}, 0); + return std::make_shared(filters, target_filter_shape, false); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.hpp b/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.hpp new file mode 100644 index 00000000000000..6718e2910e9ae7 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d_utils.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + ngraph::op::PadType get_auto_pad(const NodeContext& node); + std::pair get_pads(const NodeContext& node); + std::shared_ptr get_reshaped_filter(const Output& filters, + int32_t groups); + + template + NamedOutputs conv2d_base(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto filters = node.get_ng_input("Filter"); + + const auto strides = node.get_attribute>("strides"); + const auto dilations = node.get_attribute>("dilations"); + const auto auto_pad_type = get_auto_pad(node); + const auto paddings = get_pads(node); + const auto pads_begin = paddings.first; + const auto pads_end = paddings.second; + const auto groups = node.get_attribute("groups"); + const auto data_format = node.get_attribute("data_format"); + // TODO Support Other data layout #55423 + PDPD_ASSERT(data_format == "NCHW", "conv2d only supports NCHW now"); + + if (groups > 1) + { + const auto reshaped_filters = get_reshaped_filter(filters, groups); + + return node.default_single_output_mapping( + {std::make_shared( + data, + reshaped_filters, + ngraph::Strides(strides.begin(), strides.end()), + pads_begin, + pads_end, + ngraph::Strides(dilations.begin(), dilations.end()), + auto_pad_type)}, + {"Output"}); + } + else + { + return node.default_single_output_mapping( + {std::make_shared( + data, + filters, + ngraph::Strides(strides.begin(), strides.end()), + pads_begin, + pads_end, + ngraph::Strides(dilations.begin(), dilations.end()), + auto_pad_type)}, + {"Output"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git 
diff --git a/ngraph/frontend/paddlepaddle/src/op/dropout.cpp b/ngraph/frontend/paddlepaddle/src/op/dropout.cpp
new file mode 100644
index 00000000000000..a6d967f1c3a5aa
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/dropout.cpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "dropout.hpp"
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs dropout(const NodeContext& node)
+                {
+                    auto data = node.get_ng_input("X");
+                    auto dropout_implementation =
+                        node.get_attribute<std::string>("dropout_implementation");
+                    if (dropout_implementation == "downgrade_in_infer")
+                    {
+                        auto dropout_prob = ngraph::opset6::Constant::create(
+                            ngraph::element::f32,
+                            {1},
+                            {1 - node.get_attribute<float>("dropout_prob")});
+                        return node.default_single_output_mapping(
+                            {std::make_shared<ngraph::opset6::Multiply>(data, dropout_prob)},
+                            {"Out"});
+                    }
+                    else
+                    {
+                        return node.default_single_output_mapping(data.get_node_shared_ptr(),
+                                                                  {"Out"});
+                    }
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/dropout.hpp b/ngraph/frontend/paddlepaddle/src/op/dropout.hpp
new file mode 100644
index 00000000000000..f61fede9653cfe
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/dropout.hpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs dropout(const NodeContext& node);
+
+            }
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp
new file mode 100644
index 00000000000000..9a0ef491fdaa0f
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp
@@ -0,0 +1,102 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+
+#include <ngraph/opsets/opset6.hpp>
+#include "elementwise_ops.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                template <typename T>
+                NamedOutputs elementwise_ops(const NodeContext& node)
+                {
+                    auto x = node.get_ng_input("X");
+                    auto y = node.get_ng_input("Y");
+
+                    auto axis = node.get_attribute<int>("axis");
+
+                    PDPD_OP_VALIDATION_CHECK(node,
+                                             x.get_partial_shape().rank().is_static(),
+                                             "elementwise_ops: X rank must be static!");
+                    PDPD_OP_VALIDATION_CHECK(node,
+                                             y.get_partial_shape().rank().is_static(),
+                                             "elementwise_ops: Y rank must be static!");
+                    int64_t x_rank = x.get_partial_shape().rank().get_length();
+                    int64_t y_rank = y.get_partial_shape().rank().get_length();
+
+                    if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank))
+                    {
+                        return node.default_single_output_mapping({std::make_shared<T>(x, y)},
+                                                                  {"Out"});
+                    }
+                    else
+                    {
+                        // This broadcast can be implemented with either ngraph::Reshape or
+                        // ngraph::Broadcast. Since PDPD implies that y_shape is a subsequence
+                        // of x_shape starting at axis, we use ngraph::Reshape, as Paddle2ONNX
+                        // does, which is more friendly to PnP.
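+                        // Illustrative example (values assumed for this sketch):
+                        //   x_shape = [2, 3, 4, 5], y_shape = [3, 4], axis = 1
+                        //   -> broadcast_shape = [1, 3, 4, 1]; y is reshaped to that shape
+                        //      and then broadcast implicitly by the elementwise op.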
+                        auto broadcast_shape = std::vector<int64_t>(x_rank, 1);
+                        PartialShape y_shape = y.get_partial_shape();
+                        int32_t i = 0;
+                        for (auto it = y_shape.begin(); it != y_shape.end(); ++i, ++it)
+                            broadcast_shape[axis + i] = (*it).get_length();
+
+                        auto reshape_node =
+                            ngraph::opset6::Constant::create(ngraph::element::i64,
+                                                             ngraph::Shape{broadcast_shape.size()},
+                                                             broadcast_shape);
+                        auto y_node =
+                            std::make_shared<ngraph::opset6::Reshape>(y, reshape_node, false);
+                        return node.default_single_output_mapping(
+                            {std::make_shared<T>(x, y_node)}, {"Out"});
+                    }
+                }
+
+                //
+                NamedOutputs elementwise_add(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Add>(node_context);
+                }
+
+                NamedOutputs elementwise_sub(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Subtract>(node_context);
+                }
+
+                NamedOutputs elementwise_mul(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Multiply>(node_context);
+                }
+
+                NamedOutputs elementwise_div(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Divide>(node_context);
+                }
+
+                NamedOutputs elementwise_min(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Minimum>(node_context);
+                }
+
+                NamedOutputs elementwise_max(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Maximum>(node_context);
+                }
+
+                NamedOutputs elementwise_pow(const NodeContext& node_context)
+                {
+                    return elementwise_ops<ngraph::opset6::Power>(node_context);
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp
new file mode 100644
index 00000000000000..981dc927421df4
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs elementwise_add(const NodeContext& node_context);
+                NamedOutputs elementwise_sub(const NodeContext& node_context);
+                NamedOutputs elementwise_mul(const NodeContext& node_context);
+                NamedOutputs elementwise_div(const NodeContext& node_context);
+                NamedOutputs elementwise_min(const NodeContext& node_context);
+                NamedOutputs elementwise_max(const NodeContext& node_context);
+                NamedOutputs elementwise_pow(const NodeContext& node_context);
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
diff --git a/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp
new file mode 100644
index 00000000000000..8ee6e29e8f96aa
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/expand_v2.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "expand_v2.hpp"
+#include <memory>
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs expand_v2(const NodeContext& node)
+                {
+                    auto x = node.get_ng_input("X");
+                    Output<Node> shapeExpectedNode;
+                    if (node.has_ng_input("Shape"))
+                    {
+                        shapeExpectedNode = node.get_ng_input("Shape");
+                    }
+                    else
+                    {
+                        std::vector<int32_t> shapeExpected;
+                        if (node.has_attribute<std::vector<int32_t>>("shape"))
+                        {
+                            shapeExpected = node.get_attribute<std::vector<int32_t>>("shape");
+                        }
+                        else
+                        {
+                            throw std::runtime_error("expand: has no shape attribute");
+                        }
+
+                        shapeExpectedNode = ngraph::opset6::Constant::create(
+                            ngraph::element::i32, {shapeExpected.size()}, shapeExpected);
+                    }
+                    return
node.default_single_output_mapping( + {std::make_shared(x, shapeExpectedNode)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/expand_v2.hpp b/ngraph/frontend/paddlepaddle/src/op/expand_v2.hpp new file mode 100644 index 00000000000000..bd6e7707c144f4 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/expand_v2.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs expand_v2(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp new file mode 100644 index 00000000000000..958145ba058dcb --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fill_constant.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs fill_constant(const NodeContext& node) + { + auto shape = node.get_attribute>("shape"); + auto dtype = node.get_attribute("dtype"); + // TODO to Support Tensor/Tuple Input add more tests for other data types #55262 + Output value_node; + if (dtype == element::i32) + { + int32_t value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else if (dtype == element::f32) + { + float value = node.get_attribute("value"); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else if (dtype == element::i64) + { + int64_t value = static_cast(node.get_attribute("value")); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else + { + PDPD_ASSERT(false, "fill_constant only supports i32, f32, i64"); + } + + auto shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape); + return node.default_single_output_mapping( + {std::make_shared(value_node, shape_node)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp new file mode 100644 index 00000000000000..d260b6333d8870 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs fill_constant(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp new file mode 100644 index 00000000000000..bd97005a6c9fe6 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fill_constant_batch_size_like.hpp" + 
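+// A brief sketch of the semantics handled below (illustrative values, not part
+// of the patch): the output shape is taken from the "shape" attribute, except
+// that dimension output_dim_idx is copied from dimension input_dim_idx of the
+// runtime "Input" tensor. E.g. Input shape [8, 3, 32, 32] with shape = {1, 5},
+// input_dim_idx = 0, output_dim_idx = 0 yields an [8, 5] tensor filled with "value".
+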
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs fill_constant_batch_size_like(const NodeContext& node)
+                {
+                    // TODO: support data types other than FP32 #55263
+                    auto input_dim_idx = node.get_attribute<int32_t>("input_dim_idx", 0);
+                    auto output_dim_idx = node.get_attribute<int32_t>("output_dim_idx", 0);
+                    auto value = node.get_attribute<float>("value");
+                    auto shapes = node.get_attribute<std::vector<int32_t>>("shape");
+                    auto input = node.get_ng_input("Input");
+                    auto partial_shape = input.get_partial_shape();
+                    PDPD_OP_VALIDATION_CHECK(
+                        node,
+                        partial_shape.is_static(),
+                        "fill_constant_batch_size_like: must use static shape.");
+                    auto static_shape = partial_shape.get_shape();
+                    PDPD_OP_VALIDATION_CHECK(node,
+                                             input_dim_idx < (int32_t)static_shape.size(),
+                                             "fill_constant_batch_size_like: input_dim_idx "
+                                             "should not exceed input dims.");
+                    PDPD_OP_VALIDATION_CHECK(node,
+                                             output_dim_idx < (int32_t)shapes.size(),
+                                             "fill_constant_batch_size_like: output_dim_idx "
+                                             "should not exceed shapes dims.");
+                    shapes[output_dim_idx] = static_shape[input_dim_idx];
+                    auto dtype = node.get_attribute<ngraph::element::Type>("dtype");
+                    return node.default_single_output_mapping(
+                        {std::make_shared<ngraph::opset6::Constant>(
+                            dtype, Shape(shapes.begin(), shapes.end()), value)},
+                        {"Out"});
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp
new file mode 100644
index 00000000000000..9cbd62b7a03ee4
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs fill_constant_batch_size_like(const NodeContext& node);
+
+            }
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp
new file mode 100644
index 00000000000000..396f2fec241833
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "flatten_contiguous_range.hpp"
+#include <memory>
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs flatten_contiguous_range(const NodeContext& node)
+                {
+                    auto x_node = node.get_ng_input("X");
+                    auto shape_of_x = std::make_shared<opset6::ShapeOf>(x_node);
+                    int dims = x_node.get_partial_shape().rank().get_length();
+                    auto start_axis = node.get_attribute<int32_t>("start_axis");
+                    auto stop_axis = node.get_attribute<int32_t>("stop_axis");
+
+                    auto axis1_begin = opset6::Constant::create(element::i64, {1}, {0});
+                    auto axis1_end = opset6::Constant::create(element::i64, {1}, {start_axis});
+                    auto axis1 = std::make_shared<opset6::StridedSlice>(shape_of_x,
+                                                                        axis1_begin,
+                                                                        axis1_end,
+                                                                        std::vector<int64_t>{0},
+                                                                        std::vector<int64_t>{0});
+                    OutputVector axes{axis1,
+                                      opset6::Constant::create(element::i64, Shape{1}, {-1})};
+
+                    if (stop_axis < dims - 1)
+                    {
+                        auto axis2_begin =
+                            opset6::Constant::create(element::i64, {1}, {stop_axis + 1});
+                        auto axis2_end = opset6::Constant::create(element::i64, {1}, {dims});
+                        auto axis2_node =
+                            std::make_shared<opset6::StridedSlice>(shape_of_x,
+                                                                   axis2_begin,
+
axis2_end, + std::vector{0}, + std::vector{0}); + axes.push_back(axis2_node); + } + + auto new_shape_node = std::make_shared(axes, 0); + return node.default_single_output_mapping( + {std::make_shared(x_node, new_shape_node, true)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp new file mode 100644 index 00000000000000..46fcb42b51f98a --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs flatten_contiguous_range(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/interp.cpp b/ngraph/frontend/paddlepaddle/src/op/interp.cpp new file mode 100644 index 00000000000000..8d62a1a821d4f5 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/interp.cpp @@ -0,0 +1,182 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "interp.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + std::shared_ptr + calculate_output_shape_based_on_scales(const Output& data, + const std::vector& scale, + Output& scales) + { + FRONT_END_GENERAL_CHECK(scale.size() > 0); + if (scale.size() == 1) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, 1, scale[0], scale[0]}); + else if (scale.size() == 2) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, 1, scale[0], scale[1]}); + else if (scale.size() == 3) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, scale[0], scale[1], scale[2]}); + else + scales = opset6::Constant::create( + element::f32, + Shape{scale.size()}, + std::vector(scale.begin(), scale.end())); + const auto shape_of_data = std::make_shared( + std::make_shared(data), scales.get_element_type()); + const auto multiply = std::make_shared(shape_of_data, scales); + const auto output_shape = + std::make_shared(multiply, ngraph::element::i64); + + return output_shape; + } + + std::shared_ptr + calculate_scales_based_on_sizes(const Output& data, + const Output& sizes) + { + const float epsilon = 1.0e-5; + const auto shape_of_data = std::make_shared( + std::make_shared(data), ngraph::element::f32); + const auto converted_sizes = + std::make_shared(sizes, ngraph::element::f32); + const auto divide = + std::make_shared(converted_sizes, shape_of_data); + const auto eps_node = + std::make_shared(ngraph::element::f32, Shape{}, epsilon); + const auto scales = std::make_shared(divide, eps_node); + + return scales; + } + + std::shared_ptr + extract_out_sizes(const Output& data, + const std::vector& out_sizes) + { + const auto shape_of_x = std::make_shared(data); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + auto hw_node = + opset6::Constant::create(element::i64, Shape{2}, out_sizes); + return std::make_shared(OutputVector{nc_node, hw_node}, 0); + } + + // TODO support different data_layout 
#55170 + + NamedOutputs interpolate(const NodeContext& node, + const ngraph::opset6::Interpolate::InterpolateMode& mode) + { + auto x = node.get_ng_input("X"); + using InterpolateMode = ngraph::opset6::Interpolate::InterpolateMode; + using CoordinateTransformMode = + ngraph::opset6::Interpolate::CoordinateTransformMode; + using Nearest_mode = ngraph::opset6::Interpolate::NearestMode; + using InterpolateAttrs = ngraph::opset6::Interpolate::InterpolateAttrs; + using ShapeCalcMode = ngraph::opset6::Interpolate::ShapeCalcMode; + + InterpolateAttrs attrs; + + attrs.mode = mode; + + auto out_w = node.get_attribute("out_w"); + auto out_h = node.get_attribute("out_h"); + auto scale = node.get_attribute>("scale"); + Output scales; + Output target_spatial_shape; + + if (node.has_ng_input("OutSize")) + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + auto hw_shape = node.get_ng_input("OutSize"); + const auto shape_of_x = std::make_shared(x); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = + std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + target_spatial_shape = std::make_shared( + OutputVector{nc_node, + std::make_shared(hw_shape, element::i64)}, + 0); + scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } + else if (out_w <= 0 || out_h <= 0) + { + attrs.shape_calculation_mode = ShapeCalcMode::scales; + target_spatial_shape = + calculate_output_shape_based_on_scales(x, scale, scales); + } + else + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + target_spatial_shape = extract_out_sizes(x, {out_h, out_w}); + scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } + + bool align_corners = node.get_attribute("align_corners"); + int32_t align_mode = node.get_attribute("align_mode"); + + if (mode == InterpolateMode::nearest) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 1) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 0) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::half_pixel; + } + else if (align_corners) + { + attrs.coordinate_transformation_mode = + CoordinateTransformMode::align_corners; + } + + attrs.nearest_mode = Nearest_mode::round_prefer_floor; + attrs.antialias = false; + attrs.pads_begin = {0, 0, 0, 0}; + attrs.pads_end = {0, 0, 0, 0}; + + return node.default_single_output_mapping( + {std::make_shared( + x, target_spatial_shape, scales, attrs)}, + {"Out"}); + } + + NamedOutputs bilinear_interp_v2(const NodeContext& node) + { + auto mode = ngraph::opset6::Interpolate::InterpolateMode::linear_onnx; + return interpolate(node, mode); + } + + NamedOutputs nearest_interp_v2(const NodeContext& node) + { + auto mode = ngraph::opset6::Interpolate::InterpolateMode::nearest; + return interpolate(node, mode); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/interp.hpp b/ngraph/frontend/paddlepaddle/src/op/interp.hpp new file mode 100644 index 00000000000000..5738a6c2be266a --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/interp.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include 
"node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + // TODO support other interp such as linear trilinear, bicubic. etc #55397 + NamedOutputs nearest_interp_v2(const NodeContext& node_context); + NamedOutputs bilinear_interp_v2(const NodeContext& node_context); + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp new file mode 100644 index 00000000000000..ec6498ebb0a58c --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "leakyrelu.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs leaky_relu(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto alpha = ngraph::opset6::Constant::create( + ngraph::element::f32, {1}, {node.get_attribute("alpha")}); + return node.default_single_output_mapping( + {std::make_shared(data, alpha)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp new file mode 100644 index 00000000000000..7bb181c8eeb5d3 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs leaky_relu(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/lstm.cpp b/ngraph/frontend/paddlepaddle/src/op/lstm.cpp new file mode 100644 index 00000000000000..af8988ec0c1d94 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/lstm.cpp @@ -0,0 +1,238 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "lstm.hpp" +#include +#include "ngraph/builder/reshape.hpp" +#include "ngraph/builder/split.hpp" +#include "paddlepaddle_frontend/utility.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + namespace + { + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + enum class LSTMInput + { + LSTM_INPUT_X, + LSTM_INPUT_W, + LSTM_INPUT_R, + LSTM_INPUT_B, + LSTM_INPUT_SEQ_LENGTHS, + LSTM_INPUT_INIT_H, + LSTM_INPUT_INIT_C, + LSTM_INPUT_P + }; + + struct LSTMNgInputMap + { + explicit LSTMNgInputMap(const NodeContext& node, + Output& prev_output, + int layer) + { + auto input_x = builder::opset1::reorder_axes(prev_output, {1, 0, 2}); + //[begin. end) + auto weight_list = node.get_ng_inputs("WeightList"); + auto weight_begin = weight_list.begin(); + auto weight_end = std::next(weight_begin, weight_list.size() / 2); + auto bias_begin = weight_end; + int bidirect_len = node.get_attribute("is_bidirec") ? 
4 : 2; + int layer_weight_start = layer * bidirect_len; + int layer_weight_end = bidirect_len + layer * bidirect_len; + int layer_bias_start = layer * bidirect_len; + int layer_bias_end = layer * bidirect_len + bidirect_len; + OutputVector layer_input_weight; + OutputVector layer_hidden_weight; + OutputVector layer_weight_bias; + OutputVector layer_hidden_bias; + + m_input_map[LSTMInput::LSTM_INPUT_X] = input_x; + // Parsing W R B + auto axis_const = + std::make_shared(element::i64, Shape{}, 0); + for (int i = layer_weight_start; i < layer_weight_end; i++) + { + auto weight_node = std::next(weight_begin, i); + if (i & 0x1) + layer_hidden_weight.push_back( + std::make_shared(*weight_node, + axis_const)); + else + layer_input_weight.push_back( + std::make_shared(*weight_node, + axis_const)); + } + + for (int i = layer_bias_start; i < layer_bias_end; i++) + { + auto weight_node = std::next(bias_begin, i); + + if (i & 0x1) + layer_hidden_bias.push_back(std::make_shared( + *weight_node, axis_const)); + else + layer_weight_bias.push_back(std::make_shared( + *weight_node, axis_const)); + } + + auto input_weight = + std::make_shared(layer_input_weight, 0); + auto hidden_weight = + std::make_shared(layer_hidden_weight, 0); + auto weight_bias = + std::make_shared(layer_weight_bias, 0); + auto hidden_bias = + std::make_shared(layer_hidden_bias, 0); + auto bias = std::make_shared(weight_bias, hidden_bias); + m_input_map[LSTMInput::LSTM_INPUT_W] = + ngraph::op::util::convert_lstm_node_format( + input_weight, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + m_input_map[LSTMInput::LSTM_INPUT_R] = + ngraph::op::util::convert_lstm_node_format( + hidden_weight, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + m_input_map[LSTMInput::LSTM_INPUT_B] = + ngraph::op::util::convert_lstm_node_format( + bias, + ngraph::op::util::LSTMWeightsFormat::IFCO, + ngraph::op::util::LSTMWeightsFormat::FICO, + 1); + + // Get dimensions needed for default inputs creation + // Parsing init hidden state + auto shape_of_x = std::make_shared(input_x); + + auto axes = opset6::Constant::create(element::i64, Shape{1}, {0}); + + auto batch_size_node = std::make_shared( + shape_of_x, + opset6::Constant::create(element::i64, Shape{1}, {0}), + axes); + + auto seq_length_node = std::make_shared( + shape_of_x, + opset6::Constant::create(element::i64, Shape{1}, {1}), + axes); + + // TODO Specify SEQ_LEN for each batch #55404 + m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = + std::make_shared(seq_length_node, + batch_size_node); + + auto init_states = node.get_ng_inputs("PreState"); + // 0 for init_h, 1 for init_cell, update bidirect_len for init states + bidirect_len = node.get_attribute("is_bidirec") ? 
2 : 1; + + auto h_begin = + opset6::Constant::create(element::i64, {1}, {layer * bidirect_len}); + auto h_end = opset6::Constant::create( + element::i64, Shape{1}, {layer * bidirect_len + bidirect_len}); + auto c_begin = + opset6::Constant::create(element::i64, {1}, {layer * bidirect_len}); + auto c_end = opset6::Constant::create( + element::i64, {1}, {layer * bidirect_len + bidirect_len}); + + m_input_map[LSTMInput::LSTM_INPUT_INIT_H] = + builder::opset1::reorder_axes( + std::make_shared(init_states[0], + h_begin, + h_end, + std::vector{0}, + std::vector{0}), + {1, 0, 2}); + m_input_map[LSTMInput::LSTM_INPUT_INIT_C] = + builder::opset1::reorder_axes( + std::make_shared(init_states[1], + c_begin, + c_end, + std::vector{0}, + std::vector{0}), + {1, 0, 2}); + } + + Output& at(const LSTMInput& key) + { + return m_input_map.at(key); + } + + std::map> m_input_map; + }; + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ATTRIBUTES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + struct LSTMAttributes + { + explicit LSTMAttributes(const NodeContext& node) + : m_direction( + node.get_attribute("is_bidirec") + ? ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL + : ngraph::op::RecurrentSequenceDirection::FORWARD) + , m_hidden_size(node.get_attribute("hidden_size")) + , m_layers(node.get_attribute("num_layers")) + + {}; + + ngraph::op::RecurrentSequenceDirection m_direction; + int32_t m_hidden_size; + int32_t m_layers; + }; + } // namespace + NamedOutputs lstm(const NodeContext& node) + { + auto mode = node.get_attribute("mode"); + PDPD_ASSERT(mode == "LSTM", "RNN only support LSTM now"); + auto prev_inputs = node.get_ng_inputs("Input"); + Output prev_output = prev_inputs[0]; + LSTMAttributes attrs(node); + OutputVector final_h; + OutputVector final_c; + auto axis_const = std::make_shared(element::i64, Shape{}, 0); + for (int i = 0; i < attrs.m_layers; i++) + { + LSTMNgInputMap input_map(node, prev_output, i); + auto lstm_sequence = std::make_shared( + input_map.at(LSTMInput::LSTM_INPUT_X), + input_map.at(LSTMInput::LSTM_INPUT_INIT_H), + input_map.at(LSTMInput::LSTM_INPUT_INIT_C), + input_map.at(LSTMInput::LSTM_INPUT_SEQ_LENGTHS), + input_map.at(LSTMInput::LSTM_INPUT_W), + input_map.at(LSTMInput::LSTM_INPUT_R), + input_map.at(LSTMInput::LSTM_INPUT_B), + attrs.m_hidden_size, + attrs.m_direction); + prev_output = + builder::opset1::reorder_axes(lstm_sequence->output(0), {2, 0, 1, 3}); + auto out_shape = + opset6::Constant::create(element::i64, Shape{3}, {0, 0, -1}); + prev_output = + std::make_shared(prev_output, out_shape, true); + + final_h.push_back( + builder::opset1::reorder_axes(lstm_sequence->output(1), {1, 0, 2})); + final_c.push_back( + builder::opset1::reorder_axes(lstm_sequence->output(2), {1, 0, 2})); + } + + NamedOutputs named_outputs; + named_outputs["Out"] = {prev_output}; + named_outputs["State"] = {std::make_shared(final_h, 0), + std::make_shared(final_c, 0)}; + return named_outputs; + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/lstm.hpp b/ngraph/frontend/paddlepaddle/src/op/lstm.hpp new file mode 100644 index 00000000000000..a4aa50b7fcfd25 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/lstm.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs lstm(const NodeContext& node); + + } + } // namespace 
pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/matmul.cpp b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp new file mode 100644 index 00000000000000..20c4e4ebb2aeaa --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matmul.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs matmul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + auto alpha = node.get_attribute("alpha"); + auto transpose_a = node.get_attribute("transpose_a"); + auto transpose_b = node.get_attribute("transpose_b"); + auto mm = + std::make_shared(x, y, transpose_a, transpose_b); + auto alpha_node = + ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {alpha}); + return node.default_single_output_mapping( + {std::make_shared(mm, alpha_node)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/matmul.hpp b/ngraph/frontend/paddlepaddle/src/op/matmul.hpp new file mode 100644 index 00000000000000..be5caad71735c8 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/matmul.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs matmul(const NodeContext& node); + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/mul.cpp b/ngraph/frontend/paddlepaddle/src/op/mul.cpp new file mode 100644 index 00000000000000..bb03f97eda74b8 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/mul.cpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "mul.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs mul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + PDPD_OP_VALIDATION_CHECK(node, + x.get_partial_shape().rank().is_static(), + "matmul: X rank must be static!"); + int64_t x_rank = x.get_partial_shape().rank().get_length(); + PDPD_OP_VALIDATION_CHECK(node, + y.get_partial_shape().rank().is_static() && + y.get_partial_shape().rank().get_length() == 2, + "matmul: Y rank must be static, and 2!"); + if (x_rank > 2) + { + auto shape = std::make_shared(x); + int64_t x_num_col_dims = node.get_attribute("x_num_col_dims"); + auto axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); + auto split_lengths = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {x_num_col_dims, x_rank - x_num_col_dims}); + auto split = std::make_shared( + shape, axis, split_lengths); + auto f_dim_red_axis = + ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); + auto first_dim_reduce = std::make_shared( + split->output(0), f_dim_red_axis); + auto f_dim_shape = + ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); + auto first_dim = std::make_shared( + first_dim_reduce, f_dim_shape, false); + auto s_dim_red_axis = + ngraph::opset6::Constant::create(ngraph::element::i64, {}, 
{0}); + auto second_dim_reduce = std::make_shared( + split->output(1), s_dim_red_axis); + auto s_dim_shape = + ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); + auto second_dim = std::make_shared( + second_dim_reduce, s_dim_shape, false); + auto out_shape = std::make_shared( + ngraph::NodeVector{first_dim, second_dim}, 0); + auto x_reshaped = + std::make_shared(x, out_shape, false); + return node.default_single_output_mapping( + {std::make_shared(x_reshaped, y)}, {"Out"}); + } + return node.default_single_output_mapping( + {std::make_shared(x, y)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/mul.hpp b/ngraph/frontend/paddlepaddle/src/op/mul.hpp new file mode 100644 index 00000000000000..7d19a10e6ae7e0 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/mul.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs mul(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp new file mode 100644 index 00000000000000..e99ad8e3e103f4 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "multiclass_nms.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs multiclass_nms(const NodeContext& node) + { + auto bboxes = node.get_ng_input("BBoxes"); + auto scores = node.get_ng_input("Scores"); + + auto score_threshold = node.get_attribute("score_threshold"); + auto iou_threshold = node.get_attribute("nms_threshold"); + auto max_output_boxes_per_class = node.get_attribute("nms_top_k"); + + // TODO: dtype, scaler/vector attr, and more strick attributes check + auto node_max_output_boxes_per_class = ngraph::opset6::Constant::create( + element::i32, Shape{1}, {max_output_boxes_per_class}); + auto node_iou_threshold = ngraph::opset6::Constant::create( + element::f32, Shape{1}, {iou_threshold}); + auto node_score_threshold = ngraph::opset6::Constant::create( + element::f32, Shape{1}, {score_threshold}); + + return node.default_single_output_mapping( + {std::make_shared( + bboxes, + scores, + node_max_output_boxes_per_class, + node_iou_threshold, + node_score_threshold)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp new file mode 100644 index 00000000000000..956d7fa72a2bd6 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs multiclass_nms(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git 
a/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp new file mode 100644 index 00000000000000..530a382a65304e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp @@ -0,0 +1,117 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pad3d.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pad3d(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto mode = node.get_attribute("mode"); + auto value = node.get_attribute("value", 0.0); + auto data_format = node.get_attribute("data_format"); + + auto paddings = std::vector(6, 0); + + // padding of type int feature only supported by PaddlePaddle 'develop' + // version(>=2.1.0) + if (node.has_attribute>("paddings")) + { + auto paddings_vector = node.get_attribute>("paddings"); + PDPD_OP_VALIDATION_CHECK(node, + paddings_vector.size() == 6, + "paddings Params size should be 6 in pad3d!"); + paddings = paddings_vector; + } + else if (node.has_attribute("paddings")) + { + auto padding_int = node.get_attribute("paddings"); + for (int i = 0; i < 6; i++) + paddings[i] = padding_int; + } + else + { + throw ngraph::ngraph_error("Unsupported paddings attribute!"); + } + + auto pads_begin = std::vector(5, 0); + auto pads_end = std::vector(5, 0); + + Output values; + Output padding_begin; + Output padding_end; + + ngraph::op::PadMode pad_mode; + // TODO Support Circular mode in #55704 + if (mode == "constant") + { + pad_mode = ngraph::op::PadMode::CONSTANT; + values = ngraph::opset6::Constant::create( + element::f32, ngraph::Shape{}, {value}); + } + else if (mode == "reflect") + { + pad_mode = ngraph::op::PadMode::REFLECT; + } + else if (mode == "replicate") + { + pad_mode = ngraph::op::PadMode::EDGE; + } + else + { + throw ngraph::ngraph_error("Unsupported 3d paddings mode: [" + mode + "]"); + } + + if (data_format == "NCDHW") + { + pads_begin[4] = paddings[0]; // left + pads_end[4] = paddings[1]; // right + pads_begin[3] = paddings[2]; // top + pads_end[3] = paddings[3]; // down + pads_begin[2] = paddings[4]; // front + pads_end[2] = paddings[5]; // back + } + else if (data_format == "NDHWC") + { + pads_begin[3] = paddings[0]; // left + pads_end[3] = paddings[1]; // right + pads_begin[2] = paddings[2]; // top + pads_end[2] = paddings[3]; // down + pads_begin[1] = paddings[4]; // front + pads_end[1] = paddings[5]; // back + } + else + { + throw ngraph::ngraph_error("Unsupported 3d paddings data_format: [" + + data_format + "]"); + } + + padding_begin = ngraph::opset6::Constant::create( + element::i32, ngraph::Shape{pads_begin.size()}, pads_begin); + padding_end = ngraph::opset6::Constant::create( + element::i32, ngraph::Shape{pads_end.size()}, pads_end); + + if (mode == "constant") + return node.default_single_output_mapping( + {std::make_shared( + data, padding_begin, padding_end, values, pad_mode)}, + {"Out"}); + else + return node.default_single_output_mapping( + {std::make_shared( + data, padding_begin, padding_end, pad_mode)}, + {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp b/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp new file mode 100644 index 00000000000000..da15f027cd894b --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 
+// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pad3d(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp new file mode 100644 index 00000000000000..b4565f86de8248 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp @@ -0,0 +1,256 @@ +//***************************************************************************** +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +//***************************************************************************** + +#include "pool2d.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + // helper func - get pad_begin and pad_end + static void get_paddings(const NodeContext& node, + ngraph::Shape& pad_begin, + ngraph::Shape& pad_end, + ngraph::op::PadType& auto_pad) + { + // + auto pad_algo = node.get_attribute("padding_algorithm"); + if (pad_algo == "SAME") + { + auto_pad = ngraph::op::PadType::SAME_UPPER; + } + else if (pad_algo == "VALID") + { + auto_pad = ngraph::op::PadType::VALID; + } + else if ((pad_algo == "EXPLICIT") || pad_algo.empty()) + { // adaptive_maxpool with no such attr. + auto_pad = ngraph::op::PadType::EXPLICIT; + } + else + { + throw std::runtime_error("Unsupported pooling padding_algorithm " + + pad_algo); + } + + /*If pool padding size is a tuple or list, it could be in three forms: + [pad_height, pad_width] or [pad_height_top, pad_height_bottom, pad_width_left, + pad_width_right], and when data_format is “NCHW”, pool_padding can be in the + form [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, + pad_width_right]]. when data_format is “NHWC”, pool_padding can be in the form + [[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], + [0,0]]. Otherwise, the pool padding size will be a square of an int.*/ + auto paddings = node.get_attribute>("paddings"); + auto data_format = node.get_attribute("data_format"); + + // TODO: need to support NHWC input #55483 + switch (paddings.size()) + { + case 2: + pad_begin = Shape{static_cast(paddings[0]), + static_cast(paddings[1])}; + pad_end = pad_begin; + break; + case 4: + pad_begin = Shape{static_cast(paddings[0]), + static_cast(paddings[2])}; + pad_end = Shape{static_cast(paddings[1]), + static_cast(paddings[3])}; + break; + default: + throw std::runtime_error("Unsupported pooling paddings " + + std::to_string(paddings.size())); + } + } + + NamedOutputs pool2d(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + + auto pooling_type = node.get_attribute("pooling_type"); + auto global_pooling = node.get_attribute("global_pooling"); + auto adaptive = node.get_attribute("adaptive"); + auto kernel_shape = node.get_attribute>("ksize"); + + auto rounding_type = node.get_attribute("ceil_mode") + ? 
ngraph::op::RoundingType::CEIL + : ngraph::op::RoundingType::FLOOR; + + if (pooling_type.empty()) + { + pooling_type = "max"; + } + + PDPD_ASSERT((pooling_type == "max") || (pooling_type == "avg"), + "pool2d: not supported pooling type !"); + PDPD_ASSERT(kernel_shape.size() == 1 || kernel_shape.size() == 2, + "pool2d: ksize must be 1 or 2!"); + + PartialShape input_shape = data.get_partial_shape(); + + int32_t input_rank = input_shape.rank().get_length(); + PDPD_ASSERT(input_rank >= 2, "input tensor rank must be greater than 2"); + + auto auto_pad = ngraph::op::PadType::EXPLICIT; + ngraph::Shape pad_begin, pad_end; + get_paddings(node, pad_begin, pad_end, auto_pad); + + if (global_pooling || + (adaptive && std::any_of(kernel_shape.begin(), + kernel_shape.end(), + [](int32_t i) { return i == 1; }))) + { + if (pooling_type == "max") + { + auto axes = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {input_rank - 2, input_rank - 1}); + return node.default_single_output_mapping( + {std::make_shared(data, axes, true)}, + {"Out"}); + } + else + { + auto axes = ngraph::opset6::Constant::create( + ngraph::element::i64, {2}, {input_rank - 2, input_rank - 1}); + return node.default_single_output_mapping( + {std::make_shared(data, axes, true)}, + {"Out"}); + } + } + else if (adaptive) + { + PDPD_ASSERT(input_shape[2].is_static() && input_shape[3].is_static(), + "pool2d: spatial dim must be static when using adaptive pool"); + uint64_t pool_size_Height, pool_size_Width; + uint64_t input_h = input_shape[input_rank - 2].get_length(); + uint64_t input_w = input_shape[input_rank - 1].get_length(); + + if (kernel_shape.size() == 1) + { + // Not tested: implemented according to spec, but can't generate real + // model to test + pool_size_Height = pool_size_Width = kernel_shape[0]; + } + else + { + pool_size_Height = kernel_shape[0]; + pool_size_Width = kernel_shape[1]; + } + + uint64_t stride_h = int64_t(input_h / pool_size_Height); + uint64_t stride_w = int64_t(input_w / pool_size_Width); + uint64_t kernel_h = input_h - (pool_size_Height - 1) * stride_h; + uint64_t kernel_w = input_w - (pool_size_Width - 1) * stride_w; + + PDPD_ASSERT(stride_h >= 1 && stride_w >= 1, + "Pool2d stride must be greater than 1"); + + if (pooling_type == "max") + { + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides{stride_h, stride_w}, + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + rounding_type, + auto_pad)}, + {"Out"}); + } + else + { + bool exclude_pad = node.get_attribute("exclusive", false); + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides{stride_h, stride_w}, + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + exclude_pad, + rounding_type, + auto_pad)}, + {"Out"}); + } + } + else + { + auto strides = node.get_attribute>("strides"); + auto paddings = node.get_attribute>("paddings"); + + uint64_t kernel_h, kernel_w; + if (kernel_shape.size() == 1) + { + // Not tested: implemented according to spec, but can't generate real + // model to test + kernel_h = kernel_w = kernel_shape[0]; + } + else + { + kernel_h = kernel_shape[0]; + kernel_w = kernel_shape[1]; + } + + PDPD_ASSERT(kernel_h > 0 && kernel_w > 0, + "pool2d kernel shape must be greater than 0"); + + // Note: this shape check is only valid when the spatial dim of input_shape + // is static. 
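+                    // Illustrative example (assumed values): a 3x3 input with
+                    // pad_begin = pad_end = {1, 1} and a 7x7 kernel gives
+                    // 3 + 1 + 1 = 5 < 7, so the kernel is clipped to 5x5 and a
+                    // single window covers the whole padded input.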
+ if (input_shape[2].is_static() && input_shape[3].is_static()) + { + uint64_t input_h = input_shape[input_rank - 2].get_length(); + uint64_t input_w = input_shape[input_rank - 1].get_length(); + if ((input_h > 0) && (input_h + pad_begin[0] + pad_end[0] < kernel_h)) + { + kernel_h = input_h + pad_begin[0] + pad_end[0]; + } + if ((input_w > 0) && (input_w + pad_begin[1] + pad_end[1] < kernel_w)) + { + kernel_w = input_w + pad_begin[1] + pad_end[1]; + } + } + + if (pooling_type == "max") + { + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides(strides.begin(), strides.end()), + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + rounding_type, + auto_pad)}, + {"Out"}); + } + else + { + bool exclude_pad = node.get_attribute("exclusive", false); + return node.default_single_output_mapping( + {std::make_shared( + data, + ngraph::Strides(strides.begin(), strides.end()), + pad_begin, + pad_end, + ngraph::Shape{kernel_h, kernel_w}, + exclude_pad, + rounding_type, + auto_pad)}, + {"Out"}); + } + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp new file mode 100644 index 00000000000000..f43b7d61e5b987 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pool2d(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/range.cpp b/ngraph/frontend/paddlepaddle/src/op/range.cpp new file mode 100644 index 00000000000000..972374b617170c --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/range.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "range.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs range(const NodeContext& node) + { + auto start = node.get_ng_input("Start"); + auto stop = node.get_ng_input("End"); + auto step = node.get_ng_input("Step"); + auto type = node.get_out_port_type("Out"); + PDPD_ASSERT(type == element::i64 || type == element::i32 || + type == element::f32, + "Only supports int32, int64, float32"); + + const auto axis = ngraph::opset6::Constant::create(element::i64, Shape{}, {0}); + auto start_scalar = std::make_shared(start, axis); + auto stop_scalar = std::make_shared(stop, axis); + auto step_scalar = std::make_shared(step, axis); + + // TODO to support other data types other than FP32 #55267 + return node.default_single_output_mapping( + {std::make_shared( + start_scalar, stop_scalar, step_scalar, type)}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/range.hpp b/ngraph/frontend/paddlepaddle/src/op/range.hpp new file mode 100644 index 00000000000000..3ef2fc7f2e8a95 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/range.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace 
frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs range(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.cpp b/ngraph/frontend/paddlepaddle/src/op/relu.cpp new file mode 100644 index 00000000000000..68d1cca3203cd3 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/relu.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "relu.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs relu(const NodeContext& node) + { + return node.default_single_output_mapping( + {std::make_shared(node.get_ng_input("X"))}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.hpp b/ngraph/frontend/paddlepaddle/src/op/relu.hpp new file mode 100644 index 00000000000000..7a63e7f89d8317 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/relu.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs relu(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp new file mode 100644 index 00000000000000..61fbe1101aa83e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reshape2.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs reshape2(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + if (!node.has_ng_input("Shape") && !node.has_ng_input("ShapeTensor")) + { + auto shape_attr = node.get_attribute>("shape"); + auto shape_node = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_attr.size()}, shape_attr); + return node.default_single_output_mapping( + {std::make_shared(data, shape_node, true)}, + {"Out"}); + } + else + { + FRONT_END_NOT_IMPLEMENTED("reshape2 with shape as input"); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp b/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp new file mode 100644 index 00000000000000..614aeddd2dbb97 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs reshape2(const NodeContext& node); + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/rnn.cpp b/ngraph/frontend/paddlepaddle/src/op/rnn.cpp new file mode 100644 index 00000000000000..d677d2d652646e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/rnn.cpp @@ -0,0 +1,28 @@ +// 
Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "rnn.hpp" +#include +#include "lstm.hpp" +#include "paddlepaddle_frontend/utility.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs rnn(const NodeContext& node) + { + auto mode = node.get_attribute("mode"); + PDPD_ASSERT(mode == "LSTM", "RNN only support LSTM now"); + return lstm(node); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/rnn.hpp b/ngraph/frontend/paddlepaddle/src/op/rnn.hpp new file mode 100644 index 00000000000000..82e68ddfc94530 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/rnn.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs rnn(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.cpp b/ngraph/frontend/paddlepaddle/src/op/scale.cpp new file mode 100644 index 00000000000000..2c7a967558f443 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/scale.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "scale.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs scale(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto scale = ngraph::opset6::Constant::create( + ngraph::element::f32, {1}, {node.get_attribute("scale")}); + auto bias = ngraph::opset6::Constant::create( + ngraph::element::f32, {1}, {node.get_attribute("bias")}); + auto bias_after_scale = node.get_attribute("bias_after_scale"); + auto fp32_data = std::make_shared(data, element::f32); + if (!bias_after_scale) + { + auto node_add = std::make_shared(fp32_data, bias); + return node.default_single_output_mapping( + {std::make_shared(node_add, scale)}, {"Out"}); + } + else + { + auto node_multiply = + std::make_shared(fp32_data, scale); + return node.default_single_output_mapping( + {std::make_shared(node_multiply, bias)}, {"Out"}); + } + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.hpp b/ngraph/frontend/paddlepaddle/src/op/scale.hpp new file mode 100644 index 00000000000000..94acdb27ef204f --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/scale.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs scale(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/shape.cpp b/ngraph/frontend/paddlepaddle/src/op/shape.cpp new file mode 100644 index 00000000000000..d10eac182dfcad --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/shape.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shape.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + 
namespace op + { + NamedOutputs shape(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + return node.default_single_output_mapping( + {std::make_shared(data)}, {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/shape.hpp b/ngraph/frontend/paddlepaddle/src/op/shape.hpp new file mode 100644 index 00000000000000..60fed610200127 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/shape.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs shape(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/slice.cpp b/ngraph/frontend/paddlepaddle/src/op/slice.cpp new file mode 100644 index 00000000000000..9133a5c73e5bd0 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/slice.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "slice.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs slice(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto axes = node.get_attribute>("axes"); + // TODO: support tensor type #55266 + auto starts = node.get_attribute>("starts"); + // TODO: support tensor type #55266 + auto ends = node.get_attribute>("ends"); + auto data_rank = data.get_partial_shape().rank(); + size_t shape_size = data_rank.get_length(); + std::vector fixedStarts(shape_size, 0); + std::vector fixedEnds(shape_size, INT_MAX); + + int n = 0; + for (auto i : axes) + { + PDPD_OP_VALIDATION_CHECK(node, + i < (int32_t)shape_size, + "slice: axes must be less than the X rank."); + fixedStarts[i] = starts[n]; + fixedEnds[i] = ends[n]; + n++; + } + + auto startsNode = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_size}, fixedStarts); + auto endsNode = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_size}, fixedEnds); + auto stridesNode = ngraph::opset6::Constant::create( + ngraph::element::i32, {shape_size}, std::vector(shape_size, 1)); + return node.default_single_output_mapping( + {std::make_shared( + data, + startsNode, + endsNode, + stridesNode, + std::vector(shape_size, 0), + std::vector(shape_size, 0))}, + {"Out"}); + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/slice.hpp b/ngraph/frontend/paddlepaddle/src/op/slice.hpp new file mode 100644 index 00000000000000..2bce2ac3440e1e --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/slice.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs slice(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/softmax.cpp b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp new file mode 100644 index 00000000000000..0fb79073a57e40 --- /dev/null +++ 
b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "softmax.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs softmax(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto axis = node.get_attribute("axis"); + if (axis < 0) + { + PDPD_OP_VALIDATION_CHECK(node, + data.get_partial_shape().rank().is_static(), + "Softmax rank must be static"); + auto data_rank = data.get_partial_shape().rank().get_length(); + axis = data_rank + axis; + } + return node.default_single_output_mapping( + {std::make_shared(data, axis)}, {"Out"}); + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/softmax.hpp b/ngraph/frontend/paddlepaddle/src/op/softmax.hpp new file mode 100644 index 00000000000000..337aa40f36ca81 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/softmax.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs softmax(const NodeContext& node); + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/split.cpp b/ngraph/frontend/paddlepaddle/src/op/split.cpp new file mode 100644 index 00000000000000..d9b6836a5c97b2 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/split.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "split.hpp" +#include + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs split(const NodeContext& node) + { + using namespace ngraph; + using namespace opset7; + const auto& data = node.get_ng_input("X"); + auto dim = node.get_attribute("axis"); + // todo: 'num' can be list of values, in this case we should create + // VariadicSplit todo: support VariadicSplit + auto num_or_sections = node.get_attribute("num"); + auto axis = std::make_shared(ngraph::element::i32, Shape{}, dim); + + NamedOutputs named_outputs; + auto split_outputs = + std::make_shared(data, axis, num_or_sections)->outputs(); + auto out_names = node.get_output_names(); + PDPD_OP_VALIDATION_CHECK( + node, out_names.size() == 1, "Unexpected number of outputs"); + + auto it = std::find(out_names.begin(), out_names.end(), "Out"); + PDPD_OP_VALIDATION_CHECK( + node, it != out_names.end(), "Expected output not found"); + for (const auto& split_output : split_outputs) + { + named_outputs[*it].push_back(split_output); + } + return named_outputs; + } + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/split.hpp b/ngraph/frontend/paddlepaddle/src/op/split.hpp new file mode 100644 index 00000000000000..3ae3a40018fcaf --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/split.hpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs split(const NodeContext& 
node);
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp
new file mode 100644
index 00000000000000..485b713eea6155
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "squeeze.hpp"
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs squeeze(const NodeContext& node)
+                {
+                    auto data = node.get_ng_input("X");
+                    std::vector<int32_t> axes;
+                    if (node.has_attribute<std::vector<int32_t>>("axes"))
+                    {
+                        axes = node.get_attribute<std::vector<int32_t>>("axes");
+                    }
+
+                    auto axesNode =
+                        ngraph::opset6::Constant::create(ngraph::element::i32, {axes.size()}, axes);
+                    return node.default_single_output_mapping(
+                        {std::make_shared<ngraph::opset6::Squeeze>(data, axesNode)}, {"Out"});
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp b/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp
new file mode 100644
index 00000000000000..c0648573c4e6b9
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs squeeze(const NodeContext& node);
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp
new file mode 100644
index 00000000000000..e944bc89396125
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transpose2.hpp"
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs transpose2(const NodeContext& node)
+                {
+                    auto data = node.get_ng_input("X");
+                    auto perm = node.get_attribute<std::vector<int32_t>>("axis");
+
+                    auto rank =
+                        static_cast<size_t>(data.get_partial_shape().rank().get_length());
+
+                    PDPD_OP_VALIDATION_CHECK(node,
+                                             perm.size() == rank,
+                                             "transpose2: axis size must be equal to data rank.");
+
+                    auto input_order =
+                        ngraph::opset6::Constant::create(ngraph::element::i64, {rank}, perm);
+                    return node.default_single_output_mapping(
+                        {std::make_shared<ngraph::opset6::Transpose>(data, input_order)}, {"Out"});
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp
new file mode 100644
index 00000000000000..d33fe0a1089679
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs transpose2(const NodeContext& node);
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace
ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp
new file mode 100644
index 00000000000000..dc9b7e0bb9c2e9
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "unsqueeze.hpp"
+#include <ngraph/opsets/opset6.hpp>
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs unsqueeze(const NodeContext& node)
+                {
+                    // TODO: support data types other than int32_t #55168
+                    auto data = node.get_ng_input("X");
+                    auto axes = node.get_attribute<std::vector<int32_t>>("axes");
+                    auto axesNode =
+                        ngraph::opset6::Constant::create(ngraph::element::i32, {axes.size()}, axes);
+                    return node.default_single_output_mapping(
+                        {std::make_shared<ngraph::opset6::Unsqueeze>(data, axesNode)}, {"Out"});
+                }
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
diff --git a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp
new file mode 100644
index 00000000000000..bce9596f6f6c12
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include "node_context.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs unsqueeze(const NodeContext& node);
+
+            } // namespace op
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp
new file mode 100644
index 00000000000000..94d02a776cc2b3
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp
@@ -0,0 +1,357 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <limits> // std::numeric_limits
+#include <numeric>
+
+#include <ngraph/opsets/opset6.hpp>
+#include "yolo_box.hpp"
+
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                using namespace opset6;
+                using namespace element;
+
+                NamedOutputs yolo_box(const NodeContext& node_context)
+                {
+                    auto data = node_context.get_ng_input("X");
+                    auto image_size = node_context.get_ng_input("ImgSize");
+
+                    // TODO: add dynamic shape support - #55264
+                    auto input_shape = data.get_partial_shape();
+                    uint32_t input_height = input_shape[2].get_length();
+                    uint32_t input_width = input_shape[3].get_length();
+
+                    int32_t class_num = node_context.get_attribute<int32_t>("class_num");
+                    // The PDPD "anchors" attribute is of type int32; convert to float
+                    // for convenient computation.
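+                    // As a concrete example of the shapes involved (hypothetical
+                    // values, assuming a 19x19 YOLOv3 head with 3 anchors and
+                    // 80 classes):
+                    //     anchors = {116, 90, 156, 198, 373, 326} -> num_anchors = 3
+                    //     score_shape = {1, 19 * 19 * 3, 80} = {1, 1083, 80}
+                    //     input_size = 19 * downsample_ratio(32) = 608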
+                    auto _anchors = node_context.get_attribute<std::vector<int32_t>>("anchors");
+                    std::vector<float> anchors(_anchors.begin(), _anchors.end());
+
+                    uint32_t num_anchors = anchors.size() / 2;
+
+                    auto default_scale = 1.0f;
+                    auto scale_x_y = node_context.get_attribute<float>("scale_x_y", default_scale);
+                    auto downsample_ratio = node_context.get_attribute<int32_t>("downsample_ratio");
+                    auto input_size = input_height * downsample_ratio;
+
+                    std::vector<int64_t> score_shape{
+                        1, input_height * input_width * num_anchors, class_num};
+
+                    auto conf_thresh = node_context.get_attribute<float>("conf_thresh");
+                    std::vector<float> conf_thresh_mat(score_shape[1], conf_thresh);
+
+                    auto clip_bbox = node_context.get_attribute<bool>("clip_bbox");
+
+                    // main X
+                    auto node_x_shape = Constant::create<int64_t>(
+                        i64, {5}, {1, num_anchors, 5 + class_num, input_height, input_width});
+
+                    auto node_x_reshape = std::make_shared<Reshape>(data, node_x_shape, false);
+
+                    auto node_input_order = Constant::create(i64, {5}, {0, 1, 3, 4, 2});
+                    auto node_x_transpose =
+                        std::make_shared<Transpose>(node_x_reshape, node_input_order);
+
+                    // range x/y
+                    std::vector<float> range_x(input_width);
+                    std::iota(range_x.begin(), range_x.end(), 0);
+                    std::vector<float> range_y(input_height);
+                    std::iota(range_y.begin(), range_y.end(), 0);
+
+                    auto node_range_x = Constant::create<float>(f32, {1, range_x.size()}, range_x);
+                    auto node_range_y = Constant::create<float>(f32, {range_y.size(), 1}, range_y);
+
+                    auto node_range_x_shape = Constant::create<int64_t>(i64, {2}, {1, input_width});
+                    auto node_range_y_shape =
+                        Constant::create<int64_t>(i64, {2}, {input_height, 1});
+
+                    auto node_grid_x = std::make_shared<Tile>(node_range_x, node_range_y_shape);
+                    auto node_grid_y = std::make_shared<Tile>(node_range_y, node_range_x_shape);
+
+                    // main X (part2)
+                    auto node_split_axis = Constant::create(i64, {1}, {-1});
+                    auto node_split_lengths =
+                        Constant::create<int64_t>(i64, {6}, {1, 1, 1, 1, 1, class_num});
+                    auto node_split_input = std::make_shared<VariadicSplit>(
+                        node_x_transpose, node_split_axis, node_split_lengths);
+
+                    auto node_box_x = node_split_input->output(0);
+                    auto node_box_y = node_split_input->output(1);
+                    auto node_box_w = node_split_input->output(2);
+                    auto node_box_h = node_split_input->output(3);
+                    auto node_conf = node_split_input->output(4);
+                    auto node_prob = node_split_input->output(5);
+
+                    // x/y
+                    std::shared_ptr<ngraph::Node> node_box_x_sigmoid =
+                        std::make_shared<Sigmoid>(node_box_x);
+                    std::shared_ptr<ngraph::Node> node_box_y_sigmoid =
+                        std::make_shared<Sigmoid>(node_box_y);
+
+                    if (std::fabs(scale_x_y - default_scale) > 1e-6)
+                    { // float not-equal
+                        float bias_x_y = -0.5 * (scale_x_y - 1.0);
+
+                        auto scale_x_y_node = Constant::create<float>(f32, {1}, {scale_x_y});
+                        auto bias_x_y_node = Constant::create<float>(f32, {1}, {bias_x_y});
+
+                        node_box_x_sigmoid =
+                            std::make_shared<Multiply>(node_box_x_sigmoid, scale_x_y_node);
+                        node_box_x_sigmoid =
+                            std::make_shared<Add>(node_box_x_sigmoid, bias_x_y_node);
+
+                        node_box_y_sigmoid =
+                            std::make_shared<Multiply>(node_box_y_sigmoid, scale_x_y_node);
+                        node_box_y_sigmoid =
+                            std::make_shared<Add>(node_box_y_sigmoid, bias_x_y_node);
+                    }
+
+                    auto squeeze_box_x = Constant::create<int64_t>(i64, {1}, {4});
+                    auto node_box_x_squeeze =
+                        std::make_shared<Squeeze>(node_box_x_sigmoid, squeeze_box_x);
+
+                    auto squeeze_box_y =
Constant::create(i64, {1}, {4}); + auto node_box_y_squeeze = + std::make_shared(node_box_y_sigmoid, squeeze_box_y); + + auto node_box_x_add_grid = + std::make_shared(node_grid_x, node_box_x_squeeze); + auto node_box_y_add_grid = + std::make_shared(node_grid_y, node_box_y_squeeze); + + auto node_input_h = Constant::create(f32, {1}, {(float)input_height}); + auto node_input_w = Constant::create(f32, {1}, {(float)input_width}); + + auto node_box_x_encode = + std::make_shared(node_box_x_add_grid, node_input_w); + auto node_box_y_encode = + std::make_shared(node_box_y_add_grid, node_input_h); + + // w/h + auto node_anchor_tensor = Constant::create( + f32, {num_anchors, 2}, anchors); // FIXME:Paddle2ONNX use float! + + auto node_input_size = Constant::create(f32, {1}, {(float)input_size}); + auto node_anchors_div_input_size = + std::make_shared(node_anchor_tensor, node_input_size); + + auto split_axis = Constant::create(i32, {}, {1}); + auto node_anchor_split = + std::make_shared(node_anchors_div_input_size, split_axis, 2); + + auto node_anchor_w = node_anchor_split->output(0); + auto node_anchor_h = node_anchor_split->output(1); + + auto node_new_anchor_shape = + Constant::create(i64, {4}, {1, num_anchors, 1, 1}); + auto node_anchor_w_reshape = + std::make_shared(node_anchor_w, node_new_anchor_shape, false); + auto node_anchor_h_reshape = + std::make_shared(node_anchor_h, node_new_anchor_shape, false); + + auto squeeze_box_wh = Constant::create(i64, {1}, {4}); + auto node_box_w_squeeze = std::make_shared(node_box_w, squeeze_box_wh); + auto node_box_h_squeeze = std::make_shared(node_box_h, squeeze_box_wh); + + auto node_box_w_exp = std::make_shared(node_box_w_squeeze); + auto node_box_h_exp = std::make_shared(node_box_h_squeeze); + + auto node_box_w_encode = + std::make_shared(node_box_w_exp, node_anchor_w_reshape); + auto node_box_h_encode = + std::make_shared(node_box_h_exp, node_anchor_h_reshape); + + // confidence + auto node_conf_sigmoid = std::make_shared(node_conf); + + auto node_conf_thresh = Constant::create( + f32, {1, num_anchors, input_height, input_width, 1}, conf_thresh_mat); + + auto node_conf_sub = + std::make_shared(node_conf_sigmoid, node_conf_thresh); + + auto node_conf_clip = std::make_shared( + node_conf_sub, + 0.0f, + std::numeric_limits::max()); // FIXME: PDPD not specify min/max + + auto node_zeros = Constant::create(f32, {1}, {0}); + auto node_conf_clip_bool = + std::make_shared(node_conf_clip, node_zeros); + + auto node_conf_clip_cast = + std::make_shared(node_conf_clip_bool, f32); // FIMXE: to=1 + + auto node_conf_set_zero = + std::make_shared(node_conf_sigmoid, node_conf_clip_cast); + + /* probability */ + auto node_prob_sigmoid = std::make_shared(node_prob); + + auto node_new_shape = Constant::create( + i64, {5}, {1, int(num_anchors), input_height, input_width, 1}); + auto node_conf_new_shape = + std::make_shared(node_conf_set_zero, node_new_shape, false); + + // broadcast confidence * probability of each category + auto node_score = + std::make_shared(node_prob_sigmoid, node_conf_new_shape); + + // for bbox which has object (greater than threshold) + auto node_conf_bool = + std::make_shared(node_conf_new_shape, node_zeros); + + auto node_box_x_new_shape = + std::make_shared(node_box_x_encode, node_new_shape, false); + auto node_box_y_new_shape = + std::make_shared(node_box_y_encode, node_new_shape, false); + auto node_box_w_new_shape = + std::make_shared(node_box_w_encode, node_new_shape, false); + auto node_box_h_new_shape = + std::make_shared(node_box_h_encode, 
node_new_shape, false); + auto node_pred_box = + std::make_shared(OutputVector{node_box_x_new_shape, + node_box_y_new_shape, + node_box_w_new_shape, + node_box_h_new_shape}, + 4); + + auto node_conf_cast = + std::make_shared(node_conf_bool, f32); // FIMXE: to=1 + + auto node_pred_box_mul_conf = std::make_shared( + node_pred_box, node_conf_cast); //(1,3,19,19,4) (1,3,19,19,1) + + auto node_box_shape = Constant::create( + i64, {3}, {1, int(num_anchors) * input_height * input_width, 4}); + auto node_pred_box_new_shape = std::make_shared( + node_pred_box_mul_conf, node_box_shape, false); //(1,3*19*19,4) + + auto pred_box_split_axis = Constant::create(i32, {}, {2}); + auto node_pred_box_split = + std::make_shared(node_pred_box_new_shape, pred_box_split_axis, 4); + + auto node_pred_box_x = node_pred_box_split->output(0); + auto node_pred_box_y = node_pred_box_split->output(1); + auto node_pred_box_w = node_pred_box_split->output(2); + auto node_pred_box_h = node_pred_box_split->output(3); + + /* x,y,w,h -> x1,y1,x2,y2 */ + auto node_number_two = Constant::create(f32, {1}, {2.0f}); + auto node_half_w = std::make_shared(node_pred_box_w, node_number_two); + auto node_half_h = std::make_shared(node_pred_box_h, node_number_two); + + auto node_pred_box_x1 = + std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y1 = + std::make_shared(node_pred_box_y, node_half_h); + + auto node_pred_box_x2 = std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y2 = std::make_shared(node_pred_box_y, node_half_h); + + /* map normalized coords to original image */ + auto squeeze_image_size_axes = Constant::create(i64, {1}, {0}); + auto node_sqeeze_image_size = std::make_shared( + image_size, squeeze_image_size_axes); // input ImgSize + + auto image_size_split_axis = Constant::create(i32, {}, {-1}); + auto node_image_size_split = + std::make_shared(node_sqeeze_image_size, image_size_split_axis, 2); + auto node_img_height = node_image_size_split->output(0); + auto node_img_width = node_image_size_split->output(1); + + auto node_img_width_cast = + std::make_shared(node_img_width, f32); // FIMXE: to=1 + auto node_img_height_cast = std::make_shared(node_img_height, f32); + + auto node_pred_box_x1_decode = + std::make_shared(node_pred_box_x1, node_img_width_cast); + auto node_pred_box_y1_decode = + std::make_shared(node_pred_box_y1, node_img_height_cast); + auto node_pred_box_x2_decode = + std::make_shared(node_pred_box_x2, node_img_width_cast); + auto node_pred_box_y2_decode = + std::make_shared(node_pred_box_y2, node_img_height_cast); + + // reference + // Paddle/python/paddle/fluid/tests/unittests/test_yolo_box_op.py + // Paddle/paddle/fluid/operators/detection/yolo_box_op.h + // Paddle2ONNX/paddle2onnx/op_mapper/detection/yolo_box.py - clip_bbox is not + // used by Paddle2ONNX. 
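+                    // In the clip_bbox branch below, x1/y1 are clamped from below
+                    // at 0 via Clamp(., 0, +inf), while x2/y2 are capped from above
+                    // by subtracting their overflow:
+                    //     x2 - max(x2 - (w - 1), 0) == min(x2, w - 1)
+                    // so the final corners lie within the original image.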
+ std::shared_ptr node_pred_box_result; + if (clip_bbox) + { + auto node_number_one = Constant::create(f32, {1}, {1.0}); + auto node_new_img_height = + std::make_shared(node_img_height_cast, node_number_one); + auto node_new_img_width = + std::make_shared(node_img_width_cast, node_number_one); + auto node_pred_box_x2_sub_w = std::make_shared( + node_pred_box_x2_decode, node_new_img_width); // x2 - (w-1) + auto node_pred_box_y2_sub_h = std::make_shared( + node_pred_box_y2_decode, node_new_img_height); // y2 - (h-1) + + auto max_const = std::numeric_limits::max(); + auto node_pred_box_x1_clip = + std::make_shared(node_pred_box_x1_decode, 0.0f, max_const); + auto node_pred_box_y1_clip = + std::make_shared(node_pred_box_y1_decode, 0.0f, max_const); + auto node_pred_box_x2_clip = + std::make_shared(node_pred_box_x2_sub_w, 0.0f, max_const); + auto node_pred_box_y2_clip = + std::make_shared(node_pred_box_y2_sub_h, 0.0f, max_const); + + auto node_pred_box_x2_res = std::make_shared( + node_pred_box_x2_decode, node_pred_box_x2_clip); + auto node_pred_box_y2_res = std::make_shared( + node_pred_box_y2_decode, node_pred_box_y2_clip); + + node_pred_box_result = + std::make_shared(OutputVector{node_pred_box_x1_clip, + node_pred_box_y1_clip, + node_pred_box_x2_res, + node_pred_box_y2_res}, + -1); // outputs=node.output('Boxes') + } + else + { + node_pred_box_result = + std::make_shared(OutputVector{node_pred_box_x1_decode, + node_pred_box_y1_decode, + node_pred_box_x2_decode, + node_pred_box_y2_decode}, + -1); // outputs=node.output('Boxes') + } + + // + auto node_score_shape = + Constant::create(i64, {score_shape.size()}, score_shape); + auto node_score_new_shape = std::make_shared( + node_score, node_score_shape, false); // outputs=node.output('Scores') + + NamedOutputs outputs; + outputs["Boxes"] = {node_pred_box_result}; + outputs["Scores"] = {node_score_new_shape}; + return outputs; + } + + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp b/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp new file mode 100644 index 00000000000000..95661a3ccebd58 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs yolo_box(const NodeContext& node); + + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op_table.cpp b/ngraph/frontend/paddlepaddle/src/op_table.cpp new file mode 100644 index 00000000000000..324b3e8afb837d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op_table.cpp @@ -0,0 +1,96 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "op/argmax.hpp" +#include "op/assign_value.hpp" +#include "op/batch_norm.hpp" +#include "op/cast.hpp" +#include "op/clip.hpp" +#include "op/concat.hpp" +#include "op/conv2d.hpp" +#include "op/conv2d_transpose.hpp" +#include "op/dropout.hpp" +#include "op/elementwise_ops.hpp" +#include "op/expand_v2.hpp" +#include "op/fill_constant.hpp" +#include "op/fill_constant_batch_size_like.hpp" +#include "op/flatten_contiguous_range.hpp" +#include "op/interp.hpp" +#include "op/leakyrelu.hpp" +#include "op/matmul.hpp" +#include "op/mul.hpp" +#include "op/pad3d.hpp" 
+#include "op/pool2d.hpp" +#include "op/range.hpp" +#include "op/relu.hpp" +#include "op/reshape2.hpp" +#include "op/rnn.hpp" +#include "op/scale.hpp" +#include "op/shape.hpp" +#include "op/slice.hpp" +#include "op/softmax.hpp" +#include "op/split.hpp" +#include "op/squeeze.hpp" +#include "op/transpose2.hpp" +#include "op/unsqueeze.hpp" +#include "op/yolo_box.hpp" + +#include "op_table.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + std::map get_supported_ops() + { + return {{"arg_max", op::argmax}, + {"assign_value", op::assign_value}, + {"batch_norm", op::batch_norm}, + {"bilinear_interp_v2", op::bilinear_interp_v2}, + {"bilinear_interp", op::bilinear_interp_v2}, + {"cast", op::cast}, + {"clip", op::clip}, + {"concat", op::concat}, + {"conv2d", op::conv2d}, + {"dropout", op::dropout}, + {"conv2d_transpose", op::conv2d_transpose}, + {"elementwise_add", op::elementwise_add}, + {"elementwise_div", op::elementwise_div}, + {"elementwise_max", op::elementwise_max}, + {"elementwise_min", op::elementwise_min}, + {"elementwise_mul", op::elementwise_mul}, + {"elementwise_pow", op::elementwise_pow}, + {"elementwise_sub", op::elementwise_sub}, + {"expand_v2", op::expand_v2}, + {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, + {"fill_constant", op::fill_constant}, + {"flatten_contiguous_range", op::flatten_contiguous_range}, + {"leaky_relu", op::leaky_relu}, + {"matmul", op::matmul}, + {"max_pool2d_with_index", op::pool2d}, + {"mul", op::mul}, + {"nearest_interp_v2", op::nearest_interp_v2}, + {"nearest_interp", op::nearest_interp_v2}, + {"pad3d", op::pad3d}, + {"pool2d", op::pool2d}, + {"range", op::range}, + {"relu", op::relu}, + {"reshape2", op::reshape2}, + {"rnn", op::rnn}, + {"scale", op::scale}, + {"shape", op::shape}, + {"slice", op::slice}, + {"softmax", op::softmax}, + {"split", op::split}, + {"squeeze2", op::squeeze}, + {"transpose2", op::transpose2}, + {"unsqueeze2", op::unsqueeze}, + {"yolo_box", op::yolo_box}}; + }; + + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op_table.hpp b/ngraph/frontend/paddlepaddle/src/op_table.hpp new file mode 100644 index 00000000000000..e9d6fee2172b3d --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/op_table.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include + +#include "node_context.hpp" + +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + using CreatorFunction = std::function; + + std::map get_supported_ops(); + + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/place.cpp b/ngraph/frontend/paddlepaddle/src/place.cpp new file mode 100644 index 00000000000000..15c74b7fcc8e8f --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/place.cpp @@ -0,0 +1,115 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "decoder.hpp" +#include "framework.pb.h" + +using namespace ngraph; +using namespace frontend; + +bool PlacePDPD::is_input() const +{ + const auto& model_ins = m_input_model.get_inputs(); + + const auto cmp = [this](const Place::Ptr& p) { return p.get() == this; }; + return std::find_if(model_ins.begin(), model_ins.end(), cmp) != model_ins.end(); +} + +bool PlacePDPD::is_output() const +{ + const auto& model_outs = m_input_model.get_outputs(); + const auto cmp = [this](const 
Place::Ptr& p) { return p.get() == this; };
+    return std::find_if(model_outs.begin(), model_outs.end(), cmp) != model_outs.end();
+}
+
+OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
+                         const std::vector<std::string>& names,
+                         const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
+    : PlacePDPD(input_model, names)
+    , m_op_desc(op_desc)
+{
+}
+
+OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
+                         const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
+    : OpPlacePDPD(input_model, {}, op_desc)
+{
+}
+
+TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
+                                 const std::vector<std::string>& names,
+                                 const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
+    : PlacePDPD(input_model, names)
+    , m_var_desc(var_desc)
+{
+    const auto& var_type = var_desc->type();
+    if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR)
+    {
+        const auto& tensor_desc = var_type.lod_tensor().tensor();
+        m_type = TYPE_MAP[tensor_desc.data_type()];
+        m_pshape = PartialShape(
+            std::vector<Dimension>(tensor_desc.dims().begin(), tensor_desc.dims().end()));
+    }
+}
+
+TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
+                                 const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
+    : TensorPlacePDPD(input_model, {var_desc->name()}, var_desc)
+{
+}
+
+std::vector<Place::Ptr> TensorPlacePDPD::get_consuming_ports() const
+{
+    std::vector<Place::Ptr> consuming_ports;
+    for (const auto& consuming_port : m_consuming_ports)
+    {
+        if (const auto& locked = consuming_port.lock())
+        {
+            consuming_ports.push_back(locked);
+        }
+        else
+        {
+            FRONT_END_THROW("Consuming Port has expired.");
+        }
+    }
+    return consuming_ports;
+}
+
+Place::Ptr TensorPlacePDPD::get_producing_port() const
+{
+    FRONT_END_GENERAL_CHECK(m_producing_ports.size() == 1,
+                            "Only one producing port is supported.");
+    if (const auto& producing_port = m_producing_ports[0].lock())
+    {
+        return producing_port;
+    }
+    FRONT_END_THROW("Producing Port has expired.");
+}
+
+std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const
+{
+    if (const auto& tensor = m_source_tensor.lock())
+    {
+        return tensor;
+    }
+    FRONT_END_THROW("Source Tensor has expired.");
+}
+
+std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp()
+{
+    if (const auto& op = m_op.lock())
+    {
+        return op;
+    }
+    FRONT_END_THROW("Operation has expired.");
+}
+
+std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const
+{
+    if (const auto& target_tensor = m_target_tensor.lock())
+    {
+        return target_tensor;
+    }
+    FRONT_END_THROW("Target Tensor has expired.");
+}
diff --git a/ngraph/frontend/paddlepaddle/src/proto/framework.proto b/ngraph/frontend/paddlepaddle/src/proto/framework.proto
new file mode 100644
index 00000000000000..baaecb55d06ee3
--- /dev/null
+++ b/ngraph/frontend/paddlepaddle/src/proto/framework.proto
@@ -0,0 +1,205 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+syntax = "proto2";
+package paddle.framework.proto;
+
+// Any incompatible changes to ProgramDesc and its dependencies should
+// raise the version defined in version.h.
+// +// Serailization and Deserialization codes should be modified in a way +// that supports old versions following the version and compatibility policy. +message Version { optional int64 version = 1 [ default = 0 ]; } + +enum AttrType { + INT = 0; + FLOAT = 1; + STRING = 2; + INTS = 3; + FLOATS = 4; + STRINGS = 5; + BOOLEAN = 6; + BOOLEANS = 7; + BLOCK = 8; + LONG = 9; + BLOCKS = 10; + LONGS = 11; +} + +// OpDesc describes an instance of a C++ framework::OperatorBase +// derived class type. +message OpDesc { + + message Attr { + required string name = 1; + required AttrType type = 2; + optional int32 i = 3; + optional float f = 4; + optional string s = 5; + repeated int32 ints = 6; + repeated float floats = 7; + repeated string strings = 8; + optional bool b = 10; + repeated bool bools = 11; + optional int32 block_idx = 12; + optional int64 l = 13; + repeated int32 blocks_idx = 14; + repeated int64 longs = 15; + }; + + message Var { + required string parameter = 1; + repeated string arguments = 2; + }; + + required string type = 3; + repeated Var inputs = 1; + repeated Var outputs = 2; + repeated Attr attrs = 4; + optional bool is_target = 5 [ default = false ]; +}; + +// OpProto describes a C++ framework::OperatorBase derived class. +message OpProto { + + // VarProto describes the C++ type framework::Variable. + message Var { + required string name = 1; + required string comment = 2; + + optional bool duplicable = 3 [ default = false ]; + optional bool intermediate = 4 [ default = false ]; + optional bool dispensable = 5 [ default = false ]; + } + + // AttrProto describes the C++ type Attribute. + message Attr { + required string name = 1; + required AttrType type = 2; + required string comment = 3; + // If that attribute is generated, it means the Paddle third + // language binding has responsibility to fill that + // attribute. End-User should not set that attribute. + optional bool generated = 4 [ default = false ]; + } + + required string type = 1; + repeated Var inputs = 2; + repeated Var outputs = 3; + repeated Attr attrs = 4; + required string comment = 5; +} + +message VarType { + enum Type { + // Pod Types + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; + // Tensor is used in C++. + SIZE_T = 19; + UINT8 = 20; + INT8 = 21; + BF16 = 22; + COMPLEX64 = 23; + COMPLEX128 = 24; + + // Other types that may need additional descriptions + LOD_TENSOR = 7; + SELECTED_ROWS = 8; + FEED_MINIBATCH = 9; + FETCH_LIST = 10; + STEP_SCOPES = 11; + LOD_RANK_TABLE = 12; + LOD_TENSOR_ARRAY = 13; + PLACE_LIST = 14; + READER = 15; + // Any runtime decided variable type is raw + // raw variables should manage their own allocations + // in operators like nccl_op + RAW = 17; + TUPLE = 18; + } + + required Type type = 1; + + message TensorDesc { + // Should only be PODType. 
Is enforced in C++ + required Type data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] + } + optional TensorDesc selected_rows = 2; + + message LoDTensorDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; + } + optional LoDTensorDesc lod_tensor = 3; + + message LoDTensorArrayDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; + } + optional LoDTensorArrayDesc tensor_array = 4; + + message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; } + optional ReaderDesc reader = 5; + + message Tuple { repeated Type element_type = 1; } + optional Tuple tuple = 7; +} + +message VarDesc { + required string name = 1; + required VarType type = 2; + optional bool persistable = 3 [ default = false ]; + // True if the variable is an input data and + // have to check the feed data shape and dtype + optional bool need_check_feed = 4 [ default = false ]; +} + +message BlockDesc { + required int32 idx = 1; + required int32 parent_idx = 2; + repeated VarDesc vars = 3; + repeated OpDesc ops = 4; + optional int32 forward_block_idx = 5 [ default = -1 ]; +} + +// In some cases, Paddle may perform operator definition iterations, +// and the operator uses OpVersionMap for compatibility testing. +message OpVersion { required int32 version = 1; } +message OpVersionMap { + message OpVersionPair { + required string op_name = 1; + required OpVersion op_version = 2; + } + repeated OpVersionPair pair = 1; +} + +// Please refer to +// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md +// for more details. +// TODO(panyx0718): A model can have multiple programs. Need a +// way to distinguish them. Maybe ID or name? +message ProgramDesc { + reserved 2, 3; // For backward compatibility. 
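+  // A serialized PDPD model file is one ProgramDesc: a list of BlockDescs,
+  // each holding the VarDescs (tensors) and OpDescs (operators) that this
+  // patch's frontend walks when building the nGraph function.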
+ repeated BlockDesc blocks = 1; + optional Version version = 4; + optional OpVersionMap op_version_map = 5; +} diff --git a/ngraph/python/CMakeLists.txt b/ngraph/python/CMakeLists.txt index cacaa1b669af73..d51d836f57e249 100644 --- a/ngraph/python/CMakeLists.txt +++ b/ngraph/python/CMakeLists.txt @@ -39,6 +39,10 @@ else() endif() if(OpenVINO_MAIN_SOURCE_DIR) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_OLD ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_OLD ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY}) + set(CMAKE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_PDB_OUTPUT_DIRECTORY}) if(WIN32) set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/${PYTHON_VERSION}/) else() @@ -78,11 +82,35 @@ file(GLOB_RECURSE SOURCES src/pyngraph/*.cpp) pybind11_add_module(_${PROJECT_NAME} MODULE ${SOURCES}) target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") -target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph) +target_include_directories(_${PROJECT_NAME} PRIVATE "${FRONTEND_INCLUDE_PATH}") + +if (NGRAPH_ONNX_IMPORT_ENABLE) + target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::onnx_importer) +endif() + +target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) + if (TARGET ngraph::onnx_importer) add_dependencies(_${PROJECT_NAME} ngraph::onnx_importer) endif() +if(NGRAPH_UNIT_TEST_ENABLE) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY_OLD}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY_OLD}) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD}) + set(CMAKE_PDB_OUTPUT_DIRECTORY ${CMAKE_PDB_OUTPUT_DIRECTORY_OLD}) + add_subdirectory(mock_py_frontend/mock_py_ngraph_frontend) + add_dependencies(_${PROJECT_NAME} mock_py_ngraph_frontend) + + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + add_subdirectory(mock_py_frontend/pyngraph_fe_mock_api) + add_dependencies(_${PROJECT_NAME} pybind_mock_frontend) + +endif() + # perform copy if(OpenVINO_MAIN_SOURCE_DIR) add_custom_command(TARGET _${PROJECT_NAME} diff --git a/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/CMakeLists.txt b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/CMakeLists.txt new file mode 100644 index 00000000000000..993d67279e827b --- /dev/null +++ b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/CMakeLists.txt @@ -0,0 +1,28 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") + +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) + +set(FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) + +source_group("src" FILES ${LIBRARY_SRC}) +source_group("include" FILES ${LIBRARY_HEADERS}) + +# Create shared library +add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) + +# add_library(${TARGET_FE_NAME} SHARED ${SRC}) + +# target_compile_definitions(${TARGET_FE_NAME} PRIVATE) + +target_include_directories(${TARGET_FE_NAME} PRIVATE ".") + +target_include_directories(${TARGET_FE_NAME} PRIVATE ${FRONTEND_INCLUDE_PATH} ${NGRAPH_INCLUDE_PATH}) 
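+# Built as a standalone shared library (not linked into _pyngraph) so the
+# FrontEndManager can pick it up like a real frontend plugin: it exports the
+# GetAPIVersion/GetFrontEndData entry points that the plugin loader expects.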
+target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager) +target_link_libraries(${TARGET_FE_NAME} PUBLIC ngraph PRIVATE ngraph::builder) + +add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) diff --git a/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.cpp b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.cpp new file mode 100644 index 00000000000000..88e49bb68d4a7a --- /dev/null +++ b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "mock_py_frontend.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +extern "C" MOCK_API FrontEndVersion GetAPIVersion() +{ + return OV_FRONTEND_API_VERSION; +} + +extern "C" MOCK_API void* GetFrontEndData() +{ + FrontEndPluginInfo* res = new FrontEndPluginInfo(); + res->m_name = "mock_py"; + res->m_creator = [](FrontEndCapFlags flags) { return std::make_shared(flags); }; + + return res; +} \ No newline at end of file diff --git a/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.hpp b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.hpp new file mode 100644 index 00000000000000..78b9c1283621ce --- /dev/null +++ b/ngraph/python/mock_py_frontend/mock_py_ngraph_frontend/mock_py_frontend.hpp @@ -0,0 +1,484 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +// Defined if we are building the plugin DLL (instead of using it) +#ifdef mock_py_ngraph_frontend_EXPORTS +#define MOCK_API NGRAPH_HELPER_DLL_EXPORT +#else +#define MOCK_API NGRAPH_HELPER_DLL_IMPORT +#endif // mock1_ngraph_frontend_EXPORTS + +// OK to have 'using' in mock header + +using namespace ngraph; +using namespace ngraph::frontend; + +//////////////////////////////// + +struct MOCK_API PlaceStat +{ + int m_get_names = 0; + int m_get_consuming_operations = 0; + int m_get_target_tensor = 0; + int m_get_producing_operation = 0; + int m_get_producing_port = 0; + int m_get_input_port = 0; + int m_get_output_port = 0; + int m_get_consuming_ports = 0; + int m_is_input = 0; + int m_is_output = 0; + int m_is_equal = 0; + int m_is_equal_data = 0; + int m_get_source_tensor = 0; + + // Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + + // Getters + int get_names() const { return m_get_names; } + int get_consuming_operations() const { return m_get_consuming_operations; } + int get_target_tensor() const { return m_get_target_tensor; } + int get_producing_operation() const { return m_get_producing_operation; } + int get_producing_port() const { return m_get_producing_port; } + int get_input_port() const { return m_get_input_port; } + int get_output_port() const { return m_get_output_port; } + int get_consuming_ports() const { return m_get_consuming_ports; } + int is_input() const { return m_is_input; } + int is_output() const { return m_is_output; } + int is_equal() const { return m_is_equal; } + int is_equal_data() const { return m_is_equal_data; } + int get_source_tensor() const { return m_get_source_tensor; } + + // Arguments 
getters + std::string get_lastArgString() const { return m_lastArgString; } + int get_lastArgInt() const { return m_lastArgInt; } + Place::Ptr get_lastArgPlace() const { return m_lastArgPlace; } +}; + +class MOCK_API PlaceMockPy : public Place +{ + mutable PlaceStat m_stat; + +public: + std::vector get_names() const override + { + m_stat.m_get_names++; + return {}; + } + + std::vector get_consuming_operations(int outputPortIndex) const override + { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = outputPortIndex; + return {std::make_shared()}; + } + + Place::Ptr get_target_tensor(int outputPortIndex) const override + { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_producing_operation(int inputPortIndex) const override + { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_producing_port() const override + { + m_stat.m_get_producing_port++; + return std::make_shared(); + } + + Place::Ptr get_input_port(int inputPortIndex) const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_input_port(const std::string& inputName, int inputPortIndex) const override + { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = inputName; + return std::make_shared(); + } + + Place::Ptr get_output_port(int outputPortIndex) const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_output_port(const std::string& outputName, int outputPortIndex) const override + { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = outputName; + return std::make_shared(); + } + + std::vector get_consuming_ports() const override + { + m_stat.m_get_consuming_ports++; + return {std::make_shared()}; + } + + bool is_input() const override + { + m_stat.m_is_input++; + return false; + } + + bool is_output() const override + { + m_stat.m_is_output++; + return false; + } + + bool is_equal(Ptr another) const override + { + m_stat.m_is_equal++; + m_stat.m_lastArgPlace = another; + return false; + } + + bool is_equal_data(Ptr another) const override + { + m_stat.m_is_equal_data++; + m_stat.m_lastArgPlace = another; + return false; + } + + Place::Ptr get_source_tensor(int inputPortIndex) const override + { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = inputPortIndex; + return {std::make_shared()}; + } + + //---------------Stat-------------------- + PlaceStat get_stat() const { return m_stat; } +}; + +//////////////////////////////// + +struct MOCK_API ModelStat +{ + int m_get_inputs = 0; + int m_get_outputs = 0; + int m_get_place_by_tensor_name = 0; + int m_get_place_by_operation_name = 0; + int m_get_place_by_operation_and_input_port = 0; + int m_get_place_by_operation_and_output_port = 0; + int m_set_name_for_tensor = 0; + int m_add_name_for_tensor = 0; + int m_set_name_for_operation = 0; + int m_free_name_for_tensor = 0; + int m_free_name_for_operation = 0; + int m_set_name_for_dimension = 0; + int m_cut_and_add_new_input = 0; + int m_cut_and_add_new_output = 0; + int m_add_output = 0; + int m_remove_output = 0; + int m_set_partial_shape = 0; + int m_get_partial_shape = 0; + int m_set_element_type = 0; + + int m_extract_subgraph = 0; + int m_override_all_inputs = 0; + int m_override_all_outputs = 0; + + // 
Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + std::vector m_lastArgInputPlaces; + std::vector m_lastArgOutputPlaces; + ngraph::element::Type m_lastArgElementType; + ngraph::PartialShape m_lastArgPartialShape; + + // Getters + int get_inputs() const { return m_get_inputs; } + int get_outputs() const { return m_get_outputs; } + int extract_subgraph() const { return m_extract_subgraph; } + int override_all_inputs() const { return m_override_all_inputs; } + int override_all_outputs() const { return m_override_all_outputs; } + int get_place_by_tensor_name() const { return m_get_place_by_tensor_name; } + int get_place_by_operation_name() const { return m_get_place_by_operation_name; } + int get_place_by_operation_and_input_port() const + { + return m_get_place_by_operation_and_input_port; + } + int get_place_by_operation_and_output_port() const + { + return m_get_place_by_operation_and_output_port; + } + int set_name_for_tensor() const { return m_set_name_for_tensor; } + int add_name_for_tensor() const { return m_add_name_for_tensor; } + int set_name_for_operation() const { return m_set_name_for_operation; } + int free_name_for_tensor() const { return m_free_name_for_tensor; } + int free_name_for_operation() const { return m_free_name_for_operation; } + int set_name_for_dimension() const { return m_set_name_for_dimension; } + int cut_and_add_new_input() const { return m_cut_and_add_new_input; } + int cut_and_add_new_output() const { return m_cut_and_add_new_output; } + int add_output() const { return m_add_output; } + int remove_output() const { return m_remove_output; } + int set_partial_shape() const { return m_set_partial_shape; } + int get_partial_shape() const { return m_get_partial_shape; } + int set_element_type() const { return m_set_element_type; } + + // Arguments getters + std::string get_lastArgString() const { return m_lastArgString; } + int get_lastArgInt() const { return m_lastArgInt; } + Place::Ptr get_lastArgPlace() const { return m_lastArgPlace; } + std::vector get_lastArgInputPlaces() const { return m_lastArgInputPlaces; } + std::vector get_lastArgOutputPlaces() const { return m_lastArgOutputPlaces; } + ngraph::element::Type get_lastArgElementType() const { return m_lastArgElementType; } + ngraph::PartialShape get_lastArgPartialShape() const { return m_lastArgPartialShape; } +}; + +class MOCK_API InputModelMockPy : public InputModel +{ + mutable ModelStat m_stat; + +public: + std::vector get_inputs() const override + { + m_stat.m_get_inputs++; + return {std::make_shared()}; + } + + std::vector get_outputs() const override + { + m_stat.m_get_outputs++; + return {std::make_shared()}; + } + + Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override + { + m_stat.m_get_place_by_tensor_name++; + m_stat.m_lastArgString = tensorName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name(const std::string& operationName) override + { + m_stat.m_get_place_by_operation_name++; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_and_input_port(const std::string& operationName, + int inputPortIndex) override + { + m_stat.m_get_place_by_operation_and_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_and_output_port(const std::string& operationName, + int outputPortIndex) override + { + 
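+        // Like every method on this mock: count the call and capture the
+        // arguments so test_frontendmanager.py can assert on them later.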
m_stat.m_get_place_by_operation_and_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + void set_name_for_tensor(Place::Ptr tensor, const std::string& newName) override + { + m_stat.m_set_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void add_name_for_tensor(Place::Ptr tensor, const std::string& newName) override + { + m_stat.m_add_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void set_name_for_operation(Place::Ptr operation, const std::string& newName) override + { + m_stat.m_set_name_for_operation++; + m_stat.m_lastArgPlace = operation; + m_stat.m_lastArgString = newName; + } + + void free_name_for_tensor(const std::string& name) override + { + m_stat.m_free_name_for_tensor++; + m_stat.m_lastArgString = name; + } + + void free_name_for_operation(const std::string& name) override + { + m_stat.m_free_name_for_operation++; + m_stat.m_lastArgString = name; + } + + void set_name_for_dimension(Place::Ptr place, + size_t shapeDimIndex, + const std::string& dimName) override + { + m_stat.m_set_name_for_dimension++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgInt = static_cast(shapeDimIndex); + m_stat.m_lastArgString = dimName; + } + + void cut_and_add_new_input(Place::Ptr place, const std::string& newNameOptional) override + { + m_stat.m_cut_and_add_new_input++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + void cut_and_add_new_output(Place::Ptr place, const std::string& newNameOptional) override + { + m_stat.m_cut_and_add_new_output++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + Place::Ptr add_output(Place::Ptr place) override + { + m_stat.m_add_output++; + m_stat.m_lastArgPlace = place; + return std::make_shared(); + } + + void remove_output(Place::Ptr place) override + { + m_stat.m_remove_output++; + m_stat.m_lastArgPlace = place; + } + + void override_all_outputs(const std::vector& outputs) override + { + m_stat.m_override_all_outputs++; + m_stat.m_lastArgOutputPlaces = outputs; + } + + void override_all_inputs(const std::vector& inputs) override + { + m_stat.m_override_all_inputs++; + m_stat.m_lastArgInputPlaces = inputs; + } + + void extract_subgraph(const std::vector& inputs, + const std::vector& outputs) override + { + m_stat.m_extract_subgraph++; + m_stat.m_lastArgInputPlaces = inputs; + m_stat.m_lastArgOutputPlaces = outputs; + } + + // Setting tensor properties + void set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) override + { + m_stat.m_set_partial_shape++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgPartialShape = shape; + } + + ngraph::PartialShape get_partial_shape(Place::Ptr place) const override + { + m_stat.m_get_partial_shape++; + m_stat.m_lastArgPlace = place; + return {}; + } + + void set_element_type(Place::Ptr place, const ngraph::element::Type& type) override + { + m_stat.m_set_element_type++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgElementType = type; + } + + //---------------Stat-------------------- + ModelStat get_stat() const { return m_stat; } +}; + +///////////////////////////////////////////////////////// + +struct MOCK_API FeStat +{ + FrontEndCapFlags m_load_flags; + std::vector m_load_paths; + int m_convert_model = 0; + int m_convert = 0; + int m_convert_partially = 0; + int m_decode = 0; + int m_normalize = 0; + // Getters + FrontEndCapFlags load_flags() 
const { return m_load_flags; } + std::vector load_paths() const { return m_load_paths; } + int convert_model() const { return m_convert_model; } + int convert() const { return m_convert; } + int convert_partially() const { return m_convert_partially; } + int decode() const { return m_decode; } + int normalize() const { return m_normalize; } +}; + +class MOCK_API FrontEndMockPy : public FrontEnd +{ + mutable FeStat m_stat; + +public: + FrontEndMockPy(FrontEndCapFlags flags) { m_stat.m_load_flags = flags; } + + InputModel::Ptr load_from_file(const std::string& path) const override + { + m_stat.m_load_paths.push_back(path); + return std::make_shared(); + } + + std::shared_ptr convert(InputModel::Ptr model) const override + { + m_stat.m_convert_model++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + std::shared_ptr convert(std::shared_ptr func) const override + { + m_stat.m_convert++; + return func; + } + + std::shared_ptr convert_partially(InputModel::Ptr model) const override + { + m_stat.m_convert_partially++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + std::shared_ptr decode(InputModel::Ptr model) const override + { + m_stat.m_decode++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + void normalize(std::shared_ptr function) const override + { + m_stat.m_normalize++; + } + + FeStat get_stat() const { return m_stat; } +}; diff --git a/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/CMakeLists.txt b/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/CMakeLists.txt new file mode 100644 index 00000000000000..7d2e4a3077acc0 --- /dev/null +++ b/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/CMakeLists.txt @@ -0,0 +1,19 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") +set(PYBIND_FE_NAME "pybind_mock_frontend") + +set(PYBIND_FE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/pyngraph_mock_frontend_api.cpp) + +source_group("src" FILES ${PYBIND_FE_SRC}) + +pybind11_add_module(${PYBIND_FE_NAME} MODULE ${PYBIND_FE_SRC}) + +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME}) + +add_dependencies(${PYBIND_FE_NAME} ${TARGET_FE_NAME}) + +add_clang_format_target(${PYBIND_FE_NAME}_clang FOR_TARGETS ${PYBIND_FE_NAME}) diff --git a/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp new file mode 100644 index 00000000000000..ec87842d417330 --- /dev/null +++ b/ngraph/python/mock_py_frontend/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp @@ -0,0 +1,136 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "../mock_py_ngraph_frontend/mock_py_frontend.hpp" + +namespace py = pybind11; +using namespace ngraph; +using namespace ngraph::frontend; + +static void register_mock_frontend_stat(py::module m) +{ + m.def( + "get_fe_stat", + [](const std::shared_ptr& fe) { + std::shared_ptr ptr = std::dynamic_pointer_cast(fe); + if (ptr) + { + auto stat = ptr->get_stat(); + return stat; + } + return FeStat(); + }, + py::arg("frontend")); + + py::class_ feStat(m, "FeStat", py::dynamic_attr()); + feStat.def_property_readonly("load_flags", &FeStat::load_flags); + feStat.def_property_readonly("load_paths", &FeStat::load_paths); + feStat.def_property_readonly("convert_model", 
+    feStat.def_property_readonly("convert", &FeStat::convert);
+    feStat.def_property_readonly("convert_partially", &FeStat::convert_partially);
+    feStat.def_property_readonly("decode", &FeStat::decode);
+    feStat.def_property_readonly("normalize", &FeStat::normalize);
+}
+
+static void register_mock_model_stat(py::module m)
+{
+    m.def(
+        "get_mdl_stat",
+        [](const std::shared_ptr<InputModel>& mdl) {
+            std::shared_ptr<InputModelMockPy> ptr =
+                std::dynamic_pointer_cast<InputModelMockPy>(mdl);
+            if (ptr)
+            {
+                auto stat = ptr->get_stat();
+                return stat;
+            }
+            return ModelStat();
+        },
+        py::arg("model"));
+
+    py::class_<ModelStat> mdlStat(m, "ModelStat", py::dynamic_attr());
+    mdlStat.def_property_readonly("get_inputs", &ModelStat::get_inputs);
+    mdlStat.def_property_readonly("get_outputs", &ModelStat::get_outputs);
+    mdlStat.def_property_readonly("get_place_by_tensor_name", &ModelStat::get_place_by_tensor_name);
+    mdlStat.def_property_readonly("get_place_by_operation_name",
+                                  &ModelStat::get_place_by_operation_name);
+    mdlStat.def_property_readonly("get_place_by_operation_and_input_port",
+                                  &ModelStat::get_place_by_operation_and_input_port);
+    mdlStat.def_property_readonly("get_place_by_operation_and_output_port",
+                                  &ModelStat::get_place_by_operation_and_output_port);
+
+    mdlStat.def_property_readonly("set_name_for_tensor", &ModelStat::set_name_for_tensor);
+    mdlStat.def_property_readonly("add_name_for_tensor", &ModelStat::add_name_for_tensor);
+    mdlStat.def_property_readonly("set_name_for_operation", &ModelStat::set_name_for_operation);
+    mdlStat.def_property_readonly("free_name_for_tensor", &ModelStat::free_name_for_tensor);
+    mdlStat.def_property_readonly("free_name_for_operation", &ModelStat::free_name_for_operation);
+    mdlStat.def_property_readonly("set_name_for_dimension", &ModelStat::set_name_for_dimension);
+    mdlStat.def_property_readonly("cut_and_add_new_input", &ModelStat::cut_and_add_new_input);
+    mdlStat.def_property_readonly("cut_and_add_new_output", &ModelStat::cut_and_add_new_output);
+    mdlStat.def_property_readonly("add_output", &ModelStat::add_output);
+    mdlStat.def_property_readonly("remove_output", &ModelStat::remove_output);
+    mdlStat.def_property_readonly("set_partial_shape", &ModelStat::set_partial_shape);
+    mdlStat.def_property_readonly("get_partial_shape", &ModelStat::get_partial_shape);
+    mdlStat.def_property_readonly("set_element_type", &ModelStat::set_element_type);
+    mdlStat.def_property_readonly("extract_subgraph", &ModelStat::extract_subgraph);
+    mdlStat.def_property_readonly("override_all_inputs", &ModelStat::override_all_inputs);
+    mdlStat.def_property_readonly("override_all_outputs", &ModelStat::override_all_outputs);
+
+    // Arguments tracking
+    mdlStat.def_property_readonly("lastArgString", &ModelStat::get_lastArgString);
+    mdlStat.def_property_readonly("lastArgInt", &ModelStat::get_lastArgInt);
+    mdlStat.def_property_readonly("lastArgPlace", &ModelStat::get_lastArgPlace);
+    mdlStat.def_property_readonly("lastArgInputPlaces", &ModelStat::get_lastArgInputPlaces);
+    mdlStat.def_property_readonly("lastArgOutputPlaces", &ModelStat::get_lastArgOutputPlaces);
+    mdlStat.def_property_readonly("lastArgElementType", &ModelStat::get_lastArgElementType);
+    mdlStat.def_property_readonly("lastArgPartialShape", &ModelStat::get_lastArgPartialShape);
+}
+
+static void register_mock_place_stat(py::module m)
+{
+    m.def(
+        "get_place_stat",
+        [](const Place::Ptr& fe) {
+            std::shared_ptr<PlaceMockPy> ptr = std::dynamic_pointer_cast<PlaceMockPy>(fe);
+            if (ptr)
+            {
+                auto stat = ptr->get_stat();
+                return stat;
+            }
+            return PlaceStat();
+        },
+        py::arg("place"));
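+
+    // PlaceStat mirrors each Place query with a same-named counter; the
+    // lastArg* fields record the most recent arguments seen by the mock.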
+ py::arg("place")); + + py::class_ placeStat(m, "PlaceStat", py::dynamic_attr()); + + placeStat.def_property_readonly("lastArgString", &PlaceStat::get_lastArgString); + placeStat.def_property_readonly("lastArgInt", &PlaceStat::get_lastArgInt); + placeStat.def_property_readonly("lastArgPlace", &PlaceStat::get_lastArgPlace); + + placeStat.def_property_readonly("get_names", &PlaceStat::get_names); + placeStat.def_property_readonly("get_consuming_operations", + &PlaceStat::get_consuming_operations); + placeStat.def_property_readonly("get_target_tensor", &PlaceStat::get_target_tensor); + placeStat.def_property_readonly("get_producing_operation", &PlaceStat::get_producing_operation); + placeStat.def_property_readonly("get_producing_port", &PlaceStat::get_producing_port); + placeStat.def_property_readonly("get_input_port", &PlaceStat::get_input_port); + placeStat.def_property_readonly("get_output_port", &PlaceStat::get_output_port); + placeStat.def_property_readonly("get_consuming_ports", &PlaceStat::get_consuming_ports); + placeStat.def_property_readonly("is_input", &PlaceStat::is_input); + placeStat.def_property_readonly("is_output", &PlaceStat::is_output); + placeStat.def_property_readonly("is_equal", &PlaceStat::is_equal); + placeStat.def_property_readonly("is_equal_data", &PlaceStat::is_equal_data); + placeStat.def_property_readonly("get_source_tensor", &PlaceStat::get_source_tensor); +} + +PYBIND11_MODULE(pybind_mock_frontend, m) +{ + m.doc() = "Mock frontend call counters for testing Pyngraph frontend bindings"; + register_mock_frontend_stat(m); + register_mock_model_stat(m); + register_mock_place_stat(m); +} diff --git a/ngraph/python/src/ngraph/__init__.py b/ngraph/python/src/ngraph/__init__.py index 0b276049d33ea8..518c12c94bc262 100644 --- a/ngraph/python/src/ngraph/__init__.py +++ b/ngraph/python/src/ngraph/__init__.py @@ -12,7 +12,21 @@ __version__ = "0.0.0.dev0" from ngraph.impl import Node +from ngraph.impl import PartialShape +from ngraph.impl import Dimension from ngraph.impl import Function + +from ngraph.frontend import NotImplementedFailure +from ngraph.frontend import InitializationFailure +from ngraph.frontend import OpConversionFailure +from ngraph.frontend import OpValidationFailure +from ngraph.frontend import GeneralFailure +from ngraph.frontend import FrontEndManager +from ngraph.frontend import FrontEndCapabilities +from ngraph.frontend import FrontEnd +from ngraph.frontend import InputModel +from ngraph.frontend import Place + from ngraph.helpers import function_from_cnn from ngraph.helpers import function_to_cnn diff --git a/ngraph/python/src/ngraph/frontend/__init__.py b/ngraph/python/src/ngraph/frontend/__init__.py new file mode 100644 index 00000000000000..0ea21ad7c8827b --- /dev/null +++ b/ngraph/python/src/ngraph/frontend/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: ngraph +Low level wrappers for the FrontEnd c++ api. 
+""" + +# flake8: noqa + +# main classes +from _pyngraph import FrontEndManager +from _pyngraph import FrontEnd +from _pyngraph import FrontEndCapabilities +from _pyngraph import InputModel +from _pyngraph import Place + +# exceptions +from _pyngraph import NotImplementedFailure +from _pyngraph import InitializationFailure +from _pyngraph import OpConversionFailure +from _pyngraph import OpValidationFailure +from _pyngraph import GeneralFailure diff --git a/ngraph/python/src/ngraph/impl/__init__.py b/ngraph/python/src/ngraph/impl/__init__.py index 259a6e277f0e2b..6d4a5d1413326f 100644 --- a/ngraph/python/src/ngraph/impl/__init__.py +++ b/ngraph/python/src/ngraph/impl/__init__.py @@ -41,6 +41,7 @@ from _pyngraph import Node from _pyngraph import Type from _pyngraph import PartialShape +from _pyngraph import Dimension from _pyngraph import Shape from _pyngraph import Strides from _pyngraph import CoordinateDiff diff --git a/ngraph/python/src/pyngraph/frontend_manager.cpp b/ngraph/python/src/pyngraph/frontend_manager.cpp new file mode 100644 index 00000000000000..fbbbc0c0bb51be --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend_manager.cpp @@ -0,0 +1,282 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "frontend_manager.hpp" +#include "frontend_manager/frontend_exceptions.hpp" +#include "frontend_manager/frontend_manager.hpp" +#include "pyngraph/function.hpp" + +namespace py = pybind11; + +void regclass_pyngraph_FrontEndManager(py::module m) +{ + py::class_> + fem(m, "FrontEndManager", py::dynamic_attr()); + fem.doc() = "ngraph.impl.FrontEndManager wraps ngraph::frontend::FrontEndManager"; + + fem.def(py::init<>()); + + fem.def("get_available_front_ends", + &ngraph::frontend::FrontEndManager::get_available_front_ends); + fem.def("load_by_framework", + &ngraph::frontend::FrontEndManager::load_by_framework, + py::arg("framework"), + py::arg("capabilities") = ngraph::frontend::FrontEndCapabilities::FEC_DEFAULT); +} + +void regclass_pyngraph_FrontEnd(py::module m) +{ + py::class_> fem( + m, "FrontEnd", py::dynamic_attr()); + fem.doc() = "ngraph.impl.FrontEnd wraps ngraph::frontend::FrontEnd"; + + fem.def("load_from_file", &ngraph::frontend::FrontEnd::load_from_file, py::arg("path")); + fem.def("convert", + static_cast (ngraph::frontend::FrontEnd::*)( + ngraph::frontend::InputModel::Ptr) const>(&ngraph::frontend::FrontEnd::convert), + py::arg("model")); + fem.def("convert", + static_cast (ngraph::frontend::FrontEnd::*)( + std::shared_ptr) const>(&ngraph::frontend::FrontEnd::convert), + py::arg("function")); + fem.def("convert_partially", &ngraph::frontend::FrontEnd::convert_partially, py::arg("model")); + fem.def("decode", &ngraph::frontend::FrontEnd::decode, py::arg("model")); + fem.def("normalize", &ngraph::frontend::FrontEnd::normalize, py::arg("function")); +} + +void regclass_pyngraph_Place(py::module m) +{ + py::class_> place( + m, "Place", py::dynamic_attr()); + place.doc() = "ngraph.impl.Place wraps ngraph::frontend::Place"; + + place.def("is_input", &ngraph::frontend::Place::is_input); + place.def("is_output", &ngraph::frontend::Place::is_output); + place.def("get_names", &ngraph::frontend::Place::get_names); + place.def("is_equal", &ngraph::frontend::Place::is_equal, py::arg("other")); + place.def("is_equal_data", &ngraph::frontend::Place::is_equal_data, py::arg("other")); + place.def("get_consuming_operations", + &ngraph::frontend::Place::get_consuming_operations, + py::arg_v("outputPortIndex", 
-1, "-1")); + place.def("get_target_tensor", + &ngraph::frontend::Place::get_target_tensor, + py::arg_v("outputPortIndex", -1, "-1")); + place.def("get_producing_operation", + &ngraph::frontend::Place::get_producing_operation, + py::arg_v("inputPortIndex", -1, "-1")); + place.def("get_producing_port", &ngraph::frontend::Place::get_producing_port); + place.def("get_input_port", + static_cast( + &ngraph::frontend::Place::get_input_port), + py::arg_v("inputPortIndex", -1, "-1")); + place.def("get_input_port", + static_cast(&ngraph::frontend::Place::get_input_port), + py::arg("inputName"), + py::arg_v("inputPortIndex", -1, "-1")); + place.def("get_output_port", + static_cast( + &ngraph::frontend::Place::get_output_port), + py::arg_v("outputPortIndex", -1, "-1")); + place.def("get_output_port", + static_cast(&ngraph::frontend::Place::get_output_port), + py::arg("outputName"), + py::arg_v("outputPortIndex", -1, "-1")); + place.def("get_consuming_ports", &ngraph::frontend::Place::get_consuming_ports); + place.def("get_source_tensor", + &ngraph::frontend::Place::get_source_tensor, + py::arg_v("inputPortIndex", -1, "-1")); +} + +void regclass_pyngraph_InputModel(py::module m) +{ + py::class_> im( + m, "InputModel", py::dynamic_attr()); + im.doc() = "ngraph.impl.InputModel wraps ngraph::frontend::InputModel"; + im.def("get_place_by_tensor_name", + &ngraph::frontend::InputModel::get_place_by_tensor_name, + py::arg("tensorName")); + im.def("get_place_by_operation_name", + &ngraph::frontend::InputModel::get_place_by_operation_name, + py::arg("operationName")); + im.def("get_place_by_operation_and_input_port", + &ngraph::frontend::InputModel::get_place_by_operation_and_input_port, + py::arg("operationName"), + py::arg("inputPortIndex")); + im.def("get_place_by_operation_and_output_port", + &ngraph::frontend::InputModel::get_place_by_operation_and_output_port, + py::arg("operationName"), + py::arg("outputPortIndex")); + + im.def("set_name_for_tensor", + &ngraph::frontend::InputModel::set_name_for_tensor, + py::arg("tensor"), + py::arg("newName")); + im.def("add_name_for_tensor", + &ngraph::frontend::InputModel::add_name_for_tensor, + py::arg("tensor"), + py::arg("newName")); + im.def("set_name_for_operation", + &ngraph::frontend::InputModel::set_name_for_operation, + py::arg("operation"), + py::arg("newName")); + im.def("free_name_for_tensor", + &ngraph::frontend::InputModel::free_name_for_tensor, + py::arg("name")); + im.def("free_name_for_operation", + &ngraph::frontend::InputModel::free_name_for_operation, + py::arg("name")); + im.def("set_name_for_dimension", + &ngraph::frontend::InputModel::set_name_for_dimension, + py::arg("place"), + py::arg("dimIndex"), + py::arg("dimName")); + im.def("cut_and_add_new_input", + &ngraph::frontend::InputModel::cut_and_add_new_input, + py::arg("place"), + py::arg("newName") = std::string()); + im.def("cut_and_add_new_output", + &ngraph::frontend::InputModel::cut_and_add_new_output, + py::arg("place"), + py::arg("newName") = std::string()); + im.def("add_output", &ngraph::frontend::InputModel::add_output, py::arg("place")); + im.def("remove_output", &ngraph::frontend::InputModel::remove_output, py::arg("place")); + + im.def("set_partial_shape", + &ngraph::frontend::InputModel::set_partial_shape, + py::arg("place"), + py::arg("shape")); + im.def("get_partial_shape", &ngraph::frontend::InputModel::get_partial_shape, py::arg("place")); + im.def("get_inputs", &ngraph::frontend::InputModel::get_inputs); + im.def("get_outputs", &ngraph::frontend::InputModel::get_outputs); 
+ + im.def("extract_subgraph", + &ngraph::frontend::InputModel::extract_subgraph, + py::arg("inputs"), + py::arg("outputs")); + im.def("override_all_inputs", + &ngraph::frontend::InputModel::override_all_inputs, + py::arg("inputs")); + im.def("override_all_outputs", + &ngraph::frontend::InputModel::override_all_outputs, + py::arg("outputs")); + im.def("set_element_type", + &ngraph::frontend::InputModel::set_element_type, + py::arg("place"), + py::arg("type")); +} + +void regclass_pyngraph_FEC(py::module m) +{ + class FeCaps + { + public: + int get_caps() const { return m_caps; } + + private: + int m_caps; + }; + + py::class_> type(m, "FrontEndCapabilities"); + // type.doc() = "FrontEndCapabilities"; + type.attr("DEFAULT") = ngraph::frontend::FrontEndCapabilities::FEC_DEFAULT; + type.attr("CUT") = ngraph::frontend::FrontEndCapabilities::FEC_CUT; + type.attr("NAMES") = ngraph::frontend::FrontEndCapabilities::FEC_NAMES; + type.attr("WILDCARDS") = ngraph::frontend::FrontEndCapabilities::FEC_WILDCARDS; + + type.def( + "__eq__", + [](const FeCaps& a, const FeCaps& b) { return a.get_caps() == b.get_caps(); }, + py::is_operator()); +} + +void regclass_pyngraph_GeneralFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), "GeneralFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::GeneralFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_OpValidationFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "OpValidationFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::OpValidationFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_OpConversionFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "OpConversionFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::OpConversionFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_InitializationFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "InitializationFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::InitializationFailure& e) + { + exc(e.what()); + } + }); +} + +void regclass_pyngraph_NotImplementedFailureFrontEnd(py::module m) +{ + static py::exception exc(std::move(m), + "NotImplementedFailure"); + py::register_exception_translator([](std::exception_ptr p) { + try + { + if (p) + std::rethrow_exception(p); + } + catch (const ngraph::frontend::NotImplementedFailure& e) + { + exc(e.what()); + } + }); +} \ No newline at end of file diff --git a/ngraph/python/src/pyngraph/frontend_manager.hpp b/ngraph/python/src/pyngraph/frontend_manager.hpp new file mode 100644 index 00000000000000..1d3d781061849c --- /dev/null +++ b/ngraph/python/src/pyngraph/frontend_manager.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_pyngraph_FrontEndManager(py::module m); +void regclass_pyngraph_FrontEnd(py::module m); +void regclass_pyngraph_InputModel(py::module m); +void regclass_pyngraph_FEC(py::module m); +void regclass_pyngraph_Place(py::module m); +void 
+void regclass_pyngraph_InitializationFailureFrontEnd(py::module m);
+void regclass_pyngraph_OpConversionFailureFrontEnd(py::module m);
+void regclass_pyngraph_OpValidationFailureFrontEnd(py::module m);
+void regclass_pyngraph_GeneralFailureFrontEnd(py::module m);
+
diff --git a/ngraph/python/src/pyngraph/partial_shape.cpp b/ngraph/python/src/pyngraph/partial_shape.cpp
index 1a26943448636d..36c7ae20f467a2 100644
--- a/ngraph/python/src/pyngraph/partial_shape.cpp
+++ b/ngraph/python/src/pyngraph/partial_shape.cpp
@@ -159,6 +159,25 @@ void regclass_pyngraph_PartialShape(py::module m)
 
         to_shapess : Shape
             Get the unique shape.
     )");
+    shape.def(
+        "get_dimension",
+        [](const ngraph::PartialShape& self, size_t index) -> ngraph::Dimension {
+            return self[index];
+        },
+        py::arg("index"),
+        R"(
+        Get the dimension at specified index of a partial shape.
+
+        Parameters
+        ----------
+        index : int
+            The index of dimension
+
+        Returns
+        ----------
+        get_dimension : Dimension
+            Get the particular dimension of a partial shape.
+    )");
     shape.def(
         "__eq__",
diff --git a/ngraph/python/src/pyngraph/pyngraph.cpp b/ngraph/python/src/pyngraph/pyngraph.cpp
index 92b507b64834e1..d72a7b60967939 100644
--- a/ngraph/python/src/pyngraph/pyngraph.cpp
+++ b/ngraph/python/src/pyngraph/pyngraph.cpp
@@ -17,6 +17,7 @@
 #include "pyngraph/onnx_import/onnx_import.hpp"
 #endif
 #include "pyngraph/dimension.hpp"
+#include "pyngraph/frontend_manager.hpp"
 #include "pyngraph/ops/constant.hpp"
 #include "pyngraph/ops/parameter.hpp"
 #include "pyngraph/ops/result.hpp"
@@ -41,6 +42,16 @@ PYBIND11_MODULE(_pyngraph, m)
     regclass_pyngraph_Shape(m);
     regclass_pyngraph_PartialShape(m);
     regclass_pyngraph_Node(m);
+    regclass_pyngraph_Place(m);
+    regclass_pyngraph_InitializationFailureFrontEnd(m);
+    regclass_pyngraph_GeneralFailureFrontEnd(m);
+    regclass_pyngraph_OpConversionFailureFrontEnd(m);
+    regclass_pyngraph_OpValidationFailureFrontEnd(m);
+    regclass_pyngraph_NotImplementedFailureFrontEnd(m);
+    regclass_pyngraph_FEC(m);
+    regclass_pyngraph_FrontEndManager(m);
+    regclass_pyngraph_FrontEnd(m);
+    regclass_pyngraph_InputModel(m);
     regclass_pyngraph_Input(m);
     regclass_pyngraph_Output(m);
     regclass_pyngraph_NodeFactory(m);
diff --git a/ngraph/python/tests/test_ngraph/test_frontendmanager.py b/ngraph/python/tests/test_ngraph/test_frontendmanager.py
new file mode 100644
index 00000000000000..2406f6ff4b7962
--- /dev/null
+++ b/ngraph/python/tests/test_ngraph/test_frontendmanager.py
@@ -0,0 +1,495 @@
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+from ngraph.frontend import FrontEndManager, FrontEndCapabilities, InitializationFailure
+from ngraph import PartialShape
+from ngraph.utils.types import get_element_type
+
+from pybind_mock_frontend import get_fe_stat, get_mdl_stat, get_place_stat
+
+# The FrontEndManager is created once and destroyed after all tests finish;
+# destroying it unloads all frontend plugins, so no frontend objects may outlive it.
+fem = FrontEndManager()
+
+
+# ---------- FrontEnd tests ---------------
+def test_load_by_framework_caps():
+    frontEnds = fem.get_available_front_ends()
+    assert frontEnds is not None
+    assert 'mock_py' in frontEnds
+    caps = [FrontEndCapabilities.DEFAULT,
+            FrontEndCapabilities.CUT,
+            FrontEndCapabilities.NAMES,
+            FrontEndCapabilities.WILDCARDS,
+            FrontEndCapabilities.CUT | FrontEndCapabilities.NAMES | FrontEndCapabilities.WILDCARDS]
+    for cap in caps:
+        fe = 
fem.load_by_framework(framework="mock_py", capabilities=cap) + stat = get_fe_stat(fe) + assert cap == stat.load_flags + for i in range(len(caps) - 1): + for j in range(i+1, len(caps)): + assert caps[i] != caps[j] + + +def test_load_by_unknown_framework(): + frontEnds = fem.get_available_front_ends() + assert not("UnknownFramework" in frontEnds) + try: + fem.load_by_framework("UnknownFramework") + except InitializationFailure as exc: + print(exc) + else: + assert False + + +def test_load_from_file(): + fe = fem.load_by_framework(framework="mock_py") + assert fe is not None + model = fe.load_from_file("abc.bin") + assert model is not None + stat = get_fe_stat(fe) + assert 'abc.bin' in stat.load_paths + + +def test_convert_model(): + fe = fem.load_by_framework(framework="mock_py") + assert fe is not None + model = fe.load_from_file(path="") + func = fe.convert(model=model) + assert func is not None + stat = get_fe_stat(fe) + assert stat.convert_model == 1 + + +def test_convert_partially(): + fe = fem.load_by_framework(framework="mock_py") + assert fe is not None + model = fe.load_from_file(path="") + func = fe.convert_partially(model=model) + stat = get_fe_stat(fe) + assert stat.convert_partially == 1 + fe.convert(function=func) + stat = get_fe_stat(fe) + assert stat.convert == 1 + + +def test_decode_and_normalize(): + fe = fem.load_by_framework(framework="mock_py") + assert fe is not None + model = fe.load_from_file(path="") + func = fe.decode(model=model) + stat = get_fe_stat(fe) + assert stat.decode == 1 + fe.normalize(function=func) + stat = get_fe_stat(fe) + assert stat.normalize == 1 + assert stat.decode == 1 + + +# --------InputModel tests----------------- +def init_model(): + fe = fem.load_by_framework(framework="mock_py") + model = fe.load_from_file(path="") + return model + + +def test_model_get_inputs(): + model = init_model() + for i in range(1, 10): + model.get_inputs() + stat = get_mdl_stat(model) + assert stat.get_inputs == i + + +def test_model_get_outputs(): + model = init_model() + for i in range(1, 10): + model.get_outputs() + stat = get_mdl_stat(model) + assert stat.get_outputs == i + + +def test_model_get_place_by_tensor_name(): + model = init_model() + for i in range(1, 10): + name = str(i) + model.get_place_by_tensor_name(tensorName=name) + stat = get_mdl_stat(model) + assert stat.get_place_by_tensor_name == i + assert stat.lastArgString == name + + +def test_model_get_place_by_operation_name(): + model = init_model() + for i in range(1, 10): + name = str(i) + model.get_place_by_operation_name(operationName=name) + stat = get_mdl_stat(model) + assert stat.get_place_by_operation_name == i + assert stat.lastArgString == name + + +def test_model_get_place_by_operation_and_input_port(): + model = init_model() + for i in range(1, 10): + name = str(i) + model.get_place_by_operation_and_input_port(operationName=name, inputPortIndex=i*2) + stat = get_mdl_stat(model) + assert stat.get_place_by_operation_and_input_port == i + assert stat.lastArgString == name + assert stat.lastArgInt == i * 2 + + +def test_model_get_place_by_operation_and_output_port(): + model = init_model() + for i in range(1, 10): + name = str(i) + model.get_place_by_operation_and_output_port(operationName=name, outputPortIndex=i*2) + stat = get_mdl_stat(model) + assert stat.get_place_by_operation_and_output_port == i + assert stat.lastArgString == name + assert stat.lastArgInt == i * 2 + + +def test_model_set_name_for_tensor(): + model = init_model() + place = model.get_place_by_tensor_name(tensorName="") + 
model.set_name_for_tensor(tensor=place, newName="1234") + stat = get_mdl_stat(model) + assert stat.set_name_for_tensor == 1 + assert stat.lastArgString == "1234" + assert stat.lastArgPlace == place + + +def test_model_add_name_for_tensor(): + model = init_model() + place = model.get_place_by_tensor_name(tensorName="") + model.add_name_for_tensor(tensor=place, newName="1234") + stat = get_mdl_stat(model) + assert stat.add_name_for_tensor == 1 + assert stat.lastArgString == "1234" + assert stat.lastArgPlace == place + + +def test_model_set_name_for_operation(): + model = init_model() + place = model.get_place_by_operation_name(operationName="") + model.set_name_for_operation(operation=place, newName="1111") + stat = get_mdl_stat(model) + assert stat.set_name_for_operation == 1 + assert stat.lastArgString == "1111" + assert stat.lastArgPlace == place + + +def test_model_free_name_for_tensor(): + model = init_model() + model.free_name_for_tensor(name="2222") + stat = get_mdl_stat(model) + assert stat.free_name_for_tensor == 1 + assert stat.lastArgString == "2222" + + +def test_model_free_name_for_operation(): + model = init_model() + model.free_name_for_operation(name="3333") + stat = get_mdl_stat(model) + assert stat.free_name_for_operation == 1 + assert stat.lastArgString == "3333" + + +def test_model_set_name_for_dimension(): + model = init_model() + place = model.get_place_by_operation_name(operationName="") + model.set_name_for_dimension(place=place, dimIndex=123, dimName="4444") + stat = get_mdl_stat(model) + assert stat.set_name_for_dimension == 1 + assert stat.lastArgString == "4444" + assert stat.lastArgInt == 123 + assert stat.lastArgPlace == place + + +def test_model_cut_and_add_new_input(): + model = init_model() + place = model.get_place_by_operation_name("") + model.cut_and_add_new_input(place=place, newName="5555") + stat = get_mdl_stat(model) + assert stat.cut_and_add_new_input == 1 + assert stat.lastArgString == "5555" + assert stat.lastArgPlace == place + model.cut_and_add_new_input(place=place) + stat = get_mdl_stat(model) + assert stat.cut_and_add_new_input == 2 + assert stat.lastArgString == "" + assert stat.lastArgPlace == place + + +def test_model_cut_and_add_new_output(): + model = init_model() + place = model.get_place_by_operation_name("") + model.cut_and_add_new_output(place=place, newName="5555") + stat = get_mdl_stat(model) + assert stat.cut_and_add_new_output == 1 + assert stat.lastArgString == "5555" + assert stat.lastArgPlace == place + model.cut_and_add_new_output(place=place) + stat = get_mdl_stat(model) + assert stat.cut_and_add_new_output == 2 + assert stat.lastArgString == "" + assert stat.lastArgPlace == place + + +def test_model_add_output(): + model = init_model() + place = model.get_place_by_operation_name("") + place2 = model.add_output(place=place) + assert place2 is not None + stat = get_mdl_stat(model) + assert stat.add_output == 1 + assert stat.lastArgPlace == place + + +def test_model_remove_output(): + model = init_model() + place = model.get_place_by_operation_name("") + model.remove_output(place=place) + stat = get_mdl_stat(model) + assert stat.remove_output == 1 + assert stat.lastArgPlace == place + + +def test_model_set_partial_shape(): + model = init_model() + place = model.get_place_by_tensor_name(tensorName="") + test_shape = PartialShape([1, 2, 3, 4]) + model.set_partial_shape(place=place, shape=test_shape) + stat = get_mdl_stat(model) + assert stat.set_partial_shape == 1 + assert stat.lastArgPlace == place + assert 
stat.lastArgPartialShape == test_shape
+
+
+def test_model_get_partial_shape():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    shape = model.get_partial_shape(place=place)
+    assert shape is not None
+    stat = get_mdl_stat(model)
+    assert stat.get_partial_shape == 1
+    assert stat.lastArgPlace == place
+
+
+def test_model_override_all_inputs():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    model.override_all_inputs(inputs=[place1, place2])
+    stat = get_mdl_stat(model)
+    assert stat.override_all_inputs == 1
+    assert len(stat.lastArgInputPlaces) == 2
+    assert stat.lastArgInputPlaces[0] == place1
+    assert stat.lastArgInputPlaces[1] == place2
+
+
+def test_model_override_all_outputs():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    model.override_all_outputs(outputs=[place1, place2])
+    stat = get_mdl_stat(model)
+    assert stat.override_all_outputs == 1
+    assert len(stat.lastArgOutputPlaces) == 2
+    assert stat.lastArgOutputPlaces[0] == place1
+    assert stat.lastArgOutputPlaces[1] == place2
+
+
+def test_model_extract_subgraph():
+    model = init_model()
+    place1 = model.get_place_by_tensor_name(tensorName="p1")
+    place2 = model.get_place_by_tensor_name(tensorName="p2")
+    place3 = model.get_place_by_tensor_name(tensorName="p3")
+    place4 = model.get_place_by_tensor_name(tensorName="p4")
+    model.extract_subgraph(inputs=[place1, place2], outputs=[place3, place4])
+    stat = get_mdl_stat(model)
+    assert stat.extract_subgraph == 1
+    assert len(stat.lastArgInputPlaces) == 2
+    assert stat.lastArgInputPlaces[0] == place1
+    assert stat.lastArgInputPlaces[1] == place2
+    assert len(stat.lastArgOutputPlaces) == 2
+    assert stat.lastArgOutputPlaces[0] == place3
+    assert stat.lastArgOutputPlaces[1] == place4
+
+
+def test_model_set_element_type():
+    model = init_model()
+    place = model.get_place_by_tensor_name(tensorName="")
+    model.set_element_type(place=place, type=get_element_type(np.int32))
+    stat = get_mdl_stat(model)
+    assert stat.set_element_type == 1
+    assert stat.lastArgPlace == place
+    assert stat.lastArgElementType == get_element_type(np.int32)
+
+
+# ----------- Place test ------------
+def init_place():
+    fe = fem.load_by_framework(framework="mock_py")
+    model = fe.load_from_file(path="")
+    place = model.get_place_by_tensor_name(tensorName="")
+    return model, place
+
+
+def test_place_is_input():
+    _, place = init_place()
+    assert place.is_input() is not None
+    stat = get_place_stat(place)
+    assert stat.is_input == 1
+
+
+def test_place_is_output():
+    _, place = init_place()
+    assert place.is_output() is not None
+    stat = get_place_stat(place)
+    assert stat.is_output == 1
+
+
+def test_place_get_names():
+    _, place = init_place()
+    assert place.get_names() is not None
+    stat = get_place_stat(place)
+    assert stat.get_names == 1
+
+
+def test_place_is_equal():
+    model, place = init_place()
+    place2 = model.get_place_by_tensor_name("2")
+    assert place.is_equal(other=place2) is not None
+    stat = get_place_stat(place)
+    assert stat.is_equal == 1
+    assert stat.lastArgPlace == place2
+
+
+def test_place_is_equal_data():
+    model, place = init_place()
+    place2 = model.get_place_by_tensor_name("2")
+    assert place.is_equal_data(other=place2) is not None
+    stat = get_place_stat(place)
+    assert stat.is_equal_data == 1
+    assert stat.lastArgPlace == place2
+
+
+def test_place_get_consuming_operations():
+    _, place = init_place()
+    assert place.get_consuming_operations(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_operations == 1
+    assert stat.lastArgInt == 22
+    assert place.get_consuming_operations() is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_operations == 2
+    assert stat.lastArgInt == -1
+
+
+def test_place_get_target_tensor():
+    _, place = init_place()
+    assert place.get_target_tensor(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_target_tensor == 1
+    assert stat.lastArgInt == 22
+    assert place.get_target_tensor() is not None
+    stat = get_place_stat(place)
+    assert stat.get_target_tensor == 2
+    assert stat.lastArgInt == -1
+
+
+def test_place_get_producing_operation():
+    _, place = init_place()
+    assert place.get_producing_operation(inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_operation == 1
+    assert stat.lastArgInt == 22
+    assert place.get_producing_operation() is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_operation == 2
+    assert stat.lastArgInt == -1
+
+
+def test_place_get_producing_port():
+    _, place = init_place()
+    assert place.get_producing_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_producing_port == 1
+
+
+def test_place_get_input_port():
+    _, place = init_place()
+    assert place.get_input_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 1
+    assert stat.lastArgInt == -1
+    assert place.get_input_port(inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 2
+    assert stat.lastArgInt == 22
+
+
+def test_place_get_input_port2():
+    _, place = init_place()
+    assert place.get_input_port(inputName="abc") is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 1
+    assert stat.lastArgInt == -1
+    assert stat.lastArgString == "abc"
+    assert place.get_input_port(inputName="abcd", inputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_input_port == 2
+    assert stat.lastArgInt == 22
+    assert stat.lastArgString == "abcd"
+
+
+def test_place_get_output_port():
+    _, place = init_place()
+    assert place.get_output_port() is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 1
+    assert stat.lastArgInt == -1
+    assert place.get_output_port(outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 2
+    assert stat.lastArgInt == 22
+
+
+def test_place_get_output_port2():
+    _, place = init_place()
+    assert place.get_output_port(outputName="abc") is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 1
+    assert stat.lastArgInt == -1
+    assert stat.lastArgString == "abc"
+    assert place.get_output_port(outputName="abcd", outputPortIndex=22) is not None
+    stat = get_place_stat(place)
+    assert stat.get_output_port == 2
+    assert stat.lastArgInt == 22
+    assert stat.lastArgString == "abcd"
+
+
+def test_place_get_consuming_ports():
+    _, place = init_place()
+    assert place.get_consuming_ports() is not None
+    stat = get_place_stat(place)
+    assert stat.get_consuming_ports == 1
+
+
+def test_place_get_source_tensor():
+    _, place = init_place()
+    assert 
place.get_source_tensor() is not None + stat = get_place_stat(place) + assert stat.get_source_tensor == 1 + assert stat.lastArgInt == -1 + assert place.get_source_tensor(inputPortIndex=22) is not None + stat = get_place_stat(place) + assert stat.get_source_tensor == 2 + assert stat.lastArgInt == 22 + + +# if __name__ == '__main__': +# test_frontendmanager() diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 2566031eeb34b8..e45842d4083990 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -483,6 +483,46 @@ foreach(BACKEND_NAME ${ACTIVE_BACKEND_LIST}) message(STATUS "Adding unit test for backend ${BACKEND_NAME}") endforeach() +# SOURCE FOR FRONTEND TESTING + +file(GLOB FRONTEND_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/frontend_manager.cpp) +set(SRC ${FRONTEND_TESTS_SRC} ${SRC}) + +file(GLOB FRONTEND_SHARED_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/shared/src/*.cpp) +set(SRC ${FRONTEND_SHARED_TESTS_SRC} ${SRC}) + +# TODO: try to find a way to move it out of common CMakeLists +if (NGRAPH_PDPD_FRONTEND_ENABLE) + + find_package(Python3 COMPONENTS Interpreter) + set(PDPD_PYTHON_OK TRUE) + if(NOT Python3_FOUND) + message("Python3 is required to build the PDPD frontend unit tests") + set(PDPD_PYTHON_OK FALSE) + endif() + + if (PDPD_PYTHON_OK) + execute_process( + COMMAND ${Python3_EXECUTABLE} -m pip show paddlepaddle + RESULT_VARIABLE PIP_EXIT_CODE + OUTPUT_QUIET + ) + + if (NOT ${PIP_EXIT_CODE} EQUAL 0) + message("Python paddlepaddle package is not installed. Please use \"pip install paddlepaddle==2.0.1\".") + set(PDPD_PYTHON_OK FALSE) + endif() + endif() + + if (PDPD_PYTHON_OK) + file(GLOB FRONTEND_PDPD_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/paddlepaddle/*.cpp) + set(SRC ${FRONTEND_PDPD_TESTS_SRC} ${SRC}) + set(TEST_PDPD_MODELS ${CMAKE_CURRENT_BINARY_DIR}/pdpd_test_models/) + add_definitions("-DTEST_PDPD_MODELS=\"${TEST_PDPD_MODELS}\"") + endif() +endif() +#----- + add_executable(unit-test ${SRC}) target_include_directories(unit-test PRIVATE ".") @@ -554,3 +594,35 @@ if (NGRAPH_INTERPRETER_ENABLE) target_compile_definitions(unit-test PRIVATE NGRAPH_INTERPRETER_ENABLE) target_link_libraries(unit-test PRIVATE interpreter_backend) endif() + +############ FRONTEND ############ +target_include_directories(unit-test PRIVATE ${FRONTEND_INCLUDE_PATH}) +target_link_libraries(unit-test PRIVATE frontend_manager) + +add_subdirectory(frontend) + +### END FRONTEND ### + +#PaddlePaddle +if (NGRAPH_PDPD_FRONTEND_ENABLE AND PDPD_PYTHON_OK) + file(GLOB_RECURSE PDPD_GEN_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_scripts/generate_*.py) + set(OUT_FILES "") + foreach(GEN_SCRIPT ${PDPD_GEN_SCRIPTS}) + get_filename_component(FILE_WE ${GEN_SCRIPT} NAME_WE) + set(OUT_DONE_FILE ${TEST_PDPD_MODELS}/${FILE_WE}_done.txt) + set(OUT_FILES ${OUT_DONE_FILE} ${OUT_FILES}) + add_custom_command(OUTPUT ${OUT_DONE_FILE} + COMMAND ${Python3_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_wrapper.py + ${GEN_SCRIPT} + ${TEST_PDPD_MODELS} + ${OUT_DONE_FILE} + DEPENDS ${GEN_SCRIPT} ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_wrapper.py + ) + endforeach() + add_custom_target(pdpd_test_models DEPENDS ${OUT_FILES}) + add_dependencies(unit-test pdpd_test_models) + add_dependencies(unit-test paddlepaddle_ngraph_frontend) + + target_link_libraries(unit-test PRIVATE libnpy) +endif() diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out.py new file mode 
100644 index 00000000000000..b8bb0a7cb84894 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out.py @@ -0,0 +1,39 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + + +paddle.enable_static() + +inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32) +inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32) + +x1 = fluid.data(name='inputX1', shape=[1, 1, 3, 3], dtype='float32') +x2 = fluid.data(name='inputX2', shape=[1, 2, 3, 3], dtype='float32') + +conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") + +conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + +add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") + +relu2a = fluid.layers.relu(add1, name="relu2a") +relu2b = fluid.layers.relu(add1, name="relu2b") + +add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") + +relu3a = fluid.layers.relu(add2, name="relu3a") +relu3b = fluid.layers.relu(add2, name="relu3b") + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2} +var = [relu3a, relu3b] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out"), list(inp_dict.keys()), var, exe, + model_filename="2in_2out.pdmodel", params_filename="2in_2out.pdiparams") diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out_dynbatch.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out_dynbatch.py new file mode 100644 index 00000000000000..3453189b3b7974 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_2in_2out_dynbatch.py @@ -0,0 +1,39 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + + +paddle.enable_static() + +inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32) +inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32) + +x1 = fluid.data(name='inputX1', shape=[-1, 1, 3, 3], dtype='float32') +x2 = fluid.data(name='inputX2', shape=[-1, 2, 3, 3], dtype='float32') + +conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") + +conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + +add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") + +relu2a = fluid.layers.relu(add1, name="relu2a") +relu2b = fluid.layers.relu(add1, name="relu2b") + +add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") + +relu3a = fluid.layers.relu(add2, name="relu3a") +relu3b = fluid.layers.relu(add2, name="relu3b") + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2} +var = [relu3a, relu3b] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out_dynbatch"), list(inp_dict.keys()), var, exe, + model_filename="2in_2out_dynbatch.pdmodel", params_filename="2in_2out_dynbatch.pdiparams") diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_argmax.py 
b/ngraph/test/files/paddlepaddle/gen_scripts/generate_argmax.py
new file mode 100644
index 00000000000000..54b24364b2d481
--- /dev/null
+++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_argmax.py
@@ -0,0 +1,60 @@
+#
+# argmax paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import sys
+data_type = 'float32'
+
+
+def pdpd_argmax(name : str, x, axis):
+    import paddle as pdpd
+    pdpd.enable_static()
+
+    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
+        node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
+        out = pdpd.argmax(x=node_x, axis=axis)
+        out = pdpd.cast(out, np.float32)
+        cpu = pdpd.static.cpu_places(1)
+        exe = pdpd.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(pdpd.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+def pdpd_argmax1(name : str, x):
+    import paddle as pdpd
+    pdpd.enable_static()
+
+    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
+        node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
+        out = pdpd.argmax(x=node_x)
+        out = pdpd.cast(out, np.float32)
+        cpu = pdpd.static.cpu_places(1)
+        exe = pdpd.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(pdpd.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+def main():
+    data = np.random.random([3,5,7,2]).astype("float32")
+    axis = 0
+    pdpd_argmax("argmax", data, axis)
+    pdpd_argmax1("argmax1", data)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_assign_value.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_assign_value.py
new file mode 100644
index 00000000000000..7d29574b2a92b4
--- /dev/null
+++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_assign_value.py
@@ -0,0 +1,60 @@
+import numpy as np
+from save_model import saveModel
+import sys
+
+
+def pdpd_assign_value(name, test_x):
+    import paddle as pdpd
+    pdpd.enable_static()
+    main_program = pdpd.static.Program()
+    startup_program = pdpd.static.Program()
+    with pdpd.static.program_guard(main_program, startup_program):
+        node_x = pdpd.static.data(name='x', shape=test_x.shape, dtype=test_x.dtype if test_x.dtype != np.bool else np.int32)
+        node_x = pdpd.cast(node_x, dtype=test_x.dtype)
+        const_value = pdpd.assign(test_x, output=None)
+        result = pdpd.cast(pdpd.concat([node_x, const_value], 0), dtype=np.float32)
+        cpu = pdpd.static.cpu_places(1)
+        exe = pdpd.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(pdpd.static.default_startup_program())
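+        # bool tensors cannot be fed directly, so the placeholder above is
+        # declared as int32 and cast back to the original dtype in the graph.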
+        if test_x.dtype == np.bool:
+            test_x = test_x.astype(np.int32)
+
+        outs = exe.run(
+            feed={'x': test_x},
+            fetch_list=[result]
+        )
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[result], inputs=[test_x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    print(outs[0])
+
+
+def compare():
+
+    test_cases = [
+        {
+            "name": "assign_value_fp32",
+            "input": np.ones([1, 1, 4, 4]).astype(np.float32)
+        },
+        {
+            "name": "assign_value_int32",
+            "input": np.ones([1, 1, 4, 4]).astype(np.int32)
+        },
+        {
+            "name": "assign_value_int64",
+            "input": np.ones([1, 1, 4, 4]).astype(np.int64)
+        },
+        {
+            "name": "assign_value_boolean",
+            "input": np.array([False, True, False])
+        }
+    ]
+    for test in test_cases:
+        pdpd_assign_value(test['name'], test['input'])
+
+
+if __name__ == "__main__":
+    compare()
diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_batch_norm.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_batch_norm.py
new file mode 100644
index 00000000000000..fbbba99160c4da
--- /dev/null
+++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_batch_norm.py
@@ -0,0 +1,89 @@
+#
+# batch_norm paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import sys
+
+
+def batch_norm1(name : str, x, scale, bias, mean, var, data_layout):
+    import paddle as pdpd
+    pdpd.enable_static()
+
+    node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
+    scale_attr = pdpd.ParamAttr(name="scale1", initializer=pdpd.nn.initializer.Assign(scale))
+    bias_attr = pdpd.ParamAttr(name="bias1", initializer=pdpd.nn.initializer.Assign(bias))
+
+    out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5,
+                                    param_attr=scale_attr,
+                                    bias_attr=bias_attr,
+                                    moving_mean_name="bn_mean1",
+                                    moving_variance_name="bn_variance1",
+                                    use_global_stats=True,
+                                    data_layout=data_layout)
+
+    cpu = pdpd.static.cpu_places(1)
+    exe = pdpd.static.Executor(cpu[0])
+    # startup program will call initializer to initialize the parameters.
+    exe.run(pdpd.static.default_startup_program())
+    pdpd.static.global_scope().var("bn_mean1").get_tensor().set(mean, pdpd.CPUPlace())
+    pdpd.static.global_scope().var("bn_variance1").get_tensor().set(var, pdpd.CPUPlace())
+
+    outs = exe.run(
+        feed={'x': x},
+        fetch_list=[out])
+
+    saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+def batch_norm2(name : str, x, scale, bias, mean, var, data_layout):
+    import paddle as pdpd
+    pdpd.enable_static()
+
+    node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
+    scale_attr = pdpd.ParamAttr(name="scale2", initializer=pdpd.nn.initializer.Assign(scale))
+    bias_attr = pdpd.ParamAttr(name="bias2", initializer=pdpd.nn.initializer.Assign(bias))
+
+    out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5,
+                                    param_attr=scale_attr,
+                                    bias_attr=bias_attr,
+                                    moving_mean_name="bn_mean2",
+                                    moving_variance_name="bn_variance2",
+                                    use_global_stats=True,
+                                    data_layout=data_layout)
+
+    cpu = pdpd.static.cpu_places(1)
+    exe = pdpd.static.Executor(cpu[0])
+    # startup program will call initializer to initialize the parameters.
+ exe.run(pdpd.static.default_startup_program()) + pdpd.static.global_scope().var("bn_mean2").get_tensor().set(mean, pdpd.CPUPlace()) + pdpd.static.global_scope().var("bn_variance2").get_tensor().set(var, pdpd.CPUPlace()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + import paddle as pdpd + data = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + # data layout is NCHW + scale = np.array([1.0, 1.5]).astype(np.float32) + bias = np.array([0, 1]).astype(np.float32) + mean = np.array([0, 3]).astype(np.float32) + var = np.array([1, 1.5]).astype(np.float32) + batch_norm1("batch_norm_nchw", data, scale, bias, mean, var, "NCHW") + + # data layout is NHWC + scale = np.array([1.0, 1.5, 2.0]).astype(np.float32) + bias = np.array([0, 1, 2]).astype(np.float32) + mean = np.array([0.5, 1.5, 1.5]).astype(np.float32) + var = np.array([1, 1.5, 2]).astype(np.float32) + batch_norm2("batch_norm_nhwc", data, scale, bias, mean, var, "NHWC") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_clip.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_clip.py new file mode 100644 index 00000000000000..55edd6c62dd0d2 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_clip.py @@ -0,0 +1,39 @@ +# +# clip paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def clip(name: str, x, min, max): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.fluid.layers.clip(node_x, min=min, max=max) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.random.random([2, 3, 4]).astype('float32') + min = 0 + max = 0.8 + + clip("clip", data, min, max) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d.py new file mode 100644 index 00000000000000..b3dc2c4aab294e --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d.py @@ -0,0 +1,22 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + + +paddle.enable_static() + +inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) + +x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32') +test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'x': inp_blob} +var = [test_layer] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d"), list(inp_dict.keys()), var, exe) diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_combinations.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_combinations.py new file mode 100644 index 00000000000000..7d0895f2abc6ed --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_combinations.py @@ -0,0 +1,145 @@ +from save_model import saveModel +import numpy as np +import paddle as pdpd +import sys +pdpd.enable_static() + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + +def pdpd_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + data = pdpd.static.data(name='x', shape=input_shape, dtype='float32') + weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) + conv2d = pdpd.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], + padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) + run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) + + +if __name__ == "__main__": + + test_cases =[ + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_SAME_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + 
"padding": "SAME", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_VALID_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "VALID", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_no_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 0, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_strides_assymetric_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1,1,0,1], + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_dilation_assymetric_pads_strides", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1, 1, 1, 2], + "stride" : [3, 1], + }, + { + "input_x": np.arange(27).astype(np.float32).reshape([1, 3, 3, 3]), + "name": "depthwise_conv2d_convolution", + "input_shape": [1, 3, 3, 3], + "kernel": np.ones([3, 1, 3, 3]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride": 1, + "groups": 3, + "use_cudnn": True + } + ] + for test in test_cases: + + pdpd_conv2d(test['input_x'], test['name'], test["input_shape"], + test['kernel'], test['dilation'], + test['padding'], + test['stride'], + 1 if "groups" not in test else test['groups'], + True if "use_cudnn" not in test else test['use_cudnn']) + + diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_relu.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_relu.py new file mode 100644 index 00000000000000..28e818d5d6f827 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_relu.py @@ -0,0 +1,25 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + + +paddle.enable_static() + +inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) + +x = 
fluid.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32') +test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) + +relu = fluid.layers.relu(test_layer) + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'xxx': inp_blob} +var = [relu] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe, + model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams") diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_s.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_s.py new file mode 100644 index 00000000000000..fae73f3ee84d60 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_s.py @@ -0,0 +1,22 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + +paddle.enable_static() + +inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) + +x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32') +test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'x': inp_blob} +var = [test_layer] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe, + model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams") diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_transpose.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_transpose.py new file mode 100644 index 00000000000000..e17f85249e3e9e --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_conv2d_transpose.py @@ -0,0 +1,148 @@ +import numpy as np +import paddle as pdpd +pdpd.enable_static() +from save_model import saveModel +import sys + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + + print(outs[0].shape) + + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + + +def pdpd_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + data = pdpd.static.data(name='x', shape=input_shape, dtype='float32') + weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) + conv2d = pdpd.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], + padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) + run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) + + +if __name__ == "__main__": + + test_cases =[ + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + 
[10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_SAME_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "SAME", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_VALID_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": "VALID", + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_no_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": 0, + "stride" : 2, + }, + { "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_strides_assymetric_padding", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1,1,0,1], + "stride" : 2, + }, + { + "input_x": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor + [5., 6., 7., 8., 9.], + [10., 11., 12., 13., 14.], + [15., 16., 17., 18., 19.], + [20., 21., 22., 23., 24.], + [25., 26., 27., 28., 29.], + [30., 31., 32., 33., 34.,]]]]).astype(np.float32), + "name": "conv2d_transpose_dilation_assymetric_pads_strides", + "input_shape": [1, 1, 7, 5], + "kernel": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32), + "dilation": 1, + "padding": [1, 1, 1, 2], + "stride" : [3, 1], + }, + { + "input_x": np.arange(27).astype(np.float32).reshape([1, 3, 3, 3]), + "name": "depthwise_conv2d_transpose_convolution", + "input_shape": [1, 3, 3, 3], + "kernel": np.ones([3, 1, 3, 3]).astype(np.float32), + "dilation": 1, + "padding": 1, + "stride": 1, + "groups": 3, + "use_cudnn": True + } + ] + for test in test_cases: + + pdpd_conv2d_transpose(test['input_x'], test['name'], test["input_shape"], + test['kernel'], test['dilation'], + test['padding'], + test['stride'], + 1 if "groups" not in test else test['groups'], + True if "use_cudnn" not in test else test['use_cudnn']) + + diff --git 
a/ngraph/test/files/paddlepaddle/gen_scripts/generate_dropout.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_dropout.py new file mode 100644 index 00000000000000..6f40afdb1f2938 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_dropout.py @@ -0,0 +1,47 @@ +# +# dropout paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_dropout(name : str, x, p, pdpd_attrs): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.nn.functional.dropout(x=node_x, p=p, training=pdpd_attrs['training'], mode=pdpd_attrs['mode']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + p=0.5 + data = np.random.random(size=(3, 10, 3, 7)).astype('float32') + pdpd_attrs = { + 'training' : False, + 'mode' : "downscale_in_infer" + } + pdpd_attrs2 = { + 'training' : False, + 'mode' : "upscale_in_train" + } + pdpd_dropout("dropout", data, p, pdpd_attrs) + pdpd_dropout("dropout_upscale_in_train", data, p, pdpd_attrs2) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_expand_v2.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_expand_v2.py new file mode 100644 index 00000000000000..df6121d62b42b2 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_expand_v2.py @@ -0,0 +1,63 @@ +# +# expand_v2 paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + + +def expand_v2(name:str, x, shape:list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.expand(node_x, shape=shape, name='expand_v2') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def expand_v2_tensor(name:str, x, shape): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + node_shape = pdpd.assign(shape, output=None) + out = pdpd.expand(node_x, shape=node_shape, name='expand_v2') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters.
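+ # Note: unlike expand_v2 above, the target shape here is materialized in the graph via pdpd.assign, + # so the expand shape reaches the exported model as a tensor rather than as a static attribute.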
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.random.rand(1, 1, 6).astype(data_type) + + expand_v2("expand_v2", data, [2, 3, 6]) + expand_v2_tensor("expand_v2_tensor", data, np.array([2, 3, 6]).astype('int32')) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant.py new file mode 100644 index 00000000000000..a83403dd3d885d --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant.py @@ -0,0 +1,37 @@ +# +# fill_const paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + + +def fill_constant(name : str, shape : list, dtype, value): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + x2 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + out = pdpd.add(pdpd.cast(x1, np.float32), pdpd.cast(x2, np.float32)) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03) + fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2) + fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant_batch_size_like.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant_batch_size_like.py new file mode 100644 index 00000000000000..25bde96ad594b2 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_fill_constant_batch_size_like.py @@ -0,0 +1,39 @@ +# +# fill_constant_batch_size_like paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def fill_constant_batch_size_like(name : str, x, shape, dtype, value, input_dim_idx=0, output_dim_idx=0): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + like = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \ + value=value, dtype=dtype, \ + output_dim_idx=output_dim_idx, input_dim_idx=input_dim_idx) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
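+ # Note: fill_constant_batch_size_like copies dimension input_dim_idx of the 'like' tensor into + # position output_dim_idx of the output shape; with the arguments used in main() below + # (x of shape [4, 3, 2], shape=[1, -1, 3], input_dim_idx=2, output_dim_idx=1), the output + # shape should presumably come out as [1, 2, 3].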
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.random.rand(4, 3, 2).astype(data_type) + fill_constant_batch_size_like("fill_constant_batch_size_like", \ + x, [1, -1, 3], data_type, 0.03, 2, 1) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_flatten_contiguous_range.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_flatten_contiguous_range.py new file mode 100644 index 00000000000000..5d6274587f5586 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_flatten_contiguous_range.py @@ -0,0 +1,38 @@ +# +# generate_flatten_contiguous_range paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def generate_flatten_contiguous_range(name : str, x, start_axis, stop_axis, in_dtype): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + out = pdpd.flatten(node_x, start_axis, stop_axis) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + # TODO: more type + in_dtype = 'float32' + data = np.random.randn(3, 2, 5, 4).astype(in_dtype) + start_axis = 1 + stop_axis = 2 + generate_flatten_contiguous_range("flatten_contiguous_range_test1", data, start_axis, stop_axis, in_dtype) + +if __name__ == "__main__": + main() diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_interpolate.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_interpolate.py new file mode 100644 index 00000000000000..290baa18e50248 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_interpolate.py @@ -0,0 +1,197 @@ +import numpy as np +import paddle as pdpd +from paddle.nn.functional import interpolate +from save_model import saveModel +import sys +pdpd.enable_static() + + +def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(start_prog) + outs = exe.run( + feed={'x': input_x}, + fetch_list=fetch_list, + program=main_prog) + + with pdpd.static.program_guard(main_prog, start_prog): + saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs + + +def pdpd_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True, + align_mode=0, data_format='NCHW', name=None): + pdpd.enable_static() + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + interp = interpolate(node_x, size=sizes, scale_factor=scale_factor, + mode=mode, align_corners=align_corners, align_mode=align_mode, + data_format=data_format, name=name) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + outs = 
run_and_save_model(x, name, node_x, out, main_program, startup_program) + return outs[0] + + +def resize_upsample_bilinear(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + + test_case = [{'name': 'bilinear_upsample_false_1', 'align_corners': False, 'align_mode': 1}, + {'name': 'bilinear_upsample_false_0', 'align_corners': False, 'align_mode': 0}, + {'name': 'bilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, [8, 8], None, mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + print(test['name']) + print(pdpd_result) + + +def resize_downsample_bilinear(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + data_28 = data.reshape([1, 1, 2, 8]) + test_case = [{'name': 'bilinear_downsample_false_1', 'align_corners': False, 'align_mode': 1}, + {'name': 'bilinear_downsample_false_0', 'align_corners': False, 'align_mode': 0}, + {'name': 'bilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + print(test['name']) + print(pdpd_result) + + +def resize_upsample_nearest(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + + test_case = [{'name': 'nearest_upsample_false_0', 'align_corners': False, 'align_mode': 0}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, [8, 8], None, mode='nearest', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + print(test['name']) + print(pdpd_result) + + +def resize_downsample_nearest(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + data_28 = data.reshape([1, 1, 2, 8]) + test_case = [{'name': 'nearest_downsample_false_0', 'align_corners': False, 'align_mode': 1}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='nearest', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + print(test['name']) + print(pdpd_result) + +def nearest_upsample_tensor_size(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + sizes = np.array([8, 8], dtype=np.int32) + pdpd.enable_static() + test_case = [{'name': 'nearest_upsample_tensor_size', 'align_corners': False, 'align_mode': 0}] + for test in test_case: + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + interp = interpolate(node_x, size=node_sizes, scale_factor=None, + mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'], + data_format='NCHW', name=test['name']) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + outs = exe.run( + feed={'x': data, 
'sizes': sizes}, + fetch_list=out, + program=main_program) + saveModel(test['name'], exe, feedkeys=['x', 'sizes'], fetchlist=out, inputs=[data, sizes], outputs=[outs[0]], target_dir=sys.argv[1]) + + +def bilinear_upsample_tensor_size(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + sizes = np.array([8, 8], dtype="int32") + + test_case = [{'name': 'bilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}] + + for test in test_case: + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + with pdpd.static.program_guard(main_program, startup_program): + node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + interp = interpolate(node_x, size=node_sizes, scale_factor=None, + mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], + data_format='NCHW', name=test['name']) + out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + outs = exe.run( + feed={'x': data, 'sizes': sizes}, + fetch_list=out, + program=main_program) + saveModel(test['name'], exe, feedkeys=['x', 'sizes'], fetchlist=out, inputs=[data, sizes], outputs=[outs[0]], target_dir=sys.argv[1]) + + +def bilinear_upsample_scales(): + data = np.array([[[ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16] + ]]], dtype=np.float32) + + test_case = [{'name': 'bilinear_upsample_scales', 'align_corners': False, 'align_mode': 1, "scales": 2}, + {'name': 'bilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}] + + for test in test_case: + pdpd_result = pdpd_interpolate(data, None, test['scales'], mode='bilinear', align_corners=test['align_corners'], + align_mode=test['align_mode'], data_format='NCHW', name=test['name']) + print(test['name']) + print(pdpd_result) + + +if __name__ == "__main__": + resize_downsample_bilinear() + resize_upsample_bilinear() + resize_downsample_nearest() + resize_upsample_nearest() + nearest_upsample_tensor_size() + bilinear_upsample_tensor_size() + bilinear_upsample_scales() diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_multi_tensor_split.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_multi_tensor_split.py new file mode 100644 index 00000000000000..1a6764941ce26a --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_multi_tensor_split.py @@ -0,0 +1,53 @@ +import paddle +from paddle import fluid +import numpy as np +import sys +import os + +# it's better to use PYTHONPATH +# import sys +# sys.path.append('/home/itikhonov/OpenVINO/openvino/bin/intel64/Debug/lib/python_api/python3.6/') +# from openvino.inference_engine import IECore + +def create_multi_output_model(): + paddle.enable_static() + + # PDPD model creation and inference + num_splits = 20 + inp_blob_1 = np.random.randn(2, num_splits, 4, 4).astype(np.float32) + + x = fluid.data(name='x', shape=[2, num_splits, 4, 4], dtype='float32') + test_layer = fluid.layers.split(x, num_or_sections=num_splits, dim=1) + + var = [] + for i in range(num_splits//2): + add = fluid.layers.elementwise_add(test_layer[2*i], test_layer[2*i+1]) + var.append(add) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + inp_dict = {'x': inp_blob_1} + res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, 
feed=inp_dict) + + fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"), + list(inp_dict.keys()), var, exe, + model_filename="multi_tensor_split.pdmodel", + params_filename="multi_tensor_split.pdiparams") + + # IE inference + # ie = IECore() + # path_to_ie_model = "../models/multi_tensor_split/multi_tensor_split" + # net = ie.read_network(model=path_to_ie_model + ".xml", weights=path_to_ie_model + ".bin") + # exec_net = ie.load_network(net, "CPU") + # res = exec_net.infer({'x': inp_blob_1}) + # + # # compare results: IE vs PDPD + # idx = 0 + # for key in res: + # comp = np.all(np.isclose(res_pdpd[idx], res[key], rtol=1e-05, atol=1e-08, equal_nan=False)) + # assert comp, "PDPD and IE results are different" + # idx = idx + 1 + + +create_multi_output_model() + diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_pad3d.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_pad3d.py new file mode 100644 index 00000000000000..51b3a81f0e9c91 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_pad3d.py @@ -0,0 +1,70 @@ +# +# pad3d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def pad3d(name : str, x, in_dtype, pad, data_format, mode, value = 0): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + + if mode == 'constant': + pad_constant = pdpd.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format) + out = pad_constant(node_x) + else: + pad_other_mode = pdpd.nn.Pad3D(padding=pad, mode=mode, data_format=data_format) + out = pad_other_mode(node_x) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
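+ # Note: the padding list is assumed to follow PaddlePaddle's Pad3D convention of + # [pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back]; the value argument + # is only meaningful for 'constant' mode.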
+ exe.run(pdpd.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + in_dtype = 'float32' + + input_shape = (1, 2, 3, 4, 5) + pad = [1, 2, 1, 1, 3, 4] + mode = 'constant' + data_format = 'NCDHW' + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test1", input_data, in_dtype, pad, data_format, mode, value) + + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 1, 2] + mode = "reflect" + data_format = 'NDHWC' + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test2", input_data, in_dtype, pad, data_format, mode) + + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 1, 2] + mode = "replicate" + data_format = 'NDHWC' + input_data = np.random.rand(*input_shape).astype(np.float32) + pad3d("pad3d_test3", input_data, in_dtype, pad, data_format, mode) + + # padding given as a plain int is only supported by PaddlePaddle 'develop' versions (>= 2.1.0) +# input_shape = (1, 2, 3, 4, 5) +# pad_int = 1 +# mode = 'constant' +# data_format= 'NCDHW' +# value = 100 +# input_data = np.random.rand(*input_shape).astype(np.float32) +# pad3d("pad3d_test4", input_data, in_dtype, pad_int, data_format, mode, value) + +if __name__ == "__main__": + main() diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_pool2d.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_pool2d.py new file mode 100644 index 00000000000000..1f6c32e242b49a --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_pool2d.py @@ -0,0 +1,260 @@ +# +# pool2d paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + +data_type = 'float32' + +def pool2d(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.pool2d(node_x, + pool_size=attrs['pool_size'], + pool_type=attrs['pool_type'], + pool_stride=attrs['pool_stride'], + pool_padding=attrs['pool_padding'], + global_pooling=attrs['global_pooling'], + ceil_mode=attrs['ceil_mode'], + exclusive=attrs['exclusive'], + data_format=attrs['data_format']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def adaptive_pool2d(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + out = pdpd.fluid.layers.adaptive_pool2d( + input=node_x, + pool_size=attrs['pool_size'], + pool_type=attrs['pool_type'], + require_index=attrs['require_index']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters.
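+ # Note: adaptive pooling takes the desired output size instead of explicit kernel/stride/padding; + # the kernel extents are derived from the input and output sizes so that each output cell + # covers a roughly equal region of the input.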
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + N, C, H, W = 2, 3, 4, 4 + data = np.arange(N*C*H*W).astype(data_type) + data_NCHW = data.reshape(N, C, H, W) + data_NHWC = data.reshape(N, H, W, C) + #print(data_NCHW, data_NCHW.shape) + + pooling_types = ['max', 'avg'] + + # pool2d + for i, pooling_type in enumerate(pooling_types): + # example 1: + # ceil_mode = False + pdpd_attrs = { + # input=data_NCHW, # shape: [2, 3, 8, 8] + 'pool_size' : [3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding' : [2,1], # the same as pool_padding = [2,2,1,1] + 'global_pooling' : False, + 'ceil_mode' : False, + 'exclusive' : True, + 'data_format' : "NCHW" + } + # shape of out_1: [2, 3, 4, 3] + pool2d(pooling_type+'Pool_test1', data_NCHW, pdpd_attrs) + + # Cecilia: there is a bug in PaddlePaddle in this case. + # example 2: + # ceil_mode = True (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # the same as pool_padding = [2,2,1,1] + 'global_pooling':False, + 'ceil_mode':True, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_2: [2, 3, 4, 4] which is different from out_1 + pool2d(pooling_type+'Pool_test2', data_NCHW, pdpd_attrs) + + # example 3: + # pool_padding = "SAME" (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':"SAME", + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_3: [2, 3, 3, 3] which is different from out_1 + pool2d(pooling_type+'Pool_test3', data_NCHW, pdpd_attrs) + + # example 4: + # pool_padding = "VALID" (different from example 1) + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':"VALID", + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_4: [2, 3, 2, 2] which is different from out_1 + pool2d(pooling_type+'Pool_test4', data_NCHW, pdpd_attrs) + + # example 5: + # global_pooling = True (different from example 1) + # pool_size will actually be set to [8,8] and pool_padding to [0,0].
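+ # (i.e. the whole spatial plane is reduced, which is why out_5 below has spatial size 1x1)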
+ pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2,1], + 'global_pooling':True, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + # shape of out_5: [2, 3, 1, 1] which is different from out_1 + pool2d(pooling_type+'Pool_test5', data_NCHW, pdpd_attrs) + + # example 6: + # data_format = "NHWC" (different from example 1) + pdpd_attrs = { + #input=data_NHWC, # shape: [2, 8, 8, 3] + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2,1], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NHWC" + } + # shape of out_6: [2, 4, 3, 3] which is different from out_1 + pool2d(pooling_type+'Pool_test6', data_NHWC, pdpd_attrs) + + # example 7: + # pool_size is [9, 9] + pdpd_attrs = { + #input=data_NCHW, + 'pool_size':[9,9], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # the same as pool_padding = [2,2,1,1] + 'global_pooling':False, + 'ceil_mode':True, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test7', data_NCHW, pdpd_attrs) + + # example 8: + # pool_padding is a single int + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':2, + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test8', data_NCHW, pdpd_attrs) + + #input data for test9 and test10 + N_data1, C_data1, H_data1, W_data1 = 2, 3, 8, 8 + data1 = np.arange(N_data1*C_data1*H_data1*W_data1).astype(data_type) + data1_NCHW = data1.reshape(N_data1, C_data1, H_data1, W_data1) + # example 9: + # pool_padding has 4 elements: [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[2, 1, 2, 1], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test9', data1_NCHW, pdpd_attrs) + + # example 10: + # input=data_NCHW and pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] + pdpd_attrs = { + 'pool_size':[3,3], + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test10', data1_NCHW, pdpd_attrs) + + # example 11: + # input=data_NCHW and pool_size is a single int applied to both height and width;
pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] + pdpd_attrs = { + 'pool_size': 9, + 'pool_type' : pooling_type, + 'pool_stride' : [3,3], + 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], + 'global_pooling':False, + 'ceil_mode':False, + 'exclusive':True, + 'data_format':"NCHW" + } + pool2d(pooling_type+'Pool_test11', data1_NCHW, pdpd_attrs) + + + # adaptive_pool2d + for i, pooling_type in enumerate(pooling_types): + pdpd_attrs = { + 'pool_size': [3,3], + 'pool_type': pooling_type, + 'require_index': False + } + adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, pdpd_attrs) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_range.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_range.py new file mode 100644 index 00000000000000..c2d7a0b57156f2 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_range.py @@ -0,0 +1,45 @@ +# +# range paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_range(name : str, x, start, end, step, out_type): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + # The range op only supports fill_constant inputs, since dynamic ops are not supported in OV + out = pdpd.fluid.layers.range(start, end, step, out_type) + out = pdpd.cast(out, np.float32) + out = pdpd.add(node_x, out) + #out = pdpd.cast(out, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + start = 1.5 + end = 10.5 + step = 2 + data = np.random.random([1, 5]).astype("float32") + out_type = ["float32", "int32", "int64"] + for i, dtype in enumerate(out_type): + pdpd_range("range"+str(i), data, start, end, step, dtype) + + +if __name__ == "__main__": + main() diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_relu.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_relu.py new file mode 100644 index 00000000000000..6327634edccdf8 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_relu.py @@ -0,0 +1,22 @@ +import paddle +from paddle import fluid +import numpy as np +import os +import sys + +paddle.enable_static() + +inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) + +x = fluid.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32') + +relu = fluid.layers.relu(x) + +exe = fluid.Executor(fluid.CPUPlace()) +exe.run(fluid.default_startup_program()) +inp_dict = {'xxx': inp_blob} +var = [relu] +res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + +fluid.io.save_inference_model(os.path.join(sys.argv[1], "relu"), list(inp_dict.keys()), var, exe, + model_filename="relu.pdmodel", params_filename="relu.pdiparams") diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_rnn_lstm.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_rnn_lstm.py new file mode 100644 index 00000000000000..348da92fbb4e92 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_rnn_lstm.py @@ -0,0 +1,67 @@ +import numpy as np
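+# This script is expected to export one LSTM model per entry in testCases below +# (1 or 2 layers, forward or bidirectional); prev_h/prev_c and the input are seeded +# with ones so the reference outputs only depend on the randomly initialized weights.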
+from save_model import saveModel +import sys + + +def pdpd_rnn_lstm(input_size, hidden_size, layers, direction): + import paddle as pdpd + pdpd.enable_static() + main_program = pdpd.static.Program() + startup_program = pdpd.static.Program() + + num_of_directions = 1 if direction == 'forward' else 2 + with pdpd.static.program_guard(main_program, startup_program): + + rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction) + + data = pdpd.static.data(name='x', shape=[4, 3, input_size], dtype='float32') + prev_h = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) + prev_c = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) + + y, (h, c) = rnn(data, (prev_h, prev_c)) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + exe.run(startup_program) + + outs = exe.run( + feed={'x': np.ones([4, 3, input_size]).astype(np.float32)}, + fetch_list=[y, h, c], + program=main_program) + saveModel("rnn_lstm_layer_" + str(layers) + '_' + str(direction), exe, feedkeys=['x'], + fetchlist=[y, h, c], inputs=[np.ones([4, 3, input_size]).astype(np.float32)], outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1]) + print(outs[0]) + return outs[0] + + +if __name__ == "__main__": + + testCases = [ + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 1, + 'direction': 'forward', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 1, + 'direction': 'bidirectional', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 2, + 'direction': 'forward', + }, + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 2, + 'direction': 'bidirectional', + } + ] + + for test in testCases: + pdpd_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction']) \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_shape.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_shape.py new file mode 100644 index 00000000000000..35241487bba327 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_shape.py @@ -0,0 +1,38 @@ +# +# shape paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def pdpd_shape(name : str, x): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.shape(node_x) + out = pdpd.cast(out, np.float32) + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters.
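+ # Note: pdpd.shape produces an integer tensor; it is cast to float32 above, presumably + # so the saved reference output matches the dtype used by the other generated models.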
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + + data = np.random.random(size=(2, 3)).astype('float32') + pdpd_shape("shape", data) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_slice.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_slice.py new file mode 100644 index 00000000000000..bcfabdd28c428d --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_slice.py @@ -0,0 +1,39 @@ +# +# slice paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def slice(name : str, x, axes : list, start : list, end : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type) + slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3)) + + x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type) + slice("slice_1d", x, axes=[0], start=[0], end=[1]) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_split.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_split.py new file mode 100644 index 00000000000000..93ebcf7ebc92c7 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_split.py @@ -0,0 +1,52 @@ +# +# split paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def split(name : str, x, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters.
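+ # Note: num_or_sections may be an int (split into that many equal parts) or a list of + # section sizes, where a single -1 entry is presumably inferred from the remaining + # extent of the split axis (see main() below).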
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + print("outputs: ", type(outs), len(outs)) + print("out: ", type(out), len(out)) + + saveModel(name, exe, feedkeys=['x'], fetchlist=out, inputs=[x], outputs=outs, target_dir=sys.argv[1]) + + return outs[0] + +def main(): + # split + data_types = ['float32'] # TODO: ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'] + num_or_sections = [3, [2, 3, 4], [2, 3, -1]] + axes = [1, -2] + + idx = 1 + for t in data_types: + for s in num_or_sections: + for i in axes: + pdpd_attrs = { + 'num_or_sections': s, + 'axis': i + } + print(idx, t, s, i) + data_NCHW = np.random.rand(3,9,5).astype(t) + split("split_test{}".format(idx), data_NCHW, pdpd_attrs) + idx+=1 + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_squeeze.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_squeeze.py new file mode 100644 index 00000000000000..04eae5cf0b1b70 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_squeeze.py @@ -0,0 +1,38 @@ +# +# squeeze paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def squeeze(name : str, x, axes : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.random.rand(1, 3, 1, 4).astype(data_type) + + squeeze("squeeze", data, [0, -2]) + squeeze("squeeze_null_axes", data, []) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsqueeze.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsqueeze.py new file mode 100644 index 00000000000000..e2fee0e97f5a13 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsqueeze.py @@ -0,0 +1,37 @@ +# +# unsqueeze paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle as pdpd +import sys + +data_type = 'float32' + +def unsqueeze(name : str, x, axes : list): + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) + out = pdpd.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze') + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters.
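+ # Note: with axes=[1] as used in main() below, an input of shape (5, 10) becomes (5, 1, 10).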
+ exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.random.rand(5, 10).astype(data_type) + + unsqueeze("unsqueeze", data, [1]) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_yolo_box.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_yolo_box.py new file mode 100644 index 00000000000000..0d2e6b6bfd3bf0 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_yolo_box.py @@ -0,0 +1,78 @@ +# +# yolo_box paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def yolo_box(name : str, x, img_size, attrs : dict): + import paddle as pdpd + pdpd.enable_static() + + with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): + node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype) + boxes, scores = pdpd.vision.ops.yolo_box(node_x, + node_img_size, + anchors=attrs['anchors'], + class_num=attrs['class_num'], + conf_thresh=attrs['conf_thresh'], + downsample_ratio=attrs['downsample_ratio'], + clip_bbox=attrs['clip_bbox'], + name=None, + scale_x_y=attrs['scale_x_y']) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'img_size': img_size}, + fetch_list=[boxes, scores]) + + # Save inputs in the order of the ngraph function, to facilitate the Fuzzy test, + # which accepts inputs and outputs in this order as well. + saveModel(name, exe, feedkeys=['x', 'img_size'], fetchlist=[boxes, scores], + inputs=[x, img_size], outputs=outs, target_dir=sys.argv[1]) + + return outs + + +def main(): + # yolo_box + pdpd_attrs = { + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': False, # There is a bug in Paddle2ONNX where clip_bbox is always ignored. + 'scale_x_y': 1.0 + } + + pdpd_attrs_clip_box = { + 'anchors': [10, 13, 16, 30, 33, 23], + 'class_num': 2, + 'conf_thresh': 0.5, + 'downsample_ratio': 32, + 'clip_bbox': True, # There is a bug in Paddle2ONNX where clip_bbox is always ignored. + 'scale_x_y': 1.0 + } + + N = 1 + num_anchors = int(len(pdpd_attrs['anchors'])//2) + x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13) + imgsize_shape = (N, 2) + + data = np.random.random(x_shape).astype('float32') + data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') + + # For any change to pdpd_attrs, do - + # step 1.
generate paddle model + pred_pdpd = yolo_box('yolo_box_test1', data, data_ImSize, pdpd_attrs) + pred_pdpd = yolo_box('yolo_box_clip_box', data, data_ImSize, pdpd_attrs_clip_box) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/save_model.py b/ngraph/test/files/paddlepaddle/gen_scripts/save_model.py new file mode 100644 index 00000000000000..da3d102e1ef8ed --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/save_model.py @@ -0,0 +1,80 @@ +import os +import numpy as np +import paddle as pdpd + + +#print numpy array like C structure +def print_alike(arr): + shape = arr.shape + rank = len(shape) + #print("shape: ", shape, "rank: %d" %(rank)) + + #for idx, value in np.ndenumerate(arr): + # print(idx, value) + + def print_array(arr, end=' '): + shape = arr.shape + rank = len(arr.shape) + if rank > 1: + line = "{" + for i in range(arr.shape[0]): + line += print_array(arr[i,:], end="},\n" if i < arr.shape[0]-1 else "}") + line += end + return line + else: + line = "{" + for i in range(arr.shape[0]): + line += "{:.2f}".format(arr[i]) #str(arr[i]) + line += ", " if i < shape[0]-1 else ' ' + line += end + #print(line) + return line + + + print(print_array(arr, "}")) + +def saveModel(name, exe, feedkeys:list, fetchlist:list, inputs:list, outputs:list, target_dir:str): + model_dir = os.path.join(target_dir, name) + if not os.path.exists(model_dir): + os.makedirs(model_dir) + + print("\n\n------------- %s -----------\n" % (name)) + for i, input in enumerate(inputs): + print("INPUT %s :" % (feedkeys[i]), input.shape, input.dtype, "\n") + print_alike(input) + np.save(os.path.join(model_dir, "input{}".format(i)), input) + np.save(os.path.join(model_dir, "input{}.{}.{}".format(i, feedkeys[i], input.dtype)), input) + print("\n") + + for i, output in enumerate(outputs): + print("OUTPUT %s :" % (fetchlist[i]),output.shape, output.dtype, "\n") + print_alike(output) + np.save(os.path.join(model_dir, "output{}".format(i)), output) + + # composited model + scattered model + pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) + pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams") + + +if __name__ == "__main__": + np.set_printoptions(precision=2) + np.set_printoptions(suppress=True) + + #x = np.random.randn(2,3).astype(np.float32) + x = np.array([[[ + [1, 2, 3], + [4, 5, 6] + ], + [ + [1, 2, 3], + [4, 5, 6] + ]], + [[ + [1, 2, 3], + [4, 5, 6] + ], + [ + [1, 2, 3], + [4, 5, 6] + ]]]).astype(np.float32) + print_alike(x) \ No newline at end of file diff --git a/ngraph/test/files/paddlepaddle/gen_wrapper.py b/ngraph/test/files/paddlepaddle/gen_wrapper.py new file mode 100644 index 00000000000000..bb982baca187d4 --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_wrapper.py @@ -0,0 +1,20 @@ +import os +import subprocess + +import sys + +print(sys.argv) +if len(sys.argv) < 4: + print("Script, output folder and mark file must be specified as arguments") + exit(1) + +gen_script = sys.argv[1] +out_folder = sys.argv[2] +mark_file = sys.argv[3] + +print("Processing: {} ".format(gen_script)) +subprocess.run([sys.executable, gen_script, out_folder], env=os.environ) + +# Create mark file indicating that script was executed +with open(mark_file, "w") as fp: + pass diff --git a/ngraph/test/files/paddlepaddle/models/models.csv b/ngraph/test/files/paddlepaddle/models/models.csv new file mode 100644 index 00000000000000..388a7b9296fc1c --- 
/dev/null +++ b/ngraph/test/files/paddlepaddle/models/models.csv @@ -0,0 +1,84 @@ +argmax, +argmax1, +assign_value_boolean, +assign_value_fp32, +assign_value_int32, +assign_value_int64, +avgAdaptivePool2D_test1, +avgPool_test1, +avgPool_test10, +avgPool_test11, +avgPool_test2, +avgPool_test3, +avgPool_test4, +avgPool_test5, +avgPool_test7, +avgPool_test8, +avgPool_test9, +batch_norm_nchw, +batch_norm_nhwc, +bilinear_downsample_false_0, +bilinear_downsample_false_1, +bilinear_downsample_true_0, +bilinear_upsample_false_0, +bilinear_upsample_false_1, +bilinear_upsample_scales, +bilinear_upsample_scales2, +bilinear_upsample_true_0, +clip, +conv2d_dilation_assymetric_pads_strides, +conv2d_SAME_padding, +conv2d_strides_assymetric_padding, +conv2d_strides_no_padding, +conv2d_strides_padding, +conv2d_transpose_dilation_assymetric_pads_strides, +conv2d_transpose_strides_assymetric_padding, +conv2d_transpose_strides_no_padding, +conv2d_transpose_strides_padding, +conv2d_transpose_VALID_padding, +conv2d_VALID_padding, +depthwise_conv2d_convolution, +depthwise_conv2d_transpose_convolution, +dropout, +dropout_upscale_in_train, +expand_v2, +expand_v2_tensor, +fill_constant, +fill_constant_batch_size_like, +fill_constant_int32, +fill_constant_int64, +flatten_contiguous_range_test1, +maxAdaptivePool2D_test1, +maxPool_test1, +maxPool_test10, +maxPool_test11, +maxPool_test2, +maxPool_test3, +maxPool_test4, +maxPool_test5, +maxPool_test7, +maxPool_test8, +maxPool_test9, +nearest_downsample_false_0, +nearest_upsample_false_0, +pad3d_test1, +pad3d_test2, +pad3d_test3, +#pad3d_test4, +range0, +range1, +range2, +rnn_lstm_layer_1_bidirectional, +rnn_lstm_layer_1_forward, +rnn_lstm_layer_2_bidirectional, +rnn_lstm_layer_2_forward, +shape, +slice, +slice_1d, +split_test1, +split_test2, +squeeze, +squeeze_null_axes, +unsqueeze, +yolo_box_clip_box, +yolo_box_test1 diff --git a/ngraph/test/frontend/CMakeLists.txt b/ngraph/test/frontend/CMakeLists.txt new file mode 100644 index 00000000000000..98f4095b37c5b5 --- /dev/null +++ b/ngraph/test/frontend/CMakeLists.txt @@ -0,0 +1,16 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(SRC ${CMAKE_CURRENT_SOURCE_DIR}/mock_frontend.cpp) +add_library(mock1_ngraph_frontend SHARED ${SRC}) + +target_compile_definitions(mock1_ngraph_frontend PRIVATE "-DMOCK_VARIANT=\"1\"") + +target_include_directories(mock1_ngraph_frontend PRIVATE ".") + +target_include_directories(mock1_ngraph_frontend PRIVATE ${FRONTEND_INCLUDE_PATH} ${NGRAPH_INCLUDE_PATH}) +target_link_libraries(mock1_ngraph_frontend PRIVATE frontend_manager) +add_dependencies(unit-test mock1_ngraph_frontend) + +add_clang_format_target(mock1_ngraph_frontend_clang FOR_TARGETS mock1_ngraph_frontend) diff --git a/ngraph/test/frontend/frontend_manager.cpp b/ngraph/test/frontend/frontend_manager.cpp new file mode 100644 index 00000000000000..3e37b4d3b6e884 --- /dev/null +++ b/ngraph/test/frontend/frontend_manager.cpp @@ -0,0 +1,151 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "gtest/gtest.h" +#include "gmock/gmock.h" + +#include "backend.hpp" +#include "ngraph/file_util.hpp" + +#ifdef _WIN32 +const char FrontEndPathSeparator[] = ";"; +#else +const char FrontEndPathSeparator[] = ":"; +#endif // _WIN32 + +using namespace ngraph; +using namespace ngraph::frontend; + +static int set_test_env(const char* name, const char* value) +{ +#ifdef _WIN32 + return _putenv_s(name, value); +#elif defined(__linux) || 
defined(__APPLE__) + return setenv(name, value, 0); +#endif +} + +TEST(FrontEndManagerTest, testAvailableFrontEnds) +{ + FrontEndManager fem; + ASSERT_NO_THROW(fem.register_front_end("mock", [](FrontEndCapFlags fec) + { + return std::make_shared<FrontEnd>(); + })); + auto frontends = fem.get_available_front_ends(); + ASSERT_NE(std::find(frontends.begin(), frontends.end(), "mock"), frontends.end()); + FrontEnd::Ptr fe; + ASSERT_NO_THROW(fe = fem.load_by_framework("mock")); +} + +TEST(FrontEndManagerTest, testLoadWithFlags) +{ + int expFlags = FrontEndCapabilities::FEC_CUT | + FrontEndCapabilities::FEC_WILDCARDS | + FrontEndCapabilities::FEC_NAMES; + int actualFlags = FrontEndCapabilities::FEC_DEFAULT; + FrontEndManager fem; + ASSERT_NO_THROW(fem.register_front_end("mock", [&actualFlags](int fec) + { + actualFlags = fec; + return std::make_shared<FrontEnd>(); + })); + auto frontends = fem.get_available_front_ends(); + ASSERT_NE(std::find(frontends.begin(), frontends.end(), "mock"), frontends.end()); + FrontEnd::Ptr fe; + ASSERT_NO_THROW(fe = fem.load_by_framework("mock", expFlags)); + ASSERT_TRUE(actualFlags & FrontEndCapabilities::FEC_CUT); + ASSERT_TRUE(actualFlags & FrontEndCapabilities::FEC_WILDCARDS); + ASSERT_TRUE(actualFlags & FrontEndCapabilities::FEC_NAMES); + ASSERT_EQ(expFlags, actualFlags); +} + +TEST(FrontEndManagerTest, testMockPluginFrontEnd) +{ + std::string fePath = + ngraph::file_util::get_directory(ngraph::runtime::Backend::get_backend_shared_library_search_directory()); + fePath = fePath + FrontEndPathSeparator + "someInvalidPath"; + set_test_env("OV_FRONTEND_PATH", fePath.c_str()); + + FrontEndManager fem; + auto frontends = fem.get_available_front_ends(); + ASSERT_NE(std::find(frontends.begin(), frontends.end(), "mock1"), frontends.end()); + set_test_env("OV_FRONTEND_PATH", ""); +} + +TEST(FrontEndManagerTest, testDefaultFrontEnd) +{ + FrontEndManager fem; + ASSERT_ANY_THROW(fem.load_by_model("")); + + std::unique_ptr<FrontEnd> fePtr (new FrontEnd()); // to verify base destructor + FrontEnd::Ptr fe = std::make_shared<FrontEnd>(); + ASSERT_ANY_THROW(fe->load_from_file("")); + ASSERT_ANY_THROW(fe->load_from_files({"", ""})); + ASSERT_ANY_THROW(fe->load_from_memory(nullptr)); + ASSERT_ANY_THROW(fe->load_from_memory_fragments({nullptr, nullptr})); + std::stringstream str; + ASSERT_ANY_THROW(fe->load_from_stream(str)); + ASSERT_ANY_THROW(fe->load_from_streams({&str, &str})); + ASSERT_ANY_THROW(fe->convert(std::shared_ptr<ngraph::Function>(nullptr))); + ASSERT_ANY_THROW(fe->convert(InputModel::Ptr(nullptr))); + ASSERT_ANY_THROW(fe->convert_partially(nullptr)); + ASSERT_ANY_THROW(fe->decode(nullptr)); + ASSERT_ANY_THROW(fe->normalize(nullptr)); +} + +TEST(FrontEndManagerTest, testDefaultInputModel) +{ + std::unique_ptr<InputModel> imPtr (new InputModel()); // to verify base destructor + InputModel::Ptr im = std::make_shared<InputModel>(); + ASSERT_ANY_THROW(im->get_inputs()); + ASSERT_ANY_THROW(im->get_outputs()); + ASSERT_ANY_THROW(im->override_all_inputs({nullptr})); + ASSERT_ANY_THROW(im->override_all_outputs({nullptr})); + ASSERT_ANY_THROW(im->extract_subgraph({nullptr}, {nullptr})); + ASSERT_ANY_THROW(im->get_place_by_tensor_name("")); + ASSERT_ANY_THROW(im->get_place_by_operation_name("")); + ASSERT_ANY_THROW(im->get_place_by_operation_and_input_port("", 0)); + ASSERT_ANY_THROW(im->get_place_by_operation_and_output_port("", 0)); + ASSERT_ANY_THROW(im->set_name_for_tensor(nullptr, "")); + ASSERT_ANY_THROW(im->add_name_for_tensor(nullptr, "")); + ASSERT_ANY_THROW(im->set_name_for_operation(nullptr, 
"")); + ASSERT_ANY_THROW(im->free_name_for_tensor("")); + ASSERT_ANY_THROW(im->free_name_for_operation("")); + ASSERT_ANY_THROW(im->set_name_for_dimension(nullptr, 0, "")); + ASSERT_ANY_THROW(im->cut_and_add_new_input(nullptr, "")); + ASSERT_ANY_THROW(im->cut_and_add_new_output(nullptr, "")); + ASSERT_ANY_THROW(im->add_output(nullptr)); + ASSERT_ANY_THROW(im->remove_output(nullptr)); + ASSERT_ANY_THROW(im->set_partial_shape(nullptr, ngraph::Shape{})); + ASSERT_ANY_THROW(im->get_partial_shape(nullptr)); + ASSERT_ANY_THROW(im->set_element_type(nullptr, ngraph::element::Type{})); + ASSERT_ANY_THROW(im->set_tensor_value(nullptr, nullptr)); + ASSERT_ANY_THROW(im->set_tensor_partial_value(nullptr, nullptr, nullptr)); +} + +TEST(FrontEndManagerTest, testDefaultPlace) +{ + std::unique_ptr placePtr (new Place()); // to verify base destructor + Place::Ptr place = std::make_shared(); + ASSERT_ANY_THROW(place->get_names()); + ASSERT_ANY_THROW(place->get_consuming_operations()); + ASSERT_ANY_THROW(place->get_target_tensor()); + ASSERT_ANY_THROW(place->get_source_tensor()); + ASSERT_ANY_THROW(place->get_producing_operation()); + ASSERT_ANY_THROW(place->get_producing_port()); + ASSERT_ANY_THROW(place->get_input_port()); + ASSERT_ANY_THROW(place->get_input_port("")); + ASSERT_ANY_THROW(place->get_output_port()); + ASSERT_ANY_THROW(place->get_output_port("")); + ASSERT_ANY_THROW(place->get_consuming_ports()); + ASSERT_ANY_THROW(place->is_input()); + ASSERT_ANY_THROW(place->is_output()); + ASSERT_ANY_THROW(place->is_equal(nullptr)); + ASSERT_ANY_THROW(place->is_equal_data(nullptr)); +} diff --git a/ngraph/test/frontend/mock_frontend.cpp b/ngraph/test/frontend/mock_frontend.cpp new file mode 100644 index 00000000000000..34c8d420b031fc --- /dev/null +++ b/ngraph/test/frontend/mock_frontend.cpp @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +// Defined if we are building the plugin DLL (instead of using it) +#ifdef mock1_ngraph_frontend_EXPORTS +#define MOCK_API NGRAPH_HELPER_DLL_EXPORT +#else +#define MOCK_API NGRAPH_HELPER_DLL_IMPORT +#endif // mock1_ngraph_frontend_EXPORTS + +using namespace ngraph; +using namespace ngraph::frontend; + +class FrontEndMock : public FrontEnd +{ +}; + +extern "C" MOCK_API FrontEndVersion GetAPIVersion() +{ + return OV_FRONTEND_API_VERSION; +} + +extern "C" MOCK_API void* GetFrontEndData() +{ + FrontEndPluginInfo* res = new FrontEndPluginInfo(); + res->m_name = "mock1"; + res->m_creator = [](FrontEndCapFlags) { return std::make_shared(); }; + return res; +} \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/basic_api.cpp b/ngraph/test/frontend/paddlepaddle/basic_api.cpp new file mode 100644 index 00000000000000..6bdb076dd8351d --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/basic_api.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "../shared/include/basic_api.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +static const std::string PDPD = "pdpd"; + +using PDPDBasicTest = FrontEndBasicTest; + +static const std::vector models { + std::string("conv2d"), + std::string("conv2d_s/conv2d.pdmodel"), + std::string("conv2d_relu/conv2d_relu.pdmodel"), + std::string("2in_2out/2in_2out.pdmodel"), + std::string("multi_tensor_split/multi_tensor_split.pdmodel"), + 
std::string("2in_2out_dynbatch/2in_2out_dynbatch.pdmodel"), + std::string("bilinear_upsample_tensor_size/bilinear_upsample_tensor_size.pdmodel"), + std::string("nearest_upsample_tensor_size/nearest_upsample_tensor_size.pdmodel"), +}; + +INSTANTIATE_TEST_CASE_P(PDPDBasicTest, FrontEndBasicTest, + ::testing::Combine( + ::testing::Values(PDPD), + ::testing::Values(std::string(TEST_PDPD_MODELS)), + ::testing::ValuesIn(models)), + FrontEndBasicTest::getTestCaseName); diff --git a/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp b/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp new file mode 100644 index 00000000000000..ed8fb6f4439957 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/cut_specific_model.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "../shared/include/cut_specific_model.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +static const auto PDPD = "pdpd"; + +using PDPDCutTest = FrontEndCutModelTest; + +static CutModelParam getTestData_2in_2out() { + CutModelParam res; + res.m_frontEndName = PDPD; + res.m_modelsPath = std::string(TEST_PDPD_MODELS); + res.m_modelName = "2in_2out/2in_2out.pdmodel"; + res.m_oldInputs = {"inputX1", "inputX2"}; + res.m_newInputs = {"add1.tmp_0"}; + res.m_oldOutputs = {"save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"}; + res.m_newOutputs = {"add2.tmp_0"}; + res.m_tensorValueName = "conv2dX2.tmp_0"; + res.m_tensorValue = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + res.m_op_before_name = "conv2dX2.tmp_0"; + return res; +} + +INSTANTIATE_TEST_CASE_P(PDPDCutTest, FrontEndCutModelTest, + ::testing::Values(getTestData_2in_2out()), + FrontEndCutModelTest::getTestCaseName); \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/exceptions.cpp b/ngraph/test/frontend/paddlepaddle/exceptions.cpp new file mode 100644 index 00000000000000..c43b1edfa384e6 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/exceptions.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "gtest/gtest.h" +#include + +TEST(PDPD_Exceptions, pdpd_check_no_throw) +{ + EXPECT_NO_THROW(FRONT_END_GENERAL_CHECK(true)); +} + +TEST(PDPD_Exceptions, pdpd_check_no_throw_info) +{ + EXPECT_NO_THROW(FRONT_END_GENERAL_CHECK( true, "msg example")); +} + +TEST(PDPD_Exceptions, pdpd_check_throw_no_info) +{ + EXPECT_THROW(FRONT_END_GENERAL_CHECK( false), ngraph::frontend::GeneralFailure); +} + +TEST(PDPD_Exceptions, pdpd_check_throw_info) +{ + EXPECT_THROW(FRONT_END_THROW("msg example"), ngraph::frontend::GeneralFailure); +} + +TEST(PDPD_Exceptions, pdpd_check_throw_check_info) +{ + std::string msg("msg example"); + try { + FRONT_END_THROW(msg); + } catch (const ngraph::frontend::GeneralFailure& ex) { + std::string caught_msg(ex.what()); + EXPECT_NE(caught_msg.find(msg), std::string::npos); + } +} diff --git a/ngraph/test/frontend/paddlepaddle/load_from.cpp b/ngraph/test/frontend/paddlepaddle/load_from.cpp new file mode 100644 index 00000000000000..96b497f23c6f18 --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/load_from.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "../shared/include/load_from.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +static const auto PDPD = "pdpd"; + +using PDPDCutTest = FrontEndLoadFromTest; + +static LoadFromFEParam getTestData() { + LoadFromFEParam res; + 
+    res.m_frontEndName = PDPD;
+    res.m_modelsPath = std::string(TEST_PDPD_MODELS);
+    res.m_file = "conv2d";
+    res.m_files = {"2in_2out/2in_2out.pdmodel", "2in_2out/2in_2out.pdiparams"};
+    res.m_stream = "relu/relu.pdmodel";
+    res.m_streams = {"2in_2out/2in_2out.pdmodel", "2in_2out/2in_2out.pdiparams"};
+    return res;
+}
+
+INSTANTIATE_TEST_CASE_P(PDPDCutTest, FrontEndLoadFromTest,
+                        ::testing::Values(getTestData()),
+                        FrontEndLoadFromTest::getTestCaseName);
\ No newline at end of file
diff --git a/ngraph/test/frontend/paddlepaddle/op.cpp b/ngraph/test/frontend/paddlepaddle/op.cpp
new file mode 100644
index 00000000000000..65acb7e022e7be
--- /dev/null
+++ b/ngraph/test/frontend/paddlepaddle/op.cpp
@@ -0,0 +1,202 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <fstream>
+#include <string>
+
+#include "util/all_close.hpp"
+#include "util/all_close_f.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/ndarray.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+#include "util/test_tools.hpp"
+
+#include "ngraph/ngraph.hpp"
+
+using namespace InferenceEngine;
+
+#include "../shared/include/basic_api.hpp"
+#include "npy.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+using TestEngine = test::IE_CPU_Engine;
+
+static const std::string PDPD = "pdpd";
+static const std::string PATH_TO_MODELS = "/paddlepaddle/models/";
+
+/* helper */
+static bool ends_with(std::string const& value, std::string const& ending) {
+    if (ending.size() > value.size()) return false;
+    return std::equal(ending.rbegin(), ending.rend(), value.rbegin());
+}
+
+static bool starts_with(std::string const& value, std::string const& starting) {
+    if (starting.size() > value.size()) return false;
+    return std::equal(starting.begin(), starting.end(), value.begin());
+}
+
+static std::string get_modelfolder(std::string& modelfile) {
+    if (!ends_with(modelfile, ".pdmodel")) return modelfile;
+    size_t found = modelfile.find_last_of("/\\");
+    return modelfile.substr(0, found);
+}
+
+static const std::string& trim_space(std::string& str) // trim leading and trailing spaces
+{
+    // leading
+    auto it = str.begin();
+    for (; it != str.end() && isspace(*it); it++);
+    auto d = std::distance(str.begin(), it);
+    str.erase(0, d);
+
+    // trailing
+    auto rit = str.rbegin();
+    for (; rit != str.rend() && isspace(*rit); rit++) {
+        str.pop_back();
+    }
+
+    // std::cout << "[" << str << "]" << std::endl;
+    return str;
+}
+
+static std::vector<std::string> get_models(void) {
+    std::string models_csv = std::string(TEST_FILES) + PATH_TO_MODELS + "models.csv";
+    std::ifstream f(models_csv);
+    std::vector<std::string> models;
+    std::string line;
+    while (getline(f, line, ',')) {
+        auto line_trim = trim_space(line);
+        if (line_trim.empty() || starts_with(line_trim, "#"))
+            continue;
+        // std::cout << "line in csv: [" << line_trim << "]" << std::endl;
+        models.emplace_back(line_trim);
+    }
+    return models;
+}
+
+inline void visualizer(std::shared_ptr<ngraph::Function> function, std::string path) {
+    ngraph::pass::VisualizeTree("function.png").run_on_function(function);
+
+    CNNNetwork network(function);
+    network.serialize(path + ".xml", path + ".bin");
+}
+
+std::string get_npy_dtype(std::string& filename) {
+    std::ifstream stream(filename, std::ifstream::binary);
+    if (!stream) {
+        throw std::runtime_error("io error: failed to open a file.");
+    }
+
+    std::string header = npy::read_header(stream);
+
+    // parse header
+    npy::header_t npy_header = npy::parse_header(header);
+    return npy_header.dtype.str();
+}
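+
+// Illustrative note (a sketch, not part of the original change): run_fuzzy()
+// below dispatches on the little-endian dtype tags that numpy writes into the
+// .npy header:
+//
+//   get_npy_dtype(file) == "<f4"  -> load as std::vector<float>    (float32)
+//   get_npy_dtype(file) == "<i4"  -> load as std::vector<int32_t>  (int32)
+//   get_npy_dtype(file) == "<i8"  -> load as std::vector<int64_t>  (int64)
+//
+// Any other tag is rejected with std::runtime_error.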
+
+template <typename T>
+void load_from_npy(std::string& file_path, std::vector<T>& npy_data) {
+    std::ifstream npy_file(file_path);
+    std::vector<unsigned long> npy_shape;
+    bool fortran_order = false;
+    if (npy_file.good())
+        npy::LoadArrayFromNumpy(file_path, npy_shape, fortran_order, npy_data);
+
+    if (npy_data.empty()) {
+        throw std::runtime_error("failed to load npy for test case " + file_path);
+    }
+}
+
+namespace fuzzyOp {
+    using PDPDFuzzyOpTest = FrontEndBasicTest;
+    using PDPDFuzzyOpTestParam = std::tuple<std::string, std::string, std::string>; // modelname
+
+    void run_fuzzy(std::shared_ptr<ngraph::Function> function, std::string& modelfile) {
+
+        auto modelfolder = get_modelfolder(modelfile);
+
+        // run test
+        auto test_case = test::TestCase<TestEngine>(function);
+
+        const auto parameters = function->get_parameters();
+        for (size_t i = 0; i < parameters.size(); i++) {
+            // read input npy file
+            std::string datafile = modelfolder + "/input" + std::to_string((parameters.size() - 1) - i) + ".npy";
+            auto dtype = get_npy_dtype(datafile);
+            if (dtype == "<f4") {
+                std::vector<float> data_in;
+                load_from_npy(datafile, data_in);
+                test_case.add_input(data_in);
+            } else if (dtype == "<i4") {
+                std::vector<int32_t> data_in;
+                load_from_npy(datafile, data_in);
+                test_case.add_input(data_in);
+            } else if (dtype == "<i8") {
+                std::vector<int64_t> data_in;
+                load_from_npy(datafile, data_in);
+                test_case.add_input(data_in);
+            } else {
+                throw std::runtime_error("not supported dtype in " + dtype);
+            }
+        }
+
+        const auto results = function->get_results();
+        for (size_t i = 0; i < results.size(); i++) {
+            // read expected output npy file
+            std::string datafile = modelfolder + "/output" + std::to_string(i) + ".npy";
+            auto dtype = get_npy_dtype(datafile);
+            if (dtype == "<f4") {
+                std::vector<float> expected_results;
+                load_from_npy(datafile, expected_results);
+                test_case.add_expected_output(expected_results);
+            } else if (dtype == "<i4") {
+                std::vector<int32_t> expected_results;
+                load_from_npy(datafile, expected_results);
+                test_case.add_expected_output(expected_results);
+            } else if (dtype == "<i8") {
+                std::vector<int64_t> expected_results;
+                load_from_npy(datafile, expected_results);
+                test_case.add_expected_output(expected_results);
+            } else {
+                throw std::runtime_error("not supported dtype out " + dtype);
+            }
+        }
+
+        test_case.run_with_tolerance_as_fp();
+        // test_case.run();
+    }
+
+    TEST_P(PDPDFuzzyOpTest, test_fuzzy) {
+        // load
+        ASSERT_NO_THROW(doLoadFromFile());
+
+        // convert
+        std::shared_ptr<ngraph::Function> function;
+        function = m_frontEnd->convert(m_inputModel);
+        ASSERT_NE(function, nullptr);
+
+        // debug
+        // visualizer(function, get_modelfolder(m_modelFile) + "/fuzzy");
+
+        // run
+        run_fuzzy(function, m_modelFile);
+    }
+
+    INSTANTIATE_TEST_CASE_P(FrontendOpTest, PDPDFuzzyOpTest,
+                            ::testing::Combine(
+                                ::testing::Values(PDPD),
+                                ::testing::Values(std::string(TEST_PDPD_MODELS)),
+                                ::testing::ValuesIn(get_models())),
+                            PDPDFuzzyOpTest::getTestCaseName);
+}
diff --git a/ngraph/test/frontend/paddlepaddle/partial_shape.cpp b/ngraph/test/frontend/paddlepaddle/partial_shape.cpp
new file mode 100644
index 00000000000000..cafc94553ed152
--- /dev/null
+++ b/ngraph/test/frontend/paddlepaddle/partial_shape.cpp
@@ -0,0 +1,70 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "../shared/include/partial_shape.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+static const auto PDPD = "pdpd";
+
+using PDPDPartialShapeTest = FrontEndPartialShapeTest;
+
+static PartShape getTestShape_2in_2out() {
+    PartShape res;
+    res.m_modelName = "2in_2out/2in_2out.pdmodel";
+    res.m_tensorName = "inputX1";
+    res.m_oldPartialShape = PartialShape{1, 1, 3, 3};
+    res.m_newPartialShape = PartialShape{2, 1, 3, 3};
+    return res;
+}
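+
+// Illustrative note (a sketch, not part of the original change): the helpers
+// below mix static and dynamic dimensions via ngraph::PartialShape, e.g.
+//
+//   PartialShape s{Dimension::dynamic(), 1, 3, 3};  // unknown batch dimension
+//   s.is_dynamic();                                 // true
+//   PartialShape t{2, 1, 3, 3};                     // fully static shape
+//
+// testSetNewPartialShape later pushes such a shape back into the model through
+// InputModel::set_partial_shape().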
+
+static PartShape getTestShape_2in_2out_dynbatch() {
+    PartShape res;
+    res.m_modelName = "2in_2out_dynbatch/2in_2out_dynbatch.pdmodel";
+    res.m_tensorName = "inputX1";
+    res.m_oldPartialShape = PartialShape{Dimension::dynamic(), 1, 3, 3};
+    res.m_newPartialShape = PartialShape{2, 1, 3, 3};
+    return res;
+}
+
+static PartShape getTestShape_conv2d() {
+    PartShape res;
+    res.m_modelName = "conv2d_s/conv2d.pdmodel";
+    res.m_tensorName = "x";
+    res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
+    res.m_newPartialShape = PartialShape{1, 3, 8, 8};
+    return res;
+}
+
+static PartShape getTestShape_conv2d_setDynamicBatch() {
+    PartShape res;
+    res.m_modelName = "conv2d_s/conv2d.pdmodel";
+    res.m_tensorName = "x";
+    res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
+    res.m_newPartialShape = PartialShape{Dimension::dynamic(), 3, 8, 8};
+    return res;
+}
+
+static PartShape getTestShape_conv2d_relu() {
+    PartShape res;
+    res.m_modelName = "conv2d_relu/conv2d_relu.pdmodel";
+    res.m_tensorName = "xxx";
+    res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
+    res.m_newPartialShape = PartialShape{5, 3, 5, 5};
+    return res;
+}
+
+INSTANTIATE_TEST_CASE_P(PDPDPartialShapeTest, FrontEndPartialShapeTest,
+                        ::testing::Combine(
+                            ::testing::Values(BaseFEParam{PDPD, std::string(TEST_PDPD_MODELS)}),
+                            ::testing::ValuesIn(std::vector<PartShape>{
+                                getTestShape_2in_2out(),
+                                getTestShape_conv2d_relu(),
+                                getTestShape_conv2d(),
+                                getTestShape_conv2d_setDynamicBatch(),
+                                getTestShape_2in_2out_dynbatch()
+                            })),
+                        FrontEndPartialShapeTest::getTestCaseName);
\ No newline at end of file
diff --git a/ngraph/test/frontend/paddlepaddle/set_element_type.cpp b/ngraph/test/frontend/paddlepaddle/set_element_type.cpp
new file mode 100644
index 00000000000000..4cb73b7c7de80a
--- /dev/null
+++ b/ngraph/test/frontend/paddlepaddle/set_element_type.cpp
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "../shared/include/set_element_type.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+static const auto PDPD = "pdpd";
+
+using PDPDCutTest = FrontEndElementTypeTest;
+
+static SetTypeFEParam getTestData_relu() {
+    SetTypeFEParam res;
+    res.m_frontEndName = PDPD;
+    res.m_modelsPath = std::string(TEST_PDPD_MODELS);
+    res.m_modelName = "relu/relu.pdmodel";
+    return res;
+}
+
+INSTANTIATE_TEST_CASE_P(PDPDCutTest, FrontEndElementTypeTest,
+                        ::testing::Values(getTestData_relu()),
+                        FrontEndElementTypeTest::getTestCaseName);
\ No newline at end of file
diff --git a/ngraph/test/frontend/shared/include/basic_api.hpp b/ngraph/test/frontend/shared/include/basic_api.hpp
new file mode 100644
index 00000000000000..cf2f37f13aa0cf
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/basic_api.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+using BasicTestParam = std::tuple<std::string,  // FrontEnd name
+                                  std::string,  // Base path to models
+                                  std::string>; // Model name
+
+class FrontEndBasicTest : public ::testing::TestWithParam<BasicTestParam>
+{
+public:
+    std::string m_feName;
+    std::string m_pathToModels;
+    std::string m_modelFile;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<BasicTestParam>& obj);
+
+    void SetUp() override;
+
+protected:
+    void initParamTest();
+
+    void doLoadFromFile();
+};
diff --git a/ngraph/test/frontend/shared/include/cut_specific_model.hpp b/ngraph/test/frontend/shared/include/cut_specific_model.hpp
new file mode 100644
index 00000000000000..fefae85da44013
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/cut_specific_model.hpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+struct CutModelParam
+{
+    std::string m_frontEndName;
+    std::string m_modelsPath;
+    std::string m_modelName;
+    std::vector<std::string> m_oldInputs;
+    std::vector<std::string> m_newInputs;
+    std::vector<std::string> m_oldOutputs;
+    std::vector<std::string> m_newOutputs;
+    std::string m_tensorValueName;
+    std::vector<float> m_tensorValue;
+    std::string m_op_before_name;
+};
+
+class FrontEndCutModelTest : public ::testing::TestWithParam<CutModelParam>
+{
+public:
+    CutModelParam m_param;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<CutModelParam>& obj);
+
+    void SetUp() override;
+
+protected:
+    void initParamTest();
+
+    void doLoadFromFile();
+
+    std::vector<ngraph::frontend::Place::Ptr> constructNewInputs() const;
+
+    std::vector<ngraph::frontend::Place::Ptr> constructNewOutputs() const;
+};
diff --git a/ngraph/test/frontend/shared/include/load_from.hpp b/ngraph/test/frontend/shared/include/load_from.hpp
new file mode 100644
index 00000000000000..ca3a478fa2caf0
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/load_from.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+struct LoadFromFEParam {
+    std::string m_frontEndName;
+    std::string m_modelsPath;
+    std::string m_file;
+    std::vector<std::string> m_files;
+    std::string m_stream;
+    std::vector<std::string> m_streams;
+};
+
+class FrontEndLoadFromTest : public ::testing::TestWithParam<LoadFromFEParam> {
+public:
+    LoadFromFEParam m_param;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<LoadFromFEParam>& obj);
+
+    void SetUp() override;
+protected:
+    void initParamTest();
+};
diff --git a/ngraph/test/frontend/shared/include/op.hpp b/ngraph/test/frontend/shared/include/op.hpp
new file mode 100644
index 00000000000000..84c081c2f09dc5
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/op.hpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+#include "util/all_close.hpp"
+#include "util/all_close_f.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/ndarray.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+#include "util/test_tools.hpp"
+
+using Inputs = std::vector<std::vector<float>>;
+using Outputs = std::vector<std::vector<float>>;
+
+struct FrontendOpTestParam {
+    std::string m_frontEndName;
+    std::string m_modelsPath;
+    std::string m_modelName;
+
+    Inputs inputs;
+    Outputs expected_outputs;
+};
+
+class FrontendOpTest : public ::testing::TestWithParam<FrontendOpTestParam> {
+public:
+    FrontendOpTestParam m_param;
+
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<FrontendOpTestParam>& obj);
+
+    void SetUp() override;
+
+protected:
+    void initParamTest();
+    void validateOp();
+};
\ No newline at end of file
diff --git a/ngraph/test/frontend/shared/include/partial_shape.hpp b/ngraph/test/frontend/shared/include/partial_shape.hpp
new file mode 100644
index 00000000000000..f926327f78be8b
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/partial_shape.hpp
@@ -0,0 +1,51 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+struct BaseFEParam
+{
+    BaseFEParam()
+    {}
+
+    BaseFEParam(const std::string& name, const std::string& path) :
+        m_frontEndName(name), m_modelsPath(path)
+    {}
+
+    std::string m_frontEndName;
+    std::string m_modelsPath;
+};
+
+struct PartShape
+{
+    std::string m_modelName;
+    std::string m_tensorName;
+    ngraph::PartialShape m_oldPartialShape;
+    ngraph::PartialShape m_newPartialShape;
+};
+
+using PartialShapeParam = std::tuple<BaseFEParam, PartShape>;
+
+class FrontEndPartialShapeTest : public ::testing::TestWithParam<PartialShapeParam>
+{
+public:
+    BaseFEParam m_baseParam;
+    PartShape m_partShape;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<PartialShapeParam>& obj);
+
+    void SetUp() override;
+
+protected:
+    void initParamTest();
+
+    void doLoadFromFile();
+};
diff --git a/ngraph/test/frontend/shared/include/set_element_type.hpp b/ngraph/test/frontend/shared/include/set_element_type.hpp
new file mode 100644
index 00000000000000..0be1a95e5cae71
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/set_element_type.hpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+struct SetTypeFEParam {
+    std::string m_frontEndName;
+    std::string m_modelsPath;
+    std::string m_modelName;
+};
+
+class FrontEndElementTypeTest : public ::testing::TestWithParam<SetTypeFEParam> {
+public:
+    SetTypeFEParam m_param;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<SetTypeFEParam>& obj);
+
+    void SetUp() override;
+protected:
+    void initParamTest();
+    void doLoadFromFile();
+};
diff --git a/ngraph/test/frontend/shared/include/utils.hpp b/ngraph/test/frontend/shared/include/utils.hpp
new file mode 100644
index 00000000000000..352af475007a18
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/utils.hpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include "backend.hpp"
+#include "ngraph/file_util.hpp"
+
+// Helper functions
+namespace FrontEndTestUtils
+{
+    inline std::string fileToTestName(const std::string& fileName) {
+        // TODO: GCC 4.8 has limited support of regex
+        // return std::regex_replace(fileName, std::regex("[/\\.]"), "_");
+        std::string res = fileName;
+        for (auto& c : res) {
+            if (c == '/') {
+                c = '_';
+            } else if (c == '.') {
+                c = '_';
+            }
+        }
+        return res;
+    }
+
+    inline int set_test_env(const char* name, const char* value)
+    {
+#ifdef _WIN32
+        return _putenv_s(name, value);
+#elif defined(__linux) || defined(__APPLE__)
+        return setenv(name, value, 0);
+#endif
+    }
+
+    inline void setupTestEnv() {
+        std::string fePath =
+            ngraph::file_util::get_directory(ngraph::runtime::Backend::get_backend_shared_library_search_directory());
+        set_test_env("OV_FRONTEND_PATH", fePath.c_str());
+    }
+}
\ No newline at end of file
diff --git a/ngraph/test/frontend/shared/src/basic_api.cpp b/ngraph/test/frontend/shared/src/basic_api.cpp
new file mode 100644
index 00000000000000..77c581e9e4e163
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/basic_api.cpp
@@ -0,0 +1,192 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <set>
+#include "../include/basic_api.hpp"
+#include "../include/utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+std::string FrontEndBasicTest::getTestCaseName(const testing::TestParamInfo<BasicTestParam>& obj)
+{
+    std::string fe, path, fileName;
+    std::tie(fe, path, fileName) = obj.param;
+    return fe + "_" + FrontEndTestUtils::fileToTestName(fileName);
+}
+
+void FrontEndBasicTest::SetUp()
+{
+    FrontEndTestUtils::setupTestEnv();
+    m_fem = FrontEndManager(); // re-initialize after setting up environment
+    initParamTest();
+}
+
+void FrontEndBasicTest::initParamTest()
+{
+    std::tie(m_feName, m_pathToModels, m_modelFile) = GetParam();
+    m_modelFile = m_pathToModels + m_modelFile;
+}
+
+void FrontEndBasicTest::doLoadFromFile()
+{
+    std::vector<std::string> frontends;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_feName));
+    ASSERT_NE(m_frontEnd, nullptr);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_modelFile));
+    ASSERT_NE(m_inputModel, nullptr);
+}
+
+TEST_P(FrontEndBasicTest, testLoadFromFile)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function, nullptr);
+}
+
+TEST_P(FrontEndBasicTest, testInputModel_getInputsOutputs)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+
+    using CustomCheck = std::function<void(Place::Ptr place)>;
+    auto checkPlaces = [&](const std::vector<Place::Ptr>& places, CustomCheck cb)
+    {
+        EXPECT_GT(places.size(), 0);
+        std::set<Place::Ptr> placesSet(places.begin(), places.end());
+        EXPECT_EQ(placesSet.size(), places.size());
+        std::for_each(places.begin(), places.end(), [&](Place::Ptr place)
+        {
+            ASSERT_NE(place, nullptr);
+            std::vector<std::string> names;
+            ASSERT_NO_THROW(names = place->get_names());
+            EXPECT_GT(names.size(), 0);
+            cb(place);
+        });
+    };
+    std::vector<Place::Ptr> inputs;
+    ASSERT_NO_THROW(inputs = m_inputModel->get_inputs());
+    checkPlaces(inputs, [&](Place::Ptr place)
+    {
+        EXPECT_TRUE(place->is_input());
+    });
+
+    std::vector<Place::Ptr> outputs;
+    ASSERT_NO_THROW(outputs = m_inputModel->get_outputs());
+    checkPlaces(outputs, [&](Place::Ptr place)
+    {
+        EXPECT_TRUE(place->is_output());
+    });
+}
+
+TEST_P(FrontEndBasicTest, testInputModel_getPlaceByTensorName)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+
+    auto testGetPlaceByTensorName = [&](const std::vector<Place::Ptr>& places)
+    {
+        EXPECT_GT(places.size(), 0);
+        for (auto place : places)
+        {
+            ASSERT_NE(place, nullptr);
+            std::vector<std::string> names;
+            ASSERT_NO_THROW(names = place->get_names());
+            for (auto name : names)
+            {
+                EXPECT_NE(name, std::string());
+                Place::Ptr placeByName;
+                ASSERT_NO_THROW(placeByName = m_inputModel->get_place_by_tensor_name(name));
+                ASSERT_NE(placeByName, nullptr);
+                EXPECT_TRUE(placeByName->is_equal(place));
+            }
+        }
+    };
+
+    std::vector<Place::Ptr> outputs;
+    ASSERT_NO_THROW(outputs = m_inputModel->get_outputs());
+    testGetPlaceByTensorName(outputs);
+
+    std::vector<Place::Ptr> inputs;
+    ASSERT_NO_THROW(inputs = m_inputModel->get_inputs());
+    testGetPlaceByTensorName(inputs);
+}
+
+TEST_P(FrontEndBasicTest, testInputModel_overrideAll)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+
+    using GetPlaces = std::function<std::vector<Place::Ptr>()>;
+    using OverridePlaces = std::function<void(const std::vector<Place::Ptr>&)>;
+    auto verifyOverride = [](GetPlaces getCB, OverridePlaces overrideCB)
+    {
+        std::vector<Place::Ptr> places;
+        ASSERT_NO_THROW(places = getCB());
+        std::set<Place::Ptr> placesSet(places.begin(), places.end());
+
+        auto placesReversed = places;
+        std::reverse(placesReversed.begin(), placesReversed.end());
+        ASSERT_NO_THROW(overrideCB(placesReversed));
+        ASSERT_NO_THROW(places = getCB());
+        EXPECT_GT(places.size(), 0);
+        std::set<Place::Ptr> placesSetAfter(places.begin(), places.end());
+        EXPECT_EQ(placesSetAfter.size(), placesSet.size());
+        std::for_each(places.begin(), places.end(), [&](Place::Ptr place)
+        {
+            EXPECT_GT(placesSet.count(place), 0);
+        });
+    };
+    verifyOverride([&]()
+                   { return m_inputModel->get_inputs(); },
+                   [&](const std::vector<Place::Ptr>& p)
+                   { m_inputModel->override_all_inputs(p); });
+
+    verifyOverride([&]()
+                   { return m_inputModel->get_outputs(); },
+                   [&](const std::vector<Place::Ptr>& p)
+                   { m_inputModel->override_all_outputs(p); });
+}
+
+TEST_P(FrontEndBasicTest, testInputModel_overrideAll_empty)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    using GetPlaces = std::function<std::vector<Place::Ptr>()>;
+    using OverrideEmpty = std::function<void(void)>;
+    using CustomCheck = std::function<void(std::string)>;
+    auto verifyOverride = [](GetPlaces getCB, OverrideEmpty overrideCB, CustomCheck customCB)
+    {
+        std::vector<Place::Ptr> places;
+        std::vector<Place::Ptr> newPlaces;
+        ASSERT_NO_THROW(places = getCB());
+        ASSERT_NO_THROW(overrideCB());
+        ASSERT_NO_THROW(newPlaces = getCB());
+        ASSERT_EQ(newPlaces.size(), 0);
+        std::for_each(places.begin(), places.end(), [&](Place::Ptr place)
+        {
+            std::vector<std::string> names;
+            ASSERT_NO_THROW(names = place->get_names());
+            for (auto name : names)
+            {
+                customCB(name);
+            }
+        });
+    };
+    verifyOverride([&]()
+                   { return m_inputModel->get_outputs(); },
+                   [&]()
+                   { m_inputModel->override_all_outputs({}); },
+                   [&](const std::string& name)
+                   {
+                       EXPECT_FALSE(m_inputModel->get_place_by_tensor_name(name)->is_output());
+                   });
+
+    verifyOverride([&]()
+                   { return m_inputModel->get_inputs(); },
+                   [&]()
+                   { m_inputModel->override_all_inputs({}); },
+                   [&](const std::string& name)
+                   {
+                       EXPECT_FALSE(m_inputModel->get_place_by_tensor_name(name)->is_input());
+                   });
+}
diff --git a/ngraph/test/frontend/shared/src/cut_specific_model.cpp b/ngraph/test/frontend/shared/src/cut_specific_model.cpp
new file mode 100644
index 00000000000000..ea7fb4278a99a7
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/cut_specific_model.cpp
@@ -0,0 +1,279 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+#include "../include/cut_specific_model.hpp"
+#include "../include/utils.hpp"
+#include "ngraph/opsets/opset7.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+static std::string joinStrings(const std::vector<std::string>& strings)
+{
+    std::ostringstream res;
+    std::copy(strings.begin(), strings.end(),
+              std::ostream_iterator<std::string>(res, "_"));
+    return res.str();
+}
+
+std::string FrontEndCutModelTest::getTestCaseName(const testing::TestParamInfo<CutModelParam>& obj)
+{
+    std::string res = obj.param.m_frontEndName + "_" + obj.param.m_modelName;
+    res += "I" + joinStrings(obj.param.m_oldInputs) + joinStrings(obj.param.m_newInputs);
+    res += "O" + joinStrings(obj.param.m_oldOutputs) + joinStrings(obj.param.m_newOutputs);
+    return FrontEndTestUtils::fileToTestName(res);
+}
+
+void FrontEndCutModelTest::SetUp()
+{
+    FrontEndTestUtils::setupTestEnv();
+    m_fem = FrontEndManager(); // re-initialize after setting up environment
+    initParamTest();
+}
+
+void FrontEndCutModelTest::initParamTest()
+{
+    m_param = GetParam();
+    m_param.m_modelName = m_param.m_modelsPath + m_param.m_modelName;
+}
+
+void FrontEndCutModelTest::doLoadFromFile()
+{
+    std::vector<std::string> frontends;
+    FrontEnd::Ptr fe;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_param.m_modelName));
+    ASSERT_NE(m_inputModel, nullptr);
+}
+
+std::vector<Place::Ptr> FrontEndCutModelTest::constructNewInputs() const
+{
+    std::vector<Place::Ptr> newInputs;
+    for (const auto& name : m_param.m_newInputs)
+    {
+        newInputs.push_back(m_inputModel->get_place_by_tensor_name(name));
+    }
+    return newInputs;
+}
+
+std::vector<Place::Ptr> FrontEndCutModelTest::constructNewOutputs() const
+{
+    std::vector<Place::Ptr> newOutputs;
+    for (const auto& name : m_param.m_newOutputs)
+    {
+        newOutputs.push_back(m_inputModel->get_place_by_tensor_name(name));
+    }
+    return newOutputs;
+}
+
+///////////////////////////////////////////////////////////////////
+
+TEST_P(FrontEndCutModelTest, testOverrideInputs)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::vector<Place::Ptr> newPlaces;
+    ASSERT_NO_THROW(newPlaces = constructNewInputs());
+    ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces));
+    ASSERT_NO_THROW(m_inputModel->get_inputs());
+    EXPECT_EQ(m_param.m_newInputs.size(), m_inputModel->get_inputs().size());
+    for (auto newInput : m_inputModel->get_inputs())
+    {
+        std::vector<std::string> names;
+        ASSERT_NO_THROW(names = newInput->get_names());
+        bool found = false;
+        for (const auto& name : m_param.m_newInputs)
+        {
+            if (std::find(names.begin(), names.end(), name) != names.end())
+            {
+                found = true;
+                break;
+            }
+        }
+        EXPECT_TRUE(found) << joinStrings(names) << " were not found in new inputs";
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testOverrideOutputs)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::vector<Place::Ptr> newPlaces;
+    ASSERT_NO_THROW(newPlaces = constructNewOutputs());
+    ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces));
+    ASSERT_NO_THROW(m_inputModel->get_outputs());
+    EXPECT_EQ(m_param.m_newOutputs.size(), m_inputModel->get_outputs().size());
+    for (auto newOutput : m_inputModel->get_outputs())
+    {
+        std::vector<std::string> names;
+        ASSERT_NO_THROW(names = newOutput->get_names());
+        bool found = false;
+        for (const auto& name : m_param.m_newOutputs)
+        {
+            if (std::find(names.begin(), names.end(), name) != names.end())
+            {
+                found = true;
+                break;
+            }
+        }
+        EXPECT_TRUE(found) << joinStrings(names) << " were not found in new outputs";
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testOldInputs)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+
+    // Ensure that it contains expected old inputs
+    for (const auto& name : m_param.m_oldInputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) != ops.end()) << "Name not found:" << name;
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testOldOutputs)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+    // Ensure that it contains expected old outputs
+    for (const auto& name : m_param.m_oldOutputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) != ops.end()) << "Name not found:" << name;
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testNewInputs_func)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::vector<Place::Ptr> newPlaces;
+    ASSERT_NO_THROW(newPlaces = constructNewInputs());
+    ASSERT_NO_THROW(m_inputModel->override_all_inputs(newPlaces));
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+
+    // Ensure that it doesn't contain old inputs
+    for (const auto& name : m_param.m_oldInputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) == ops.end()) << "Name shall not exist:" << name;
+    }
+
+    // Ensure that it contains expected new inputs
+    for (const auto& name : m_param.m_newInputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) != ops.end()) << "Name not found:" << name;
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testNewOutputs_func)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::vector<Place::Ptr> newPlaces;
+    ASSERT_NO_THROW(newPlaces = constructNewOutputs());
+    ASSERT_NO_THROW(m_inputModel->override_all_outputs(newPlaces));
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+
+    // Ensure that it doesn't contain old outputs
+    for (const auto& name : m_param.m_oldOutputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) == ops.end()) << "Name shall not exist:" << name;
+    }
+
+    // Ensure that it contains expected new outputs
+    for (const auto& name : m_param.m_newOutputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) != ops.end()) << "Name not found:" << name;
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testExtractSubgraph)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::vector<Place::Ptr> newInputs, newOutputs;
+    ASSERT_NO_THROW(newInputs = constructNewInputs());
+    ASSERT_NO_THROW(newOutputs = constructNewOutputs());
+    ASSERT_NO_THROW(m_inputModel->extract_subgraph(newInputs, newOutputs));
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+
+    // Ensure that it doesn't contain expected old outputs
+    for (const auto& name : m_param.m_oldOutputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) == ops.end()) << "Name shall not exist:" << name;
+    }
+
+    // Ensure that it contains expected new outputs
+    for (const auto& name : m_param.m_newOutputs)
+    {
+        EXPECT_TRUE(std::find_if(ops.begin(), ops.end(),
+                                 [&](const std::shared_ptr<ngraph::Node>& node)
+                                 {
+                                     return node->get_friendly_name().find(name) != std::string::npos;
+                                 }) != ops.end()) << "Name not found:" << name;
+    }
+}
+
+TEST_P(FrontEndCutModelTest, testSetTensorValue)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    Place::Ptr place;
+    ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_param.m_tensorValueName));
+    ASSERT_NO_THROW(m_inputModel->set_tensor_value(place, &m_param.m_tensorValue[0]));
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    auto ops = function->get_ordered_ops();
+
+    auto const_name = m_param.m_tensorValueName;
+    auto const_node_it = std::find_if(ops.begin(), ops.end(),
+                                      [&](const std::shared_ptr<ngraph::Node>& node)
+                                      {
+                                          return node->get_friendly_name().find(const_name) != std::string::npos;
+                                      });
+    ASSERT_TRUE(const_node_it != ops.end()) << "Name shall exist:" << const_name;
+    auto data = std::dynamic_pointer_cast<opset7::Constant>(*const_node_it)->get_vector<float>();
+    EXPECT_EQ(data.size(), m_param.m_tensorValue.size()) << "Data size must be equal to expected size";
+    EXPECT_TRUE(std::equal(data.begin(), data.end(), m_param.m_tensorValue.begin())) << "Data must be equal";
+}
diff --git a/ngraph/test/frontend/shared/src/load_from.cpp b/ngraph/test/frontend/shared/src/load_from.cpp
new file mode 100644
index 00000000000000..b01b1024b5714f
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/load_from.cpp
@@ -0,0 +1,101 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <fstream>
+#include <memory>
+#include <string>
+#include "../include/load_from.hpp"
+#include "../include/utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+std::string FrontEndLoadFromTest::getTestCaseName(const testing::TestParamInfo<LoadFromFEParam>& obj) {
+    std::string res = obj.param.m_frontEndName;
+    return FrontEndTestUtils::fileToTestName(res);
+}
+
+void FrontEndLoadFromTest::SetUp() {
+    FrontEndTestUtils::setupTestEnv();
+    m_fem = FrontEndManager(); // re-initialize after setting up environment
+    m_param = GetParam();
+}
+
+///////////////////////////////////////////////////////////////////
+
+TEST_P(FrontEndLoadFromTest, testLoadFromFile)
+{
+    std::vector<std::string> frontends;
+    FrontEnd::Ptr fe;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_param.m_modelsPath + m_param.m_file));
+    ASSERT_NE(m_inputModel, nullptr);
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function, nullptr);
+}
+
+TEST_P(FrontEndLoadFromTest, testLoadFromFiles)
+{
+    std::vector<std::string> frontends;
+    FrontEnd::Ptr fe;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+
+    auto dir_files = m_param.m_files;
+    for (auto& file : dir_files) {
+        file = m_param.m_modelsPath + file;
+    }
+
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_files(dir_files));
+    ASSERT_NE(m_inputModel, nullptr);
+
+    std::shared_ptr<ngraph::Function> function;
+    function = m_frontEnd->convert(m_inputModel);
+    ASSERT_NE(function, nullptr);
+}
+
+TEST_P(FrontEndLoadFromTest, testLoadFromStream)
+{
+    std::vector<std::string> frontends;
+    FrontEnd::Ptr fe;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+
+    std::ifstream is(m_param.m_modelsPath + m_param.m_stream, std::ios::in | std::ifstream::binary);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_stream(is));
+    ASSERT_NE(m_inputModel, nullptr);
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function, nullptr);
+}
+
+TEST_P(FrontEndLoadFromTest, testLoadFromStreams)
+{
+    std::vector<std::string> frontends;
+    FrontEnd::Ptr fe;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+
+    std::vector<std::shared_ptr<std::ifstream>> is_vec;
+    std::vector<std::istream*> is_ptr_vec;
+    for (auto& file : m_param.m_streams) {
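+        // is_vec owns the ifstream objects and keeps them alive for the whole
+        // load call; is_ptr_vec only carries the raw istream* view that
+        // load_from_streams() expects.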
+        is_vec.push_back(std::make_shared<std::ifstream>(m_param.m_modelsPath + file, std::ios::in | std::ifstream::binary));
+        is_ptr_vec.push_back(is_vec.back().get());
+    }
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_streams(is_ptr_vec));
+    ASSERT_NE(m_inputModel, nullptr);
+
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function, nullptr);
+}
diff --git a/ngraph/test/frontend/shared/src/op.cpp b/ngraph/test/frontend/shared/src/op.cpp
new file mode 100644
index 00000000000000..66787970f755ce
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/op.cpp
@@ -0,0 +1,61 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+#include "../include/op.hpp"
+#include "../include/utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+using TestEngine = test::IE_CPU_Engine;
+
+std::string FrontendOpTest::getTestCaseName(const testing::TestParamInfo<FrontendOpTestParam>& obj) {
+    std::string res = obj.param.m_frontEndName + "_" + obj.param.m_modelName;
+    return FrontEndTestUtils::fileToTestName(res);
+}
+
+void FrontendOpTest::SetUp() {
+    FrontEndTestUtils::setupTestEnv();
+    m_fem = FrontEndManager(); // re-initialize after setting up environment
+    initParamTest();
+}
+
+void FrontendOpTest::initParamTest() {
+    m_param = GetParam();
+    m_param.m_modelName = m_param.m_modelsPath + m_param.m_modelName;
+}
+
+void FrontendOpTest::validateOp() {
+    // load
+    ASSERT_NO_THROW(m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_param.m_modelName));
+    ASSERT_NE(m_inputModel, nullptr);
+
+    // convert
+    std::shared_ptr<ngraph::Function> function;
+    function = m_frontEnd->convert(m_inputModel);
+    ASSERT_NE(function, nullptr);
+
+    // run
+    auto test_case = test::TestCase<TestEngine>(function);
+
+    for (auto it = m_param.inputs.begin(); it != m_param.inputs.end(); it++) {
+        test_case.add_input(*it);
+    }
+    for (auto it = m_param.expected_outputs.begin(); it != m_param.expected_outputs.end(); it++)
+    {
+        test_case.add_expected_output(*it);
+    }
+
+    test_case.run();
+}
+
+/*---------------------------------------------------------------------------------------------------------------------*/
+
+TEST_P(FrontendOpTest, test_model_runtime) {
+    ASSERT_NO_THROW(validateOp());
+}
diff --git a/ngraph/test/frontend/shared/src/partial_shape.cpp b/ngraph/test/frontend/shared/src/partial_shape.cpp
new file mode 100644
index 00000000000000..8f9f9648f8ea6b
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/partial_shape.cpp
@@ -0,0 +1,86 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <string>
+#include "../include/partial_shape.hpp"
+#include "../include/utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+std::string FrontEndPartialShapeTest::getTestCaseName(const testing::TestParamInfo<PartialShapeParam>& obj)
+{
+    BaseFEParam base;
+    PartShape part;
+    std::tie(base, part) = obj.param;
+    std::string res = base.m_frontEndName + "_" + part.m_modelName + "_" + part.m_tensorName;
+    for (auto s : part.m_newPartialShape)
+    {
+        res += "_" + (s.is_dynamic() ? "dyn" : std::to_string(s.get_length()));
+    }
+    return FrontEndTestUtils::fileToTestName(res);
+}
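+
+// Illustrative note: gtest requires alphanumeric test names, so dynamic
+// dimensions are rendered as "dyn" above, and FrontEndTestUtils::fileToTestName()
+// rewrites '/' and '.' to '_' before the model path is used as a name suffix.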
"dyn" : std::to_string(s.get_length())); + } + return FrontEndTestUtils::fileToTestName(res); +} + +void FrontEndPartialShapeTest::SetUp() +{ + FrontEndTestUtils::setupTestEnv(); + m_fem = FrontEndManager(); // re-initialize after setting up environment + initParamTest(); +} + +void FrontEndPartialShapeTest::initParamTest() +{ + std::tie(m_baseParam, m_partShape) = GetParam(); + m_partShape.m_modelName = m_baseParam.m_modelsPath + m_partShape.m_modelName; +} + +void FrontEndPartialShapeTest::doLoadFromFile() +{ + std::vector frontends; + FrontEnd::Ptr fe; + ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_baseParam.m_frontEndName)); + ASSERT_NE(m_frontEnd, nullptr); + ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_partShape.m_modelName)); + ASSERT_NE(m_inputModel, nullptr); +} + +/////////////////////////////////////////////////////////////////// + +TEST_P(FrontEndPartialShapeTest, testCheckOldPartialShape) { + ASSERT_NO_THROW(doLoadFromFile()); + + std::shared_ptr function; + ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel)); + auto ops = function->get_ordered_ops(); + auto it = std::find_if(ops.begin(), ops.end(), + [&](const std::shared_ptr &node) { + return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos; + }); + ASSERT_NE(it, ops.end()); + auto shape = (*it)->get_output_partial_shape(0); + ASSERT_EQ(shape, m_partShape.m_oldPartialShape); +} + +TEST_P(FrontEndPartialShapeTest, testSetNewPartialShape) +{ + ASSERT_NO_THROW(doLoadFromFile()); + Place::Ptr place; + ASSERT_NO_THROW(place = m_inputModel->get_place_by_tensor_name(m_partShape.m_tensorName)); + ASSERT_NE(place, nullptr); + ASSERT_NO_THROW(m_inputModel->set_partial_shape(place, PartialShape{m_partShape.m_newPartialShape})); + + std::shared_ptr function; + ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel)); + auto ops = function->get_ordered_ops(); + auto it = std::find_if(ops.begin(), ops.end(), + [&](const std::shared_ptr& node) + { + return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos; + }); + ASSERT_NE(it, ops.end()); + auto shape = (*it)->get_output_partial_shape(0); + ASSERT_EQ(shape, m_partShape.m_newPartialShape); +} diff --git a/ngraph/test/frontend/shared/src/set_element_type.cpp b/ngraph/test/frontend/shared/src/set_element_type.cpp new file mode 100644 index 00000000000000..53376327bbc9a8 --- /dev/null +++ b/ngraph/test/frontend/shared/src/set_element_type.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include "../include/set_element_type.hpp" +#include "../include/utils.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +std::string FrontEndElementTypeTest::getTestCaseName(const testing::TestParamInfo& obj) { + std::string res = obj.param.m_frontEndName + "_" + obj.param.m_modelName; + return FrontEndTestUtils::fileToTestName(res); +} + +void FrontEndElementTypeTest::SetUp() { + FrontEndTestUtils::setupTestEnv(); + m_fem = FrontEndManager(); // re-initialize after setting up environment + initParamTest(); +} + +void FrontEndElementTypeTest::initParamTest() { + m_param = GetParam(); + m_param.m_modelName = m_param.m_modelsPath + m_param.m_modelName; +} + +void FrontEndElementTypeTest::doLoadFromFile() { + std::vector frontends; + FrontEnd::Ptr fe; + ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends()); + ASSERT_NO_THROW(m_frontEnd = 
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
+    ASSERT_NE(m_frontEnd, nullptr);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_param.m_modelName));
+    ASSERT_NE(m_inputModel, nullptr);
+}
+
+///////////////////////////////////////////////////////////////////
+
+TEST_P(FrontEndElementTypeTest, testSetElementType)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    Place::Ptr place;
+    ASSERT_NO_THROW(place = m_inputModel->get_inputs()[0]);
+    ASSERT_NE(place, nullptr);
+    auto name = place->get_names()[0];
+
+    ASSERT_NO_THROW(m_inputModel->set_element_type(place, element::f16));
+
+    std::shared_ptr<ngraph::Function> function;
+    function = m_frontEnd->convert(m_inputModel);
+    auto ops = function->get_ordered_ops();
+    auto it = std::find_if(ops.begin(), ops.end(),
+                           [&](const std::shared_ptr<ngraph::Node>& node) {
+                               return node->get_friendly_name().find(name) != std::string::npos;
+                           });
+    ASSERT_NE(it, ops.end());
+    EXPECT_EQ((*it)->get_output_element_type(0), element::f16);
+}
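
A minimal end-to-end sketch of the FrontEndManager flow that the tests above
exercise. Assumptions: a "pdpd" frontend plugin is discoverable via
OV_FRONTEND_PATH, and "model.pdmodel" exists on disk.

    #include <frontend_manager/frontend_manager.hpp>
    #include <memory>

    using namespace ngraph::frontend;

    int main() {
        FrontEndManager fem;
        // Pick a frontend by framework name ("pdpd" is assumed registered).
        FrontEnd::Ptr fe = fem.load_by_framework("pdpd");
        // Parse the model file into a frontend-specific InputModel.
        InputModel::Ptr model = fe->load_from_file("model.pdmodel");
        // Convert the InputModel into an nGraph Function ready for inference.
        std::shared_ptr<ngraph::Function> f = fe->convert(model);
        return f != nullptr ? 0 : 1;
    }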