diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index e99cefbbc3727d..e1f1ef165fa977 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -121,6 +121,7 @@ jobs:
   - script: |
      python3 -m pip install -U setuptools==51.0.0
      python3 -m pip install -r requirements.txt
+     python3 -m pip install -r $(OPENVINO_REPO_DIR)/model-optimizer/requirements_onnx.txt
    workingDirectory: $(REPO_DIR)/modules/mo_pytorch/test
    displayName: 'Install PyTorch tests dependencies'
diff --git a/modules/arm_plugin/README.md b/modules/arm_plugin/README.md
index 238585f406ce31..cdb6bde9d4c288 100644
--- a/modules/arm_plugin/README.md
+++ b/modules/arm_plugin/README.md
@@ -43,7 +43,7 @@ docker container run --rm -ti -v $PWD/build:/armcpu_plugin arm-plugin
 or export only the archive with artifacts:
 ```
 docker container run --rm -ti --tmpfs /armcpu_plugin:rw -v $PWD:/remote \
-           arm-plugin sh -c "/armplg_build.sh && cp ./OV_ARM_package.tar.gz /remote"
+           arm-plugin sh -c "sh /armplg_build.sh && cp ./OV_ARM_package.tar.gz /remote"
 ```
 > **NOTE**: There are a few environment variables that control `/armplg_build.sh` script execution.
 >
@@ -189,16 +189,16 @@ On the output video you should see people enclosed in red rectangles:
 ![](docs/img/object_detection_demo_yolo.gif)
 
 ### OpenVINO™ samples
-You could verify the plugin by running [OpenVINO™ samples]. You can find C++ samples in `deployment_tools/inference_engine/bin` directory (if you build the plugin using approach #1) or `openvino/bin/armv7l/Release` directory (if you build the plugin using approach #2 or #3). The following procedure assumes the approach #1 is used.
-OpenVINO™ samples require OpenCV libraries. If you build the plugin using approach #1 all needed OpenCV libraries are already placed in `build\lib` directory. If you build the plugin using approach #2 or #3 you need to install OpenCV or [build it from source].
+You could verify the plugin by running [OpenVINO™ samples]. You can find C++ samples in `deployment_tools/inference_engine/bin/` directory (if you build the plugin using approach #1) or `openvino/bin//Release` directory (if you build the plugin using approach #2 or #3). The following procedure assumes the approach #1 is used.
+OpenVINO™ samples require OpenCV libraries. If you build the plugin using approach #1 all needed OpenCV libraries are already placed in `opencv\lib` directory. If you build the plugin using approach #2 or #3 you need to install OpenCV or [build it from source].
 Let's try to run [Object Detection for SSD sample].
 #### Model preparation
 1. Prepare model `vehicle-license-plate-detection-barrier-0123` using Model Preparation procedure described in Open Model Zoo demos section.
 #### Model inference on ARM
 1. Copy OpenVINO™ and ARM plugin artefacts to ARM platform. If you build the plugin using approach #1, all artefacts are packed into `OV_ARM_package.tar.gz`.
-2. Go to `deployment_tools/inference_engine/bin` directory:
+2. Go to Inference Engine bin directory:
 ```
-cd deployment_tools/inference_engine/bin
+cd deployment_tools/inference_engine/bin/
 ```
 3. Download a vehicle image, for instance, [this image]:
 ```
@@ -208,7 +208,7 @@ wget https://raw.githubusercontent.com/openvinotoolkit/openvino/master/scripts/d
 5. Run object detection sample on ARM platform:
 ```
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/lib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opencv/lib/:/deployment_tools/inference_engine/lib//
 ./object_detection_sample_ssd -m vehicle-license-plate-detection-barrier-0123.xml -i car_1.bmp -d CPU
 ```
diff --git a/modules/arm_plugin/armplg_build.sh b/modules/arm_plugin/armplg_build.sh
index 532a55fb9b8f0d..0d506b77e1527a 100644
--- a/modules/arm_plugin/armplg_build.sh
+++ b/modules/arm_plugin/armplg_build.sh
@@ -29,6 +29,19 @@ fail()
   exit $retval
 }
 
+cloneSrcTree()
+{
+  DESTDIR=$1
+  shift
+  SRCURL=$1
+  shift
+  while [ $# -gt 0 ]; do
+    git lfs clone --recurse-submodules --shallow-submodules --depth 1 --branch=$1 $SRCURL $DESTDIR && return 0
+    shift
+  done
+  return 1
+}
+
 checkSrcTree()
 {
   [ $# -lt 3 ] && fail
@@ -36,7 +49,7 @@ checkSrcTree()
   if ! [ -d $1 ]; then
     echo "Unable to detect $1"
     echo "Cloning $2..."
-    git lfs clone --recurse-submodules --shallow-submodules --depth 1 --branch=$3 $2 $1 || fail 3 "Failed to clone $2. Stopping"
+    cloneSrcTree $@ || fail 3 "Failed to clone $2. Stopping"
   else
     echo "Detected $1"
     echo "Considering it as source directory"
@@ -45,7 +58,7 @@ checkSrcTree()
       echo "Removing existing sources..."
       rm -rf $1 || fail 1 "Failed to remove. Stopping"
       echo "Cloning $2..."
-      git lfs clone --recurse-submodules --shallow-submodules --depth 1 --branch=$3 $2 $1 || fail 3 "Failed to clone $2. Stopping"
+      cloneSrcTree $@ || fail 3 "Failed to clone $2. Stopping"
    elif [ -d $1/build ]; then
      echo "Build directory detected at $1"
      if [ "$UPDATE_SOURCES" = "clean" ]; then
diff --git a/modules/arm_plugin/src/transformations/arm_optimizations.cpp b/modules/arm_plugin/src/transformations/arm_optimizations.cpp
index f540b3da275aae..7bdf6c211d4a89 100644
--- a/modules/arm_plugin/src/transformations/arm_optimizations.cpp
+++ b/modules/arm_plugin/src/transformations/arm_optimizations.cpp
@@ -110,6 +110,7 @@ bool ArmPlugin::pass::ArmOptimizations::run_on_function(std::shared_ptr
     manager.register_pass();
     manager.register_pass();
     manager.register_pass();
+    manager.register_pass();
     manager.register_pass();
     manager.register_pass();
     manager.register_pass();
diff --git a/modules/arm_plugin/src/transformations/conv_bias_activ_fusion.cpp b/modules/arm_plugin/src/transformations/conv_bias_activ_fusion.cpp
index aca5432d1f5d50..ae12e6c2e25863 100644
--- a/modules/arm_plugin/src/transformations/conv_bias_activ_fusion.cpp
+++ b/modules/arm_plugin/src/transformations/conv_bias_activ_fusion.cpp
@@ -134,7 +134,7 @@ ngraph::matcher_pass_callback ArmPlugin::pass::ConvBiasFusionBase::fuse_conv_wit
     }
 
     if (!std::dynamic_pointer_cast(eltwise->input_value(1 - conv_idx).get_node_shared_ptr())) {
-        IE_THROW() << "Unsupported Convolution with inconstant weights.";
+        return false; // Unsupported Convolution with inconstant bias
     }
 
     auto bias = eltwise->input_value(1 - conv_idx);
@@ -160,10 +160,6 @@ ngraph::matcher_pass_callback ArmPlugin::pass::ConvBiasFusionBase::fuse_conv_wit
                                              opset::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {channel_dim}), true);
     }
 
-    if (m_conv->output(0).get_target_inputs().size() != 1) {
-        return false;
-    }
-
     if (m_conv->inputs().size() == 3) {
         new_bias = std::make_shared(new_bias, m_conv->input_value(Inputs::Bias));
     }
@@ -194,6 +190,10 @@ ngraph::matcher_pass_callback ArmPlugin::pass::ConvertConvBase::convert_conv_to_
         return false;
     }
 
+    if (!std::dynamic_pointer_cast(m_conv->input_value(Inputs::Weights).get_node_shared_ptr())) {
+        IE_THROW() << "Unsupported Convolution with inconstant weights.";
+    }
+
     auto conv_arm = std::make_shared(
         m_conv->input_value(Inputs::Data),
         m_conv->input_value(Inputs::Weights),
@@ -233,17 +233,17 @@ ArmPlugin::pass::ConvertGroupConvolutionToArm::ConvertGroupConvolutionToArm() {
 
 ArmPlugin::pass::ConvBiasFusion::ConvBiasFusion() {
     auto m = std::make_shared(
-            ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()),
-                                        ngraph::pattern::any_input(ngraph::pattern::has_static_shape())},
-                                        ngraph::pattern::has_static_shape()), "ConvBiasFusion");
+            ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()),
+                                        ngraph::pattern::any_input(ngraph::pattern::has_static_shape())},
+                                        ngraph::pattern::has_static_shape()), "ConvBiasFusion");
     register_matcher(m, fuse_conv_with_bias());
 }
 
 ArmPlugin::pass::GroupConvBiasFusion::GroupConvBiasFusion() {
     auto m = std::make_shared(
-            ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()),
-                                        ngraph::pattern::any_input(ngraph::pattern::has_static_shape())},
-                                        ngraph::pattern::has_static_shape()), "GroupConvBiasFusion");
+            ngraph::pattern::wrap_type({ngraph::pattern::any_input(ngraph::pattern::has_static_shape()),
+                                        ngraph::pattern::any_input(ngraph::pattern::has_static_shape())},
+                                        ngraph::pattern::has_static_shape()), "GroupConvBiasFusion");
     register_matcher(m, fuse_conv_with_bias());
 }
diff --git a/modules/java_api/cpp/blob.cpp b/modules/java_api/cpp/blob.cpp
index 304e2811f9ca25..dcca765daae4a0 100644
--- a/modules/java_api/cpp/blob.cpp
+++ b/modules/java_api/cpp/blob.cpp
@@ -162,6 +162,7 @@ JNIEXPORT jlong JNICALL Java_org_intel_openvino_Blob_BlobLong(JNIEnv *env, jobje
 JNIEXPORT jlong JNICALL Java_org_intel_openvino_Blob_BlobCArray(JNIEnv *env, jobject obj, jlong tensorDescAddr, jlong matDataAddr)
 {
     static const char method_name[] = "BlobCArray";
+    Blob::Ptr *blob = nullptr;
     try
     {
         TensorDesc *tDesc = (TensorDesc *)tensorDescAddr;
@@ -169,7 +170,7 @@ JNIEXPORT jlong JNICALL Java_org_intel_openvino_Blob_BlobCArray(JNIEnv *env, job
         auto precision = tDesc->getPrecision();
         std::vector dims = tDesc->getDims();
 
-        Blob::Ptr *blob = new Blob::Ptr();
+        blob = new Blob::Ptr();
 
         switch (precision)
        {
            case Precision::FP32:
@@ -210,18 +211,26 @@ JNIEXPORT jlong JNICALL Java_org_intel_openvino_Blob_BlobCArray(JNIEnv *env, job
                 *blob = make_shared_blob((*tDesc), data);
                 break;
             }
-            default:
+            default: {
+                delete blob;
                 throw std::runtime_error("Unsupported precision value!");
+            }
         }
 
         return (jlong)blob;
     }
     catch (const std::exception &e)
     {
+        if (blob) {
+            delete blob;
+        }
         throwJavaException(env, &e, method_name);
     }
     catch (...)
     {
+        if (blob) {
+            delete blob;
+        }
         throwJavaException(env, 0, method_name);
     }
 
@@ -258,8 +267,13 @@ JNIEXPORT jlong JNICALL Java_org_intel_openvino_Blob_rmap(JNIEnv *env, jobject o
     Blob::Ptr *output = reinterpret_cast(addr);
 
     if ((*output)->is()) {
-        LockedMemory *lmem = new LockedMemory (as(*output)->rmap());
-        return (jlong)lmem;
+        auto mBlob = as(*output);
+        if (mBlob) {
+            LockedMemory *lmem = new LockedMemory (mBlob->rmap());
+            return (jlong)lmem;
+        } else {
+            throw std::runtime_error("Target Blob cannot be cast to the MemoryBlob!");
+        }
     } else {
         throw std::runtime_error("Target Blob cannot be cast to the MemoryBlob!");
     }
diff --git a/modules/java_api/cpp/tensor_desc.cpp b/modules/java_api/cpp/tensor_desc.cpp
index 7a15e0b1c7355f..b6e25952a420c1 100644
--- a/modules/java_api/cpp/tensor_desc.cpp
+++ b/modules/java_api/cpp/tensor_desc.cpp
@@ -45,6 +45,9 @@ JNIEXPORT jintArray JNICALL Java_org_intel_openvino_TensorDesc_GetDims(JNIEnv *e
         std::vector size_t_dims = tDesc->getDims();
 
         jintArray result = env->NewIntArray(size_t_dims.size());
+        if (!result) {
+            throw std::runtime_error("Out of memory!");
+        }
         jint *arr = env->GetIntArrayElements(result, nullptr);
 
         for (int i = 0; i < size_t_dims.size(); ++i)
diff --git a/modules/java_api/org/intel/openvino/IEWrapper.java b/modules/java_api/org/intel/openvino/IEWrapper.java
index 0b3f650c8b261f..3b6932afd1f640 100644
--- a/modules/java_api/org/intel/openvino/IEWrapper.java
+++ b/modules/java_api/org/intel/openvino/IEWrapper.java
@@ -14,6 +14,7 @@ protected long getNativeObjAddr() {
     @Override
     protected void finalize() throws Throwable {
         delete(nativeObj);
+        super.finalize();
     }
 
     /*----------------------------------- native methods -----------------------------------*/
diff --git a/modules/java_api/samples/ArgumentParser.java b/modules/java_api/samples/ArgumentParser.java
index d6bfa78782452f..1d7b1cb70a5481 100644
--- a/modules/java_api/samples/ArgumentParser.java
+++ b/modules/java_api/samples/ArgumentParser.java
@@ -25,24 +25,24 @@ private void printHelp() {
     }
 
     public void parseArgs(String[] args) {
-        try {
-            for (int i = 0; i < args.length; i++) {
-                String arg = args[i];
-                if (arg.equals("--help") | arg.equals("-h")) {
-                    printHelp();
-                    System.exit(0);
-                } else {
-                    if (description.containsKey(arg)) {
+        for (int i = 0; i < args.length; i++) {
+            String arg = args[i];
+            if (arg.equals("--help") || arg.equals("-h")) {
+                printHelp();
+                System.exit(0);
+            } else {
+                if (description.containsKey(arg)) {
+                    if (i < args.length - 1) {
                         input.put(arg, args[++i]);
                     } else {
-                        System.out.println("Non-existent key: '" + arg + "'");
+                        System.out.println("Missed value for key: '" + arg + "'");
                         System.exit(0);
                     }
+                } else {
+                    System.out.println("Non-existent key: '" + arg + "'");
+                    System.exit(0);
                 }
             }
-        } catch (ArrayIndexOutOfBoundsException e) {
-            System.out.println("Error: Incorrect number of arguments");
-            System.exit(0);
         }
     }
 
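The ArgumentParser rewrite above replaces the blanket ArrayIndexOutOfBoundsException handler with an explicit bounds check, so an unknown key and a key whose value is missing are now reported as two distinct errors. A minimal, self-contained sketch of that control flow (the class name, option keys, and messages below are illustrative only, not the sample's actual API):

```java
import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the corrected parsing logic: a known key must be
// followed by a value; unknown keys and missing values are reported separately.
public class ParseSketch {
    public static void main(String[] args) {
        Map<String, String> known = new HashMap<>();
        known.put("-m", "path to model");
        known.put("-i", "path to image");

        Map<String, String> parsed = new HashMap<>();
        for (int i = 0; i < args.length; i++) {
            String arg = args[i];
            if (arg.equals("--help") || arg.equals("-h")) {
                System.out.println("usage: -m <model> -i <image>");
                return;
            } else if (known.containsKey(arg)) {
                if (i < args.length - 1) {
                    parsed.put(arg, args[++i]); // consume the value that follows the key
                } else {
                    System.out.println("Missed value for key: '" + arg + "'");
                    return;
                }
            } else {
                System.out.println("Non-existent key: '" + arg + "'");
                return;
            }
        }
        System.out.println(parsed);
    }
}
```

Checking i < args.length - 1 before consuming the next token keeps ordinary input validation out of exception handling and makes both failure modes explicit at the point where they occur.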
diff --git a/modules/java_api/samples/benchmark_app/Main.java b/modules/java_api/samples/benchmark_app/Main.java
index 860b4baba12366..0049072c7a6636 100644
--- a/modules/java_api/samples/benchmark_app/Main.java
+++ b/modules/java_api/samples/benchmark_app/Main.java
@@ -1,10 +1,10 @@
 import org.intel.openvino.*;
 
+import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Random;
 import java.util.Vector;
 
 public class Main {
@@ -25,6 +25,9 @@ static boolean adjustShapesBatch(
             } else if (layout == Layout.CN) {
                 batchIndex = 1;
             }
+            if (!shapes.containsKey(entry.getKey())) {
+                continue;
+            }
             if ((batchIndex != -1) && (shapes.get(entry.getKey())[batchIndex] != batchSize)) {
                 shapes.get(entry.getKey())[batchIndex] = batchSize;
                 updated = true;
@@ -91,7 +94,7 @@ static Blob blobRandomByte(TensorDesc tDesc) {
         }
 
         byte[] buff = new byte[size];
-        Random rand = new Random();
+        SecureRandom rand = new SecureRandom();
         rand.nextBytes(buff);
 
         return new Blob(tDesc, buff);
@@ -108,7 +111,7 @@ static double getMedianValue(Vector vec) {
         else return (double) arr[arr.length / 2];
     }
 
-    static boolean getApiBoolean(String api) throws RuntimeException {
+    static boolean getApiBoolean(String api) throws Exception {
         if (api.equals("sync")) return false;
         else if (api.equals("async")) return true;
         else throw new RuntimeException("Incorrect argument: '-api'");
@@ -193,7 +196,7 @@ public static void main(String[] args) {
 
         try {
             isAsync = getApiBoolean(api);
-        } catch (RuntimeException e) {
+        } catch (Exception e) {
             System.out.println(e.getMessage());
             return;
         }
@@ -382,6 +385,10 @@ public static void main(String[] args) {
                 || (durationMs != 0L && execTime < durationMs)
                 || (isAsync && iteration % nireq != 0)) {
             inferRequest = inferRequestsQueue.getIdleRequest();
+            if (inferRequest == null) {
+                System.out.println("No idle Infer Requests!");
+                return;
+            }
 
             if (isAsync) {
                 // As the inference request is currently idle, the wait() adds no additional
diff --git a/modules/mo_pytorch/test/requirements.txt b/modules/mo_pytorch/test/requirements.txt
index 722d6cc22bdb6d..566162768af969 100644
--- a/modules/mo_pytorch/test/requirements.txt
+++ b/modules/mo_pytorch/test/requirements.txt
@@ -1,6 +1,3 @@
-defusedxml>=0.5.0
-networkx>=1.11
-test-generator==0.1.1
 torch==1.7.0
 torchvision==0.8.1
 opencv-python==4.4.0.46
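Related to the benchmark_app changes above, random input data for blobs is now generated with java.security.SecureRandom instead of java.util.Random. A small self-contained sketch of that pattern (the buffer size and printout are illustrative; the real blobRandomByte() helper sizes the buffer from the TensorDesc dimensions):

```java
import java.security.SecureRandom;
import java.util.Arrays;

public class RandomBufferSketch {
    public static void main(String[] args) {
        int size = 8; // illustrative size only
        byte[] buff = new byte[size];
        SecureRandom rand = new SecureRandom();
        rand.nextBytes(buff); // same call shape as in the updated blobRandomByte()
        System.out.println(Arrays.toString(buff));
    }
}
```

SecureRandom is slower than Random, but filling benchmark inputs once per run makes the cost negligible; the swap presumably silences static-analysis warnings about predictable pseudo-random data.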