From 287ccb4b88798fe88ee51b35b791b13a6376b26b Mon Sep 17 00:00:00 2001
From: Jan Iwaszkiewicz
Date: Mon, 28 Aug 2023 10:02:19 +0200
Subject: [PATCH] CPU snippets update (#134)

* snippets CPU 1/6

* snippets CPU 2/6

* snippets CPU 3/6

* snippets CPU 4/6

* snippets CPU 5/6

* snippets CPU 6/6

* make module TODO: REMEMBER ABOUT EXPORTING PYTHONPATH ON CIs ETC

* Add static model creation in snippets for CPU
---
 docs/snippets/__init__.py                     |  6 +++
 docs/snippets/cpu/Bfloat16Inference.py        | 20 +++++----
 docs/snippets/cpu/__init__.py                 |  2 +
 docs/snippets/cpu/compile_model.py            | 11 ++---
 docs/snippets/cpu/dynamic_shape.py            |  8 ++--
 docs/snippets/cpu/multi_threading.py          | 41 ++++++++++++++-----
 docs/snippets/cpu/ov_execution_mode.py        | 16 +++++---
 .../cpu/ov_sparse_weights_decompression.py    | 10 +++--
 8 files changed, 77 insertions(+), 37 deletions(-)
 create mode 100644 docs/snippets/__init__.py
 create mode 100644 docs/snippets/cpu/__init__.py

diff --git a/docs/snippets/__init__.py b/docs/snippets/__init__.py
new file mode 100644
index 00000000000000..f48358873d4703
--- /dev/null
+++ b/docs/snippets/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .utils import get_image
+from .utils import get_model
+from .utils import get_ngraph_model
diff --git a/docs/snippets/cpu/Bfloat16Inference.py b/docs/snippets/cpu/Bfloat16Inference.py
index c1d38c284f57fa..2d3fabfe6c4c29 100644
--- a/docs/snippets/cpu/Bfloat16Inference.py
+++ b/docs/snippets/cpu/Bfloat16Inference.py
@@ -2,22 +2,24 @@
 # SPDX-License-Identifier: Apache-2.0
 
 
-from openvino.runtime import Core
+import openvino as ov
+
+from snippets import get_model
+
+model = get_model()
 
 #! [part0]
-core = Core()
-cpu_optimization_capabilities = core.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
+core = ov.Core()
+cpu_optimization_capabilities = core.get_property("CPU", ov.properties.device.capabilities())
 #! [part0]
 
-# TODO: enable part1 when property api will be supported in python
 #! [part1]
-core = Core()
-model = core.read_model("model.xml")
+core = ov.Core()
 compiled_model = core.compile_model(model, "CPU")
-inference_precision = core.get_property("CPU", "INFERENCE_PRECISION_HINT")
+inference_precision = core.get_property("CPU", ov.properties.hint.inference_precision())
 #! [part1]
 
 #! [part2]
-core = Core()
-core.set_property("CPU", {"INFERENCE_PRECISION_HINT": "f32"})
+core = ov.Core()
+core.set_property("CPU", {ov.properties.hint.inference_precision(): ov.Type.f32})
 #! [part2]
diff --git a/docs/snippets/cpu/__init__.py b/docs/snippets/cpu/__init__.py
new file mode 100644
index 00000000000000..6a16273c024652
--- /dev/null
+++ b/docs/snippets/cpu/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (C) 2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
diff --git a/docs/snippets/cpu/compile_model.py b/docs/snippets/cpu/compile_model.py
index 5cb06ae694d01e..df6b5e48571078 100644
--- a/docs/snippets/cpu/compile_model.py
+++ b/docs/snippets/cpu/compile_model.py
@@ -1,17 +1,18 @@
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+from snippets import get_model
+
+model = get_model()
 
 #! [compile_model_default]
-from openvino.runtime import Core
+import openvino as ov
 
-core = Core()
-model = core.read_model("model.xml")
+core = ov.Core()
 compiled_model = core.compile_model(model, "CPU")
 #! [compile_model_default]
 
 #! [compile_model_multi]
-core = Core()
-model = core.read_model("model.xml")
+core = ov.Core()
 compiled_model = core.compile_model(model, "MULTI:CPU,GPU.0")
 #! [compile_model_multi]
diff --git a/docs/snippets/cpu/dynamic_shape.py b/docs/snippets/cpu/dynamic_shape.py
index 8bfbf5d99eec2a..e442b138aeae92 100644
--- a/docs/snippets/cpu/dynamic_shape.py
+++ b/docs/snippets/cpu/dynamic_shape.py
@@ -1,11 +1,13 @@
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+from snippets import get_model
 
-from openvino.runtime import Core
+model = get_model()
 
 #! [static_shape]
-core = Core()
-model = core.read_model("model.xml")
+import openvino as ov
+
+core = ov.Core()
 model.reshape([10, 20, 30, 40])
 #! [static_shape]
diff --git a/docs/snippets/cpu/multi_threading.py b/docs/snippets/cpu/multi_threading.py
index a8113390e9a12c..9a5baa1e7575b1 100644
--- a/docs/snippets/cpu/multi_threading.py
+++ b/docs/snippets/cpu/multi_threading.py
@@ -2,28 +2,47 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-import openvino.runtime as ov
-from openvino.runtime import Core, Type, OVAny, properties
+from openvino import Core, properties
+from snippets import get_model
+
+model = get_model()
+
+device_name = "CPU"
+core = Core()
+core.set_property("CPU", properties.intel_cpu.sparse_weights_decompression_rate(0.8))
 
-device_name = 'CPU'
-xml_path = 'model.xml'
-core = ov.Core()
-core.set_property("CPU", ov.properties.intel_cpu.sparse_weights_decompression_rate(0.8))
-model = core.read_model(model=xml_path)
 # ! [ov:intel_cpu:multi_threading:part0]
 # Use one logical processor for inference
-compiled_model_1 = core.compile_model(model=model, device_name=device_name, config={properties.inference_num_threads(1)})
+compiled_model_1 = core.compile_model(
+    model=model,
+    device_name=device_name,
+    config={properties.inference_num_threads(): 1},
+)
 
 # Use logical processors of Efficient-cores for inference on hybrid platform
-compiled_model_2 = core.compile_model(model=model, device_name=device_name, config={properties.hint.scheduling_core_type(properties.hint.SchedulingCoreType.ECORE_ONLY)})
+compiled_model_2 = core.compile_model(
+    model=model,
+    device_name=device_name,
+    config={
+        properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.ECORE_ONLY,
+    },
+)
 
 # Use one logical processor per CPU core for inference when hyper-threading is on
-compiled_model_3 = core.compile_model(model=model, device_name=device_name, config={properties.hint.enable_hyper_threading(False)})
+compiled_model_3 = core.compile_model(
+    model=model,
+    device_name=device_name,
+    config={properties.hint.enable_hyper_threading(): False},
+)
 # ! [ov:intel_cpu:multi_threading:part0]
 
 # ! [ov:intel_cpu:multi_threading:part1]
 # Disable CPU thread pinning for inference when the system supports it
-compiled_model_4 = core.compile_model(model=model, device_name=device_name, config={properties.hint.enable_cpu_pinning(False)})
+compiled_model_4 = core.compile_model(
+    model=model,
+    device_name=device_name,
+    config={properties.hint.enable_cpu_pinning(): False},
+)
 # ! [ov:intel_cpu:multi_threading:part1]
 assert compiled_model_1
 assert compiled_model_2
diff --git a/docs/snippets/cpu/ov_execution_mode.py b/docs/snippets/cpu/ov_execution_mode.py
index 4abd0ccf3754d6..2feff2777f7d27 100644
--- a/docs/snippets/cpu/ov_execution_mode.py
+++ b/docs/snippets/cpu/ov_execution_mode.py
@@ -1,12 +1,18 @@
 # Copyright (C) 2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from openvino.runtime import Core
-
 #! [ov:execution_mode:part0]
-core = Core()
+import openvino as ov
+
+core = ov.Core()
 # in case of Accuracy
-core.set_property("CPU", {"EXECUTION_MODE_HINT": "ACCURACY"})
+core.set_property(
+    "CPU",
+    {ov.properties.hint.execution_mode(): ov.properties.hint.ExecutionMode.ACCURACY},
+)
 # in case of Performance
-core.set_property("CPU", {"EXECUTION_MODE_HINT": "PERFORMANCE"})
+core.set_property(
+    "CPU",
+    {ov.properties.hint.execution_mode(): ov.properties.hint.ExecutionMode.PERFORMANCE},
+)
 #! [ov:execution_mode:part0]
diff --git a/docs/snippets/cpu/ov_sparse_weights_decompression.py b/docs/snippets/cpu/ov_sparse_weights_decompression.py
index f7ea0772bf73ce..76de90311d28ad 100644
--- a/docs/snippets/cpu/ov_sparse_weights_decompression.py
+++ b/docs/snippets/cpu/ov_sparse_weights_decompression.py
@@ -2,14 +2,16 @@
 # SPDX-License-Identifier: Apache-2.0
 #
-import openvino.runtime as ov
+import openvino as ov
+from snippets import get_model
 
-device_name = 'CPU'
-xml_path = 'model.xml'
+model = get_model()
+
+device_name = "CPU"
+xml_path = "model.xml"
 
 # ! [ov:intel_cpu:sparse_weights_decompression:part0]
 core = ov.Core()
 core.set_property("CPU", ov.properties.intel_cpu.sparse_weights_decompression_rate(0.8))
-model = core.read_model(model=xml_path)
 compiled_model = core.compile_model(model=model, device_name=device_name)
 # ! [ov:intel_cpu:sparse_weights_decompression:part0]
 assert compiled_model
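
Note: every snippet in this patch now does `from snippets import get_model`, but the `docs/snippets/utils.py` module backing that import is not part of this diff. For reference, here is a minimal sketch of what `get_model()` could look like, assuming it only needs to hand the CPU snippets a small static model to compile; the opset version, input shape, and all names below are illustrative assumptions, not the actual contents of utils.py:

```python
# Hypothetical sketch of docs/snippets/utils.py -- this file is NOT part of
# the patch above, and the real helper may differ.
import numpy as np
import openvino as ov
import openvino.runtime.opset12 as ops  # assumed opset; any recent one works


def get_model(input_shape=None) -> ov.Model:
    # Build a trivial static model (parameter -> ReLU) so the CPU snippets
    # have something concrete to pass to core.compile_model().
    if input_shape is None:
        input_shape = [1, 3, 32, 32]  # assumed default, chosen arbitrarily
    param = ops.parameter(input_shape, np.float32, name="input")
    relu = ops.relu(param)
    return ov.Model([relu], [param], "snippet_model")
```

The commit message's PYTHONPATH reminder exists for the same reason: `from snippets import get_model` only resolves when the `docs` directory is on the interpreter's module search path, e.g. by exporting `PYTHONPATH=<repo>/docs` before running a snippet (the exact CI wiring is not shown in this patch).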