diff --git a/samples/python/benchmark/bert_benchmark/bert_benchmark.py b/samples/python/benchmark/bert_benchmark/bert_benchmark.py
index eb50fb7bb52e0a..b15bbd48b34c84 100755
--- a/samples/python/benchmark/bert_benchmark/bert_benchmark.py
+++ b/samples/python/benchmark/bert_benchmark/bert_benchmark.py
@@ -10,7 +10,8 @@
 from time import perf_counter
 
 import datasets
-from openvino.runtime import Core, get_version, AsyncInferQueue, PartialShape
+import openvino as ov
+from openvino.runtime import get_version
 from transformers import AutoTokenizer
 from transformers.onnx import export
 from transformers.onnx.features import FeaturesManager
@@ -28,7 +29,7 @@ def main():
 
     # Download the tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_name)
-    core = Core()
+    core = ov.Core()
 
     with tempfile.TemporaryDirectory() as tmp:
         onnx_path = Path(tmp) / f'{model_name}.onnx'
@@ -39,7 +40,7 @@ def main():
 
         # Enforce dynamic input shape
        try:
-            model.reshape({model_input.any_name: PartialShape([1, '?']) for model_input in model.inputs})
+            model.reshape({model_input.any_name: ov.PartialShape([1, '?']) for model_input in model.inputs})
         except RuntimeError:
             log.error("Can't set dynamic shape")
             raise
@@ -50,7 +51,7 @@ def main():
     # It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
     compiled_model = core.compile_model(model, 'CPU', tput)
     # AsyncInferQueue creates optimal number of InferRequest instances
-    ireqs = AsyncInferQueue(compiled_model)
+    ireqs = ov.AsyncInferQueue(compiled_model)
 
     sst2 = datasets.load_dataset('glue', 'sst2')
     sst2_sentences = sst2['validation']['sentence']
diff --git a/samples/python/benchmark/sync_benchmark/sync_benchmark.py b/samples/python/benchmark/sync_benchmark/sync_benchmark.py
index 367776356d389f..e270d25a64fbbd 100755
--- a/samples/python/benchmark/sync_benchmark/sync_benchmark.py
+++ b/samples/python/benchmark/sync_benchmark/sync_benchmark.py
@@ -9,7 +9,8 @@
 from time import perf_counter
 
 import numpy as np
-from openvino.runtime import Core, get_version
+import openvino as ov
+from openvino.runtime import get_version
 from openvino.runtime.utils.types import get_dtype
 
 
@@ -40,7 +41,7 @@ def main():
     # Pick a device by replacing CPU, for example AUTO:GPU,CPU.
     # Using MULTI device is pointless in sync scenario
     # because only one instance of openvino.runtime.InferRequest is used
-    core = Core()
+    core = ov.Core()
     compiled_model = core.compile_model(sys.argv[1], 'CPU', latency)
     ireq = compiled_model.create_infer_request()
     # Fill input data for the ireq
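The two benchmark hunks above only swap the import style; the runtime calls are unchanged. For reference, the migrated synchronous pattern boils down to roughly the following sketch. It is not part of the patch: 'model.xml' is a placeholder path, and static f32 inputs are assumed.

import numpy as np
import openvino as ov

core = ov.Core()
# The third argument is a config dict, same as the 'latency' dict in the sample
compiled_model = core.compile_model('model.xml', 'CPU', {'PERFORMANCE_HINT': 'LATENCY'})
ireq = compiled_model.create_infer_request()
# One zero-filled array per input; shapes are static after compilation
inputs = {inp.any_name: np.zeros(list(inp.shape), dtype=np.float32)
          for inp in compiled_model.inputs}
results = ireq.infer(inputs)
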
diff --git a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
index 3867e785b31ac9..c934a7650172e6 100755
--- a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
+++ b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
@@ -9,7 +9,8 @@
 from time import perf_counter
 
 import numpy as np
-from openvino.runtime import Core, get_version, AsyncInferQueue
+import openvino as ov
+from openvino.runtime import get_version
 from openvino.runtime.utils.types import get_dtype
 
 
@@ -39,10 +40,10 @@ def main():
     # Create Core and use it to compile a model.
     # Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
     # It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
-    core = Core()
+    core = ov.Core()
     compiled_model = core.compile_model(sys.argv[1], 'CPU', tput)
     # AsyncInferQueue creates optimal number of InferRequest instances
-    ireqs = AsyncInferQueue(compiled_model)
+    ireqs = ov.AsyncInferQueue(compiled_model)
     # Fill input data for ireqs
     for ireq in ireqs:
         for model_input in compiled_model.inputs:
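The throughput variant differs only in the performance hint and in using ov.AsyncInferQueue, which sizes itself to the device-optimal number of requests. A minimal sketch of that pattern, under the same placeholder assumptions as above:

import numpy as np
import openvino as ov

core = ov.Core()
compiled_model = core.compile_model('model.xml', 'CPU', {'PERFORMANCE_HINT': 'THROUGHPUT'})
ireqs = ov.AsyncInferQueue(compiled_model)  # queue length defaults to the optimal request count
data = {inp.any_name: np.zeros(list(inp.shape), dtype=np.float32)
        for inp in compiled_model.inputs}
for _ in range(100):
    ireqs.start_async(data)  # blocks only while every request in the queue is busy
ireqs.wait_all()
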
diff --git a/samples/python/classification_sample_async/classification_sample_async.py b/samples/python/classification_sample_async/classification_sample_async.py
index 8e0b8d244d3a0c..92180f0534eb4e 100755
--- a/samples/python/classification_sample_async/classification_sample_async.py
+++ b/samples/python/classification_sample_async/classification_sample_async.py
@@ -9,8 +9,7 @@
 
 import cv2
 import numpy as np
-from openvino.preprocess import PrePostProcessor
-from openvino.runtime import AsyncInferQueue, Core, InferRequest, Layout, Type
+import openvino as ov
 
 
 def parse_args() -> argparse.Namespace:
@@ -32,7 +31,7 @@ def parse_args() -> argparse.Namespace:
     return parser.parse_args()
 
 
-def completion_callback(infer_request: InferRequest, image_path: str) -> None:
+def completion_callback(infer_request: ov.InferRequest, image_path: str) -> None:
     predictions = next(iter(infer_request.results.values()))
 
     # Change a shape of a numpy.ndarray with results to get another one with one dimension
@@ -61,7 +60,7 @@ def main() -> int:
 
     # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
     log.info('Creating OpenVINO Runtime Core')
-    core = Core()
+    core = ov.Core()
 
     # --------------------------- Step 2. Read a model --------------------------------------------------------------------
     log.info(f'Reading the model: {args.model}')
@@ -88,22 +87,22 @@ def main() -> int:
     input_tensors = [np.expand_dims(image, 0) for image in resized_images]
 
     # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
-    ppp = PrePostProcessor(model)
+    ppp = ov.preprocess.PrePostProcessor(model)
 
     # 1) Set input tensor information:
     # - input() provides information about a single model input
     # - precision of tensor is supposed to be 'u8'
     # - layout of data is 'NHWC'
     ppp.input().tensor() \
-        .set_element_type(Type.u8) \
-        .set_layout(Layout('NHWC'))  # noqa: N400
+        .set_element_type(ov.Type.u8) \
+        .set_layout(ov.Layout('NHWC'))  # noqa: N400
 
     # 2) Here we suppose model has 'NCHW' layout for input
-    ppp.input().model().set_layout(Layout('NCHW'))
+    ppp.input().model().set_layout(ov.Layout('NCHW'))
 
     # 3) Set output tensor information:
     # - precision of tensor is supposed to be 'f32'
-    ppp.output().tensor().set_element_type(Type.f32)
+    ppp.output().tensor().set_element_type(ov.Type.f32)
 
     # 4) Apply preprocessing modifing the original 'model'
     model = ppp.build()
@@ -115,7 +114,7 @@ def main() -> int:
     # --------------------------- Step 6. Create infer request queue ------------------------------------------------------
     log.info('Starting inference in asynchronous mode')
     # create async queue with optimal number of infer requests
-    infer_queue = AsyncInferQueue(compiled_model)
+    infer_queue = ov.AsyncInferQueue(compiled_model)
     infer_queue.set_callback(completion_callback)
 
     # --------------------------- Step 7. Do inference --------------------------------------------------------------------
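The async classification sample pairs the queue with a completion callback that receives per-request userdata. Roughly, the wiring works as below; a sketch only, with 'model.xml' as a placeholder, a single u8 input assumed, and zero tensors standing in for real images:

import numpy as np
import openvino as ov

def completion_callback(infer_request: ov.InferRequest, userdata: str) -> None:
    # Runs when the request finishes; userdata is whatever was passed to start_async()
    predictions = next(iter(infer_request.results.values()))
    print(userdata, predictions.shape)

core = ov.Core()
compiled_model = core.compile_model('model.xml', 'CPU')
infer_queue = ov.AsyncInferQueue(compiled_model)
infer_queue.set_callback(completion_callback)
for tag in ('image_0', 'image_1'):
    dummy = np.zeros(list(compiled_model.input().shape), dtype=np.uint8)
    infer_queue.start_async({0: dummy}, tag)  # second argument becomes the callback's userdata
infer_queue.wait_all()
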
diff --git a/samples/python/hello_classification/hello_classification.py b/samples/python/hello_classification/hello_classification.py
index 135422ef96cf21..9e391c322196b1 100755
--- a/samples/python/hello_classification/hello_classification.py
+++ b/samples/python/hello_classification/hello_classification.py
@@ -8,8 +8,7 @@
 
 import cv2
 import numpy as np
-from openvino.preprocess import PrePostProcessor, ResizeAlgorithm
-from openvino.runtime import Core, Layout, Type
+import openvino as ov
 
 
 def main():
@@ -26,7 +25,7 @@ def main():
 
     # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
     log.info('Creating OpenVINO Runtime Core')
-    core = Core()
+    core = ov.Core()
 
     # --------------------------- Step 2. Read a model --------------------------------------------------------------------
     log.info(f'Reading the model: {model_path}')
@@ -48,7 +47,7 @@ def main():
     input_tensor = np.expand_dims(image, 0)
 
     # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
-    ppp = PrePostProcessor(model)
+    ppp = ov.preprocess.PrePostProcessor(model)
 
     _, h, w, _ = input_tensor.shape
 
@@ -58,19 +57,19 @@ def main():
     # - layout of data is 'NHWC'
     ppp.input().tensor() \
         .set_shape(input_tensor.shape) \
-        .set_element_type(Type.u8) \
-        .set_layout(Layout('NHWC'))  # noqa: ECE001, N400
+        .set_element_type(ov.Type.u8) \
+        .set_layout(ov.Layout('NHWC'))  # noqa: ECE001, N400
 
     # 2) Adding explicit preprocessing steps:
     # - apply linear resize from tensor spatial dims to model spatial dims
-    ppp.input().preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
+    ppp.input().preprocess().resize(ov.preprocess.ResizeAlgorithm.RESIZE_LINEAR)
 
     # 3) Here we suppose model has 'NCHW' layout for input
-    ppp.input().model().set_layout(Layout('NCHW'))
+    ppp.input().model().set_layout(ov.Layout('NCHW'))
 
     # 4) Set output tensor information:
     # - precision of tensor is supposed to be 'f32'
-    ppp.output().tensor().set_element_type(Type.f32)
+    ppp.output().tensor().set_element_type(ov.Type.f32)
 
     # 5) Apply preprocessing modifying the original 'model'
     model = ppp.build()
diff --git a/samples/python/hello_query_device/hello_query_device.py b/samples/python/hello_query_device/hello_query_device.py
index 5da48752427982..8efadc1466eff4 100755
--- a/samples/python/hello_query_device/hello_query_device.py
+++ b/samples/python/hello_query_device/hello_query_device.py
@@ -5,7 +5,7 @@
 import logging as log
 import sys
 
-from openvino.runtime import Core
+import openvino as ov
 
 
 def param_to_string(parameters) -> str:
@@ -20,7 +20,7 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
 
     # --------------------------- Step 1. Initialize OpenVINO Runtime Core --------------------------------------------
-    core = Core()
+    core = ov.Core()
 
     # --------------------------- Step 2. Get metrics of available devices --------------------------------------------
     log.info('Available devices:')
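hello_query_device only touches the Core import; the query loop keeps its shape. For context, listing devices with the namespaced API looks roughly like this (FULL_DEVICE_NAME is a standard informational property; the sketch is not from the patch):

import openvino as ov

core = ov.Core()
for device in core.available_devices:
    # Each device exposes informational properties via get_property()
    print(f"{device}: {core.get_property(device, 'FULL_DEVICE_NAME')}")
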
diff --git a/samples/python/hello_reshape_ssd/hello_reshape_ssd.py b/samples/python/hello_reshape_ssd/hello_reshape_ssd.py
index e2e81d67bcc5cd..40d4afdbcf0819 100755
--- a/samples/python/hello_reshape_ssd/hello_reshape_ssd.py
+++ b/samples/python/hello_reshape_ssd/hello_reshape_ssd.py
@@ -9,8 +9,7 @@
 
 import cv2
 import numpy as np
-from openvino.preprocess import PrePostProcessor
-from openvino.runtime import Core, Layout, PartialShape, Type
+import openvino as ov
 
 
 def main():
@@ -27,7 +26,7 @@ def main():
 
     # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
     log.info('Creating OpenVINO Runtime Core')
-    core = Core()
+    core = ov.Core()
 
     # --------------------------- Step 2. Read a model --------------------------------------------------------------------
     log.info(f'Reading the model: {model_path}')
@@ -50,25 +49,25 @@ def main():
     log.info('Reshaping the model to the height and width of the input image')
     n, h, w, c = input_tensor.shape
-    model.reshape({model.input().get_any_name(): PartialShape((n, c, h, w))})
+    model.reshape({model.input().get_any_name(): ov.PartialShape((n, c, h, w))})
 
     # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
-    ppp = PrePostProcessor(model)
+    ppp = ov.preprocess.PrePostProcessor(model)
 
     # 1) Set input tensor information:
     # - input() provides information about a single model input
     # - precision of tensor is supposed to be 'u8'
     # - layout of data is 'NHWC'
     ppp.input().tensor() \
-        .set_element_type(Type.u8) \
-        .set_layout(Layout('NHWC'))  # noqa: N400
+        .set_element_type(ov.Type.u8) \
+        .set_layout(ov.Layout('NHWC'))  # noqa: N400
 
     # 2) Here we suppose model has 'NCHW' layout for input
-    ppp.input().model().set_layout(Layout('NCHW'))
+    ppp.input().model().set_layout(ov.Layout('NCHW'))
 
     # 3) Set output tensor information:
     # - precision of tensor is supposed to be 'f32'
-    ppp.output().tensor().set_element_type(Type.f32)
+    ppp.output().tensor().set_element_type(ov.Type.f32)
 
     # 4) Apply preprocessing modifing the original 'model'
     model = ppp.build()
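The reshape call is the one place where PartialShape now comes from the top-level ov namespace. A condensed sketch of reshaping a model before compilation; the 1x3x480x640 shape and 'model.xml' path are illustrative, not from the sample:

import openvino as ov

core = ov.Core()
model = core.read_model('model.xml')
# Reshape the single input to a concrete NCHW shape before compiling
model.reshape({model.input().get_any_name(): ov.PartialShape([1, 3, 480, 640])})
compiled_model = core.compile_model(model, 'CPU')
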
diff --git a/samples/python/model_creation_sample/model_creation_sample.py b/samples/python/model_creation_sample/model_creation_sample.py
index 27792fef5c0c57..d76d222c5f870e 100755
--- a/samples/python/model_creation_sample/model_creation_sample.py
+++ b/samples/python/model_creation_sample/model_creation_sample.py
@@ -8,14 +8,13 @@
 from functools import reduce
 
 import numpy as np
-from openvino.preprocess import PrePostProcessor
-from openvino.runtime import (Core, Layout, Model, Shape, Type, op, opset1,
-                              opset8, set_batch)
+import openvino as ov
+from openvino.runtime import op, opset1, opset8
 
 from data import digits
 
 
-def create_ngraph_function(model_path: str) -> Model:
+def create_ngraph_function(model_path: str) -> ov.Model:
     """Create a model on the fly from the source code using ngraph."""
 
     def shape_and_length(shape: list) -> typing.Tuple[list, int]:
@@ -28,17 +27,17 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
 
     # input
     input_shape = [64, 1, 28, 28]
-    param_node = op.Parameter(Type.f32, Shape(input_shape))
+    param_node = op.Parameter(ov.Type.f32, ov.Shape(input_shape))
 
     # convolution 1
     conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
-    conv_1_kernel = op.Constant(Type.f32, Shape(conv_1_kernel_shape), weights[0:conv_1_kernel_length].tolist())
+    conv_1_kernel = op.Constant(ov.Type.f32, ov.Shape(conv_1_kernel_shape), weights[0:conv_1_kernel_length].tolist())
     weights_offset += conv_1_kernel_length
     conv_1_node = opset8.convolution(param_node, conv_1_kernel, [1, 1], padding_begin, padding_end, [1, 1])
 
     # add 1
     add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
-    add_1_kernel = op.Constant(Type.f32, Shape(add_1_kernel_shape),
+    add_1_kernel = op.Constant(ov.Type.f32, ov.Shape(add_1_kernel_shape),
                                weights[weights_offset : weights_offset + add_1_kernel_length])
     weights_offset += add_1_kernel_length
     add_1_node = opset8.add(conv_1_node, add_1_kernel)
@@ -48,7 +47,7 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
 
     # convolution 2
     conv_2_kernel_shape, conv_2_kernel_length = shape_and_length([50, 20, 5, 5])
-    conv_2_kernel = op.Constant(Type.f32, Shape(conv_2_kernel_shape),
+    conv_2_kernel = op.Constant(ov.Type.f32, ov.Shape(conv_2_kernel_shape),
                                 weights[weights_offset : weights_offset + conv_2_kernel_length],
                                 )
     weights_offset += conv_2_kernel_length
@@ -56,7 +55,7 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
 
     # add 2
     add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
-    add_2_kernel = op.Constant(Type.f32, Shape(add_2_kernel_shape),
+    add_2_kernel = op.Constant(ov.Type.f32, ov.Shape(add_2_kernel_shape),
                                weights[weights_offset : weights_offset + add_2_kernel_length],
                                )
     weights_offset += add_2_kernel_length
@@ -72,13 +71,13 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
         weights[weights_offset : weights_offset + 2 * reshape_1_length],
         dtype=np.int64,
     )
-    reshape_1_kernel = op.Constant(Type.i64, Shape(list(dtype_weights.shape)), dtype_weights)
+    reshape_1_kernel = op.Constant(ov.Type.i64, ov.Shape(list(dtype_weights.shape)), dtype_weights)
     weights_offset += 2 * reshape_1_length
     reshape_1_node = opset8.reshape(maxpool_2_node, reshape_1_kernel, True)
 
     # matmul 1
     matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length([500, 800])
-    matmul_1_kernel = op.Constant(Type.f32, Shape(matmul_1_kernel_shape),
+    matmul_1_kernel = op.Constant(ov.Type.f32, ov.Shape(matmul_1_kernel_shape),
                                   weights[weights_offset : weights_offset + matmul_1_kernel_length],
                                   )
     weights_offset += matmul_1_kernel_length
@@ -86,7 +85,7 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
 
     # add 3
     add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
-    add_3_kernel = op.Constant(Type.f32, Shape(add_3_kernel_shape),
+    add_3_kernel = op.Constant(ov.Type.f32, ov.Shape(add_3_kernel_shape),
                                weights[weights_offset : weights_offset + add_3_kernel_length],
                                )
     weights_offset += add_3_kernel_length
@@ -96,12 +95,12 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
     relu_node = opset8.relu(add_3_node)
 
     # reshape 2
-    reshape_2_kernel = op.Constant(Type.i64, Shape(list(dtype_weights.shape)), dtype_weights)
+    reshape_2_kernel = op.Constant(ov.Type.i64, ov.Shape(list(dtype_weights.shape)), dtype_weights)
     reshape_2_node = opset8.reshape(relu_node, reshape_2_kernel, True)
 
     # matmul 2
     matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
-    matmul_2_kernel = op.Constant(Type.f32, Shape(matmul_2_kernel_shape),
+    matmul_2_kernel = op.Constant(ov.Type.f32, ov.Shape(matmul_2_kernel_shape),
                                   weights[weights_offset : weights_offset + matmul_2_kernel_length],
                                   )
     weights_offset += matmul_2_kernel_length
@@ -109,7 +108,7 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
 
     # add 4
     add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
-    add_4_kernel = op.Constant(Type.f32, Shape(add_4_kernel_shape),
+    add_4_kernel = op.Constant(ov.Type.f32, ov.Shape(add_4_kernel_shape),
                                weights[weights_offset : weights_offset + add_4_kernel_length],
                                )
     weights_offset += add_4_kernel_length
@@ -119,7 +118,7 @@ def shape_and_length(shape: list) -> typing.Tuple[list, int]:
     softmax_axis = 1
     softmax_node = opset8.softmax(add_4_node, softmax_axis)
 
-    return Model(softmax_node, [param_node], 'lenet')
+    return ov.Model(softmax_node, [param_node], 'lenet')
 
 
 def main():
@@ -135,35 +134,35 @@ def main():
     number_top = 1
 
     # ---------------------------Step 1. Initialize OpenVINO Runtime Core--------------------------------------------------
     log.info('Creating OpenVINO Runtime Core')
-    core = Core()
 
     # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation------------------------------
     log.info(f'Loading the model using ngraph function with weights from {model_path}')
     model = create_ngraph_function(model_path)
     # ---------------------------Step 3. Apply preprocessing----------------------------------------------------------
     # Get names of input and output blobs
-    ppp = PrePostProcessor(model)
+    ppp = ov.preprocess.PrePostProcessor(model)
     # 1) Set input tensor information:
     # - input() provides information about a single model input
     # - precision of tensor is supposed to be 'u8'
     # - layout of data is 'NHWC'
     ppp.input().tensor() \
-        .set_element_type(Type.u8) \
-        .set_layout(Layout('NHWC'))  # noqa: N400
+        .set_element_type(ov.Type.u8) \
+        .set_layout(ov.Layout('NHWC'))  # noqa: N400
     # 2) Here we suppose model has 'NCHW' layout for input
-    ppp.input().model().set_layout(Layout('NCHW'))
+    ppp.input().model().set_layout(ov.Layout('NCHW'))
     # 3) Set output tensor information:
     # - precision of tensor is supposed to be 'f32'
-    ppp.output().tensor().set_element_type(Type.f32)
+    ppp.output().tensor().set_element_type(ov.Type.f32)
     # 4) Apply preprocessing modifing the original 'model'
     model = ppp.build()
 
     # Set a batch size equal to number of input images
-    set_batch(model, digits.shape[0])
+    ov.set_batch(model, digits.shape[0])
 
     # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
     log.info('Loading the model to the plugin')
+    core = ov.Core()
     compiled_model = core.compile_model(model, device_name)
 
     # ---------------------------Step 5. Prepare input---------------------------------------------------------------------
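model_creation_sample builds LeNet node by node; the patch moves only the type/shape/Model classes to the ov namespace, while the op and opset factories keep their openvino.runtime home. A toy version of the same build-then-compile flow, with made-up layer sizes (a sketch, not the sample's network):

import numpy as np
import openvino as ov
from openvino.runtime import opset8

# Parameter -> MatMul -> Add -> Softmax, wrapped into an ov.Model
param_node = opset8.parameter([1, 8], np.float32, name='data')
weights = opset8.constant(np.ones((8, 4), dtype=np.float32))
matmul_node = opset8.matmul(param_node, weights, transpose_a=False, transpose_b=False)
bias = opset8.constant(np.zeros((1, 4), dtype=np.float32))
add_node = opset8.add(matmul_node, bias)
softmax_node = opset8.softmax(add_node, axis=1)
model = ov.Model(softmax_node, [param_node], 'toy_net')
compiled_model = ov.Core().compile_model(model, 'CPU')
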
diff --git a/samples/python/speech_sample/speech_sample.py b/samples/python/speech_sample/speech_sample.py
index 8c2352defaa98a..e9ac1a821dc51a 100755
--- a/samples/python/speech_sample/speech_sample.py
+++ b/samples/python/speech_sample/speech_sample.py
@@ -9,8 +9,7 @@
 from typing import Dict
 
 import numpy as np
-from openvino.preprocess import PrePostProcessor
-from openvino.runtime import Core, InferRequest, Layout, Type, set_batch
+import openvino as ov
 
 from arg_parser import parse_args
 from file_options import read_utterance_file, write_utterance_file
@@ -20,7 +19,7 @@
                    set_scale_factors)
 
 
-def do_inference(data: Dict[str, np.ndarray], infer_request: InferRequest, cw_l: int = 0, cw_r: int = 0) -> np.ndarray:
+def do_inference(data: Dict[str, np.ndarray], infer_request: ov.InferRequest, cw_l: int = 0, cw_r: int = 0) -> np.ndarray:
     """Do a synchronous matrix inference."""
     frames_to_infer = {}
     result = {}
@@ -69,7 +68,7 @@ def main():
 
     # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
     log.info('Creating OpenVINO Runtime Core')
-    core = Core()
+    core = ov.Core()
 
     # --------------------------- Step 2. Read a model --------------------------------------------------------------------
     if args.model:
@@ -83,19 +82,19 @@ def main():
     if args.layout:
         layouts = get_input_layouts(args.layout, model.inputs)
 
-    ppp = PrePostProcessor(model)
+    ppp = ov.preprocess.PrePostProcessor(model)
 
     for i in range(len(model.inputs)):
-        ppp.input(i).tensor().set_element_type(Type.f32)
+        ppp.input(i).tensor().set_element_type(ov.Type.f32)
 
         input_name = model.input(i).get_any_name()
 
         if args.layout and input_name in layouts.keys():
-            ppp.input(i).tensor().set_layout(Layout(layouts[input_name]))
-            ppp.input(i).model().set_layout(Layout(layouts[input_name]))
+            ppp.input(i).tensor().set_layout(ov.Layout(layouts[input_name]))
+            ppp.input(i).model().set_layout(ov.Layout(layouts[input_name]))
 
     for i in range(len(model.outputs)):
-        ppp.output(i).tensor().set_element_type(Type.f32)
+        ppp.output(i).tensor().set_element_type(ov.Type.f32)
 
     model = ppp.build()
 
@@ -103,7 +102,7 @@ def main():
     batch_size = args.batch_size if args.context_window_left == args.context_window_right == 0 else 1
 
     if any((not _input.node.layout.empty for _input in model.inputs)):
-        set_batch(model, batch_size)
+        ov.set_batch(model, batch_size)
    else:
        log.warning('Layout is not set for any input, so custom batch size is not set')
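speech_sample keeps its PrePostProcessor loop and the layout-dependent ov.set_batch call, and the dependency between the two is worth spelling out: set_batch can only work once a layout identifies which dimension is the batch ('N'). A sketch of that interaction, assuming 2-D 'NC' inputs and a placeholder model path:

import openvino as ov

core = ov.Core()
model = core.read_model('model.xml')
ppp = ov.preprocess.PrePostProcessor(model)
for i in range(len(model.inputs)):
    # Declare f32 tensors and an 'NC' layout so the batch dimension is known
    ppp.input(i).tensor().set_element_type(ov.Type.f32)
    ppp.input(i).model().set_layout(ov.Layout('NC'))
for i in range(len(model.outputs)):
    ppp.output(i).tensor().set_element_type(ov.Type.f32)
model = ppp.build()
ov.set_batch(model, 8)  # succeeds because the layout marks the 'N' dimension
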