Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Akup/cherry pick samples namespace update #19478

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions samples/python/benchmark/bert_benchmark/bert_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@
from time import perf_counter

import datasets
from openvino.runtime import Core, get_version, AsyncInferQueue, PartialShape
import openvino as ov
from openvino.runtime import get_version
from transformers import AutoTokenizer
from transformers.onnx import export
from transformers.onnx.features import FeaturesManager
Expand All @@ -28,7 +29,7 @@ def main():
# Download the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)

core = Core()
core = ov.Core()

with tempfile.TemporaryDirectory() as tmp:
onnx_path = Path(tmp) / f'{model_name}.onnx'
Expand All @@ -39,7 +40,7 @@ def main():

# Enforce dynamic input shape
try:
model.reshape({model_input.any_name: PartialShape([1, '?']) for model_input in model.inputs})
model.reshape({model_input.any_name: ov.PartialShape([1, '?']) for model_input in model.inputs})
except RuntimeError:
log.error("Can't set dynamic shape")
raise
Expand All @@ -50,7 +51,7 @@ def main():
# It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
compiled_model = core.compile_model(model, 'CPU', tput)
# AsyncInferQueue creates optimal number of InferRequest instances
ireqs = AsyncInferQueue(compiled_model)
ireqs = ov.AsyncInferQueue(compiled_model)

sst2 = datasets.load_dataset('glue', 'sst2')
sst2_sentences = sst2['validation']['sentence']
Expand Down
5 changes: 3 additions & 2 deletions samples/python/benchmark/sync_benchmark/sync_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@
from time import perf_counter

import numpy as np
from openvino.runtime import Core, get_version
import openvino as ov
from openvino.runtime import get_version
from openvino.runtime.utils.types import get_dtype


Expand Down Expand Up @@ -40,7 +41,7 @@ def main():
# Pick a device by replacing CPU, for example AUTO:GPU,CPU.
# Using MULTI device is pointless in sync scenario
# because only one instance of openvino.runtime.InferRequest is used
core = Core()
core = ov.Core()
compiled_model = core.compile_model(sys.argv[1], 'CPU', latency)
ireq = compiled_model.create_infer_request()
# Fill input data for the ireq
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@
from time import perf_counter

import numpy as np
from openvino.runtime import Core, get_version, AsyncInferQueue
import openvino as ov
from openvino.runtime import get_version
from openvino.runtime.utils.types import get_dtype


Expand Down Expand Up @@ -39,10 +40,10 @@ def main():
# Create Core and use it to compile a model.
# Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
# It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
core = Core()
core = ov.Core()
compiled_model = core.compile_model(sys.argv[1], 'CPU', tput)
# AsyncInferQueue creates optimal number of InferRequest instances
ireqs = AsyncInferQueue(compiled_model)
ireqs = ov.AsyncInferQueue(compiled_model)
# Fill input data for ireqs
for ireq in ireqs:
for model_input in compiled_model.inputs:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,7 @@

import cv2
import numpy as np
from openvino.preprocess import PrePostProcessor
from openvino.runtime import AsyncInferQueue, Core, InferRequest, Layout, Type
import openvino as ov


def parse_args() -> argparse.Namespace:
Expand All @@ -32,7 +31,7 @@ def parse_args() -> argparse.Namespace:
return parser.parse_args()


def completion_callback(infer_request: InferRequest, image_path: str) -> None:
def completion_callback(infer_request: ov.InferRequest, image_path: str) -> None:
predictions = next(iter(infer_request.results.values()))

# Change a shape of a numpy.ndarray with results to get another one with one dimension
Expand Down Expand Up @@ -61,7 +60,7 @@ def main() -> int:

# --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
log.info('Creating OpenVINO Runtime Core')
core = Core()
core = ov.Core()

# --------------------------- Step 2. Read a model --------------------------------------------------------------------
log.info(f'Reading the model: {args.model}')
Expand All @@ -88,22 +87,22 @@ def main() -> int:
input_tensors = [np.expand_dims(image, 0) for image in resized_images]

# --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
ppp = PrePostProcessor(model)
ppp = ov.preprocess.PrePostProcessor(model)

# 1) Set input tensor information:
# - input() provides information about a single model input
# - precision of tensor is supposed to be 'u8'
# - layout of data is 'NHWC'
ppp.input().tensor() \
.set_element_type(Type.u8) \
.set_layout(Layout('NHWC')) # noqa: N400
.set_element_type(ov.Type.u8) \
.set_layout(ov.Layout('NHWC')) # noqa: N400

# 2) Here we suppose model has 'NCHW' layout for input
ppp.input().model().set_layout(Layout('NCHW'))
ppp.input().model().set_layout(ov.Layout('NCHW'))

# 3) Set output tensor information:
# - precision of tensor is supposed to be 'f32'
ppp.output().tensor().set_element_type(Type.f32)
ppp.output().tensor().set_element_type(ov.Type.f32)

# 4) Apply preprocessing modifying the original 'model'
model = ppp.build()
Expand All @@ -115,7 +114,7 @@ def main() -> int:
# --------------------------- Step 6. Create infer request queue ------------------------------------------------------
log.info('Starting inference in asynchronous mode')
# create async queue with optimal number of infer requests
infer_queue = AsyncInferQueue(compiled_model)
infer_queue = ov.AsyncInferQueue(compiled_model)
infer_queue.set_callback(completion_callback)

# --------------------------- Step 7. Do inference --------------------------------------------------------------------
Expand Down
17 changes: 8 additions & 9 deletions samples/python/hello_classification/hello_classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,7 @@

import cv2
import numpy as np
from openvino.preprocess import PrePostProcessor, ResizeAlgorithm
from openvino.runtime import Core, Layout, Type
import openvino as ov


def main():
Expand All @@ -26,7 +25,7 @@ def main():

# --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
log.info('Creating OpenVINO Runtime Core')
core = Core()
core = ov.Core()

# --------------------------- Step 2. Read a model --------------------------------------------------------------------
log.info(f'Reading the model: {model_path}')
Expand All @@ -48,7 +47,7 @@ def main():
input_tensor = np.expand_dims(image, 0)

# --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
ppp = PrePostProcessor(model)
ppp = ov.preprocess.PrePostProcessor(model)

_, h, w, _ = input_tensor.shape

Expand All @@ -58,19 +57,19 @@ def main():
# - layout of data is 'NHWC'
ppp.input().tensor() \
.set_shape(input_tensor.shape) \
.set_element_type(Type.u8) \
.set_layout(Layout('NHWC')) # noqa: ECE001, N400
.set_element_type(ov.Type.u8) \
.set_layout(ov.Layout('NHWC')) # noqa: ECE001, N400

# 2) Adding explicit preprocessing steps:
# - apply linear resize from tensor spatial dims to model spatial dims
ppp.input().preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
ppp.input().preprocess().resize(ov.preprocess.ResizeAlgorithm.RESIZE_LINEAR)

# 3) Here we suppose model has 'NCHW' layout for input
ppp.input().model().set_layout(Layout('NCHW'))
ppp.input().model().set_layout(ov.Layout('NCHW'))

# 4) Set output tensor information:
# - precision of tensor is supposed to be 'f32'
ppp.output().tensor().set_element_type(Type.f32)
ppp.output().tensor().set_element_type(ov.Type.f32)

# 5) Apply preprocessing modifying the original 'model'
model = ppp.build()
Expand Down
4 changes: 2 additions & 2 deletions samples/python/hello_query_device/hello_query_device.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import logging as log
import sys

from openvino.runtime import Core
import openvino as ov


def param_to_string(parameters) -> str:
Expand All @@ -20,7 +20,7 @@ def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)

# --------------------------- Step 1. Initialize OpenVINO Runtime Core --------------------------------------------
core = Core()
core = ov.Core()

# --------------------------- Step 2. Get metrics of available devices --------------------------------------------
log.info('Available devices:')
Expand Down
17 changes: 8 additions & 9 deletions samples/python/hello_reshape_ssd/hello_reshape_ssd.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,7 @@

import cv2
import numpy as np
from openvino.preprocess import PrePostProcessor
from openvino.runtime import Core, Layout, PartialShape, Type
import openvino as ov


def main():
Expand All @@ -27,7 +26,7 @@ def main():

# --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
log.info('Creating OpenVINO Runtime Core')
core = Core()
core = ov.Core()

# --------------------------- Step 2. Read a model --------------------------------------------------------------------
log.info(f'Reading the model: {model_path}')
Expand All @@ -50,25 +49,25 @@ def main():

log.info('Reshaping the model to the height and width of the input image')
n, h, w, c = input_tensor.shape
model.reshape({model.input().get_any_name(): PartialShape((n, c, h, w))})
model.reshape({model.input().get_any_name(): ov.PartialShape((n, c, h, w))})

# --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
ppp = PrePostProcessor(model)
ppp = ov.preprocess.PrePostProcessor(model)

# 1) Set input tensor information:
# - input() provides information about a single model input
# - precision of tensor is supposed to be 'u8'
# - layout of data is 'NHWC'
ppp.input().tensor() \
.set_element_type(Type.u8) \
.set_layout(Layout('NHWC')) # noqa: N400
.set_element_type(ov.Type.u8) \
.set_layout(ov.Layout('NHWC')) # noqa: N400

# 2) Here we suppose model has 'NCHW' layout for input
ppp.input().model().set_layout(Layout('NCHW'))
ppp.input().model().set_layout(ov.Layout('NCHW'))

# 3) Set output tensor information:
# - precision of tensor is supposed to be 'f32'
ppp.output().tensor().set_element_type(Type.f32)
ppp.output().tensor().set_element_type(ov.Type.f32)

# 4) Apply preprocessing modifying the original 'model'
model = ppp.build()
Expand Down
Loading