Commit

apply comments

akuporos committed Aug 29, 2023
1 parent 7a7f1db commit 6eb833d
Showing 8 changed files with 30 additions and 34 deletions.
4 changes: 2 additions & 2 deletions docs/snippets/ie_common.py
@@ -10,10 +10,10 @@
 core = ie.IECore()
 #! [ie:create_core]
 
-xml_path = get_path_to_model(True)
+model_path = get_path_to_model(True)
 
 #! [ie:read_model]
-network = core.read_network(xml_path)
+network = core.read_network(model_path)
 #! [ie:read_model]
 
 #! [ie:compile_model]
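
The rename keeps this legacy Inference Engine snippet consistent with the API 2.0 snippets below. For comparison, a minimal sketch of the equivalent read-then-compile flow in API 2.0; the model path and device are placeholders:

import openvino as ov

core = ov.Core()                      # replaces ie.IECore()
model = core.read_model("model.xml")  # replaces core.read_network(...)
compiled_model = core.compile_model(model, "CPU")
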
14 changes: 7 additions & 7 deletions docs/snippets/ov_caching.py
@@ -8,28 +8,28 @@
 import openvino.runtime.properties as props
 
 device_name = 'GNA'
-xml_path = get_path_to_model()
-path_to_cash_dir = get_temp_dir()
+model_path = get_path_to_model()
+path_to_cache_dir = get_temp_dir()
 # ! [ov:caching:part0]
 core = ov.Core()
-core.set_property({props.cache_dir(): path_to_cash_dir})
-model = core.read_model(model=xml_path)
+core.set_property({props.cache_dir(): path_to_cache_dir})
+model = core.read_model(model=model_path)
 compiled_model = core.compile_model(model=model, device_name=device_name)
 # ! [ov:caching:part0]
 
 assert compiled_model
 
 # ! [ov:caching:part1]
 core = ov.Core()
-compiled_model = core.compile_model(model=xml_path, device_name=device_name)
+compiled_model = core.compile_model(model=model_path, device_name=device_name)
 # ! [ov:caching:part1]
 
 assert compiled_model
 
 # ! [ov:caching:part2]
 core = ov.Core()
-core.set_property({props.cache_dir(): path_to_cash_dir})
-compiled_model = core.compile_model(model=xml_path, device_name=device_name)
+core.set_property({props.cache_dir(): path_to_cache_dir})
+compiled_model = core.compile_model(model=model_path, device_name=device_name)
 # ! [ov:caching:part2]
 
 assert compiled_model
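
All three tagged parts rely on the same mechanism: once props.cache_dir() is set, the first compile_model() call exports a compiled blob and later calls with the same model, device, and config import it. A condensed sketch, with the cache directory, model path, and device as placeholders:

import openvino as ov
import openvino.runtime.properties as props

core = ov.Core()
core.set_property({props.cache_dir(): "/tmp/ov_cache"})  # enable model caching

# First run compiles from scratch and fills the cache;
# repeat runs load the cached blob instead of recompiling.
compiled_model = core.compile_model(model="model.xml", device_name="CPU")
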
7 changes: 4 additions & 3 deletions docs/snippets/ov_common.py
@@ -11,10 +11,10 @@
 core = ov.Core()
 #! [ov_api_2_0:create_core]
 
-xml_path = get_path_to_model()
+model_path = get_path_to_model()
 
 #! [ov_api_2_0:read_model]
-model = core.read_model(xml_path)
+model = core.read_model(model_path)
 #! [ov_api_2_0:read_model]
 
 #! [ov_api_2_0:compile_model]
@@ -107,7 +107,8 @@ def callback(request, frame_id):
 # process output data ...
 #! [ov_api_2_0:get_output_tensor_v10]
 
-#! [ov_api_2_0:load_old_extension]
 path_to_extension_library = get_path_to_extension_library()
+
+#! [ov_api_2_0:load_old_extension]
 core.add_extension(path_to_extension_library)
 #! [ov_api_2_0:load_old_extension]
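
The second hunk only moves the path helper out of the snippet markers, so the rendered documentation shows nothing but the add_extension() call. In isolation that call looks like this (the library filename is a placeholder):

import openvino as ov

core = ov.Core()
# Loads custom operations from a compiled extension library
core.add_extension("libcustom_extension.so")
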
1 change: 0 additions & 1 deletion docs/snippets/ov_denormals.py
@@ -6,7 +6,6 @@
 from utils import get_model
 
 device_name = 'CPU'
-xml_path = 'modelWithDenormals.xml'
 model = get_model()
 
 # ! [ov:intel_cpu:denormals_optimization:part0]
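
The deleted xml_path was dead code: the snippet builds its model with get_model(). For context, the tagged part typically enables the CPU denormals optimization; a sketch assuming the intel_cpu property namespace, which this diff does not show:

import openvino as ov
import openvino.runtime.properties.intel_cpu as intel_cpu  # assumed module path

core = ov.Core()
# Treating denormal floats as zero can noticeably speed up models
# whose weights or activations contain denormal values.
core.set_property("CPU", {intel_cpu.denormals_optimization(): True})
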
18 changes: 10 additions & 8 deletions docs/snippets/ov_dynamic_shapes.py
@@ -2,15 +2,16 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import numpy as np
-from utils import get_model, get_dynamic_model
+from utils import get_dynamic_model
 
 #! [import]
 import openvino as ov
 #! [import]
 
+model = get_dynamic_model()
+
 #! [reshape_undefined]
 core = ov.Core()
-model = get_dynamic_model()
 
 # Set first dimension to be dynamic while keeping others static
 model.reshape([-1, 3, 224, 224])
@@ -37,9 +38,10 @@
 print(model.input().partial_shape)
 #! [print_dynamic]
 
-#! [detect_dynamic]
 model = get_dynamic_model()
 
+#! [detect_dynamic]
+
 if model.input(0).partial_shape.is_dynamic:
     # input is dynamic
     pass
@@ -55,19 +57,20 @@
 
 executable = core.compile_model(model)
 infer_request = executable.create_infer_request()
+input_tensor_name = "input"
 
 #! [set_input_tensor]
 # For first inference call, prepare an input tensor with 1x128 shape and run inference request
 input_data1 = np.ones(shape=[1,128])
-infer_request.infer({"Parameter_319": input_data1})
+infer_request.infer({input_tensor_name: input_data1})
 
 # Get resulting outputs
 output_tensor1 = infer_request.get_output_tensor()
 output_data1 = output_tensor1.data[:]
 
 # For second inference call, prepare a 1x200 input tensor and run inference request
 input_data2 = np.ones(shape=[1,200])
-infer_request.infer({"Parameter_319": input_data2})
+infer_request.infer({input_tensor_name: input_data2})
 
 # Get resulting outputs
 output_tensor2 = infer_request.get_output_tensor()
@@ -98,10 +101,9 @@
 data2 = output_tensor.data[:]
 #! [get_input_tensor]
 
-#! [check_inputs]
-core = ov.Core()
-model = core.read_model("model.xml")
+model = get_dynamic_model()
 
+#! [check_inputs]
 # Print model input layer info
 for input_layer in model.inputs:
     print(input_layer.names, input_layer.partial_shape)
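
The key functional change is input_tensor_name = "input": together with the utils.py change below, the snippet no longer hardcodes an autogenerated tensor name like "Parameter_319". A self-contained sketch of the pattern (the opset alias is an assumption):

import numpy as np
import openvino as ov
import openvino.runtime.opset11 as ops  # assumed opset alias

# Fully dynamic 2D input, explicitly named "input"
param = ops.parameter(ov.PartialShape([-1, -1]), name="input")
model = ov.Model(ops.relu(param), [param])

core = ov.Core()
infer_request = core.compile_model(model, "CPU").create_infer_request()

# One request handles differently shaped inputs because the model is dynamic
for shape in ([1, 128], [1, 200]):
    infer_request.infer({"input": np.ones(shape, dtype=np.float32)})
    print(infer_request.get_output_tensor().data.shape)
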
14 changes: 4 additions & 10 deletions docs/snippets/ov_preprocessing.py
@@ -177,7 +177,7 @@ def custom_abs(output: Output):
 from openvino.runtime.passes import Manager, Serialize
 # ! [ov:preprocess:save_headers]
 
-xml_path = get_path_to_model()
+model_path = get_path_to_model()
 
 # ! [ov:preprocess:save]
 # ======== Step 0: read original model =========
@@ -213,20 +213,14 @@ def custom_abs(output: Output):
 set_batch(model, 2)
 
 # ======== Step 3: Save the model ================
-# First method - using serialize runtime wrapper
-serialize(model, xml_path)
-
-# Second method - using Manager and Serialize pass
-manager = Manager()
-manager.register_pass(Serialize(xml_path))
-manager.run_passes(model)
+serialize(model, model_path)
 # ! [ov:preprocess:save]
 
-path_to_cash_dir = get_temp_dir()
+path_to_cache_dir = get_temp_dir()
 
 # ! [ov:preprocess:save_load]
 core = Core()
-core.set_property({props.cache_dir(): path_to_cash_dir})
+core.set_property({props.cache_dir(): path_to_cache_dir})
 
 # In case that no preprocessing is needed anymore, we can load model on target device directly
 # With cached model available, it will also save some time on reading original model
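
The commit keeps only the serialize() convenience wrapper and drops the Manager/Serialize pass variant, which did the same thing in more code. A sketch of the surviving method; paths are placeholders:

from openvino.runtime import Core, serialize

core = Core()
model = core.read_model("model.xml")
# ... preprocessing and set_batch(model, 2) as in the snippet ...
serialize(model, "model_saved.xml")  # also writes model_saved.bin next to the .xml
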
4 changes: 2 additions & 2 deletions docs/snippets/ov_python_exclusives.py
@@ -157,7 +157,7 @@ def f(request, userdata):
 assert np.array_equal(unpacked_data , unt8_data)
 #! [unpacking]
 
-xml_path = get_path_to_model()
+model_path = get_path_to_model()
 path_to_image = get_path_to_image()
 
 
@@ -180,7 +180,7 @@ def prepare_data(input, image_path):
     input_data.append(image)
 
 core = ov.Core()
-model = core.read_model(xml_path)
+model = core.read_model(model_path)
 # Create thread with prepare_data function as target and start it
 thread = Thread(target=prepare_data, args=[model.input(), path_to_image])
 thread.start()
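
The surrounding snippet overlaps model compilation with input preparation in a worker thread. A condensed sketch of that pattern (model path and input shape are placeholders):

from threading import Thread

import numpy as np
import openvino as ov

input_data = []

def prepare_data(shape):
    # Stand-in for image loading and preprocessing off the main thread
    input_data.append(np.ones(shape, dtype=np.float32))

core = ov.Core()
model = core.read_model("model.xml")
thread = Thread(target=prepare_data, args=[[1, 3, 224, 224]])
thread.start()

compiled_model = core.compile_model(model, "CPU")
thread.join()  # ensure the data is ready before running inference
results = compiled_model(input_data)
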
2 changes: 1 addition & 1 deletion docs/snippets/utils.py
@@ -15,7 +15,7 @@
 
 
 def get_dynamic_model():
-    param = ops.parameter(ov.PartialShape([-1, -1]))
+    param = ops.parameter(ov.PartialShape([-1, -1]), name="input")
     return ov.Model(ops.relu(param), [param])
 
 
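
Naming the parameter is what makes the "input" key in ov_dynamic_shapes.py resolve: for Parameter nodes, the friendly name is also registered as a tensor name. A quick check (same assumed opset alias as above):

import openvino as ov
import openvino.runtime.opset11 as ops  # assumed opset alias

param = ops.parameter(ov.PartialShape([-1, -1]), name="input")
model = ov.Model(ops.relu(param), [param])

print(model.input(0).names)  # expected to include 'input'
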
