Skip to content

Commit

Permalink
[PyOV] Add missing is_shared flag in memory sharing scenarios (openvinotoolkit#22734)
Browse files Browse the repository at this point in the history

### Details:
- Add missing `is_shared` flag propagation in the container dispatchers for memory-
sharing scenarios.
- Expected boost in cases of already aligned memory storages with
`__array__` interface, i.e. PyTorch tensors.
 - Added testcases.
 
### Tickets:
 - *132092*
  • Loading branch information
Jan Iwaszkiewicz authored and ilya-lavrenov committed Feb 14, 2024
1 parent 4abab0c commit 070b3ef
Show file tree
Hide file tree
Showing 3 changed files with 133 additions and 19 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ def _(
def to_c_style(value: Any, is_shared: bool = False) -> Any:
    """Return `value` as a C-contiguous numpy array when possible.

    Objects that are not ndarrays but expose the `__array__` interface
    (e.g. PyTorch tensors) are converted without a copy when `is_shared`
    is True, then re-checked for C-contiguity; when `is_shared` is False
    a detached copy is returned. Types with no array interface pass
    through unchanged.
    """
    if not isinstance(value, np.ndarray):
        if hasattr(value, "__array__"):
            # Propagate is_shared on the recursive call so an already
            # aligned zero-copy view is still made C-contiguous if needed.
            return to_c_style(np.array(value, copy=False), is_shared) if is_shared else np.array(value, copy=True)
        return value
    return value if value.flags["C_CONTIGUOUS"] else np.ascontiguousarray(value)

@singledispatch
def normalize_arrays(
    inputs: Any,
    is_shared: bool = False,
) -> Any:
    """Normalize `inputs` into numpy-backed storage for dispatching.

    Base (fallback) implementation of the singledispatch family; the
    container overloads are registered separately.
    NOTE(review): decorator and parameter list reconstructed from the
    diff hunk header and the `.register` overloads — confirm against the
    full source file.
    """
    # Check the special case of the array-interface
    if hasattr(inputs, "__array__"):
        return to_c_style(np.array(inputs, copy=False), is_shared) if is_shared else np.array(inputs, copy=True)
    # Error should be raised if type does not match any dispatchers
    raise TypeError(f"Incompatible inputs of type: {type(inputs)}")

@normalize_arrays.register(dict)
def _(
    inputs: dict,
    is_shared: bool = False,
) -> dict:
    """Normalize a dict of inputs, keeping the original keys.

    Propagates `is_shared` into `to_c_style` so shared memory views are
    still made C-contiguous when required.
    """
    return {k: to_c_style(v, is_shared) if is_shared else v for k, v in inputs.items()}


@normalize_arrays.register(OVDict)
def _(
    inputs: OVDict,
    is_shared: bool = False,
) -> dict:
    """Normalize an OVDict of results, re-keyed by positional index.

    Propagates `is_shared` into `to_c_style` so shared memory views are
    still made C-contiguous when required.
    """
    return {i: to_c_style(v, is_shared) if is_shared else v for i, (_, v) in enumerate(inputs.items())}


@normalize_arrays.register(list)
@normalize_arrays.register(tuple)
def _(
    inputs: Union[list, tuple],
    is_shared: bool = False,
) -> dict:
    """Normalize a list/tuple of inputs, keyed by positional index.

    NOTE(review): the tuple registration is inferred from the
    `Union[list, tuple]` annotation (the line is collapsed in the diff)
    — confirm against the full source file.
    Propagates `is_shared` into `to_c_style` so shared memory views are
    still made C-contiguous when required.
    """
    return {i: to_c_style(v, is_shared) if is_shared else v for i, v in enumerate(inputs)}


@normalize_arrays.register(np.ndarray)
def _(
    inputs: np.ndarray,  # annotation fixed: was `dict`, dispatch type is ndarray
    is_shared: bool = False,
) -> Any:
    """Normalize a single ndarray: make it C-contiguous when shared,
    pass it through untouched when it will be copied downstream."""
    return to_c_style(inputs, is_shared) if is_shared else inputs
###
# End of array normalization.
###
Expand Down
120 changes: 110 additions & 10 deletions src/bindings/python/tests/test_utils/test_data_dispatch.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from copy import deepcopy
import numpy as np

from tests.utils.helpers import generate_relu_compiled_model
from tests.utils.helpers import generate_add_compiled_model, generate_relu_compiled_model

from openvino import Core, Model, Type, Shape, Tensor
import openvino.runtime.opset13 as ops
Expand All @@ -20,22 +20,30 @@ def _get_value(value):
return value.data if isinstance(value, Tensor) else value


def _run_dispatcher_single_input(device, input_data, is_shared, input_shape, input_dtype=np.float32):
    """Compile a single-input ReLU model on `device` and run the data
    dispatcher with `input_data`.

    Returns the dispatcher result and the infer request so callers can
    inspect both the returned tensors and the request's input tensors.
    """
    compiled_model = generate_relu_compiled_model(device, input_shape, input_dtype)
    infer_request = compiled_model.create_infer_request()
    result = _data_dispatch(infer_request, input_data, is_shared)

    return result, infer_request


def _run_dispatcher_multi_input(device, input_data, is_shared, input_shape, input_dtype=np.float32):
    """Compile a two-input Add model on `device` and push `input_data`
    through the data dispatcher.

    Returns the dispatcher result together with the infer request for
    further inspection by the caller.
    """
    request = generate_add_compiled_model(device, input_shape, input_dtype).create_infer_request()
    dispatched = _data_dispatch(request, input_data, is_shared)
    return dispatched, request


@pytest.mark.parametrize("data_type", [np.float_, np.int_, int, float])
@pytest.mark.parametrize("input_shape", [[], [1]])
@pytest.mark.parametrize("is_shared", [True, False])
def test_scalars_dispatcher_old(device, data_type, input_shape, is_shared):
test_data = data_type(2)
expected = Tensor(np.ndarray([], data_type, np.array(test_data)))

result, _ = _run_dispatcher(device, test_data, is_shared, input_shape)
result, _ = _run_dispatcher_single_input(device, test_data, is_shared, input_shape)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape([])
Expand All @@ -56,7 +64,7 @@ def test_scalars_dispatcher_old(device, data_type, input_shape, is_shared):
def test_scalars_dispatcher_new_0(device, input_data, input_dtype, input_shape, is_shared):
expected = Tensor(np.array(input_data, dtype=input_dtype))

result, _ = _run_dispatcher(device, input_data, is_shared, input_shape, input_dtype)
result, _ = _run_dispatcher_single_input(device, input_data, is_shared, input_shape, input_dtype)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape([])
Expand All @@ -72,7 +80,7 @@ def test_scalars_dispatcher_new_0(device, input_data, input_dtype, input_shape,
])
@pytest.mark.parametrize("input_shape", [[], [1]])
def test_scalars_dispatcher_new_1(device, input_data, is_shared, expected, input_shape):
result, _ = _run_dispatcher(device, input_data, is_shared, input_shape, np.float32)
result, _ = _run_dispatcher_single_input(device, input_data, is_shared, input_shape, np.float32)

assert isinstance(result, type(expected))
if isinstance(result, dict):
Expand All @@ -90,7 +98,7 @@ def test_tensor_dispatcher(device, input_shape, is_shared):

test_data = Tensor(array, is_shared)

result, _ = _run_dispatcher(device, test_data, is_shared, input_shape)
result, _ = _run_dispatcher_single_input(device, test_data, is_shared, input_shape)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape(input_shape)
Expand All @@ -107,7 +115,7 @@ def test_tensor_dispatcher(device, input_shape, is_shared):
def test_ndarray_shared_dispatcher(device, input_shape):
test_data = np.ones(input_shape).astype(np.float32)

result, _ = _run_dispatcher(device, test_data, True, input_shape)
result, _ = _run_dispatcher_single_input(device, test_data, True, input_shape)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape(test_data.shape)
Expand All @@ -123,7 +131,7 @@ def test_ndarray_shared_dispatcher(device, input_shape):
def test_ndarray_shared_dispatcher_casting(device, input_shape):
test_data = np.ones(input_shape)

result, infer_request = _run_dispatcher(device, test_data, True, input_shape)
result, infer_request = _run_dispatcher_single_input(device, test_data, True, input_shape)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape(test_data.shape)
Expand All @@ -139,7 +147,7 @@ def test_ndarray_shared_dispatcher_casting(device, input_shape):
def test_ndarray_shared_dispatcher_misalign(device, input_shape):
test_data = np.asfortranarray(np.ones(input_shape).astype(np.float32))

result, _ = _run_dispatcher(device, test_data, True, input_shape)
result, _ = _run_dispatcher_single_input(device, test_data, True, input_shape)

assert isinstance(result, Tensor)
assert result.get_shape() == Shape(test_data.shape)
Expand All @@ -155,7 +163,7 @@ def test_ndarray_shared_dispatcher_misalign(device, input_shape):
def test_ndarray_copied_dispatcher(device, input_shape):
test_data = np.ones(input_shape)

result, infer_request = _run_dispatcher(device, test_data, False, input_shape)
result, infer_request = _run_dispatcher_single_input(device, test_data, False, input_shape)

assert result == {}
assert np.array_equal(infer_request.input_tensors[0].data, test_data)
Expand All @@ -165,6 +173,98 @@ def test_ndarray_copied_dispatcher(device, input_shape):
assert not np.array_equal(infer_request.input_tensors[0].data, test_data)


class FakeTensor:
    """Minimal stand-in for objects exposing the `__array__` interface
    (e.g. PyTorch tensors), used to exercise the array-interface
    dispatcher code paths."""

    def __init__(self, array):
        # Backing ndarray shared (not copied) with the caller.
        self.array = array

    def __array__(self, dtype=None, copy=None):
        # Accept the optional dtype/copy arguments of the array protocol
        # (NumPy 2.x passes them); default behavior is a zero-copy view,
        # matching the original one-argument implementation.
        if dtype is not None and dtype != self.array.dtype:
            return self.array.astype(dtype)
        if copy:
            return self.array.copy()
        return self.array


@pytest.mark.parametrize("input_shape", [[1, 2, 3], [2, 2]])
def test_array_interface_copied_dispatcher(device, input_shape):
    """Copied (is_shared=False) dispatch of an `__array__` object writes an
    independent copy of the data into the request's input tensor."""
    backing = np.ascontiguousarray(np.ones(input_shape, dtype=np.float32))
    wrapped = FakeTensor(backing)

    result, infer_request = _run_dispatcher_single_input(device, wrapped, False, input_shape)

    # Copied inputs go straight into the request; nothing is returned.
    assert result == {}
    assert np.array_equal(infer_request.input_tensors[0].data, wrapped)
    assert not np.shares_memory(infer_request.input_tensors[0].data, wrapped)

    # Mutating the source must not affect the already-copied tensor.
    np.array(wrapped, copy=False)[0] = 2.0
    assert not np.array_equal(infer_request.input_tensors[0].data, wrapped)


@pytest.mark.parametrize("input_shape", [[1, 2, 3], [2, 2]])
@pytest.mark.parametrize("input_container", [list, tuple, dict])
def test_array_interface_copied_multi_dispatcher(device, input_shape, input_container):
    """Copied (is_shared=False) dispatch of containers of `__array__`
    objects writes independent copies into the request's input tensors."""
    np_data_one = np.ascontiguousarray(np.ones((input_shape), dtype=np.float32))
    test_data_one = FakeTensor(np_data_one)

    np_data_two = np.ascontiguousarray(np.ones((input_shape), dtype=np.float32))
    test_data_two = FakeTensor(np_data_two)

    if input_container is dict:
        test_inputs = {0: test_data_one, 1: test_data_two}
    else:
        test_inputs = input_container([test_data_one, test_data_two])

    results, infer_request = _run_dispatcher_multi_input(device, test_inputs, False, input_shape)

    # Copied inputs are written into the request; nothing is returned.
    assert results == {}
    # Bug fix: the original looped over range(len(results)), which is always
    # 0 here, so the assertions below never ran. Loop over the inputs.
    for i in range(len(test_inputs)):
        assert np.array_equal(infer_request.input_tensors[i].data, test_inputs[i])
        assert not np.shares_memory(infer_request.input_tensors[i].data, test_inputs[i])

        # Mutating a source must not affect the already-copied tensor.
        np.array(test_inputs[i], copy=False)[0] = 2.0

        assert not np.array_equal(infer_request.input_tensors[i].data, test_inputs[i])


@pytest.mark.parametrize("input_shape", [[1, 2, 3], [2, 2]])
def test_array_interface_shared_single_dispatcher(device, input_shape):
    """Shared (is_shared=True) dispatch of an `__array__` object yields a
    Tensor that aliases the source memory."""
    backing = np.ascontiguousarray(np.ones(input_shape, dtype=np.float32))
    wrapped = FakeTensor(backing)

    result, _ = _run_dispatcher_single_input(device, wrapped, True, input_shape)

    assert isinstance(result, Tensor)
    assert np.array_equal(result.data, wrapped)
    assert np.shares_memory(result.data, wrapped)

    # A write through the source is reflected in the shared Tensor.
    np.array(wrapped, copy=False)[0] = 2.0
    assert np.array_equal(result.data, wrapped)


@pytest.mark.parametrize("input_shape", [[1, 2, 3], [2, 2]])
@pytest.mark.parametrize("input_container", [list, tuple, dict])
def test_array_interface_shared_multi_dispatcher(device, input_shape, input_container):
    """Shared (is_shared=True) dispatch of containers of `__array__`
    objects yields Tensors that alias the original memory."""
    first = FakeTensor(np.ascontiguousarray(np.ones(input_shape, dtype=np.float32)))
    second = FakeTensor(np.ascontiguousarray(np.ones(input_shape, dtype=np.float32)))

    if input_container is dict:
        test_inputs = {0: first, 1: second}
    else:
        test_inputs = input_container([first, second])

    results, _ = _run_dispatcher_multi_input(device, test_inputs, True, input_shape)

    assert len(results) == 2
    for idx in range(len(results)):
        assert np.array_equal(results[idx].data, test_inputs[idx])
        assert np.shares_memory(results[idx].data, test_inputs[idx])

        # Writes through a source must be visible in its shared Tensor.
        np.array(test_inputs[idx], copy=False)[0] = 2.0

        assert np.array_equal(results[idx].data, test_inputs[idx])


@pytest.mark.parametrize(
("input_data"),
[
Expand Down
20 changes: 17 additions & 3 deletions src/bindings/python/tests/utils/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,13 +202,27 @@ def generate_model_and_image(device, input_shape: List[int] = None):
return (generate_relu_compiled_model(device, input_shape), generate_image(input_shape))


def generate_add_model(input_shape: List[int] = None, input_dtype=np.float32) -> openvino.Model:
    """Build a two-input Add model named "TestModel".

    :param input_shape: shape of both parameters; defaults to [2, 1].
    :param input_dtype: element type of both parameters.

    Bug fix: the parameters previously hard-coded dtype=np.float32,
    silently ignoring `input_dtype`.
    """
    if input_shape is None:
        input_shape = [2, 1]
    param1 = ops.parameter(Shape(input_shape), dtype=input_dtype, name="data1")
    param2 = ops.parameter(Shape(input_shape), dtype=input_dtype, name="data2")
    add = ops.add(param1, param2)
    return Model(add, [param1, param2], "TestModel")


def generate_add_compiled_model(
    device,
    input_shape: List[int] = None,
    input_dtype=np.float32,
) -> openvino.CompiledModel:
    """Compile a two-input Add model for `device`.

    :param input_shape: shape of both inputs; defaults to [1, 3, 32, 32].
    :param input_dtype: element type of both inputs.
    """
    shape = [1, 3, 32, 32] if input_shape is None else input_shape
    return Core().compile_model(generate_add_model(shape, input_dtype), device, {})


def generate_model_with_memory(input_shape, data_type) -> openvino._pyopenvino.Model:
input_data = ops.parameter(input_shape, name="input_data", dtype=data_type)
init_val = ops.constant(np.zeros(input_shape), data_type)
Expand Down

0 comments on commit 070b3ef

Please sign in to comment.