Performance fix for aot_autograd (openvinotoolkit#26139)
### Details:
- Fix the performance issue in the aot_autograd path where constants were
being treated as model inputs.

### Tickets:
 - https://jira.devtools.intel.com/browse/CVS-139183
suryasidd authored Aug 29, 2024
1 parent 46d1a4d commit 91744e1
Showing 5 changed files with 24 additions and 12 deletions.
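For readers skimming the diff: the core of the change is to stop feeding frozen parameters back in as runtime inputs. Below is a minimal sketch of the mechanism, condensed from the backend diff that follows (the helper name `freeze_params` is ours for illustration; `TracingContext` and `replace_params_with_constants` are the PyTorch APIs the commit actually uses):

```python
import torch
from torch._inductor.freezing import replace_params_with_constants

def freeze_params(gm: torch.fx.GraphModule, example_inputs: list):
    """Fold flat parameters into `gm` as constants and drop them from the inputs.

    Sketch only: condenses what the aot_autograd branch in the diff below does.
    """
    tracing_context = torch._guards.TracingContext.try_get()
    if tracing_context is None:
        # Outside aot_autograd there is no tracing context; nothing to fold.
        return gm, example_inputs, list(range(len(example_inputs)))
    fw_metadata = tracing_context.fw_metadata
    params_flat = tracing_context.params_flat
    assert fw_metadata is not None and params_flat is not None
    # Indices of the arguments that remain genuine runtime inputs.
    preserved = replace_params_with_constants(gm, params_flat, fw_metadata)
    return gm, [example_inputs[i] for i in preserved], preserved
```

At call time, the compiled artifact then receives only the preserved arguments, which is what the `_call` changes below implement.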
@@ -13,6 +13,7 @@
 from torch._dynamo.backends.common import fake_tensor_unsupported, aot_autograd
 from torch._dynamo.backends.registry import register_backend
 from torch._inductor.compile_fx import compile_fx
+from torch._inductor.freezing import replace_params_with_constants
 from torch.fx.experimental.proxy_tensor import make_fx
 from torch._decomp import decomposition_table, get_decompositions
 
@@ -54,10 +55,9 @@ def openvino(subgraph, example_inputs, options=None):
     if (_get_aot_autograd(options)):
         global openvino_options
         openvino_options = options
-        decompositions = _get_decompositions(options) + get_inf_decomposition_list()
-        decompositions = decompositions + get_aot_decomposition_list()
-        return aot_autograd(fw_compiler=fx_openvino,
-                            bw_compiler=fx_openvino,
+        decompositions = _get_decompositions(options) + get_inf_decomposition_list() + get_aot_decomposition_list()
+        return aot_autograd(fw_compiler=fx_openvino,
+                            bw_compiler=fx_openvino,
                             decompositions=get_decompositions(decompositions))(subgraph, example_inputs)
     return fx_openvino(subgraph, example_inputs, options)

@@ -86,7 +86,14 @@ def _call(*args):
         if inputs_reversed:
             example_inputs.reverse()
 
+        preserved_arg_indices = []
         if (_get_aot_autograd(options)):
+            if tracing_context := torch._guards.TracingContext.try_get():
+                fw_metadata = tracing_context.fw_metadata
+                params_flat = tracing_context.params_flat
+                assert fw_metadata is not None and params_flat is not None
+                preserved_arg_indices = replace_params_with_constants(subgraph, params_flat, fw_metadata)
+                example_inputs = [example_inputs[ind] for ind in preserved_arg_indices]
             model = subgraph
         else:
             from torch._subclasses.fake_tensor import FakeTensorMode
@@ -96,7 +103,6 @@ def _call(*args):
 
         with torch.no_grad():
             model.eval()
-
         partitioner = Partitioner(options)
         compiled_model = partitioner.make_partitions(model, options)
 
@@ -107,9 +113,15 @@
             executor_parameters["model_hash_str"] += "_fs"
 
         def _call(*args):
+            if(_get_aot_autograd(options)):
+                args_list = args[0]
+                args_new = [args_list[i] for i in preserved_arg_indices]
+                args = args_new
             res = execute(compiled_model, *args, executor="openvino",
                           executor_parameters=executor_parameters, options=options)
             return res
+        if(_get_aot_autograd(options)):
+            _call._boxed_call = True  # type: ignore[attr-defined]
         return _call
     except Exception as e:
         logger.debug(f"Failed in OpenVINO execution: {e}")
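A note on the `_boxed_call` flag set above: AOTAutograd supports a "boxed" calling convention in which the compiled callable receives all of its arguments as a single list rather than as unpacked positionals, which is why `_call` reads `args[0]` in the aot_autograd branch. A minimal illustrative sketch, not code from this commit:

```python
# Sketch of the boxed calling convention (illustrative only, not from this commit).
def compiled_fn(args):
    # Boxed: `args` arrives as one flat list, not as *args.
    inputs = list(args)
    args.clear()  # the caller's list is emptied so inputs can be freed early
    return [inputs[0] * 2]

# The flag tells AOTAutograd to invoke compiled_fn([a, b, ...]) with a list.
compiled_fn._boxed_call = True

print(compiled_fn([21]))  # [42]
```

Boxing lets the runtime drop references to input tensors as soon as they are consumed, which matters once large frozen weights are no longer passed on every call.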
2 changes: 1 addition & 1 deletion tests/layer_tests/pytorch_tests/test_expand.py
@@ -139,4 +139,4 @@ def forward(self, x, dym):
     @pytest.mark.parametrize("dims", [(4, 3), (-1, -1)])
     @pytest.mark.precommit_fx_backend
     def test_dynamic_expand(self, dims, ie_device, precision, ir_version):
-        self._test(*self.create_model(dims), ie_device, precision, ir_version, dynamic=True, aot_autograd=True)
+        self._test(*self.create_model(dims), ie_device, precision, ir_version)
2 changes: 1 addition & 1 deletion tests/layer_tests/pytorch_tests/test_min_max.py
@@ -346,5 +346,5 @@ def forward(self, x):
     def test_amin_amax(self, op_type, input_dtype, axis, keep_dims, out, ie_device, precision, ir_version):
         self._test(*self.create_model(op_type, axis, keep_dims, out),
                    ie_device, precision, ir_version, kwargs_to_prepare_input=
-                   {"input_dtype": input_dtype, "out": out, "axes": axis, "keep_dims": keep_dims}
+                   {"input_dtype": input_dtype, "out": out, "axes": axis, "keep_dims": keep_dims},
                    )
2 changes: 1 addition & 1 deletion tests/layer_tests/pytorch_tests/test_reshape.py
@@ -73,4 +73,4 @@ def forward(self, x, dym):
     ])
     @pytest.mark.precommit_fx_backend
     def test_dynamic_reshape(self, shape, ie_device, precision, ir_version):
-        self._test(*self.create_model(shape), ie_device, precision, ir_version, aot_autograd=True, dynamic=True)
+        self._test(*self.create_model(shape), ie_device, precision, ir_version)
8 changes: 4 additions & 4 deletions tests/layer_tests/pytorch_tests/test_scatter.py
@@ -32,7 +32,7 @@ def __init__(self, dim, index, src, inplace, reduce, has_out=False):
             str_forward += "_inplace"
         else:
             str_forward += ("_out_of_place" if not has_out else "_with_out")
-
+
         if reduce:
             self.reduce = reduce
@@ -122,7 +122,7 @@ def test_scatter(self, dim, index, src, dtype, inplace, has_out, reduce, ie_devi
                    precision,
                    ir_version,
                    kwargs_to_prepare_input={"dtype": dtype, "out": has_out},
-                   freeze_model=freeze
+                   freeze_model=freeze,
                    )
 
 
@@ -151,7 +151,7 @@ def __init__(self, dim, index, src, inplace, reduce, include_self, has_out=False
             str_forward += "_inplace"
         else:
             str_forward += ("_out_of_place" if not has_out else "_with_out")
-
+
         self.reduce = reduce
         self.include_self = include_self
         self.forward = getattr(self, str_forward)
@@ -240,7 +240,7 @@ def __init__(self, dim, index, src, inplace):
             self.index = torch.empty([1])
         else:
             self.index = index
-        self.src = src
+        self.src = src
         self.inplace = inplace
 
     def forward(self, x: torch.Tensor):