Nano: Enhance optimizer pipeline (#5490)
* refactor code

* enable get_best_model

* basic impl finished

* add license

* clean openvino output

* remove one line

* remove partial output of openvino and inc

* add trace and quantize

* update based on comments

* ignore errors

* update based on comment

* update based on comment

* add type hint for return
rnwang04 authored Aug 25, 2022
1 parent 47208c7 commit 22055a4
Showing 10 changed files with 413 additions and 119 deletions.
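At a glance, this commit renames the inference pipeline entry point to `InferenceOptimizer` and re-exports it from `bigdl.nano.pytorch`. A minimal usage sketch, assuming an `optimize`/`get_best_model` flow along the lines the commit messages describe; the exact call signatures here are assumptions, not part of this diff:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from bigdl.nano.pytorch import InferenceOptimizer

# Toy model and data, just to exercise the pipeline.
model = torch.nn.Linear(8, 2)
loader = DataLoader(TensorDataset(torch.randn(32, 8),
                                  torch.randint(0, 2, (32,))), batch_size=8)

opt = InferenceOptimizer()
# Hypothetical call shape: benchmark accelerated variants of the model
# (trace, quantize, ...) against the given data.
opt.optimize(model=model, training_data=loader)
# "enable get_best_model": retrieve the best-performing variant found.
best_model, option = opt.get_best_model()
```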
7 changes: 5 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/core/utils.py
@@ -19,10 +19,13 @@
 from openvino.runtime.passes import Manager


-def convert_onnx_to_xml(onnx_file_path, xml_path, batch_size=1):
+def convert_onnx_to_xml(onnx_file_path, xml_path, logging=True, batch_size=1):
     xml_path = Path(xml_path)
     model_name, output_dir = str(xml_path.stem), str(xml_path.parent)
-    mo_cmd = "mo -m {} -n {} -o {}".format(str(onnx_file_path), model_name, output_dir)
+    if logging:
+        mo_cmd = "mo -m {} -n {} -o {}".format(str(onnx_file_path), model_name, output_dir)
+    else:
+        mo_cmd = "mo -m {} --silent -n {} -o {}".format(str(onnx_file_path), model_name, output_dir)
     if os.system(mo_cmd) == 0:
         return
     else:
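The effect of the new `logging` flag is easiest to see from the generated Model Optimizer command; a sketch with placeholder paths (assumes OpenVINO's `mo` is on the PATH):

```python
from bigdl.nano.deps.openvino.core.utils import convert_onnx_to_xml

# logging=True (default) runs plain `mo`, keeping conversion output:
#   mo -m tmp.onnx -n model -o .
convert_onnx_to_xml("tmp.onnx", "model.xml")
# logging=False inserts --silent, suppressing Model Optimizer output:
#   mo -m tmp.onnx --silent -n model -o .
convert_onnx_to_xml("tmp.onnx", "model.xml", logging=False)
```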
5 changes: 3 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/openvino_api.py
@@ -16,19 +16,20 @@
 from functools import partial


-def PytorchOpenVINOModel(model, input_sample=None, **export_kwargs):
+def PytorchOpenVINOModel(model, input_sample=None, logging=True, **export_kwargs):
     """
     Create a OpenVINO model from pytorch.

     :param model: Pytorch model to be converted to OpenVINO for inference or
                   path to Openvino saved model.
     :param input_sample: A set of inputs for trace, defaults to None if you have trace before or
                          model is a LightningModule with any dataloader attached, defaults to None
+    :param logging: whether to log detailed information of model conversion. default: True.
     :param **export_kwargs: will be passed to torch.onnx.export function.
     :return: PytorchOpenVINOModel model for OpenVINO inference.
     """
     from .pytorch.model import PytorchOpenVINOModel
-    return PytorchOpenVINOModel(model, input_sample, **export_kwargs)
+    return PytorchOpenVINOModel(model, input_sample, logging, **export_kwargs)


 def load_openvino_model(path):
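A quick sketch of the updated factory with a toy module; `logging=False` here propagates all the way down to the `mo --silent` invocation above (assumes the OpenVINO toolchain is installed):

```python
import torch
from bigdl.nano.deps.openvino.openvino_api import PytorchOpenVINOModel

net = torch.nn.Linear(4, 2)
sample = torch.randn(1, 4)
# Convert quietly: the flag is threaded through export() and
# convert_onnx_to_xml() down to `mo --silent`.
ov_model = PytorchOpenVINOModel(net, input_sample=sample, logging=False)
```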
5 changes: 3 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/pytorch/model.py
@@ -25,7 +25,7 @@


 class PytorchOpenVINOModel(OpenVINOModel, AcceleratedLightningModule):
-    def __init__(self, model, input_sample=None, **export_kwargs):
+    def __init__(self, model, input_sample=None, logging=True, **export_kwargs):
         """
         Create a OpenVINO model from pytorch.
@@ -34,13 +34,14 @@ def __init__(self, model, input_sample=None, **export_kwargs):
         :param input_sample: A set of inputs for trace, defaults to None if you have trace before or
                              model is a LightningModule with any dataloader attached,
                              defaults to None.
+        :param logging: whether to log detailed information of model conversion. default: True.
         :param **export_kwargs: will be passed to torch.onnx.export function.
         """
         ov_model_path = model
         with TemporaryDirectory() as dir:
             dir = Path(dir)
             if isinstance(model, torch.nn.Module):
-                export(model, input_sample, str(dir / 'tmp.xml'), **export_kwargs)
+                export(model, input_sample, str(dir / 'tmp.xml'), logging, **export_kwargs)
                 ov_model_path = dir / 'tmp.xml'
             OpenVINOModel.__init__(self, ov_model_path)
             AcceleratedLightningModule.__init__(self, None)
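Note the constraint this constructor works under: the IR written to the temporary directory must be loaded before the `with` block exits and the directory is deleted. A standalone sketch of that pattern, with `export_ir` and `load_ir` as hypothetical stand-ins for the real helpers:

```python
from pathlib import Path
from tempfile import TemporaryDirectory

def convert_and_load(model, input_sample, export_ir, load_ir, logging=True):
    # Write the intermediate files somewhere disposable...
    with TemporaryDirectory() as tmp:
        xml_path = Path(tmp) / "tmp.xml"
        export_ir(model, input_sample, str(xml_path), logging)
        # ...but read the IR back *inside* the block, before cleanup.
        return load_ir(xml_path)
```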
5 changes: 3 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/pytorch/utils.py
@@ -19,19 +19,20 @@
 from pathlib import Path


-def export(model, input_sample=None, xml_path="model.xml", **kwargs):
+def export(model, input_sample=None, xml_path="model.xml", logging=True, **kwargs):
     '''
     Function to export pytorch model into openvino and save it to local.
     Any instance of torch.nn.Module including Lightning Module is acceptable.

     :param model: Model instance of torch.nn.module to be exported.
     :param input_sample: torch.Tensor or a list for the model tracing.
     :param xml_path: The path to save openvino model file.
+    :param logging: whether to log detailed information of model conversion. default: True.
     :param **kwargs: will be passed to torch.onnx.export function.
     '''
     # export a model with dynamic axes to enable IR to accept different batches and resolutions
     with TemporaryDirectory() as folder:
         folder = Path(folder)
         onnx_path = str(folder / 'tmp.onnx')
         export_to_onnx(model, input_sample, onnx_path, dynamic_axes=True, **kwargs)
-        convert_onnx_to_xml(onnx_path, xml_path)
+        convert_onnx_to_xml(onnx_path, xml_path, logging)
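`export()` is thus a two-stage pipeline: PyTorch → ONNX (`torch.onnx.export`, traced with dynamic axes) → OpenVINO IR (`mo`). A self-contained sketch of the same flow without the Nano helpers; the axis and tensor names are illustrative, and `mo` is assumed to be on the PATH:

```python
import os
from pathlib import Path
from tempfile import TemporaryDirectory

import torch

def torch_to_ir(model, sample, xml_path="model.xml", logging=True):
    xml = Path(xml_path)
    with TemporaryDirectory() as tmp:
        onnx_path = str(Path(tmp) / "tmp.onnx")
        # Stage 1: trace to ONNX; a dynamic batch axis keeps the IR from
        # being pinned to the sample's batch size.
        torch.onnx.export(model, sample, onnx_path,
                          input_names=["x"], output_names=["y"],
                          dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}})
        # Stage 2: OpenVINO Model Optimizer, mirroring convert_onnx_to_xml.
        silent = "" if logging else " --silent"
        os.system("mo -m {}{} -n {} -o {}".format(onnx_path, silent,
                                                  xml.stem, xml.parent))

torch_to_ir(torch.nn.Linear(4, 2), torch.randn(1, 4), "model.xml", logging=False)
```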
1 change: 1 addition & 0 deletions python/nano/src/bigdl/nano/pytorch/__init__.py
@@ -22,3 +22,4 @@
 os.unsetenv('KMP_INIT_AT_FORK')
 from bigdl.nano.pytorch.trainer import Trainer
 from bigdl.nano.pytorch.torch_nano import TorchNano
+from bigdl.nano.pytorch.inference import InferenceOptimizer
2 changes: 1 addition & 1 deletion python/nano/src/bigdl/nano/pytorch/inference/__init__.py
@@ -15,4 +15,4 @@
 #


-from .optimizer import Optimizer
+from .optimizer import InferenceOptimizer
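For downstream code, the rename changes the public import; per the two `__init__.py` changes above, both of these now resolve to the same class:

```python
# Old (before this commit):
# from bigdl.nano.pytorch.inference import Optimizer
# New:
from bigdl.nano.pytorch.inference import InferenceOptimizer
# Also re-exported at the package level:
from bigdl.nano.pytorch import InferenceOptimizer
```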