Nano: Enhancement for output format of InferenceOptimizer #5705

Merged: 15 commits, Sep 14, 2022
@@ -45,7 +45,7 @@ def accuracy(pred, target):
validation_data=datamodule.val_dataloader(limit_num_samples=160),
metric=accuracy,
direction="max",
-cpu_num=1,
+thread_num=1,
latency_sample_num=30)

# 4. Get the best model under specific restrictions or without restrictions
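For context, the renamed argument sits inside an InferenceOptimizer.optimize call; below is a minimal sketch of the updated call site, assuming the surrounding tutorial objects (model, datamodule, accuracy) and the usual bigdl.nano.pytorch import path (both assumptions, not shown in this diff):

from bigdl.nano.pytorch import InferenceOptimizer

optimizer = InferenceOptimizer()
optimizer.optimize(model=model,  # model/datamodule/accuracy assumed from the tutorial file
                   training_data=datamodule.train_dataloader(limit_num_samples=160),
                   validation_data=datamodule.val_dataloader(limit_num_samples=160),
                   metric=accuracy,
                   direction="max",
                   thread_num=1,  # renamed from cpu_num in this PR
                   latency_sample_num=30)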
10 changes: 8 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/core/model.py
@@ -22,9 +22,10 @@


class OpenVINOModel:
-    def __init__(self, ie_network: str, device='CPU'):
+    def __init__(self, ie_network: str, device='CPU', thread_num=None):
        self._ie = Core()
        self._device = device
+        self.thread_num = thread_num
        self.ie_network = ie_network

    def forward_step(self, *inputs):
@@ -47,8 +48,13 @@ def ie_network(self, model):
            self._ie_network = self._ie.read_model(model=str(model))
        else:
            self._ie_network = model
+        if self.thread_num is not None:
+            config = {"CPU_THREADS_NUM": str(self.thread_num)}
+        else:
+            config = {}
        self._compiled_model = self._ie.compile_model(model=self.ie_network,
-                                                      device_name=self._device)
+                                                      device_name=self._device,
+                                                      config=config)
        self._infer_request = self._compiled_model.create_infer_request()
        input_names = [t.any_name for t in self._ie_network.inputs]
        self._forward_args = input_names
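The new config block maps directly onto the OpenVINO runtime API; here is a minimal standalone sketch of what the setter now does, assuming a hypothetical IR file model.xml:

from openvino.runtime import Core

core = Core()
net = core.read_model(model="model.xml")  # hypothetical IR path
# Cap CPU inference threads the same way the setter does when thread_num is set
compiled = core.compile_model(model=net,
                              device_name="CPU",
                              config={"CPU_THREADS_NUM": "4"})
request = compiled.create_infer_request()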
9 changes: 6 additions & 3 deletions python/nano/src/bigdl/nano/deps/openvino/openvino_api.py
@@ -16,20 +16,23 @@
from functools import partial


-def PytorchOpenVINOModel(model, input_sample=None, logging=True, **export_kwargs):
+def PytorchOpenVINOModel(model, input_sample=None, thread_num=None,
+                         logging=True, **export_kwargs):
"""
Create a OpenVINO model from pytorch.

    :param model: PyTorch model to be converted to OpenVINO for inference or
                  path to OpenVINO saved model.
    :param input_sample: A set of inputs for trace, defaults to None if you have trace before or
-                        model is a LightningModule with any dataloader attached, defaults to None
+                        model is a LightningModule with any dataloader attached, defaults to None.
+    :param thread_num: an int specifying how many threads (cores) to use for
+                       inference. default: None.
    :param logging: whether to log detailed information of model conversion. default: True.
    :param **export_kwargs: will be passed to torch.onnx.export function.
    :return: PytorchOpenVINOModel model for OpenVINO inference.
    """
    from .pytorch.model import PytorchOpenVINOModel
-    return PytorchOpenVINOModel(model, input_sample, logging, **export_kwargs)
+    return PytorchOpenVINOModel(model, input_sample, thread_num, logging, **export_kwargs)


def load_openvino_model(path):
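A hedged usage sketch of the updated factory, with a toy torch module standing in for a real network (the import path follows the file location above; the model and sample are assumptions for illustration):

import torch
from bigdl.nano.deps.openvino.openvino_api import PytorchOpenVINOModel

net = torch.nn.Linear(10, 2)   # toy model for illustration
sample = torch.rand(1, 10)     # trace input for the ONNX/OpenVINO export
ov_model = PytorchOpenVINOModel(net, input_sample=sample, thread_num=2)
pred = ov_model(sample)        # inference should now run on at most 2 threads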
7 changes: 5 additions & 2 deletions python/nano/src/bigdl/nano/deps/openvino/pytorch/model.py
@@ -26,7 +26,8 @@


class PytorchOpenVINOModel(AcceleratedLightningModule):
-    def __init__(self, model, input_sample=None, logging=True, **export_kwargs):
+    def __init__(self, model, input_sample=None, thread_num=None,
+                 logging=True, **export_kwargs):
"""
Create a OpenVINO model from pytorch.

@@ -35,6 +36,8 @@ def __init__(self, model, input_sample=None, logging=True, **export_kwargs):
        :param input_sample: A set of inputs for trace, defaults to None if you have trace before or
                             model is a LightningModule with any dataloader attached,
                             defaults to None.
+        :param thread_num: an int specifying how many threads (cores) to use for
+                           inference. default: None.
        :param logging: whether to log detailed information of model conversion. default: True.
        :param **export_kwargs: will be passed to torch.onnx.export function.
        """
@@ -44,7 +47,7 @@ def __init__(self, model, input_sample=None, logging=True, **export_kwargs):
        if isinstance(model, torch.nn.Module):
            export(model, input_sample, str(dir / 'tmp.xml'), logging, **export_kwargs)
            ov_model_path = dir / 'tmp.xml'
-        self.ov_model = OpenVINOModel(ov_model_path)
+        self.ov_model = OpenVINOModel(ov_model_path, thread_num=thread_num)
        super().__init__(self.ov_model)

    def on_forward_start(self, inputs):
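Taken together, the three files thread the new option from the public factory down to the runtime config; a comment-only sketch of the chain as it appears in this diff:

# PytorchOpenVINOModel(model, input_sample, thread_num=N)    # openvino_api.py factory
#   -> PytorchOpenVINOModel.__init__(..., thread_num=N)      # pytorch/model.py
#     -> OpenVINOModel(ov_model_path, thread_num=N)          # core/model.py
#       -> Core().compile_model(model=..., device_name='CPU',
#                               config={'CPU_THREADS_NUM': str(N)})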