Standalone onnx export #2155

Merged: 22 commits, May 19, 2023
Changes from all commits
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -18,6 +18,7 @@ All notable changes to this project will be documented in this file.
- Support label hierarchy through LabelTree in LabelSchema for classification task (<https://github.com/openvinotoolkit/training_extensions/pull/2149>, <https://github.com/openvinotoolkit/training_extensions/pull/2152>).
- Enhance exportable code file structure, video inference and default value for demo (<https://github.com/openvinotoolkit/training_extensions/pull/2051>).
- Speed up OpenVINO inference in image classification, semantic segmentation, object detection and instance segmentation tasks (<https://github.com/openvinotoolkit/training_extensions/pull/2105>).
- Refactoring of ONNX export functionality (<https://github.com/openvinotoolkit/training_extensions/pull/2155>).

### Bug fixes

36 changes: 23 additions & 13 deletions otx/algorithms/action/adapters/mmaction/task.py
@@ -19,7 +19,7 @@
import time
from copy import deepcopy
from functools import partial
from typing import Optional, Union
from typing import Dict, Optional, Union

import torch
from mmaction import __version__
@@ -57,6 +57,7 @@
from otx.api.entities.model_template import TaskType
from otx.api.entities.subset import Subset
from otx.api.entities.task_environment import TaskEnvironment
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
from otx.core.data import caching

logger = get_logger()
@@ -447,7 +448,7 @@ def dummy_dump_saliency_hook(model, inp, out):

return predictions, metric

def _export_model(self, precision: ModelPrecision, dump_features: bool = True):
def _export_model(self, precision: ModelPrecision, export_format: ExportType, dump_features: bool):
"""Main export function."""
self._init_task(export=True)

@@ -463,18 +464,27 @@ def _export_model(self, precision: ModelPrecision, dump_features: bool = True):
self._precision[0] = precision
half_precision = precision == ModelPrecision.FP16

exporter = Exporter(cfg, state_dict, deploy_cfg, f"{self._output_path}/openvino", half_precision)
exporter = Exporter(
cfg,
state_dict,
deploy_cfg,
f"{self._output_path}/openvino",
half_precision,
onnx_only=export_format == ExportType.ONNX,
)
exporter.export()
bin_file = [f for f in os.listdir(self._output_path) if f.endswith(".bin")][0]
xml_file = [f for f in os.listdir(self._output_path) if f.endswith(".xml")][0]
onnx_file = [f for f in os.listdir(self._output_path) if f.endswith(".onnx")][0]
results = {
"outputs": {
"bin": os.path.join(self._output_path, bin_file),
"xml": os.path.join(self._output_path, xml_file),
"onnx": os.path.join(self._output_path, onnx_file),
}
}

results: Dict[str, Dict[str, str]] = {"outputs": {}}

if export_format == ExportType.ONNX:
onnx_file = [f for f in os.listdir(self._output_path) if f.endswith(".onnx")][0]
results["outputs"]["onnx"] = os.path.join(self._output_path, onnx_file)
else:
bin_file = [f for f in os.listdir(self._output_path) if f.endswith(".bin")][0]
xml_file = [f for f in os.listdir(self._output_path) if f.endswith(".xml")][0]
results["outputs"]["bin"] = os.path.join(self._output_path, bin_file)
results["outputs"]["xml"] = os.path.join(self._output_path, xml_file)

return results

# This should be removed
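For reference, a minimal sketch of the two shapes the returned `results` dictionary can now take (the paths are illustrative; only the keys mirror the branches above):

```python
# Illustrative only: made-up paths, keys as in the diff above.
onnx_results = {"outputs": {"onnx": "/tmp/otx-export/model.onnx"}}
openvino_results = {
    "outputs": {
        "bin": "/tmp/otx-export/model.bin",
        "xml": "/tmp/otx-export/model.xml",
    }
}
```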
13 changes: 12 additions & 1 deletion otx/algorithms/action/adapters/mmaction/utils/export_utils.py
@@ -57,7 +57,13 @@ class Exporter:
"""Export class for action recognition model using mmdeploy framework."""

def __init__(
self, recipe_cfg: Config, weights: OrderedDict, deploy_cfg: Config, work_dir: str, half_precision: bool
self,
recipe_cfg: Config,
weights: OrderedDict,
deploy_cfg: Config,
work_dir: str,
half_precision: bool,
onnx_only: bool,
):
"""Initialize Exporter.

@@ -67,6 +73,7 @@ def __init__(
deploy_cfg (Config): deploy config which contains deploy info
work_dir (str): path to save onnx and openvino xml file
half_precision (bool): whether to use half-precision (FP16)
onnx_only (bool): whether to export only the ONNX model
"""

self.task_processor = build_task_processor(recipe_cfg, deploy_cfg, "cpu")
@@ -80,6 +87,7 @@ def __init__(
self.context_info = {"deploy_cfg": deploy_cfg}
if half_precision:
self.deploy_cfg.backend_config.mo_options["flags"] = ["--compress_to_fp16"]
self.onnx_only = onnx_only

def _get_model(self) -> torch.nn.Module:
"""Prepare torch model for exporting."""
@@ -128,6 +136,9 @@ def export(self):
self.deploy_cfg.ir_config.output_names,
)

if self.onnx_only:
return

from_onnx(
self.work_dir + ".onnx",
self.work_dir.replace("openvino", ""),
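Putting the new flag together with the constructor signature above, a sketch of an ONNX-only export; the config objects are placeholders for real mmaction/mmdeploy configs:

```python
# Sketch only: cfg, state_dict and deploy_cfg stand in for the real recipe
# config, model weights (OrderedDict) and mmdeploy config.
exporter = Exporter(
    cfg,
    state_dict,
    deploy_cfg,
    work_dir="/tmp/export/openvino",
    half_precision=False,
    onnx_only=True,  # return right after torch2onnx(); from_onnx() (the IR step) never runs
)
exporter.export()  # leaves only /tmp/export/openvino.onnx on disk
```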
43 changes: 16 additions & 27 deletions otx/algorithms/action/task.py
@@ -48,8 +48,6 @@
)
from otx.api.entities.model import (
ModelEntity,
ModelFormat,
ModelOptimizationType,
ModelPrecision,
)
from otx.api.entities.model_template import TaskType
@@ -245,49 +243,40 @@ def export(
"The saliency maps and representation vector outputs will not be dumped in the exported model."
)

# copied from OTX inference_task.py
logger.info("Exporting the model")
if export_type != ExportType.OPENVINO:
raise RuntimeError(f"not supported export type {export_type}")
output_model.model_format = ModelFormat.OPENVINO
output_model.optimization_type = ModelOptimizationType.MO
output_model.has_xai = dump_features

results = self._export_model(precision, dump_features)
self._update_model_export_metadata(output_model, export_type, precision, dump_features)
results = self._export_model(precision, export_type, dump_features)

outputs = results.get("outputs")
logger.debug(f"results of run_task = {outputs}")
if outputs is None:
raise RuntimeError(results.get("msg"))

bin_file = outputs.get("bin")
xml_file = outputs.get("xml")
onnx_file = outputs.get("onnx")

if xml_file is None or bin_file is None or onnx_file is None:
raise RuntimeError("invalid status of exporting. bin and xml should not be None")
with open(bin_file, "rb") as f:
output_model.set_data("openvino.bin", f.read())
with open(xml_file, "rb") as f:
output_model.set_data("openvino.xml", f.read())
with open(onnx_file, "rb") as f:
output_model.set_data("model.onnx", f.read())
if export_type == ExportType.ONNX:
onnx_file = outputs.get("onnx")
with open(onnx_file, "rb") as f:
output_model.set_data("model.onnx", f.read())
else:
bin_file = outputs.get("bin")
xml_file = outputs.get("xml")

with open(bin_file, "rb") as f:
output_model.set_data("openvino.bin", f.read())
with open(xml_file, "rb") as f:
output_model.set_data("openvino.xml", f.read())

output_model.set_data(
"confidence_threshold",
np.array([self.confidence_threshold], dtype=np.float32).tobytes(),
)
output_model.set_data("config.json", config_to_bytes(self._hyperparams))
output_model.precision = self._precision
output_model.optimization_methods = self._optimization_methods
output_model.has_xai = dump_features
output_model.set_data(
"label_schema.json",
label_schema_to_bytes(self._task_environment.label_schema),
)
logger.info("Exporting completed")

@abstractmethod
def _export_model(self, precision: ModelPrecision, dump_features: bool = True):
def _export_model(self, precision: ModelPrecision, export_format: ExportType, dump_features: bool):
raise NotImplementedError

def explain(
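The format/optimization metadata that used to be assigned inline here now comes from a shared base-task helper. A hypothetical reconstruction of what `_update_model_export_metadata` covers, inferred from the removed assignments and the equivalent inline logic in the anomaly task below; the real implementation may differ:

```python
from otx.api.entities.model import ModelFormat, ModelOptimizationType
from otx.api.usecases.tasks.interfaces.export_interface import ExportType

# Hypothetical sketch, not the actual helper body.
def _update_model_export_metadata(self, output_model, export_type, precision, dump_features):
    if export_type == ExportType.ONNX:
        output_model.model_format = ModelFormat.ONNX
        output_model.optimization_type = ModelOptimizationType.ONNX
    else:
        output_model.model_format = ModelFormat.OPENVINO
        output_model.optimization_type = ModelOptimizationType.MO
    output_model.has_xai = dump_features
    # precision bookkeeping omitted; the tasks still set output_model.precision themselves
```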
42 changes: 26 additions & 16 deletions otx/algorithms/anomaly/tasks/inference.py
@@ -271,29 +271,39 @@ def export(
"The saliency maps and representation vector outputs will not be dumped in the exported model."
)

self.precision[0] = precision
assert export_type == ExportType.OPENVINO, f"Incorrect export_type={export_type}"
if export_type == ExportType.ONNX:
output_model.model_format = ModelFormat.ONNX
output_model.optimization_type = ModelOptimizationType.ONNX
if precision == ModelPrecision.FP16:
raise RuntimeError("Export to FP16 ONNX is not supported")
elif export_type == ExportType.OPENVINO:
output_model.model_format = ModelFormat.OPENVINO
output_model.optimization_type = ModelOptimizationType.MO
else:
raise RuntimeError(f"not supported export type {export_type}")

output_model.model_format = ModelFormat.OPENVINO
output_model.optimization_type = self.optimization_type
self.precision[0] = precision
output_model.has_xai = dump_features

# pylint: disable=no-member; need to refactor this
logger.info("Exporting the OpenVINO model.")
onnx_path = os.path.join(self.config.project.path, "onnx_model.onnx")
self._export_to_onnx(onnx_path)
optimize_command = ["mo", "--input_model", onnx_path, "--output_dir", self.config.project.path]
if precision == ModelPrecision.FP16:
optimize_command.append("--compress_to_fp16")
subprocess.run(optimize_command, check=True)
bin_file = glob(os.path.join(self.config.project.path, "*.bin"))[0]
xml_file = glob(os.path.join(self.config.project.path, "*.xml"))[0]
with open(bin_file, "rb") as file:
output_model.set_data("openvino.bin", file.read())
with open(xml_file, "rb") as file:
output_model.set_data("openvino.xml", file.read())
with open(onnx_path, "rb") as file:
output_model.set_data("model.onnx", file.read())

if export_type == ExportType.ONNX:
with open(onnx_path, "rb") as file:
output_model.set_data("model.onnx", file.read())
else:
optimize_command = ["mo", "--input_model", onnx_path, "--output_dir", self.config.project.path]
if precision == ModelPrecision.FP16:
optimize_command.append("--compress_to_fp16")
subprocess.run(optimize_command, check=True)
bin_file = glob(os.path.join(self.config.project.path, "*.bin"))[0]
xml_file = glob(os.path.join(self.config.project.path, "*.xml"))[0]
with open(bin_file, "rb") as file:
output_model.set_data("openvino.bin", file.read())
with open(xml_file, "rb") as file:
output_model.set_data("openvino.xml", file.read())

output_model.precision = self.precision
output_model.optimization_methods = self.optimization_methods
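The OpenVINO branch shells out to Model Optimizer on the intermediate ONNX file. A standalone sketch of that conversion step (paths illustrative); note that FP16 is only honored for the IR target, since the ONNX branch raises on `ModelPrecision.FP16`:

```python
import subprocess

# Standalone equivalent of the OpenVINO branch above; paths are illustrative.
onnx_path = "results/onnx_model.onnx"  # written earlier by _export_to_onnx()
command = ["mo", "--input_model", onnx_path, "--output_dir", "results"]
compress_to_fp16 = True  # mirrors precision == ModelPrecision.FP16
if compress_to_fp16:
    command.append("--compress_to_fp16")
subprocess.run(command, check=True)  # emits results/onnx_model.xml and .bin
```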
15 changes: 10 additions & 5 deletions otx/algorithms/classification/adapters/mmcls/task.py
@@ -66,6 +66,7 @@
from otx.api.entities.model import ModelPrecision
from otx.api.entities.subset import Subset
from otx.api.entities.task_environment import TaskEnvironment
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
from otx.core.data import caching
from otx.core.data.noisy_label_detection import LossDynamicsTrackingHook

@@ -549,17 +550,18 @@ def hook(module, inp, outp): # pylint: disable=unused-argument

return eval_predictions, saliency_maps

def _export_model(self, precision, dump_features):
def _export_model(self, precision: ModelPrecision, export_format: ExportType, dump_features: bool):
self._init_task(export=True)

cfg = self.configure(False, "test", None)

self._precision[0] = precision
assert len(self._precision) == 1
export_options: Dict[str, Any] = {}
export_options["deploy_cfg"] = self._init_deploy_cfg(cfg)
if export_options.get("precision", None) is None:
assert len(self._precision) == 1
export_options["precision"] = str(self._precision[0])

export_options["precision"] = str(precision)
export_options["type"] = str(export_format)

export_options["deploy_cfg"]["dump_features"] = dump_features
if dump_features:
@@ -571,9 +573,12 @@ def _export_model(self, precision, dump_features):
output_names.append("saliency_map")
export_options["model_builder"] = getattr(self, "model_builder", build_classifier)

if self._precision[0] == ModelPrecision.FP16:
if precision == ModelPrecision.FP16:
export_options["deploy_cfg"]["backend_config"]["mo_options"]["flags"].append("--compress_to_fp16")

if export_format == ExportType.ONNX:
export_options["deploy_cfg"]["backend_config"] = {"type": "onnxruntime"}

exporter = ClassificationExporter()
results = exporter.run(
cfg,
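A sketch of the options dictionary this task now hands to the exporter (values illustrative; `deploy_cfg` is whatever `_init_deploy_cfg(cfg)` returned):

```python
# Illustrative contents of export_options for an ONNX export without XAI outputs.
export_options = {
    "deploy_cfg": deploy_cfg,  # from self._init_deploy_cfg(cfg)
    "precision": str(ModelPrecision.FP32),
    "type": str(ExportType.ONNX),
    "model_builder": build_classifier,
}
export_options["deploy_cfg"]["dump_features"] = False
# Swapping the backend is what makes the export stop at ONNX -- no IR step runs:
export_options["deploy_cfg"]["backend_config"] = {"type": "onnxruntime"}
```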
@@ -46,7 +46,7 @@ def model_builder_helper(*args, **kwargs):
return super().run(cfg, **kwargs)

@staticmethod
def naive_export(output_dir, model_builder, precision, cfg, model_name="model"):
def naive_export(output_dir, model_builder, precision, export_type, cfg, model_name="model"):
"""Export procedure with pytorch backend."""
from mmcls.datasets.pipelines import Compose

@@ -62,7 +62,7 @@ def get_fake_data(cfg, orig_img_shape=(128, 128, 3)):
fake_data = get_fake_data(cfg)
opset_version = 11

NaiveExporter.export2openvino(
NaiveExporter.export2backend(
output_dir,
model_builder,
cfg,
@@ -72,4 +72,5 @@ def get_fake_data(cfg, orig_img_shape=(128, 128, 3)):
input_names=["data"],
output_names=["logits"],
opset_version=opset_version,
export_type=export_type,
)
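`export2openvino` is renamed to the backend-agnostic `export2backend`, which now receives the export type. A sketch of calling the updated static method, assuming `naive_export` lives on the `ClassificationExporter` class used above and that `build_classifier` and `cfg` come from the surrounding module:

```python
# Sketch only: keyword arguments follow the signature in the diff above.
ClassificationExporter.naive_export(
    output_dir="/tmp/export",
    model_builder=build_classifier,
    precision=ModelPrecision.FP32,
    export_type=ExportType.ONNX,  # forwarded to NaiveExporter.export2backend
    cfg=cfg,
)
```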
52 changes: 23 additions & 29 deletions otx/algorithms/classification/task.py
@@ -56,10 +56,8 @@
Performance,
ScoreMetric,
)
from otx.api.entities.model import ( # ModelStatus
from otx.api.entities.model import (
ModelEntity,
ModelFormat,
ModelOptimizationType,
ModelPrecision,
)
from otx.api.entities.resultset import ResultSetEntity
@@ -245,37 +243,33 @@ def export(
"""Export function of OTX Classification Task."""

logger.info("Exporting the model")
if export_type != ExportType.OPENVINO:
raise RuntimeError(f"not supported export type {export_type}")
output_model.model_format = ModelFormat.OPENVINO
output_model.optimization_type = ModelOptimizationType.MO

results = self._export_model(precision, dump_features)
self._update_model_export_metadata(output_model, export_type, precision, dump_features)
results = self._export_model(precision, export_type, dump_features)
outputs = results.get("outputs")
logger.debug(f"results of run_task = {outputs}")
if outputs is None:
raise RuntimeError(results.get("msg"))

bin_file = outputs.get("bin")
xml_file = outputs.get("xml")
onnx_file = outputs.get("onnx")

inference_config = get_cls_inferencer_configuration(self._task_environment.label_schema)
deploy_cfg = get_cls_deploy_config(self._task_environment.label_schema, inference_config)
ir_extra_data = get_cls_model_api_configuration(self._task_environment.label_schema, inference_config)
ir_extra_data[("otx_config",)] = json.dumps(deploy_cfg, ensure_ascii=False)
embed_ir_model_data(xml_file, ir_extra_data)

if xml_file is None or bin_file is None or onnx_file is None:
raise RuntimeError("invalid status of exporting. bin and xml or onnx should not be None")
with open(bin_file, "rb") as f:
output_model.set_data("openvino.bin", f.read())
with open(xml_file, "rb") as f:
output_model.set_data("openvino.xml", f.read())
with open(onnx_file, "rb") as f:
output_model.set_data("model.onnx", f.read())
output_model.precision = self._precision
output_model.has_xai = dump_features
if export_type == ExportType.ONNX:
onnx_file = outputs.get("onnx")
with open(onnx_file, "rb") as f:
output_model.set_data("model.onnx", f.read())
else:
bin_file = outputs.get("bin")
xml_file = outputs.get("xml")

inference_config = get_cls_inferencer_configuration(self._task_environment.label_schema)
deploy_cfg = get_cls_deploy_config(self._task_environment.label_schema, inference_config)
ir_extra_data = get_cls_model_api_configuration(self._task_environment.label_schema, inference_config)
ir_extra_data[("otx_config",)] = json.dumps(deploy_cfg, ensure_ascii=False)
embed_ir_model_data(xml_file, ir_extra_data)

with open(bin_file, "rb") as f:
output_model.set_data("openvino.bin", f.read())
with open(xml_file, "rb") as f:
output_model.set_data("openvino.xml", f.read())

output_model.set_data(
"label_schema.json",
label_schema_to_bytes(self._task_environment.label_schema),
@@ -506,7 +500,7 @@ def _train_model(self, dataset: DatasetEntity):
raise NotImplementedError

@abstractmethod
def _export_model(self, precision, dump_features):
def _export_model(self, precision: ModelPrecision, export_format: ExportType, dump_features: bool):
"""Export model and return the results."""
raise NotImplementedError

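Taken together, a hedged end-to-end sketch of requesting a standalone ONNX artifact from one of these tasks, assuming the usual OTX `export` signature; `task`, `dataset`, and `configuration` are placeholders from the caller's environment:

```python
from otx.api.entities.model import ModelEntity, ModelPrecision
from otx.api.usecases.tasks.interfaces.export_interface import ExportType

# Sketch only: task, dataset and configuration come from the surrounding setup.
exported = ModelEntity(dataset, configuration)
task.export(ExportType.ONNX, exported, ModelPrecision.FP32, dump_features=False)

onnx_bytes = exported.get_data("model.onnx")  # populated by the ONNX branch above
with open("model.onnx", "wb") as f:
    f.write(onnx_bytes)
```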