Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

OpenVINO 2020 #1269

Merged
merged 2 commits into from
Mar 20, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- React & Redux & Antd based dashboard
- Yolov3 interpretation script fix and changes to mapping.json
- YOLO format support ([#1151](https://github.com/opencv/cvat/pull/1151))
- Added support for OpenVINO 2020 ([#1269](https://github.com/opencv/cvat/pull/1269))

### Deprecated
-
Expand Down
17 changes: 15 additions & 2 deletions cvat/apps/auto_annotation/inference_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: MIT

from openvino.inference_engine import IENetwork, IEPlugin
from openvino.inference_engine import IENetwork, IEPlugin, IECore, get_version

import subprocess
import os
Expand All @@ -19,7 +19,20 @@ def _check_instruction(instruction):
)


def make_plugin():
def make_plugin_or_core():
version = get_version()
use_core_openvino = False
try:
major, minor, reference = [int(x) for x in version.split('.')]
if major >= 2 and minor >= 1 and reference >= 37988:
use_core_openvino = True
except Exception:
pass

if use_core_openvino:
ie = IECore()
return ie

if _IE_PLUGINS_PATH is None:
raise OSError('Inference engine plugin path env not found in the system.')

Expand Down
26 changes: 14 additions & 12 deletions cvat/apps/auto_annotation/model_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,25 +8,22 @@
import os
import numpy as np

from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

class ModelLoader():
def __init__(self, model, weights):
self._model = model
self._weights = weights

IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH")
if not IE_PLUGINS_PATH:
raise OSError("Inference engine plugin path env not found in the system.")

plugin = make_plugin()
core_or_plugin = make_plugin_or_core()
network = make_network(self._model, self._weights)

supported_layers = plugin.get_supported_layers(network)
not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
format(plugin.device, ", ".join(not_supported_layers)))
if getattr(core_or_plugin, 'get_supported_layers', False):
supported_layers = core_or_plugin.get_supported_layers(network)
not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
format(core_or_plugin.device, ", ".join(not_supported_layers)))

iter_inputs = iter(network.inputs)
self._input_blob_name = next(iter_inputs)
Expand All @@ -45,7 +42,12 @@ def __init__(self, model, weights):
if self._input_blob_name in info_names:
self._input_blob_name = next(iter_inputs)

self._net = plugin.load(network=network, num_requests=2)
if getattr(core_or_plugin, 'load_network', False):
self._net = core_or_plugin.load_network(network,
"CPU",
num_requests=2)
else:
self._net = core_or_plugin.load(network=network, num_requests=2)
input_type = network.inputs[self._input_blob_name]
self._input_layout = input_type if isinstance(input_type, list) else input_type.shape

Expand Down
9 changes: 6 additions & 3 deletions cvat/apps/dextr_segmentation/dextr.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
#
# SPDX-License-Identifier: MIT

from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

import os
import cv2
Expand Down Expand Up @@ -32,12 +32,15 @@ def __init__(self):
def handle(self, im_path, points):
# Lazy initialization
if not self._plugin:
self._plugin = make_plugin()
self._plugin = make_plugin_or_core()
self._network = make_network(os.path.join(_DEXTR_MODEL_DIR, 'dextr.xml'),
os.path.join(_DEXTR_MODEL_DIR, 'dextr.bin'))
self._input_blob = next(iter(self._network.inputs))
self._output_blob = next(iter(self._network.outputs))
self._exec_network = self._plugin.load(network=self._network)
if getattr(self._plugin, 'load_network', False):
self._exec_network = self._plugin.load_network(self._network, 'CPU')
else:
self._exec_network = self._plugin.load(network=self._network)

image = PIL.Image.open(im_path)
numpy_image = np.array(image)
Expand Down
9 changes: 6 additions & 3 deletions cvat/apps/tf_annotation/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def load_image_into_numpy(image):


def run_inference_engine_annotation(image_list, labels_mapping, treshold):
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network

def _normalize_box(box, w, h, dw, dh):
xmin = min(int(box[0] * dw * w), w)
Expand All @@ -44,11 +44,14 @@ def _normalize_box(box, w, h, dw, dh):
if MODEL_PATH is None:
raise OSError('Model path env not found in the system.')

plugin = make_plugin()
core_or_plugin = make_plugin_or_core()
network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH))
input_blob_name = next(iter(network.inputs))
output_blob_name = next(iter(network.outputs))
executable_network = plugin.load(network=network)
if getattr(core_or_plugin, 'load_network', False):
executable_network = core_or_plugin.load_network(network, 'CPU')
else:
executable_network = core_or_plugin.load(network=network)
job = rq.get_current_job()

del network
Expand Down