Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP] Preprocessing rcnn #552

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Generated by Django 2.1.9 on 2019-07-08 15:45

import cvat.apps.auto_annotation.models
import django.core.files.storage
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the optional ``preprocessing_file`` field to ``AnnotationModel``.

    The field stores a user-uploaded Python script used to preprocess frames
    before inference; it is nullable because existing models have no script.
    """

    # Must run after the app's initial schema migration.
    dependencies = [
        ('auto_annotation', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='annotationmodel',
            name='preprocessing_file',
            # null=True keeps existing rows valid; the file is stored via the
            # same upload-path handler as the other model files.
            field=models.FileField(null=True, storage=django.core.files.storage.FileSystemStorage(), upload_to=cvat.apps.auto_annotation.models.upload_path_handler),
        ),
    ]
53 changes: 44 additions & 9 deletions cvat/apps/auto_annotation/model_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
import subprocess

from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
from cvat.apps.auto_annotation.import_modules import import_modules

class ModelLoader():
def __init__(self, model, weights):
Expand All @@ -28,18 +29,52 @@ def __init__(self, model, weights):
raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
format(plugin.device, ", ".join(not_supported_layers)))

self._input_blob_name = next(iter(network.inputs))
self._input_blob_names = network.inputs
self._output_blob_name = next(iter(network.outputs))

self._net = plugin.load(network=network, num_requests=2)
input_type = network.inputs[self._input_blob_name]
self._input_layout = input_type if isinstance(input_type, list) else input_type.shape

def infer(self, image):
_, _, h, w = self._input_layout
in_frame = image if image.shape[:-1] == (h, w) else cv2.resize(image, (w, h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
results = self._net.infer(inputs={self._input_blob_name: in_frame})

def _preprocessing_helper(self, image, preprocessing_file, restricted, inputs):
local_vars = {
"image": image,
"input_blob": self._input_blob_names,
"inputs": inputs
}

source_code = open(preprocessing_file).read()
if restricted:
global_vars = {
"__builtins__": {
"str": str,
"int": int,
"float": float,
"max": max,
"min": min,
"range": range,
},
}
else:
global_vars = globals()
imports = import_modules(source_code)
global_vars.update(imports)

exec(source_code, global_vars, local_vars)

def infer(self, image, preprocessing_file=None, restricted=True):
inputs = {}
if not preprocessing_file:
blob_name = list(self._input_blob_names.keys())[0]
input_type = list(self._input_blob_names.values())[0]

self._input_layout = input_type if isinstance(input_type, list) else input_type.shape
_, _, h, w = self._input_layout
in_frame = image if image.shape[:-1] == (h, w) else cv2.resize(image, (w, h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
inputs[blob_name] = in_frame
else:
self._preprocessing_helper(image, preprocessing_file, restricted, inputs)

results = self._net.infer(inputs=inputs)
if len(results) == 1:
return results[self._output_blob_name].copy()
else:
Expand Down
26 changes: 17 additions & 9 deletions cvat/apps/auto_annotation/model_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def _remove_old_file(model_file_field):
os.remove(model_file_field.name)

def _update_dl_model_thread(dl_model_id, name, is_shared, model_file, weights_file, labelmap_file,
interpretation_file, run_tests, is_local_storage, delete_if_test_fails, restricted=True):
interpretation_file, run_tests, is_local_storage, delete_if_test_fails, restricted=True, preprocessing_file=None):
def _get_file_content(filename):
return os.path.basename(filename), open(filename, "rb")

Expand All @@ -51,7 +51,8 @@ def _run_test(model_file, weights_file, labelmap_file, interpretation_file):
labels_mapping=labelmap_file,
attribute_spec={},
convertation_file=interpretation_file,
restricted=restricted
restricted=restricted,
preprocessing_file=preprocessing_file
)
except Exception as e:
return False, str(e)
Expand Down Expand Up @@ -101,6 +102,9 @@ def _run_test(model_file, weights_file, labelmap_file, interpretation_file):
if interpretation_file:
_remove_old_file(dl_model.interpretation_file)
dl_model.interpretation_file.save(*_get_file_content(interpretation_file))
if preprocessing_file:
_remove_old_file(dl_model.preprocessing_file)
dl_model.preprocessing_file.save(*_get_file_content(preprocessing_file))

if name:
dl_model.name = name
Expand All @@ -117,7 +121,7 @@ def _run_test(model_file, weights_file, labelmap_file, interpretation_file):
if not test_res:
raise Exception("Model was not properly created/updated. Test failed: {}".format(message))

def create_or_update(dl_model_id, name, model_file, weights_file, labelmap_file, interpretation_file, owner, storage, is_shared):
def create_or_update(dl_model_id, name, model_file, weights_file, labelmap_file, interpretation_file, owner, storage, is_shared, preprocessing_file=None):
def get_abs_path(share_path):
if not share_path:
return share_path
Expand All @@ -143,17 +147,19 @@ def save_file_as_tmp(data):
if is_create_request:
dl_model_id = create_empty(owner=owner)

run_tests = bool(model_file or weights_file or labelmap_file or interpretation_file)
run_tests = bool(model_file or weights_file or labelmap_file or interpretation_file or preprocessing_file)
if storage != "local":
model_file = get_abs_path(model_file)
weights_file = get_abs_path(weights_file)
labelmap_file = get_abs_path(labelmap_file)
interpretation_file = get_abs_path(interpretation_file)
preprocessing_file = get_abs_path(preprocessing_file)
else:
model_file = save_file_as_tmp(model_file)
weights_file = save_file_as_tmp(weights_file)
labelmap_file = save_file_as_tmp(labelmap_file)
interpretation_file = save_file_as_tmp(interpretation_file)
preprocessing_file = save_file_as_tmp(preprocessing_file)

if owner:
restricted = not has_admin_role(owner)
Expand All @@ -175,7 +181,8 @@ def save_file_as_tmp(data):
run_tests,
storage == "local",
is_create_request,
restricted
restricted,
preprocessing_file
),
job_id=rq_id
)
Expand Down Expand Up @@ -295,7 +302,7 @@ def _process_detections(detections, path_to_conv_script, restricted=True):
return results

def _run_inference_engine_annotation(data, model_file, weights_file,
labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None, restricted=True):
labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None, restricted=True, preprocessing_file=None):
def process_attributes(shape_attributes, label_attr_spec):
attributes = []
for attr_text, attr_value in shape_attributes.items():
Expand Down Expand Up @@ -344,7 +351,7 @@ def add_shapes(shapes, target_container):
"frame_id": frame_counter,
"frame_height": orig_rows,
"frame_width": orig_cols,
"detections": model.infer(frame),
"detections": model.infer(frame, preprocessing_file, restricted),
})

frame_counter += 1
Expand All @@ -358,7 +365,7 @@ def add_shapes(shapes, target_container):

return result

def run_inference_thread(tid, model_file, weights_file, labels_mapping, attributes, convertation_file, reset, user, restricted=True):
def run_inference_thread(tid, model_file, weights_file, labels_mapping, attributes, convertation_file, reset, user, restricted=True, preprocessing_file=None):
def update_progress(job, progress):
job.refresh()
if "cancel" in job.meta:
Expand Down Expand Up @@ -386,7 +393,8 @@ def update_progress(job, progress):
convertation_file= convertation_file,
job=job,
update_progress=update_progress,
restricted=restricted
restricted=restricted,
preprocessing_file=preprocessing_file
)

if result is None:
Expand Down
1 change: 1 addition & 0 deletions cvat/apps/auto_annotation/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ class AnnotationModel(models.Model):
weights_file = models.FileField(upload_to=upload_path_handler, storage=fs)
labelmap_file = models.FileField(upload_to=upload_path_handler, storage=fs)
interpretation_file = models.FileField(upload_to=upload_path_handler, storage=fs)
preprocessing_file = models.FileField(upload_to=upload_path_handler, storage=fs, null=True)
shared = models.BooleanField(default=False)
primary = models.BooleanField(default=False)
framework = models.CharField(max_length=32, default=FrameworkChoice.OPENVINO)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -243,11 +243,17 @@ class AutoAnnotationModelManagerView {
}

function addFile(file, extention) {
// Note: the below method call always returns false
if (extention in files) {
throw Error(`More than one file with the extension .${extention} have been found`);
}
if (file.name === 'preprocessing.py') {
extractedFiles['preprocessing'] = file;

}
else {
extractedFiles[extention] = file;
}
}

files.forEach((file) => {
Expand Down Expand Up @@ -362,7 +368,7 @@ class AutoAnnotationModelManagerView {
modelData.append('storage', this.source);
modelData.append('shared', this.globallyBox.prop('checked'));

['xml', 'bin', 'json', 'py'].filter(e => e in validatedFiles).forEach((ext) => {
['xml', 'bin', 'json', 'py', 'preprocessing'].filter(e => e in validatedFiles).forEach((ext) => {
modelData.append(ext, validatedFiles[ext]);
});

Expand Down
6 changes: 6 additions & 0 deletions cvat/apps/auto_annotation/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ def create_model(request):
weights = files["bin"]
labelmap = files["json"]
interpretation_script = files["py"]
preprocessing_script = files.get('preprocessing')
owner = request.user

rq_id = model_manager.create_or_update(
Expand All @@ -72,6 +73,7 @@ def create_model(request):
owner=owner,
storage=storage,
is_shared=is_shared,
preprocessing_file=preprocessing_script
)

return JsonResponse({"id": rq_id})
Expand All @@ -98,6 +100,7 @@ def update_model(request, mid):
weights = files.get("bin")
labelmap = files.get("json")
interpretation_script = files.get("py")
preprocessing_script = files.get('preprocessing')

rq_id = model_manager.create_or_update(
dl_model_id=mid,
Expand All @@ -109,6 +112,7 @@ def update_model(request, mid):
owner=None,
storage=storage,
is_shared=is_shared,
preprocessing_file=preprocessing_script
)

return JsonResponse({"id": rq_id})
Expand Down Expand Up @@ -188,6 +192,7 @@ def start_annotation(request, mid, tid):
weights_file_path = dl_model.weights_file.name
labelmap_file = dl_model.labelmap_file.name
convertation_file_path = dl_model.interpretation_file.name
preprocessing_file_path = dl_model.preprocessing_file.name
restricted = not has_admin_role(dl_model.owner)

db_labels = db_task.label_set.prefetch_related("attributespec_set").all()
Expand Down Expand Up @@ -217,6 +222,7 @@ def start_annotation(request, mid, tid):
should_reset,
request.user,
restricted,
preprocessing_file_path
),
job_id = rq_id,
timeout=604800) # 7 days
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Example preprocessing script for R-CNN-style OpenVINO models.
# Executed by ModelLoader via exec() with three injected names:
#   input_blob - dict of the network's input blobs (name -> blob)
#   image      - the frame to annotate (HWC array)
#   inputs     - dict this script must fill with blob-name -> tensor entries
import cv2

image_tensor = input_blob['image_tensor']
_, _, h, w = image_tensor.shape

# Metadata expected by the model's 'image_info' input.
# NOTE(review): order here is [width, height, scale]; TensorFlow detection
# models usually expect [height, width, scale] — confirm against the model.
inputs['image_info'] = [w, h, 1]

# Resize only when the frame does not already match the network resolution.
in_frame = image if image.shape[:-1] == (h, w) else cv2.resize(image, (w, h))
in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW

inputs['image_tensor'] = in_frame