Add an extra field into meta section of a dump file (#149)
Fix #56
* Add an extra field ("source") to the meta section of a dump file.
* Add SafeCharField.
nmanovic authored Oct 24, 2018
1 parent b353d83 commit 8923449
Showing 5 changed files with 114 additions and 12 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -14,10 +14,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Shortcuts for outside/keyframe properties
- OpenVINO for accelerated model inference
- Tensorflow annotation now works without CUDA. It can use CPU only. OpenVINO and CUDA are supported optionally.
- Incremental saving, client ID field for all annotated objects.

### Changed
- The polyshape editing method has been improved. You can redraw part of a shape instead of cloning points.
- Unified shortcut (Esc) to close any mode instead of different shortcuts (Alt+N, Alt+G, Alt+M, etc.).
- The dump file contains information about the data source (e.g. video name, archive name, ...)

### Fixed
- Performance bottleneck when creating new objects (draw, copy, merge, etc.) has been fixed.
14 changes: 8 additions & 6 deletions cvat/apps/engine/annotation.py
@@ -1437,7 +1437,7 @@ def _dump(tid, data_format, scheme, host):
db_task = models.Task.objects.select_for_update().get(id=tid)
annotation = _AnnotationForTask(db_task)
annotation.init_from_db()
-annotation.dump(data_format, db_task, scheme, host)
+annotation.dump(data_format, scheme, host)

def _calc_box_area(box):
return (box.xbr - box.xtl) * (box.ybr - box.ytl)
@@ -1816,7 +1816,7 @@ def _merge_boxes(self, boxes, start_frame, overlap):
# We don't have old boxes on the frame. Let's add all new ones.
self.boxes.extend(int_boxes_by_frame[frame])

-def dump(self, data_format, db_task, scheme, host):
+def dump(self, data_format, scheme, host):
def _flip_box(box, im_w, im_h):
box.xbr, box.xtl = im_w - box.xtl, im_w - box.xbr
box.ybr, box.ytl = im_h - box.ytl, im_h - box.ybr
@@ -1836,6 +1836,7 @@ def _flip_shape(shape, im_w, im_h):

shape.points = ' '.join(['{},{}'.format(point['x'], point['y']) for point in points])

+db_task = self.db_task
db_segments = db_task.segment_set.all().prefetch_related('job_set')
db_labels = db_task.label_set.all().prefetch_related('attributespec_set')
im_meta_data = get_image_meta_cache(db_task)
@@ -1851,6 +1852,7 @@ def _flip_shape(shape, im_w, im_h):
("flipped", str(db_task.flipped)),
("created", str(timezone.localtime(db_task.created_date))),
("updated", str(timezone.localtime(db_task.updated_date))),
("source", db_task.source),

("labels", [
("label", OrderedDict([
@@ -1878,19 +1880,19 @@ def _flip_shape(shape, im_w, im_h):
("dumped", str(timezone.localtime(timezone.now())))
])

if self.db_task.mode == "interpolation":
if db_task.mode == "interpolation":
meta["task"]["original_size"] = OrderedDict([
("width", str(im_meta_data["original_size"][0]["width"])),
("height", str(im_meta_data["original_size"][0]["height"]))
])

-dump_path = self.db_task.get_dump_path()
+dump_path = db_task.get_dump_path()
with open(dump_path, "w") as dump_file:
dumper = _XmlAnnotationWriter(dump_file)
dumper.open_root()
dumper.add_meta(meta)

if self.db_task.mode == "annotation":
if db_task.mode == "annotation":
shapes = {}
shapes["boxes"] = {}
shapes["polygons"] = {}
Expand Down Expand Up @@ -1925,7 +1927,7 @@ def _flip_shape(shape, im_w, im_h):
list(shapes["polylines"].keys()) +
list(shapes["points"].keys()))):

-link = get_frame_path(self.db_task.id, frame)
+link = get_frame_path(db_task.id, frame)
path = os.readlink(link)

rpath = path.split(os.path.sep)
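Note: to make the annotation.py change concrete, here is a minimal sketch of the part of the meta dictionary that dump() now builds. Only the keys visible in the diff are reproduced, the rest of the real meta section is omitted, and build_partial_task_meta is a hypothetical helper used purely for illustration. Because dump() now reads the task from self.db_task, the db_task argument was dropped from its signature and from the call in _dump().

from collections import OrderedDict
from django.utils import timezone

def build_partial_task_meta(db_task):
    # Hypothetical helper mirroring the visible part of the diff: the task's
    # data source is now written out next to the existing timestamps, so the
    # dump records where the frames originally came from.
    return OrderedDict([
        ("flipped", str(db_task.flipped)),
        ("created", str(timezone.localtime(db_task.created_date))),
        ("updated", str(timezone.localtime(db_task.updated_date))),
        ("source", db_task.source),  # new field, e.g. a video or archive name
    ])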
74 changes: 74 additions & 0 deletions cvat/apps/engine/migrations/ (new migration; exact filename not shown in this view)
@@ -0,0 +1,74 @@
# Generated by Django 2.0.9 on 2018-10-24 10:50

import cvat.apps.engine.models
from django.db import migrations


class Migration(migrations.Migration):

dependencies = [
('engine', '0010_auto_20181011_1517'),
]

operations = [
migrations.AddField(
model_name='task',
name='source',
field=cvat.apps.engine.models.SafeCharField(default='unknown', max_length=256),
),
migrations.AlterField(
model_name='label',
name='name',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='labeledboxattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='labeledpointsattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='labeledpolygonattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='labeledpolylineattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='objectpathattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='task',
name='name',
field=cvat.apps.engine.models.SafeCharField(max_length=256),
),
migrations.AlterField(
model_name='trackedboxattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='trackedpointsattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='trackedpolygonattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
migrations.AlterField(
model_name='trackedpolylineattributeval',
name='value',
field=cvat.apps.engine.models.SafeCharField(max_length=64),
),
]
13 changes: 10 additions & 3 deletions cvat/apps/engine/models.py
@@ -14,9 +14,15 @@
import re
import os

+class SafeCharField(models.CharField):
+    def get_prep_value(self, value):
+        value = super().get_prep_value(value)
+        if value:
+            return value[:self.max_length]
+        return value

class Task(models.Model):
-name = models.CharField(max_length=256)
+name = SafeCharField(max_length=256)
size = models.PositiveIntegerField()
path = models.CharField(max_length=256)
mode = models.CharField(max_length=32)
@@ -28,6 +34,7 @@ class Task(models.Model):
overlap = models.PositiveIntegerField(default=0)
z_order = models.BooleanField(default=False)
flipped = models.BooleanField(default=False)
+source = SafeCharField(max_length=256, default="unknown")

# Extend default permission model
class Meta:
@@ -78,7 +85,7 @@ class Job(models.Model):

class Label(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
-name = models.CharField(max_length=64)
+name = SafeCharField(max_length=64)

def __str__(self):
return self.name
@@ -130,7 +137,7 @@ class AttributeVal(models.Model):
# TODO: add a validator here to be sure that it corresponds to self.label
id = models.BigAutoField(primary_key=True)
spec = models.ForeignKey(AttributeSpec, on_delete=models.CASCADE)
-value = models.CharField(max_length=64)
+value = SafeCharField(max_length=64)
class Meta:
abstract = True

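Note: for reference, a self-contained sketch of how SafeCharField behaves. The Comment model is hypothetical and only illustrates the truncation; it is not part of this commit. The migration above switches the existing name and attribute value columns to this field type and adds the new source column with the default 'unknown'.

from django.db import models

class SafeCharField(models.CharField):
    # Same idea as the field added above: silently truncate over-long values
    # before they are sent to the database instead of letting them overflow
    # the column length.
    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        if value:
            return value[:self.max_length]
        return value

class Comment(models.Model):  # hypothetical example model
    text = SafeCharField(max_length=64)

# Comment(text="x" * 200).save() stores only the first 64 characters, so
# user-supplied names and attribute values can no longer exceed their columns.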
23 changes: 20 additions & 3 deletions cvat/apps/engine/task.py
@@ -498,6 +498,8 @@ def _find_and_unpack_archive(upload_dir):
else:
raise Exception('Type defined as archive, but archives were not found.')

+return archive


'''
Search a video in upload dir and split it by frames. Copy frames to target dirs
@@ -525,6 +527,8 @@ def _find_and_extract_video(upload_dir, output_dir, db_task, compress_quality, f
else:
raise Exception("Video files were not found")

+return video


'''
Recursive search for all images in upload dir and compress it to RGB jpg with specified quality. Create symlinks for them.
@@ -565,11 +569,14 @@ def _find_and_compress_images(upload_dir, output_dir, db_task, compress_quality,
else:
raise Exception("Image files were not found")

+return filenames

def _save_task_to_db(db_task, task_params):
db_task.overlap = min(db_task.size, task_params['overlap'])
db_task.mode = task_params['mode']
db_task.z_order = task_params['z_order']
db_task.flipped = task_params['flip']
+db_task.source = task_params['data']

segment_step = task_params['segment'] - db_task.overlap
for x in range(0, db_task.size, segment_step):
@@ -638,10 +645,11 @@ def raise_exception(images, dirs, videos, archives):
job.save_meta()
_copy_data_from_share(share_files_mapping, share_dirs_mapping)

+archive = None
if counters['archive']:
job.meta['status'] = 'Archive is being unpacked..'
job.save_meta()
-_find_and_unpack_archive(upload_dir)
+archive = _find_and_unpack_archive(upload_dir)

# Define task mode and other parameters
task_params = {
@@ -657,9 +665,18 @@ def raise_exception(images, dirs, videos, archives):
slogger.glob.info("Task #{} parameters: {}".format(tid, task_params))

if task_params['mode'] == 'interpolation':
-_find_and_extract_video(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
+video = _find_and_extract_video(upload_dir, output_dir, db_task,
+    task_params['compress'], task_params['flip'], job)
+task_params['data'] = os.path.relpath(video, upload_dir)
else:
-_find_and_compress_images(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
+files = _find_and_compress_images(upload_dir, output_dir, db_task,
+    task_params['compress'], task_params['flip'], job)
+if archive:
+    task_params['data'] = os.path.relpath(archive, upload_dir)
+else:
+    task_params['data'] = '{} images: {}, ...'.format(len(files),
+        ", ".join([os.path.relpath(x, upload_dir) for x in files[0:2]]))

slogger.glob.info("Founded frames {} for task #{}".format(db_task.size, tid))

job.meta['status'] = 'Task is being saved in database'
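Note: the task.py change is what actually fills db_task.source. The helper functions now return what they found, and the task creation code turns that into a short description. Below is a standalone sketch of that logic; describe_source is a hypothetical name used only here, and the result is what ends up in task_params['data'] and then in db_task.source.

import os

def describe_source(upload_dir, video=None, archive=None, files=None):
    # Hypothetical restatement of the logic added above.
    if video is not None:       # interpolation (video) task
        return os.path.relpath(video, upload_dir)
    if archive is not None:     # images unpacked from an uploaded archive
        return os.path.relpath(archive, upload_dir)
    # plain image task: keep the count and the first couple of file names
    return '{} images: {}, ...'.format(
        len(files), ", ".join(os.path.relpath(x, upload_dir) for x in files[:2]))

# describe_source('/data/upload', files=['/data/upload/a.jpg',
#                                        '/data/upload/b.jpg',
#                                        '/data/upload/c.jpg'])
# -> '3 images: a.jpg, b.jpg, ...'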
