Skip to content

Commit

Permalink
Added an extra field to the meta section of a dump file.
Browse files Browse the repository at this point in the history
Fix #56
  • Loading branch information
Nikita Manovich committed Oct 23, 2018
1 parent 92b3d60 commit bb6dcac
Show file tree
Hide file tree
Showing 5 changed files with 64 additions and 9 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Shortcuts for outside/keyframe properties
- OpenVINO for accelerated model inference
- Tensorflow annotation now works without CUDA. It can use CPU only. OpenVINO and CUDA are supported optionally.
- Incremental saving, client ID field for all annotated objects.

### Changed
- Polyshape editing method has been improved. You can redraw part of shape instead of points cloning.
- Unified shortcut (Esc) for closing any mode instead of different shortcuts (Alt+N, Alt+G, Alt+M, etc.).
- Dump file contains information about data source (e.g. video name, archive name, ...)

### Fixed
- A performance bottleneck when creating new objects (draw, copy, merge, etc.) has been fixed.
Expand Down
17 changes: 11 additions & 6 deletions cvat/apps/engine/annotation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1437,7 +1437,7 @@ def _dump(tid, data_format, scheme, host):
db_task = models.Task.objects.select_for_update().get(id=tid)
annotation = _AnnotationForTask(db_task)
annotation.init_from_db()
annotation.dump(data_format, db_task, scheme, host)
annotation.dump(data_format, scheme, host)

def _calc_box_area(box):
return (box.xbr - box.xtl) * (box.ybr - box.ytl)
Expand Down Expand Up @@ -1816,7 +1816,7 @@ def _merge_boxes(self, boxes, start_frame, overlap):
# We don't have old boxes on the frame. Let's add all new ones.
self.boxes.extend(int_boxes_by_frame[frame])

def dump(self, data_format, db_task, scheme, host):
def dump(self, data_format, scheme, host):
def _flip_box(box, im_w, im_h):
box.xbr, box.xtl = im_w - box.xtl, im_w - box.xbr
box.ybr, box.ytl = im_h - box.ytl, im_h - box.ybr
Expand All @@ -1836,6 +1836,7 @@ def _flip_shape(shape, im_w, im_h):

shape.points = ' '.join(['{},{}'.format(point['x'], point['y']) for point in points])

db_task = self.db_task
db_segments = db_task.segment_set.all().prefetch_related('job_set')
db_labels = db_task.label_set.all().prefetch_related('attributespec_set')
im_meta_data = get_image_meta_cache(db_task)
Expand Down Expand Up @@ -1878,19 +1879,23 @@ def _flip_shape(shape, im_w, im_h):
("dumped", str(timezone.localtime(timezone.now())))
])

if self.db_task.mode == "interpolation":
if db_task.mode == "interpolation":
meta["task"]["original_size"] = OrderedDict([
("width", str(im_meta_data["original_size"][0]["width"])),
("height", str(im_meta_data["original_size"][0]["height"]))
])

dump_path = self.db_task.get_dump_path()
# Old tasks don't have such field
if db_task.data:
meta["task"]["data"] = db_task.data.path

dump_path = db_task.get_dump_path()
with open(dump_path, "w") as dump_file:
dumper = _XmlAnnotationWriter(dump_file)
dumper.open_root()
dumper.add_meta(meta)

if self.db_task.mode == "annotation":
if db_task.mode == "annotation":
shapes = {}
shapes["boxes"] = {}
shapes["polygons"] = {}
Expand Down Expand Up @@ -1925,7 +1930,7 @@ def _flip_shape(shape, im_w, im_h):
list(shapes["polylines"].keys()) +
list(shapes["points"].keys()))):

link = get_frame_path(self.db_task.id, frame)
link = get_frame_path(db_task.id, frame)
path = os.readlink(link)

rpath = path.split(os.path.sep)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Generated by Django 2.0.9 on 2018-10-23 13:24

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Create the ``Data`` model, which stores the original data-source
    path (video/archive/images) for a task.

    Auto-generated by Django 2.0.9; matches ``Data`` in
    cvat/apps/engine/models.py.
    """

    # Must run after the previous engine migration in the chain.
    dependencies = [
        ('engine', '0010_auto_20181011_1517'),
    ]

    operations = [
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Original path of the uploaded images/archive/video.
                ('path', models.CharField(max_length=256)),
                # One-to-one link to the owning task. Nullable because
                # tasks created before this migration have no Data row.
                ('task', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Task')),
            ],
        ),
    ]
5 changes: 5 additions & 0 deletions cvat/apps/engine/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,11 @@ def get_task_dirname(self):
def __str__(self):
return self.name

class Data(models.Model):
    """Original data source of a task (video name, archive name, or an
    image-list summary).

    Written by ``_save_task_to_db`` during task creation and exported as
    the ``data`` entry of the dump file's meta section (see
    ``annotation.py``). ``task`` is nullable because tasks created
    before this model was introduced have no associated row.
    """
    task = models.OneToOneField(Task, on_delete=models.CASCADE, null=True)
    # Original path for the images/archive/video
    path = models.CharField(max_length=256)

class Segment(models.Model):
task = models.ForeignKey(Task, on_delete=models.CASCADE)
start_frame = models.IntegerField()
Expand Down
27 changes: 24 additions & 3 deletions cvat/apps/engine/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -498,6 +498,8 @@ def _find_and_unpack_archive(upload_dir):
else:
raise Exception('Type defined as archive, but archives were not found.')

return archive


'''
Search a video in upload dir and split it by frames. Copy frames to target dirs
Expand Down Expand Up @@ -525,6 +527,8 @@ def _find_and_extract_video(upload_dir, output_dir, db_task, compress_quality, f
else:
raise Exception("Video files were not found")

return video


'''
Recursive search for all images in upload dir and compress it to RGB jpg with specified quality. Create symlinks for them.
Expand Down Expand Up @@ -565,12 +569,19 @@ def _find_and_compress_images(upload_dir, output_dir, db_task, compress_quality,
else:
raise Exception("Image files were not found")

return filenames

def _save_task_to_db(db_task, task_params):
db_task.overlap = min(db_task.size, task_params['overlap'])
db_task.mode = task_params['mode']
db_task.z_order = task_params['z_order']
db_task.flipped = task_params['flip']

db_data = models.Data()
db_data.task = db_task
db_data.path = task_params['data']
db_data.save()

segment_step = task_params['segment'] - db_task.overlap
for x in range(0, db_task.size, segment_step):
start_frame = x
Expand Down Expand Up @@ -638,10 +649,11 @@ def raise_exception(images, dirs, videos, archives):
job.save_meta()
_copy_data_from_share(share_files_mapping, share_dirs_mapping)

archive = None
if counters['archive']:
job.meta['status'] = 'Archive is being unpacked..'
job.save_meta()
_find_and_unpack_archive(upload_dir)
archive = _find_and_unpack_archive(upload_dir)

# Define task mode and other parameters
task_params = {
Expand All @@ -657,9 +669,18 @@ def raise_exception(images, dirs, videos, archives):
slogger.glob.info("Task #{} parameters: {}".format(tid, task_params))

if task_params['mode'] == 'interpolation':
_find_and_extract_video(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
video = _find_and_extract_video(upload_dir, output_dir, db_task,
task_params['compress'], task_params['flip'], job)
task_params['data'] = os.path.relpath(video, upload_dir)
else:
_find_and_compress_images(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
files =_find_and_compress_images(upload_dir, output_dir, db_task,
task_params['compress'], task_params['flip'], job)
if archive:
task_params['data'] = os.path.relpath(archive, upload_dir)
else:
task_params['data'] = '{} images: {}, ...'.format(len(files),
", ".join([os.path.relpath(x, upload_dir) for x in files[0:2]]))

slogger.glob.info("Founded frames {} for task #{}".format(db_task.size, tid))

job.meta['status'] = 'Task is being saved in database'
Expand Down

0 comments on commit bb6dcac

Please sign in to comment.