diff --git a/cvat-core/webpack.config.js b/cvat-core/webpack.config.js
index 5333a315a5ac..6e16f46a20cd 100644
--- a/cvat-core/webpack.config.js
+++ b/cvat-core/webpack.config.js
@@ -52,7 +52,27 @@ const webConfig = {
                     sourceType: 'unambiguous',
                 },
             },
-        }],
+        }, {
+            test: /3rdparty\/.*\.worker\.js$/,
+            use: {
+                loader: 'worker-loader',
+                options: {
+                    publicPath: '/static/engine/js/3rdparty/',
+                    name: '[name].js',
+                },
+            },
+        }, {
+            test: /\.worker\.js$/,
+            exclude: /3rdparty/,
+            use: {
+                loader: 'worker-loader',
+                options: {
+                    publicPath: '/static/engine/js/',
+                    name: '[name].js',
+                },
+            },
+        },
+        ],
     },
 };
 
diff --git a/cvat/apps/engine/media_extractors.py b/cvat/apps/engine/media_extractors.py
index 7c5644a3fbce..a775365d15e1 100644
--- a/cvat/apps/engine/media_extractors.py
+++ b/cvat/apps/engine/media_extractors.py
@@ -65,6 +65,10 @@ def slice_by_size(self, size):
     def image_names(self):
         pass
 
+    @abstractmethod
+    def get_image_size(self):
+        pass
+
 #Note step, start, stop have no affect
 class ImageListReader(IMediaReader):
     def __init__(self, source_path, step=1, start=0, stop=0):
@@ -93,6 +97,10 @@ def save_preview(self, preview_path):
     def image_names(self):
         return self._source_path
 
+    def get_image_size(self):
+        img = Image.open(self._source_path[0])
+        return img.width, img.height
+
 #Note step, start, stop have no affect
 class DirectoryReader(ImageListReader):
     def __init__(self, source_path, step=1, start=0, stop=0):
@@ -183,6 +191,10 @@ def save_preview(self, preview_path):
         with open(preview_path, 'wb') as f:
             f.write(self._zip_source.read(self._source_path[0]))
 
+    def get_image_size(self):
+        img = Image.open(BytesIO(self._zip_source.read(self._source_path[0])))
+        return img.width, img.height
+
     @property
     def image_names(self):
         return [os.path.join(os.path.dirname(self._zip_source.filename), p) for p in self._source_path]
@@ -234,6 +246,10 @@ def save_preview(self, preview_path):
     def image_names(self):
         return self._source_path
 
+    def get_image_size(self):
+        image = (next(iter(self)))[0]
+        return image.width, image.height
+
 class IChunkWriter(ABC):
     def __init__(self, quality):
         self._image_quality = quality
diff --git a/cvat/apps/engine/migrations/0025_auto_20200324_1222.py b/cvat/apps/engine/migrations/0025_auto_20200324_1222.py
new file mode 100644
index 000000000000..dd908d526850
--- /dev/null
+++ b/cvat/apps/engine/migrations/0025_auto_20200324_1222.py
@@ -0,0 +1,18 @@
+# Generated by Django 2.2.10 on 2020-03-24 12:22
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('engine', '0024_auto_20191023_1025'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='data',
+            name='chunk_size',
+            field=models.PositiveIntegerField(null=True),
+        ),
+    ]
diff --git a/cvat/apps/engine/models.py b/cvat/apps/engine/models.py
index d4137d16d5e4..c43c12c5440e 100644
--- a/cvat/apps/engine/models.py
+++ b/cvat/apps/engine/models.py
@@ -44,7 +44,7 @@ def __str__(self):
         return self.value
 
 class Data(models.Model):
-    chunk_size = models.PositiveIntegerField(default=36)
+    chunk_size = models.PositiveIntegerField(null=True)
     size = models.PositiveIntegerField(default=0)
     image_quality = models.PositiveSmallIntegerField(default=50)
     start_frame = models.PositiveIntegerField(default=0)
diff --git a/cvat/apps/engine/task.py b/cvat/apps/engine/task.py
index 80a0ed9b1855..aba87f5c5631 100644
--- a/cvat/apps/engine/task.py
+++ b/cvat/apps/engine/task.py
@@ -227,17 +227,18 @@ def _create_thread(tid, data):
     job.save_meta()
 
     db_images = []
-    extractors = []
+    extractor = None
 
     for media_type, media_files in media.items():
-        if not media_files:
-            continue
-        extractors.append(MEDIA_TYPES[media_type]['extractor'](
-            source_path=[os.path.join(upload_dir, f) for f in media_files],
-            step=db_data.get_frame_step(),
-            start=db_data.start_frame,
-            stop=db_data.stop_frame,
-        ))
+        if media_files:
+            if extractor is not None:
+                raise Exception('Combined data types are not supported')
+            extractor = MEDIA_TYPES[media_type]['extractor'](
+                source_path=[os.path.join(upload_dir, f) for f in media_files],
+                step=db_data.get_frame_step(),
+                start=db_data.start_frame,
+                stop=db_data.stop_frame,
+            )
     db_task.mode = task_mode
     db_data.compressed_chunk_type = models.DataChoice.VIDEO if task_mode == 'interpolation' and not data['use_zip_chunks'] else models.DataChoice.IMAGESET
     db_data.original_chunk_type = models.DataChoice.VIDEO if task_mode == 'interpolation' else models.DataChoice.IMAGESET
@@ -252,25 +253,33 @@ def update_progress(progress):
     compressed_chunk_writer = compressed_chunk_writer_class(db_data.image_quality)
     original_chunk_writer = original_chunk_writer_class(100)
 
+    # calculate chunk size if it isn't specified
+    if db_data.chunk_size is None:
+        if isinstance(compressed_chunk_writer, ZipCompressedChunkWriter):
+            w, h = extractor.get_image_size()
+            area = h * w
+            db_data.chunk_size = max(2, min(72, 36 * 1920 * 1080 // area))
+        else:
+            db_data.chunk_size = 36
+
     frame_counter = 0
-    total_len = sum(len(e) for e in extractors) or 100
+    total_len = len(extractor) or 100
     image_names = []
     image_sizes = []
-    for extractor in extractors:
-        for chunk_idx, chunk_images in enumerate(extractor.slice_by_size(db_data.chunk_size)):
-            for img in chunk_images:
-                image_names.append(img[1])
+    for chunk_idx, chunk_images in enumerate(extractor.slice_by_size(db_data.chunk_size)):
+        for img in chunk_images:
+            image_names.append(img[1])
 
-            original_chunk_path = db_data.get_original_chunk_path(chunk_idx)
-            original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)
+        original_chunk_path = db_data.get_original_chunk_path(chunk_idx)
+        original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)
 
-            compressed_chunk_path = db_data.get_compressed_chunk_path(chunk_idx)
-            img_sizes = compressed_chunk_writer.save_as_chunk(chunk_images, compressed_chunk_path)
+        compressed_chunk_path = db_data.get_compressed_chunk_path(chunk_idx)
+        img_sizes = compressed_chunk_writer.save_as_chunk(chunk_images, compressed_chunk_path)
 
-            image_sizes.extend(img_sizes)
+        image_sizes.extend(img_sizes)
 
-            db_data.size += len(chunk_images)
-            update_progress(db_data.size / total_len)
+        db_data.size += len(chunk_images)
+        update_progress(db_data.size / total_len)
 
     if db_task.mode == 'annotation':
         for image_name, image_size in zip(image_names, image_sizes):
@@ -291,7 +300,7 @@ def update_progress(progress):
     if db_data.stop_frame == 0:
         db_data.stop_frame = db_data.start_frame + (db_data.size - 1) * db_data.get_frame_step()
 
-    extractors[0].save_preview(db_data.get_preview_path())
+    extractor.save_preview(db_data.get_preview_path())
 
     slogger.glob.info("Founded frames {} for Data #{}".format(db_data.size, db_data.id))
     _save_task_to_db(db_task)
diff --git a/datumaro/datumaro/util/tf_util.py b/datumaro/datumaro/util/tf_util.py
index beaa4fc74e4c..00bf834a0f0d 100644
--- a/datumaro/datumaro/util/tf_util.py
+++ b/datumaro/datumaro/util/tf_util.py
@@ -38,4 +38,4 @@ def import_tf():
     except AttributeError:
         pass
 
-    return tf
\ No newline at end of file
+    return tf
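
Note on the adaptive chunk size added in task.py: when chunk_size is left NULL, tasks that use ZIP-compressed chunks now derive it from the frame area, anchored at 36 frames for a Full HD (1920x1080) frame and clamped to the range [2, 72], so larger frames get smaller chunks and the decoded size of a chunk stays roughly constant. A minimal sketch of the formula as a standalone helper (the name adaptive_chunk_size is ours, for illustration only; the patch inlines the expression in _create_thread):

    def adaptive_chunk_size(width, height):
        # Scale the 36-frames-at-1080p baseline by the inverse of the
        # frame area and clamp to [2, 72], mirroring the patch above.
        area = width * height
        return max(2, min(72, 36 * 1920 * 1080 // area))

    # 1080p frames keep the old default of 36, 4K drops to 9 frames per
    # chunk, and very small frames are capped at 72 frames per chunk.
    assert adaptive_chunk_size(1920, 1080) == 36
    assert adaptive_chunk_size(3840, 2160) == 9
    assert adaptive_chunk_size(320, 240) == 72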