Merge pull request #4 from azhavoro/cvat_az_migration
Cvat az migration
ygnn123 authored Nov 6, 2019
2 parents 2dc3211 + 61ab2c6 commit 6da262b
Showing 24 changed files with 736 additions and 424 deletions.
23 changes: 15 additions & 8 deletions Dockerfile
@@ -31,22 +31,29 @@ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -yq \
apache2 \
apache2-dev \
libapache2-mod-xsendfile \
supervisor \
ffmpeg \
git-core \
gstreamer0.10-ffmpeg \
libapache2-mod-xsendfile \
libavcodec-dev \
libavdevice-dev \
libavfilter-dev \
libavformat-dev \
libavutil-dev \
libldap2-dev \
libsasl2-dev \
libswresample-dev \
libswscale-dev \
libsm6 \
libxext6 \
p7zip-full \
pkg-config \
python3-dev \
python3-pip \
supervisor \
tzdata \
unzip \
unrar \
p7zip-full \
vim \
git-core \
libsm6 \
libxext6 && \
vim && \
pip3 install -U setuptools && \
ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime && \
dpkg-reconfigure -f noninteractive tzdata && \
1 change: 1 addition & 0 deletions Dockerfile.ui
@@ -35,6 +35,7 @@ RUN npm install

# Build source code
COPY cvat-core/ /tmp/cvat-core/
COPY cvat-data/ /tmp/cvat-data/
COPY cvat-ui/ /tmp/cvat-ui/
RUN mv .env.production .env && npm run build

30 changes: 15 additions & 15 deletions cvat-core/src/frames.js
@@ -92,44 +92,44 @@
reject(this.number);
}

if (isNode) {
if (isNode) {
resolve("Dummy data");
} else if (isBrowser) {
try {
const { provider } = frameDataCache[this.tid];
const { chunkSize } = frameDataCache[this.tid];
const { chunkSize } = frameDataCache[this.tid];

const frame = await provider.frame(this.number);
if (frame === null || frame === 'loading') {
onServerRequest();
onServerRequest();
const start = parseInt(this.number / chunkSize, 10) * chunkSize;
const stop = (parseInt(this.number / chunkSize, 10) + 1) * chunkSize - 1;
const chunkNumber = Math.floor(this.number / chunkSize);
let chunk = null;

if (frame === null) {
if (!provider.is_chunk_cached(start, stop)){
serverProxy.frames.getData(this.tid, chunkNumber).then(chunk =>{
provider.requestDecodeBlock(chunk, start, stop, onDecode.bind(this, provider), rejectRequest.bind(this));
});
});
} else {
provider.requestDecodeBlock(null, start, stop, onDecode.bind(this, provider), rejectRequest.bind(this));
}
}
} else {
}
}
} else {
if (this.number % chunkSize > 1){
if (!provider.isNextChunkExists(this.number)){
const nextChunkNumber = Math.floor(this.number / chunkSize) + 1;
provider.setReadyToLoading(nextChunkNumber);
provider.setReadyToLoading(nextChunkNumber);
serverProxy.frames.getData(this.tid, nextChunkNumber).then(nextChunk =>{
provider.requestDecodeBlock(nextChunk, (nextChunkNumber) * chunkSize, (nextChunkNumber + 1) * chunkSize - 1,
provider.requestDecodeBlock(nextChunk, (nextChunkNumber) * chunkSize, (nextChunkNumber + 1) * chunkSize - 1,
function(){}, rejectRequest.bind(this, provider));
});
}
}
resolve(frame);
}

} catch (exception) {
if (exception instanceof Exception) {
reject(exception);
@@ -163,9 +163,9 @@
});
}

async function getFrame(taskID, chunkSize, mode, frame) {
async function getFrame(taskID, chunkSize, chunkType, mode, frame) {
if (!(taskID in frameDataCache)) {
const blockType = mode === 'interpolation' ? cvatData.BlockType.TSVIDEO
const blockType = chunkType === 'video' ? cvatData.BlockType.TSVIDEO
: cvatData.BlockType.ARCHIVE;

const value = {
@@ -178,10 +178,10 @@
frameCache[taskID] = {};
frameDataCache[taskID] = value;
}

let size = null;
if (mode === 'interpolation') {
[size] = frameDataCache[taskID].meta;
[size] = frameDataCache[taskID].meta;
} else if (mode === 'annotation') {
if (frame >= frameDataCache[taskID].meta.length) {
throw new ArgumentError(
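Note on the cvat-core/src/frames.js change above: getFrame() now receives the task's chunk type as an extra argument and selects the decoder block type from it, instead of inferring it from the annotation mode. A minimal sketch of the new selection logic, assuming the cvat-data enum that frames.js already references; the helper name below is illustrative, not part of the actual file:

// Sketch only: mirrors the blockType selection added in the diff above.
function chooseBlockType(chunkType) {
    // 'video' chunks are decoded as TSVIDEO blocks; image archives fall back to ARCHIVE
    return chunkType === 'video'
        ? cvatData.BlockType.TSVIDEO
        : cvatData.BlockType.ARCHIVE;
}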
20 changes: 12 additions & 8 deletions cvat-core/src/server-proxy.js
@@ -223,7 +223,7 @@
}
}

async function createTask(taskData, files, onUpdate) {
async function createTask(taskSpec, taskDataSpec, onUpdate) {
const { backendAPI } = config;

async function wait(id) {
@@ -263,11 +263,15 @@
});
}

const batchOfFiles = new FormData();
for (const key in files) {
if (Object.prototype.hasOwnProperty.call(files, key)) {
for (let i = 0; i < files[key].length; i++) {
batchOfFiles.append(`${key}[${i}]`, files[key][i]);
const taskData = new FormData();
for (const key in taskDataSpec) {
if (Object.prototype.hasOwnProperty.call(taskDataSpec, key)) {
if (Array.isArray(taskDataSpec[key])) {
for (let i = 0; i < taskDataSpec[key].length; i++) {
taskData.append(`${key}[${i}]`, taskDataSpec[key][i]);
}
} else {
taskData.set(key, taskDataSpec[key]);
}
}
}
@@ -276,7 +280,7 @@

onUpdate('The task is being created on the server..');
try {
response = await Axios.post(`${backendAPI}/tasks`, JSON.stringify(taskData), {
response = await Axios.post(`${backendAPI}/tasks`, JSON.stringify(taskSpec), {
proxy: config.proxy,
headers: {
'Content-Type': 'application/json',
@@ -288,7 +292,7 @@

onUpdate('The data is being uploaded to the server..');
try {
await Axios.post(`${backendAPI}/tasks/${response.data.id}/data`, batchOfFiles, {
await Axios.post(`${backendAPI}/tasks/${response.data.id}/data`, taskData, {
proxy: config.proxy,
});
} catch (errorData) {
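Note on the cvat-core/src/server-proxy.js change above: createTask() now takes a JSON task spec and a separate data spec. The task spec is POSTed to /tasks as JSON, while the data spec is converted to FormData (array values appended with indexed keys, scalars set directly) and POSTed to /tasks/{id}/data. A hedged sketch of that conversion, reusing the loop from the diff; the sample field values are made up for illustration:

// Example data spec (field names come from the session.js diff; values are placeholders).
const taskDataSpec = {
    client_files: [new Blob(['a']), new Blob(['b'])], // arrays -> key[0], key[1], ...
    image_quality: 75,                                // scalars -> set(key, value)
};

const taskData = new FormData();
for (const key in taskDataSpec) {
    if (Object.prototype.hasOwnProperty.call(taskDataSpec, key)) {
        if (Array.isArray(taskDataSpec[key])) {
            for (let i = 0; i < taskDataSpec[key].length; i++) {
                taskData.append(`${key}[${i}]`, taskDataSpec[key][i]);
            }
        } else {
            taskData.set(key, taskDataSpec[key]);
        }
    }
}
// taskData is then uploaded to `${backendAPI}/tasks/${response.data.id}/data`.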
41 changes: 24 additions & 17 deletions cvat-core/src/session.js
@@ -711,7 +711,8 @@
start_frame: undefined,
stop_frame: undefined,
frame_filter: undefined,
data_chunk_size: undefined,
data_chunk_size: undefined,
data_chunk_type: undefined,
};

for (const property in data) {
@@ -1135,13 +1136,16 @@
set: (chunkSize) => {
if (typeof (chunkSize) !== 'number' || chunkSize < 1) {
throw new ArgumentError(
`Chink size value must be a positive number. But value ${chunkSize} has been got.`,
`Chunk size value must be a positive number. But value ${chunkSize} has been got.`,
);
}

data.data_chunk_size = chunkSize;
},
},
dataChunkType: {
get: () => data.data_chunk_type,
},
}));

// When we call a function, for example: task.annotations.get()
@@ -1262,6 +1266,7 @@
const frameData = await getFrame(
this.task.id,
this.task.dataChunkSize,
this.task.dataChunkType,
this.task.mode,
frame,
);
@@ -1358,39 +1363,40 @@
return this;
}

const taskData = {
const taskSpec = {
name: this.name,
labels: this.labels.map((el) => el.toJSON()),
image_quality: this.imageQuality,
z_order: Boolean(this.zOrder),
};

if (typeof (this.bugTracker) !== 'undefined') {
taskData.bug_tracker = this.bugTracker;
taskSpec.bug_tracker = this.bugTracker;
}
if (typeof (this.segmentSize) !== 'undefined') {
taskData.segment_size = this.segmentSize;
taskSpec.segment_size = this.segmentSize;
}
if (typeof (this.overlap) !== 'undefined') {
taskData.overlap = this.overlap;
taskSpec.overlap = this.overlap;
}

const taskDataSpec = {
client_files: this.clientFiles,
server_files: this.serverFiles,
remote_files: this.remoteFiles,
image_quality: this.imageQuality,
};

if (typeof (this.startFrame) !== 'undefined') {
taskData.start_frame = this.startFrame;
taskDataSpec.start_frame = this.startFrame;
}
if (typeof (this.stopFrame) !== 'undefined') {
taskData.stop_frame = this.stopFrame;
taskDataSpec.stop_frame = this.stopFrame;
}
if (typeof (this.frameFilter) !== 'undefined') {
taskData.frame_filter = this.frameFilter;
taskDataSpec.frame_filter = this.frameFilter;
}

const taskFiles = {
client_files: this.clientFiles,
server_files: this.serverFiles,
remote_files: this.remoteFiles,
};

const task = await serverProxy.tasks.createTask(taskData, taskFiles, onUpdate);
const task = await serverProxy.tasks.createTask(taskSpec, taskDataSpec, onUpdate);
return new Task(task);
};

@@ -1415,6 +1421,7 @@
const result = await getFrame(
this.id,
this.dataChunkSize,
this.dataChunkType,
this.mode,
frame,
);
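Note on the cvat-core/src/session.js change above: tasks now expose a read-only dataChunkType property (backed by data_chunk_type from the server), both frame getters forward it to getFrame(), and Task.save() builds the JSON task spec and the upload data spec as two separate objects before calling createTask(). A small sketch of the frame path, using only names that appear in the diff; the wrapper function itself is illustrative:

// Read-only accessor added next to dataChunkSize:
//   dataChunkType: { get: () => data.data_chunk_type },

// Sketch: how a job-level frame getter forwards the chunk type (on a Task the
// same call uses this.id / this.dataChunkSize / this.dataChunkType directly).
async function getJobFrame(job, frameNumber) {
    return getFrame(
        job.task.id,
        job.task.dataChunkSize,
        job.task.dataChunkType, // new argument threaded through to frames.js
        job.task.mode,
        frameNumber,
    );
}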
41 changes: 18 additions & 23 deletions cvat/apps/annotation/annotation.py
@@ -180,16 +180,16 @@ def _init_frame_info(self):
self._frame_info = {
frame: {
"path": "frame_{:06d}".format(frame),
"width": self._db_task.video.width,
"height": self._db_task.video.height,
} for frame in range(self._db_task.size)
"width": self._db_task.data.video.width,
"height": self._db_task.data.video.height,
} for frame in range(self._db_task.data.size)
}
else:
self._frame_info = {db_image.frame: {
"path": db_image.path,
"width": db_image.width,
"height": db_image.height,
} for db_image in self._db_task.image_set.all()}
} for db_image in self._db_task.data.image_set.all()}

self._frame_mapping = {
self._get_filename(info["path"]): frame for frame, info in self._frame_info.items()
@@ -201,15 +201,15 @@ def _init_meta(self):
("task", OrderedDict([
("id", str(self._db_task.id)),
("name", self._db_task.name),
("size", str(self._db_task.size)),
("size", str(self._db_task.data.size)),
("mode", self._db_task.mode),
("overlap", str(self._db_task.overlap)),
("bugtracker", self._db_task.bug_tracker),
("created", str(timezone.localtime(self._db_task.created_date))),
("updated", str(timezone.localtime(self._db_task.updated_date))),
("start_frame", str(self._db_task.start_frame)),
("stop_frame", str(self._db_task.stop_frame)),
("frame_filter", self._db_task.frame_filter),
("start_frame", str(self._db_task.data.start_frame)),
("stop_frame", str(self._db_task.data.stop_frame)),
("frame_filter", self._db_task.data.frame_filter),
("z_order", str(self._db_task.z_order)),

("labels", [
@@ -251,11 +251,11 @@

if self._db_task.mode == "interpolation":
self._meta["task"]["original_size"] = OrderedDict([
("width", str(self._db_task.video.width)),
("height", str(self._db_task.video.height))
("width", str(self._db_task.data.video.width)),
("height", str(self._db_task.data.video.height))
])
# Add source to dumped file
self._meta["source"] = str(os.path.basename(self._db_task.video.path))
self._meta["source"] = str(os.path.basename(self._db_task.data.video.path))

def _export_attributes(self, attributes):
exported_attributes = []
@@ -270,7 +270,7 @@ def _export_attributes(self, attributes):
def _export_tracked_shape(self, shape):
return Annotation.TrackedShape(
type=shape["type"],
frame=self._db_task.start_frame + shape["frame"] * self._db_task.get_frame_step(),
frame=self._db_task.data.start_frame + shape["frame"] * self._db_task.data.get_frame_step(),
points=shape["points"],
occluded=shape["occluded"],
outside=shape.get("outside", False),
@@ -283,7 +283,7 @@ def _export_labeled_shape(self, shape):
return Annotation.LabeledShape(
type=shape["type"],
label=self._get_label_name(shape["label_id"]),
frame=self._db_task.start_frame + shape["frame"] * self._db_task.get_frame_step(),
frame=self._db_task.data.start_frame + shape["frame"] * self._db_task.data.get_frame_step(),
points=shape["points"],
occluded=shape["occluded"],
z_order=shape.get("z_order", 0),
Expand All @@ -293,7 +293,7 @@ def _export_labeled_shape(self, shape):

def _export_tag(self, tag):
return Annotation.Tag(
frame=self._db_task.start_frame + tag["frame"] * self._db_task.get_frame_step(),
frame=self._db_task.data.start_frame + tag["frame"] * self._db_task.data.get_frame_step(),
label=self._get_label_name(tag["label_id"]),
group=tag.get("group", 0),
attributes=self._export_attributes(tag["attributes"]),
@@ -302,16 +302,11 @@ def _export_tag(self, tag):
def group_by_frame(self):
def _get_frame(annotations, shape):
db_image = self._frame_info[shape["frame"]]
frame = self._db_task.start_frame + shape["frame"] * self._db_task.get_frame_step()
rpath = db_image['path'].split(os.path.sep)
if len(rpath) != 1:
rpath = os.path.sep.join(rpath[rpath.index(".upload")+1:])
else:
rpath = rpath[0]
frame = self._db_task.data.start_frame + shape["frame"] * self._db_task.data.get_frame_step()
if frame not in annotations:
annotations[frame] = Annotation.Frame(
frame=frame,
name=rpath,
name=db_image['path'],
height=db_image["height"],
width=db_image["width"],
labeled_shapes=[],
@@ -321,7 +316,7 @@ def _get_frame(annotations, shape):

annotations = {}
data_manager = DataManager(self._annotation_ir)
for shape in data_manager.to_shapes(self._db_task.size):
for shape in data_manager.to_shapes(self._db_task.data.size):
_get_frame(annotations, shape).labeled_shapes.append(self._export_labeled_shape(shape))

for tag in self._annotation_ir.tags:
@@ -337,7 +332,7 @@ def shapes(self):
@property
def tracks(self):
for track in self._annotation_ir.tracks:
tracked_shapes = TrackManager.get_interpolated_shapes(track, 0, self._db_task.size)
tracked_shapes = TrackManager.get_interpolated_shapes(track, 0, self._db_task.data.size)
for tracked_shape in tracked_shapes:
tracked_shape["attributes"] += track["attributes"]
