From 9377c253709dee8644674332c14e422978603b2d Mon Sep 17 00:00:00 2001 From: Philipp Otto Date: Mon, 31 Jan 2022 08:42:42 +0100 Subject: [PATCH 1/7] Fix async volume bugs (#5955) * [WIP] try to fix some volume bugs by maintaining pendingOperations to be applied when merging with back-end data * add test case for erasing in mag 4 which provoked a bug and fix that bug; also improve error notification in case of permanent error * fix that undo logic never restored the correct typed array after decompressing bucket data (instead it was always uint8) * fix bug in undo logic which tried to merge back-end data with front-end data even though a (potentially different) merge already happened * remove error-prone triggeredBucketFetch return value * dirty fix for old undo tests * add new regression test which tests correct data merging with backend data on undo * add more tests, add afterEach clause which saves the annotation and ensures that nothing crashed * add tests which provoke incorrect data merge and fix that; also clean up * further clean up * add more comments * rename to compressed data where it makes sense * rename MaybeUnmergedBucketLoadedPromise * flatten return value of getOrCreateData * remove test for old Bucket.merge approach * fix missing await in lz4 compression/decompression * ensure that comlink wrapper always returns async functions * fix bleeding undo states by clearing bucketsAlreadyInUndoState at the right time; clean up * remove unused import * make initial data loading for a test variable (test both cases) * use set operation on typed array * remove unnecessary null handling for lz4 compression in save_saga * make sure copy-from-prev-slice does not mutate bucket's data directly * refactor data mutation and add comments why the floodfill can stay as-is * make old label code async to avoid merge problems and mark it as deprecated; also add warnings for empty merges * fix cube spec by adding custom mock for pull queue and adapting tests where appropriate * fix specs for temporal bucket manager by using dummy label with new mutation methods and improve docstrings * more clean up * more PR feedback & renaming * update changelog * fix new bug when undoing to initial bucket version when backend data is being awaited * fix bug when compression is slow and add a regression test which uses a mocked version of the compressor * don't allow annotating while wk is busy (e.g., with undo/redo/floodfill) * remove unnecessary imports * Update frontend/javascripts/test/model/binary/temporal_bucket_manager.spec.js Co-authored-by: Daniel * explain why unpackPromise exists * rename to setSlowCompression * DRY the slow compression worker * remove unnecessary import Co-authored-by: Daniel --- CHANGELOG.unreleased.md | 3 + frontend/javascripts/messages.js | 2 + frontend/javascripts/oxalis/api/api_latest.js | 17 +- frontend/javascripts/oxalis/api/api_v2.js | 15 +- .../model/actions/volumetracing_actions.js | 11 +- .../model/bucket_data_handling/bucket.js | 325 +++++++++++--- .../model/bucket_data_handling/data_cube.js | 67 ++- .../model/helpers/bucket_compression.js | 24 ++ .../oxalis/model/sagas/root_saga.js | 6 +- .../oxalis/model/sagas/save_saga.js | 174 ++++---- .../oxalis/model/sagas/volumetracing_saga.js | 42 +- .../volume_annotation_sampling.js | 33 +- .../oxalis/model/volumetracing/volumelayer.js | 53 ++- .../byte_array_lz4_compression.worker.js | 6 + .../oxalis/workers/comlink_wrapper.js | 11 +- .../slow_byte_array_lz4_compression.worker.js | 24 ++ .../javascripts/test/helpers/apiHelpers.js
| 11 + .../test/model/binary/cube.spec.js | 119 +++-- .../binary/temporal_bucket_manager.spec.js | 62 ++- .../volume_annotation_sampling.spec.js | 2 +- .../volumetracing/volumetracing_saga.spec.js | 16 + .../volumetracing_saga_integration.spec.js | 408 +++++++++++++++++- package.json | 1 + 23 files changed, 1085 insertions(+), 347 deletions(-) create mode 100644 frontend/javascripts/oxalis/model/helpers/bucket_compression.js create mode 100644 frontend/javascripts/oxalis/workers/slow_byte_array_lz4_compression.worker.js diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index 11925998cd..87b945e06f 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -15,11 +15,14 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released ### Changed - Upgraded webpack build tool to v5 and all other webpack related dependencies to their latest version. Enabled persistent caching which speeds up server restarts during development as well as production builds. [#5969](https://github.com/scalableminds/webknossos/pull/5969) +- The front-end API `labelVoxels` now returns a promise which resolves as soon as the label operation has been carried out. [#5955](https://github.com/scalableminds/webknossos/pull/5955) ### Fixed +- Fixed volume-related bugs which could corrupt the volume data in certain scenarios. [#5955](https://github.com/scalableminds/webknossos/pull/5955) - Fixed the placeholder resolution computation for anisotropic layers with missing base resolutions. [#5983](https://github.com/scalableminds/webknossos/pull/5983) - Fixed a bug where ad-hoc meshes were computed for a mapping, although it was disabled. [#5982](https://github.com/scalableminds/webknossos/pull/5982) + ### Removed ### Breaking Changes diff --git a/frontend/javascripts/messages.js b/frontend/javascripts/messages.js index a50db0a9a8..bfc9a18352 100644 --- a/frontend/javascripts/messages.js +++ b/frontend/javascripts/messages.js @@ -107,6 +107,8 @@ In order to restore the current window, a reload is necessary.`, "save.leave_page_unfinished": "WARNING: You have unsaved progress that may be lost when hitting OK. Please click cancel, wait until the progress is saved and the save button displays a checkmark before leaving the page.", "save.failed": "Failed to save annotation. Retrying.", + "save.failed.permanent": + "Failed to save annotation. Unfortunately, this might have caused data loss. Please reload the page, then check and correct the annotation.", "undo.no_undo": "There is no action that could be undone. However, if you want to restore an earlier version of this annotation, use the 'Restore Older Version' functionality in the dropdown next to the 'Save' button.", "undo.no_redo": "There is no action that could be redone.", diff --git a/frontend/javascripts/oxalis/api/api_latest.js b/frontend/javascripts/oxalis/api/api_latest.js index a59b6c90a0..7909d7043c 100644 --- a/frontend/javascripts/oxalis/api/api_latest.js +++ b/frontend/javascripts/oxalis/api/api_latest.js @@ -1424,19 +1424,24 @@ class DataApi { } /** - * Label voxels with the supplied value. + * Label voxels with the supplied value. Note that this method does not mutate + * the data immediately, but instead returns a promise (since the data might + * have to be downloaded first).
+ * * _Volume tracing only!_ * * @example // Set the segmentation id for some voxels to 1337 - * api.data.labelVoxels([[1,1,1], [1,2,1], [2,1,1], [2,2,1]], 1337); + * await api.data.labelVoxels([[1,1,1], [1,2,1], [2,1,1], [2,2,1]], 1337); */ - labelVoxels(voxels: Array<Vector3>, label: number): void { + async labelVoxels(voxels: Array<Vector3>, label: number): Promise<void> { assertVolume(Store.getState()); const segmentationLayer = this.model.getEnforcedSegmentationTracingLayer(); - for (const voxel of voxels) { - segmentationLayer.cube.labelVoxelInAllResolutions(voxel, label); - } + await Promise.all( voxels.map(voxel => segmentationLayer.cube._labelVoxelInAllResolutions_DEPRECATED(voxel, label), ), ); segmentationLayer.cube.pushQueue.push(); } diff --git a/frontend/javascripts/oxalis/api/api_v2.js b/frontend/javascripts/oxalis/api/api_v2.js index f6ad2b1575..dda27b0efd 100644 --- a/frontend/javascripts/oxalis/api/api_v2.js +++ b/frontend/javascripts/oxalis/api/api_v2.js @@ -642,19 +642,24 @@ class DataApi { } /** - * Label voxels with the supplied value. + * Label voxels with the supplied value. Note that this method does not mutate + * the data immediately, but instead returns a promise (since the data might + * have to be downloaded first). + * * _Volume tracing only!_ * * @example // Set the segmentation id for some voxels to 1337 * api.data.labelVoxels([[1,1,1], [1,2,1], [2,1,1], [2,2,1]], 1337); */ - labelVoxels(voxels: Array<Vector3>, label: number): void { + async labelVoxels(voxels: Array<Vector3>, label: number): Promise<void> { assertVolume(Store.getState()); const segmentationLayer = this.model.getEnforcedSegmentationTracingLayer(); - for (const voxel of voxels) { - segmentationLayer.cube.labelVoxelInAllResolutions(voxel, label); - } + await Promise.all( voxels.map(voxel => segmentationLayer.cube._labelVoxelInAllResolutions_DEPRECATED(voxel, label), ), ); segmentationLayer.cube.pushQueue.push(); } diff --git a/frontend/javascripts/oxalis/model/actions/volumetracing_actions.js b/frontend/javascripts/oxalis/model/actions/volumetracing_actions.js index 88e74dbb1b..a5a4c548b5 100644 --- a/frontend/javascripts/oxalis/model/actions/volumetracing_actions.js +++ b/frontend/javascripts/oxalis/model/actions/volumetracing_actions.js @@ -44,12 +44,13 @@ export type CopySegmentationLayerAction = { type: "COPY_SEGMENTATION_LAYER", source: "previousLayer" | "nextLayer", }; -export type MaybeBucketLoadedPromise = null | Promise<BucketDataArray>; +export type MaybeUnmergedBucketLoadedPromise = null | Promise<BucketDataArray>; export type AddBucketToUndoAction = { type: "ADD_BUCKET_TO_UNDO", zoomedBucketAddress: Vector4, bucketData: BucketDataArray, - maybeBucketLoadedPromise: MaybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise: MaybeUnmergedBucketLoadedPromise, + pendingOperations: Array<(BucketDataArray) => void>, tracingId: string, }; type UpdateDirectionAction = { type: "UPDATE_DIRECTION", centroid: Vector3 }; @@ -219,13 +220,15 @@ export const setContourTracingModeAction = (mode: ContourMode): SetContourTracin export const addBucketToUndoAction = ( zoomedBucketAddress: Vector4, bucketData: BucketDataArray, - maybeBucketLoadedPromise: MaybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise: MaybeUnmergedBucketLoadedPromise, + pendingOperations: Array<(BucketDataArray) => void>, tracingId: string, ): AddBucketToUndoAction => ({ type: "ADD_BUCKET_TO_UNDO", zoomedBucketAddress, bucketData, - maybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise, + pendingOperations: pendingOperations.slice(), tracingId, }); diff --git
a/frontend/javascripts/oxalis/model/bucket_data_handling/bucket.js b/frontend/javascripts/oxalis/model/bucket_data_handling/bucket.js index ace8ce48e6..b4a7d5b1bb 100644 --- a/frontend/javascripts/oxalis/model/bucket_data_handling/bucket.js +++ b/frontend/javascripts/oxalis/model/bucket_data_handling/bucket.js @@ -3,29 +3,29 @@ * @flow */ +import { createNanoEvents } from "nanoevents"; import * as THREE from "three"; import _ from "lodash"; -import { createNanoEvents } from "nanoevents"; -import ErrorHandling from "libs/error_handling"; -import { mod } from "libs/utils"; +import { type ElementClass } from "types/api_flow_types"; +import { PullQueueConstants } from "oxalis/model/bucket_data_handling/pullqueue"; +import { + addBucketToUndoAction, + type MaybeUnmergedBucketLoadedPromise, +} from "oxalis/model/actions/volumetracing_actions"; import { bucketPositionToGlobalAddress, zoomedAddressToAnotherZoomStep, } from "oxalis/model/helpers/position_converter"; import { getRequestLogZoomStep } from "oxalis/model/accessors/flycam_accessor"; import { getResolutions } from "oxalis/model/accessors/dataset_accessor"; +import { mod } from "libs/utils"; +import Constants, { type BoundingBoxType, type Vector3, type Vector4 } from "oxalis/constants"; import DataCube from "oxalis/model/bucket_data_handling/data_cube"; +import ErrorHandling from "libs/error_handling"; import Store from "oxalis/store"; import TemporalBucketManager from "oxalis/model/bucket_data_handling/temporal_bucket_manager"; -import Constants, { type Vector3, type Vector4, type BoundingBoxType } from "oxalis/constants"; import window from "libs/window"; -import { type ElementClass } from "types/api_flow_types"; -import { - addBucketToUndoAction, - type MaybeBucketLoadedPromise, -} from "oxalis/model/actions/volumetracing_actions"; -import { PullQueueConstants } from "oxalis/model/bucket_data_handling/pullqueue"; export const BucketStateEnum = { UNREQUESTED: "UNREQUESTED", @@ -54,6 +54,13 @@ type Emitter = { emit: Function, }; +const WARNING_THROTTLE_THRESHOLD = 10000; +const warnMergeWithoutPendingOperations = _.throttle(() => { + ErrorHandling.notify( + new Error("Bucket.merge() was called with an empty list of pending operations."), + ); +}, WARNING_THROTTLE_THRESHOLD); + export class NullBucket { type: "null" = "null"; isOutOfBoundingBox: boolean; @@ -109,15 +116,21 @@ export const NULL_BUCKET_OUT_OF_BB = new NullBucket(true); export type Bucket = DataBucket | NullBucket; // This set saves whether a bucket is already added to the current undo volume batch -// and gets cleared by the save saga after an annotation step has finished. +// and gets cleared when a volume transaction is ended (marked by the action +// FINISH_ANNOTATION_STROKE). 
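+// +// Illustrative timeline (a sketch distilled from this patch's commit messages, not verbatim code from the repository): +// 1. A brush stroke touches bucket B for the first time -> B's current data is snapshotted once via _markAndAddBucketForUndo() and B is added to bucketsAlreadyInUndoState. +// 2. Later frames of the same stroke touch B again -> no further snapshot is taken. +// 3. The stroke ends -> FINISH_ANNOTATION_STROKE is dispatched and markVolumeTransactionEnd() clears the set, so the next stroke snapshots B again.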
export const bucketsAlreadyInUndoState: Set<DataBucket> = new Set(); +export function markVolumeTransactionEnd() { + bucketsAlreadyInUndoState.clear(); +} + export class DataBucket { type: "data" = "data"; elementClass: ElementClass; visualizedMesh: ?Object; visualizationColor: number; dirtyCount: number = 0; + pendingOperations: Array<(BucketDataArray) => void> = []; state: BucketStateEnumType; dirty: boolean; @@ -129,7 +142,7 @@ export class DataBucket { _fallbackBucket: ?Bucket; throttledTriggerLabeled: () => void; emitter: Emitter; - maybeBucketLoadedPromise: MaybeBucketLoadedPromise; + maybeUnmergedBucketLoadedPromise: MaybeUnmergedBucketLoadedPromise; constructor( elementClass: ElementClass, @@ -138,7 +151,7 @@ export class DataBucket { cube: DataCube, ) { this.emitter = createNanoEvents(); - this.maybeBucketLoadedPromise = null; + this.maybeUnmergedBucketLoadedPromise = null; this.elementClass = elementClass; this.cube = cube; @@ -235,6 +248,16 @@ export class DataBucket { return this.state === BucketStateEnum.MISSING; } + needsBackendData(): boolean { + /* + "Needs backend data" means that the front-end has not received any data (nor a "missing" reply) for this bucket yet. + The return value does + - not tell whether the data fetching was already initiated (does not differentiate between UNREQUESTED and REQUESTED) + - not tell whether the backend actually has data for the address (does not differentiate between LOADED and MISSING) + */ + return this.state === BucketStateEnum.UNREQUESTED || this.state === BucketStateEnum.REQUESTED; + } + getAddress(): Vector3 { return [this.zoomedAddress[0], this.zoomedAddress[1], this.zoomedAddress[2]]; } @@ -266,46 +289,61 @@ export class DataBucket { return { isVoxelOutside, neighbourBucketAddress, adjustedVoxel }; }; - getCopyOfData(): { dataClone: BucketDataArray, triggeredBucketFetch: boolean } { - const { data: bucketData, triggeredBucketFetch } = this.getOrCreateData(); + getCopyOfData(): BucketDataArray { + const bucketData = this.getOrCreateData(); const TypedArrayClass = getConstructorForElementClass(this.elementClass)[0]; const dataClone = new TypedArrayClass(bucketData); - return { dataClone, triggeredBucketFetch }; + return dataClone; } - label(labelFunc: BucketDataArray => void) { - const { data: bucketData } = this.getOrCreateData(); - this.markAndAddBucketForUndo(); + // eslint-disable-next-line camelcase + async label_DEPRECATED(labelFunc: BucketDataArray => void): Promise<void> { + /* + * It's not recommended to use this method (repeatedly), as it can be + * very slow. See the docstring for Bucket.getOrCreateData() for alternatives. + */ + const bucketData = await this.getDataForMutation(); + this.startDataMutation(); labelFunc(bucketData); - this.throttledTriggerLabeled(); + this.endDataMutation(); } - markAndAddBucketForUndo() { + _markAndAddBucketForUndo() { + // This method adds a snapshot of the current bucket to the undo stack. + // Note that the method may be called multiple times during a volume + // transaction (e.g., when moving the brush over the same buckets for + // multiple frames). Since a snapshot of the "old" data should be + // saved to the undo stack, the snapshot only has to be created once + // for each transaction. + // This is ensured by checking bucketsAlreadyInUndoState.
this.dirty = true; - if (!bucketsAlreadyInUndoState.has(this)) { - bucketsAlreadyInUndoState.add(this); - const { dataClone, triggeredBucketFetch } = this.getCopyOfData(); - if (triggeredBucketFetch) { - this.maybeBucketLoadedPromise = new Promise((resolve, _reject) => { - this.once("bucketLoaded", data => { - // Once the bucket was loaded, maybeBucketLoadedPromise can be null'ed - this.maybeBucketLoadedPromise = null; - resolve(data); - }); + if (bucketsAlreadyInUndoState.has(this)) { + return; + } + + bucketsAlreadyInUndoState.add(this); + const dataClone = this.getCopyOfData(); + if (this.needsBackendData() && this.maybeUnmergedBucketLoadedPromise == null) { + this.maybeUnmergedBucketLoadedPromise = new Promise((resolve, _reject) => { + this.once("unmergedBucketDataLoaded", data => { + // Once the bucket was loaded, maybeUnmergedBucketLoadedPromise can be null'ed + this.maybeUnmergedBucketLoadedPromise = null; + resolve(data); }); - } - Store.dispatch( - // Always use the current state of this.maybeBucketLoadedPromise, since - // this bucket could be added to multiple undo batches while it's fetched. All entries - // need to have the corresponding promise for the undo to work correctly. - addBucketToUndoAction( - this.zoomedAddress, - dataClone, - this.maybeBucketLoadedPromise, - this.getTracingId(), - ), - ); + }); } + Store.dispatch( + // Always use the current state of this.maybeUnmergedBucketLoadedPromise, since + // this bucket could be added to multiple undo batches while it's fetched. All entries + // need to have the corresponding promise for the undo to work correctly. + addBucketToUndoAction( + this.zoomedAddress, + dataClone, + this.maybeUnmergedBucketLoadedPromise, + this.pendingOperations, + this.getTracingId(), + ), + ); } hasData(): boolean { @@ -321,17 +359,23 @@ export class DataBucket { return data; } - setData(newData: Uint8Array) { - const TypedArrayClass = getConstructorForElementClass(this.elementClass)[0]; - this.data = new TypedArrayClass( - newData.buffer, - newData.byteOffset, - newData.byteLength / TypedArrayClass.BYTES_PER_ELEMENT, - ); + setData(newData: BucketDataArray) { + this.data = newData; this.dirty = true; this.trigger("bucketLabeled"); } + uint8ToTypedBuffer(arrayBuffer: ?Uint8Array) { + const [TypedArrayClass, channelCount] = getConstructorForElementClass(this.elementClass); + return arrayBuffer != null + ? new TypedArrayClass( + arrayBuffer.buffer, + arrayBuffer.byteOffset, + arrayBuffer.byteLength / TypedArrayClass.BYTES_PER_ELEMENT, + ) + : new TypedArrayClass(channelCount * Constants.BUCKET_SIZE); + } + markAsNeeded(): void { this.accessed = true; } @@ -340,17 +384,130 @@ export class DataBucket { this.accessed = false; } - getOrCreateData(): { data: BucketDataArray, triggeredBucketFetch: boolean } { - let triggeredBucketFetch = false; + getOrCreateData(): BucketDataArray { + /* + * Don't use this method to get the bucket's data, if you want to mutate it. + * Instead, use + * 1) the preferred VoxelMap primitive (via applyVoxelMap) which works for not + * (yet) loaded buckets + * 2) or the async method getDataForMutation(), which ensures that the bucket is + * loaded before mutation (therefore, no async merge operations have to be + * defined). See DataCube.floodFill() for an example usage of this pattern. 
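+ * + * A minimal sketch of pattern (1) with hypothetical values (applyVoxelMap is defined further below; a get3DAddress helper can be obtained from getFast3DCoordinateHelper in volumelayer.js): + * + * const voxelMap = new Uint8Array(Constants.BUCKET_WIDTH ** 2).fill(1); + * bucket.applyVoxelMap(voxelMap, cellId, get3DAddress, 1, 2); + * + * This works even for buckets which are not loaded yet, since the operation is additionally recorded in this.pendingOperations and re-applied when the backend data arrives (see Bucket.merge()).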
+ */ + if (this.data == null) { const [TypedArrayClass, channelCount] = getConstructorForElementClass(this.elementClass); this.data = new TypedArrayClass(channelCount * Constants.BUCKET_SIZE); if (!this.isMissing()) { - triggeredBucketFetch = true; this.temporalBucketManager.addBucket(this); } } - return { data: this.getData(), triggeredBucketFetch }; + return this.getData(); + } + + async getDataForMutation(): Promise<BucketDataArray> { + /* + * You can use the returned data to inspect it. If you decide to mutate the data, + * please call startDataMutation() before the mutation and endDataMutation() afterwards. + * Example: + * + * const data = await bucket.getDataForMutation(); + * bucket.startDataMutation(); + * data[...] = ...; + * bucket.endDataMutation(); + */ + await this.ensureLoaded(); + return this.getOrCreateData(); + } + + startDataMutation(): void { + /* + * See Bucket.getDataForMutation() for a safe way of using this method. + */ + this._markAndAddBucketForUndo(); + } + + endDataMutation(): void { + /* + * See Bucket.getDataForMutation() for a safe way of using this method. + */ + this.cube.pushQueue.insert(this); + this.trigger("bucketLabeled"); + } + + applyVoxelMap( + voxelMap: Uint8Array, + cellId: number, + get3DAddress: (number, number, Vector3 | Float32Array) => void, + sliceCount: number, + thirdDimensionIndex: 0 | 1 | 2, + // If shouldOverwrite is false, a voxel is only overwritten if + // its old value is equal to overwritableValue. + shouldOverwrite: boolean = true, + overwritableValue: number = 0, + ) { + const data = this.getOrCreateData(); + + if (this.needsBackendData()) { + // If the frontend does not yet have the backend's data + // for this bucket, we apply the voxel map, but also + // save it in this.pendingOperations. See Bucket.merge() + // for more details. + this.pendingOperations.push(_data => + this._applyVoxelMapInPlace( + _data, + voxelMap, + cellId, + get3DAddress, + sliceCount, + thirdDimensionIndex, + shouldOverwrite, + overwritableValue, + ), + ); + } + + this._applyVoxelMapInPlace( + data, + voxelMap, + cellId, + get3DAddress, + sliceCount, + thirdDimensionIndex, + shouldOverwrite, + overwritableValue, + ); + } + + _applyVoxelMapInPlace( + data: BucketDataArray, + voxelMap: Uint8Array, + cellId: number, + get3DAddress: (number, number, Vector3 | Float32Array) => void, + sliceCount: number, + thirdDimensionIndex: 0 | 1 | 2, + // If shouldOverwrite is false, a voxel is only overwritten if + // its old value is equal to overwritableValue. + shouldOverwrite: boolean = true, + overwritableValue: number = 0, + ) { + const out = new Float32Array(3); + for (let firstDim = 0; firstDim < Constants.BUCKET_WIDTH; firstDim++) { + for (let secondDim = 0; secondDim < Constants.BUCKET_WIDTH; secondDim++) { + if (voxelMap[firstDim * Constants.BUCKET_WIDTH + secondDim] === 1) { + get3DAddress(firstDim, secondDim, out); + const voxelToLabel = out; + voxelToLabel[thirdDimensionIndex] = + (voxelToLabel[thirdDimensionIndex] + sliceCount) % Constants.BUCKET_WIDTH; + // The voxelToLabel is already within the bucket and in the correct resolution.
+ const voxelAddress = this.cube.getVoxelIndexByVoxelOffset(voxelToLabel); + const currentSegmentId = data[voxelAddress]; + if (shouldOverwrite || (!shouldOverwrite && currentSegmentId === overwritableValue)) { + data[voxelAddress] = cellId; + } + } + } + } } markAsPulled(): void { @@ -377,15 +534,9 @@ } receiveData(arrayBuffer: ?Uint8Array): void { + const data = this.uint8ToTypedBuffer(arrayBuffer); const [TypedArrayClass, channelCount] = getConstructorForElementClass(this.elementClass); - const data = - arrayBuffer != null - ? new TypedArrayClass( - arrayBuffer.buffer, - arrayBuffer.byteOffset, - arrayBuffer.byteLength / TypedArrayClass.BYTES_PER_ELEMENT, - ) - : new TypedArrayClass(channelCount * Constants.BUCKET_SIZE); + if (data.length !== channelCount * Constants.BUCKET_SIZE) { const debugInfo = // Disable this conditional if you need verbose output here. @@ -403,15 +554,21 @@ ); } switch (this.state) { - case BucketStateEnum.REQUESTED: + case BucketStateEnum.REQUESTED: { + // Clone the data for the unmergedBucketDataLoaded event, + // as the following merge operation is done in-place. + const dataClone = new TypedArrayClass(data); + this.trigger("unmergedBucketDataLoaded", dataClone); + if (this.dirty) { this.merge(data); } else { this.data = data; } - this.trigger("bucketLoaded", data); this.state = BucketStateEnum.LOADED; + this.trigger("bucketLoaded", data); break; + } default: this.unexpectedState(); } @@ -466,14 +623,31 @@ return fallbackBucket; } - merge(newData: BucketDataArray): void { + merge(fetchedData: BucketDataArray): void { if (this.data == null) { throw new Error("Bucket.merge() called, but data does not exist."); } - for (let i = 0; i < Constants.BUCKET_SIZE; i++) { - // Only overwrite with the new value if the old value was 0 - this.data[i] = this.data[i] || newData[i]; + + // The frontend just received the backend's data for this bucket. + // We apply all pendingOperations on the backend's data + // and set it to this.data. + // The old this.data is discarded/overwritten, since it was only + // a preliminary version of the data. + for (const op of this.pendingOperations) { + op(fetchedData); + } + this.data = fetchedData; + + if (this.pendingOperations.length === 0) { + // This can happen when mutating an unloaded bucket and then + // undoing it. The bucket is still marked as dirty, even though + // no pending operations are necessary (since the bucket was restored + // to an untouched version). + // See this refactoring issue: https://github.com/scalableminds/webknossos/issues/5973 + warnMergeWithoutPendingOperations(); } + + this.pendingOperations = []; } // The following three methods can be used for debugging purposes. @@ -508,6 +682,19 @@ } } + // This is a debugging function to enable logging specific + // to a certain bucket. When drilling down on a specific bucket + // you can adapt the if-condition (e.g. for only printing logs + // for a specific bucket address).
+ // + // Example usage: + // bucket._logMaybe("Data of problematic bucket", bucket.data) + _logMaybe = (...args) => { + if (this.zoomedAddress.join(",") === [93, 0, 0, 0].join(",")) { + console.log(...args); + } + }; + async ensureLoaded(): Promise<void> { let needsToAwaitBucket = false; if (this.isRequested()) { @@ -522,7 +709,7 @@ } if (needsToAwaitBucket) { await new Promise(resolve => { - this.on("bucketLoaded", resolve); + this.once("bucketLoaded", resolve); }); } // Bucket has been loaded by now or was loaded already diff --git a/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js b/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js index b9b7d8064a..884d6077c6 100644 --- a/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js +++ b/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js @@ -341,39 +341,34 @@ } } - labelTestShape(): void { - // draw a sphere, centered at (100, 100, 100) with radius 50 - for (let x = 80; x <= 120; x++) { - for (let y = 80; y <= 120; y++) { - for (let z = 80; z <= 120; z++) { - if ( - Math.sqrt((x - 100) * (x - 100) + (y - 100) * (y - 100) + (z - 100) * (z - 100)) <= 20 - ) { - this.labelVoxelInResolution([x, y, z], 0, 5); - } - } - } - } - } - - labelVoxelInAllResolutions(voxel: Vector3, label: number, activeCellId: ?number) { + // eslint-disable-next-line camelcase + async _labelVoxelInAllResolutions_DEPRECATED( + voxel: Vector3, + label: number, + activeCellId: ?number, + ): Promise<void> { // This function is only provided for the wK front-end api and should not be used internally, // since it only operates on one voxel and therefore is not performance-optimized. // Please make use of a LabeledVoxelsMap instead. + const promises = []; for (const [resolutionIndex] of this.resolutionInfo.getResolutionsWithIndices()) { - this.labelVoxelInResolution(voxel, label, resolutionIndex, activeCellId); + promises.push( + this._labelVoxelInResolution_DEPRECATED(voxel, label, resolutionIndex, activeCellId), ); } + await Promise.all(promises); + this.triggerPushQueue(); } - labelVoxelInResolution( + // eslint-disable-next-line camelcase + async _labelVoxelInResolution_DEPRECATED( voxel: Vector3, label: number, zoomStep: number, activeCellId: ?number, - ): void { + ): Promise<void> { let voxelInCube = true; for (let i = 0; i <= 2; i++) { voxelInCube = voxelInCube && voxel[i] >= 0 && voxel[i] < this.upperBoundary[i]; } @@ -394,13 +389,7 @@ const labelFunc = (data: BucketDataArray): void => { data[voxelIndex] = label; }; - bucket.label(labelFunc); - - // Push bucket if it's loaded or missing (i.e., not existent on the server), - // otherwise, TemporalBucketManager will push it once it is available. - if (bucket.isLoaded() || bucket.isMissing()) { - this.pushQueue.insert(bucket); - } + await bucket.label_DEPRECATED(labelFunc); } } } @@ -459,7 +448,7 @@ ); } const seedVoxelIndex = this.getVoxelIndex(globalSeedVoxel, zoomStep); - const sourceCellId = seedBucket.getOrCreateData().data[seedVoxelIndex]; + const sourceCellId = seedBucket.getOrCreateData()[seedVoxelIndex]; if (sourceCellId === cellId) { return { bucketsWithLabeledVoxelsMap, @@ -516,16 +505,21 @@ if (shouldIgnoreBucket) { continue; } + + // Since the floodfill operation needs to read the existing bucket data, we need to + // load (await) the data first. This means that we don't have to define LabeledVoxelMaps + // for the current magnification.
This simplifies the algorithm, too, since the floodfill also + // uses the bucket's data array to mark visited voxels (which would not be possible with + // LabeledVoxelMaps). // eslint-disable-next-line no-await-in-loop - await currentBucket.ensureLoaded(); - const { data: bucketData } = currentBucket.getOrCreateData(); + const bucketData = await currentBucket.getDataForMutation(); const initialVoxelIndex = this.getVoxelIndexByVoxelOffset(initialXyzVoxelInBucket); if (bucketData[initialVoxelIndex] !== sourceCellId) { // Ignoring neighbour buckets whose cellId at the initial voxel does not match the source cell id. continue; } // Add the bucket to the current volume undo batch, if it isn't already part of it. - currentBucket.markAndAddBucketForUndo(); + currentBucket.startDataMutation(); // Mark the initial voxel. bucketData[initialVoxelIndex] = cellId; // Create an array saving the labeled voxel of the current slice for the current bucket, if there isn't already one. @@ -629,8 +623,7 @@ if (bucket.type === "null") { continue; } - this.pushQueue.insert(bucket); - bucket.trigger("bucketLabeled"); + bucket.endDataMutation(); } return { @@ -643,12 +636,18 @@ }; } - setBucketData(zoomedAddress: Vector4, data: Uint8Array) { + setBucketData( + zoomedAddress: Vector4, + data: BucketDataArray, + newPendingOperations: Array<(BucketDataArray) => void>, + ) { const bucket = this.getOrCreateBucket(zoomedAddress); if (bucket.type === "null") { return; } bucket.setData(data); + bucket.pendingOperations = newPendingOperations; + this.pushQueue.insert(bucket); } diff --git a/frontend/javascripts/oxalis/model/helpers/bucket_compression.js b/frontend/javascripts/oxalis/model/helpers/bucket_compression.js new file mode 100644 index 0000000000..93b4c54326 --- /dev/null +++ b/frontend/javascripts/oxalis/model/helpers/bucket_compression.js @@ -0,0 +1,24 @@ +// @flow +import { type BucketDataArray, DataBucket } from "oxalis/model/bucket_data_handling/bucket"; +import { createWorker } from "oxalis/workers/comlink_wrapper"; +import compressLz4Block from "oxalis/workers/byte_array_lz4_compression.worker"; + +const _byteArrayToLz4Array = createWorker(compressLz4Block); + +export const decompressToTypedArray = async ( + bucket: DataBucket, + compressedData: Uint8Array, +): Promise<BucketDataArray> => { + const decompressedBackendData = await _byteArrayToLz4Array(compressedData, false); + return bucket.uint8ToTypedBuffer(decompressedBackendData); +}; + +export const compressTypedArray = async (bucketData: BucketDataArray): Promise<Uint8Array> => { + const bucketDataAsByteArray = new Uint8Array( + bucketData.buffer, + bucketData.byteOffset, + bucketData.byteLength, + ); + const compressedBucketData = await _byteArrayToLz4Array(bucketDataAsByteArray, true); + return compressedBucketData; +}; diff --git a/frontend/javascripts/oxalis/model/sagas/root_saga.js b/frontend/javascripts/oxalis/model/sagas/root_saga.js index d164e9f201..070a349a45 100644 --- a/frontend/javascripts/oxalis/model/sagas/root_saga.js +++ b/frontend/javascripts/oxalis/model/sagas/root_saga.js @@ -58,7 +58,11 @@ function* restartableSaga(): Saga<void> { console.error("The sagas crashed because of the following error:", err); if (process.env.BABEL_ENV !== "test") { ErrorHandling.notify(err, {}); - toggleErrorHighlighting(true); + // Hide potentially old error highlighting which mentions a retry mechanism. + toggleErrorHighlighting(false); + // Show error highlighting which mentions the permanent error.
+ toggleErrorHighlighting(true, true); + alert(`\ Internal error. Please reload the page to avoid losing data. diff --git a/frontend/javascripts/oxalis/model/sagas/save_saga.js b/frontend/javascripts/oxalis/model/sagas/save_saga.js index 753abec55b..32ce320bfb 100644 --- a/frontend/javascripts/oxalis/model/sagas/save_saga.js +++ b/frontend/javascripts/oxalis/model/sagas/save_saga.js @@ -6,7 +6,7 @@ import { type AddBucketToUndoAction, type FinishAnnotationStrokeAction, type ImportVolumeTracingAction, - type MaybeBucketLoadedPromise, + type MaybeUnmergedBucketLoadedPromise, type UpdateSegmentAction, VolumeTracingSaveRelevantActions, type InitializeVolumeTracingAction, @@ -17,6 +17,7 @@ import { setUserBoundingBoxesAction, type UserBoundingBoxAction, } from "oxalis/model/actions/annotation_actions"; +import { type BucketDataArray } from "oxalis/model/bucket_data_handling/bucket"; import { FlycamActions } from "oxalis/model/actions/flycam_actions"; import { PUSH_THROTTLE_TIME, @@ -69,19 +70,18 @@ import { take, } from "oxalis/model/sagas/effect-generators"; import { - bucketsAlreadyInUndoState, - type BucketDataArray, -} from "oxalis/model/bucket_data_handling/bucket"; -import { createWorker } from "oxalis/workers/comlink_wrapper"; + compressTypedArray, + decompressToTypedArray, +} from "oxalis/model/helpers/bucket_compression"; import { diffSkeletonTracing } from "oxalis/model/sagas/skeletontracing_saga"; import { diffVolumeTracing } from "oxalis/model/sagas/volumetracing_saga"; import { doWithToken } from "admin/admin_rest_api"; +import { getResolutionInfo } from "oxalis/model/accessors/dataset_accessor"; import { getVolumeTracingById, getVolumeTracingByLayerName, getVolumeTracings, } from "oxalis/model/accessors/volumetracing_accessor"; -import { getResolutionInfo } from "oxalis/model/accessors/dataset_accessor"; import { globalPositionToBucketPosition } from "oxalis/model/helpers/position_converter"; import { maybeGetSomeTracing, selectTracing } from "oxalis/model/accessors/tracing_accessor"; import { selectQueue } from "oxalis/model/accessors/save_accessor"; @@ -93,14 +93,17 @@ import Request, { type RequestOptionsWithData } from "libs/request"; import Toast from "libs/toast"; import compactSaveQueue from "oxalis/model/helpers/compaction/compact_save_queue"; import compactUpdateActions from "oxalis/model/helpers/compaction/compact_update_actions"; -import compressLz4Block from "oxalis/workers/byte_array_lz4_compression.worker"; import createProgressCallback from "libs/progress_callback"; import messages from "messages"; import window, { alert, document, location } from "libs/window"; import { enforceSkeletonTracing } from "../accessors/skeletontracing_accessor"; -const byteArrayToLz4Array = createWorker(compressLz4Block); +// This function is needed so that Flow is satisfied +// with how a mere promise is awaited within a saga. 
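+// For example, applyAndGetRevertingVolumeBatch (further below) awaits a stored compression promise through a typed call effect instead of yielding the bare promise, which Flow could not type: +// +// const compressedBackendData = yield* call(unpackPromise, compressedBackendDataPromise);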
+function unpackPromise<T>(p: Promise<T>): Promise<T> { + return p; +} const UndoRedoRelevantBoundingBoxActions = AllUserBoundingBoxActions.filter( action => action !== "SET_USER_BOUNDING_BOXES", ); @@ -108,9 +111,13 @@ type UndoBucket = { zoomedBucketAddress: Vector4, - data: Uint8Array, - backendData?: Uint8Array, - maybeBucketLoadedPromise: MaybeBucketLoadedPromise, + + // The following arrays are Uint8Array due to the compression + compressedData: Uint8Array, + compressedBackendData?: Promise<Uint8Array>, + + maybeUnmergedBucketLoadedPromise: MaybeUnmergedBucketLoadedPromise, + pendingOperations: Array<(BucketDataArray) => void>, }; type VolumeUndoBuckets = Array<UndoBucket>; type VolumeAnnotationBatch = { @@ -255,25 +262,36 @@ export function* collectUndoStates(): Saga<void> { const { zoomedBucketAddress, bucketData, - maybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise, + pendingOperations, tracingId, } = addBucketToUndoAction; + // The bucket's (old) state should be added to the undo + // stack so that we can revert to its previous version. + // bucketData is compressed asynchronously, which is why + // the corresponding "task" is added to `pendingCompressions`. pendingCompressions.push( yield* fork( compressBucketAndAddToList, zoomedBucketAddress, bucketData, - maybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise, + pendingOperations, volumeInfoById[tracingId].currentVolumeUndoBuckets, ), ); } else if (finishAnnotationStrokeAction) { + // FINISH_ANNOTATION_STROKE was dispatched which marks the end + // of a volume transaction. + // All compression tasks (see `pendingCompressions`) need to be + // awaited to add the proper entry to the undo stack. shouldClearRedoState = true; const activeVolumeTracing = yield* select(state => getVolumeTracingById(state.tracing, finishAnnotationStrokeAction.tracingId), ); - yield* join([...pendingCompressions]); - bucketsAlreadyInUndoState.clear(); + yield* join(pendingCompressions); + pendingCompressions = []; + const volumeInfo = volumeInfoById[activeVolumeTracing.tracingId]; undoStack.push({ type: "volume", @@ -286,7 +304,6 @@ // The SegmentMap is immutable. So, no need to copy. volumeInfo.prevSegments = activeVolumeTracing.segments; volumeInfo.currentVolumeUndoBuckets = []; - pendingCompressions = []; } else if (userBoundingBoxAction) { const boundingBoxUndoState = getBoundingBoxToUndoState( userBoundingBoxAction, @@ -398,37 +415,28 @@ function getBoundingBoxToUndoState( function* compressBucketAndAddToList( zoomedBucketAddress: Vector4, bucketData: BucketDataArray, - maybeBucketLoadedPromise: MaybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise: MaybeUnmergedBucketLoadedPromise, + pendingOperations: Array<(BucketDataArray) => void>, undoBucketList: VolumeUndoBuckets, ): Saga<void> { // The given bucket data is compressed, wrapped into an UndoBucket instance // and appended to the passed VolumeAnnotationBatch. // If backend data is being downloaded (MaybeUnmergedBucketLoadedPromise exists), // the backend data will also be compressed and attached to the UndoBucket.
- const bucketDataAsByteArray = new Uint8Array( - bucketData.buffer, - bucketData.byteOffset, - bucketData.byteLength, - ); - const compressedBucketData = yield* call(byteArrayToLz4Array, bucketDataAsByteArray, true); - if (compressedBucketData != null) { + const compressedData = yield* call(compressTypedArray, bucketData); + if (compressedData != null) { const volumeUndoPart: UndoBucket = { zoomedBucketAddress, - data: compressedBucketData, - maybeBucketLoadedPromise, + compressedData, + maybeUnmergedBucketLoadedPromise, + pendingOperations: pendingOperations.slice(), }; - if (maybeBucketLoadedPromise != null) { - maybeBucketLoadedPromise.then(async backendBucketData => { + if (maybeUnmergedBucketLoadedPromise != null) { + maybeUnmergedBucketLoadedPromise.then(backendBucketData => { // Once the backend data is fetched, do not directly merge it with the already saved undo data // as this operation is only needed when the volume action is undone. Additionally, merging is more // expensive than saving the backend data. Thus the data is only merged upon an undo action / when it is needed. - const backendDataAsByteArray = new Uint8Array( - backendBucketData.buffer, - backendBucketData.byteOffset, - backendBucketData.byteLength, - ); - const compressedBackendData = await byteArrayToLz4Array(backendDataAsByteArray, true); - volumeUndoPart.backendData = compressedBackendData; + volumeUndoPart.compressedBackendData = compressTypedArray(backendBucketData); }); } undoBucketList.push(volumeUndoPart); @@ -522,9 +530,17 @@ function* applyStateOfStack( function mergeDataWithBackendDataInPlace( originalData: BucketDataArray, backendData: BucketDataArray, + pendingOperations: Array<(BucketDataArray) => void>, ) { - for (let i = 0; i < originalData.length; ++i) { - originalData[i] = originalData[i] || backendData[i]; + if (originalData.length !== backendData.length) { + throw new Error("Cannot merge data arrays with differing lengths"); + } + + // Transfer the backend data to originalData + originalData.set(backendData); + + for (const op of pendingOperations) { + op(originalData); } } @@ -543,10 +559,10 @@ function* applyAndGetRevertingVolumeBatch( for (const volumeUndoBucket of volumeAnnotationBatch.buckets) { const { zoomedBucketAddress, - data: compressedBucketData, - backendData: compressedBackendData, + compressedData: compressedBucketData, + compressedBackendData: compressedBackendDataPromise, } = volumeUndoBucket; - let { maybeBucketLoadedPromise } = volumeUndoBucket; + let { maybeUnmergedBucketLoadedPromise } = volumeUndoBucket; const bucket = cube.getOrCreateBucket(zoomedBucketAddress); if (bucket.type === "null") { continue; } // Prepare a snapshot of the bucket's current data so that it can be // saved in a VolumeUndoState. let bucketData = null; + const currentPendingOperations = bucket.pendingOperations.slice(); if (bucket.hasData()) { // The bucket's data is currently available. bucketData = bucket.getData(); - if (compressedBackendData != null) { + if (compressedBackendDataPromise != null) { // If the backend data for the bucket has been fetched in the meantime, - // we can first merge the data with the current data and then add this to the undo batch.
- const decompressedBackendData = yield* call( - byteArrayToLz4Array, - compressedBackendData, - false, - ); - if (decompressedBackendData) { - mergeDataWithBackendDataInPlace(bucketData, decompressedBackendData); - } - maybeBucketLoadedPromise = null; + // the previous getData() call already returned the newest (merged) data. + // There should be no need to await the data from the backend. + maybeUnmergedBucketLoadedPromise = null; } } else { // The bucket's data is not available, since it was gc'ed in the meantime (which // means its state must have been persisted to the server). Thus, it's enough to // persist an essentially empty data array (which is created by getOrCreateData) - // and passing maybeBucketLoadedPromise around so that + // and passing maybeUnmergedBucketLoadedPromise around so that // the back-end data is fetched upon undo/redo. - bucketData = bucket.getOrCreateData().data; - maybeBucketLoadedPromise = bucket.maybeBucketLoadedPromise; + bucketData = bucket.getOrCreateData(); + maybeUnmergedBucketLoadedPromise = bucket.maybeUnmergedBucketLoadedPromise; } // Append the compressed snapshot to allCompressedBucketsOfCurrentState. @@ -586,34 +596,42 @@ function* applyAndGetRevertingVolumeBatch( compressBucketAndAddToList, zoomedBucketAddress, bucketData, - maybeBucketLoadedPromise, + maybeUnmergedBucketLoadedPromise, + currentPendingOperations, allCompressedBucketsOfCurrentState, ); // Decompress the bucket data which should be applied. let decompressedBucketData = null; - if (compressedBackendData != null) { + let newPendingOperations = volumeUndoBucket.pendingOperations; + + if (compressedBackendDataPromise != null) { + const compressedBackendData = yield* call(unpackPromise, compressedBackendDataPromise); + let decompressedBackendData; [decompressedBucketData, decompressedBackendData] = yield _all([ - _call(byteArrayToLz4Array, compressedBucketData, false), - _call(byteArrayToLz4Array, compressedBackendData, false), + _call(decompressToTypedArray, bucket, compressedBucketData), + _call(decompressToTypedArray, bucket, compressedBackendData), ]); - if (decompressedBucketData && decompressedBackendData) { - mergeDataWithBackendDataInPlace(decompressedBucketData, decompressedBackendData); - } + + mergeDataWithBackendDataInPlace( + decompressedBucketData, + decompressedBackendData, + volumeUndoBucket.pendingOperations, + ); + newPendingOperations = []; } else { - decompressedBucketData = yield* call(byteArrayToLz4Array, compressedBucketData, false); - } - if (decompressedBucketData) { - // Set the new bucket data to add the bucket directly to the pushqueue. - cube.setBucketData(zoomedBucketAddress, decompressedBucketData); + decompressedBucketData = yield* call(decompressToTypedArray, bucket, compressedBucketData); } + + // Set the new bucket data to add the bucket directly to the pushqueue. + cube.setBucketData(zoomedBucketAddress, decompressedBucketData, newPendingOperations); } - // The SegmentMap is immutable. So, no need to copy. const activeVolumeTracing = yield* select(state => getVolumeTracingById(state.tracing, volumeAnnotationBatch.tracingId), ); + // The SegmentMap is immutable. So, no need to copy. 
const currentSegments = activeVolumeTracing.segments; yield* put(setSegmentsActions(volumeAnnotationBatch.segments, volumeAnnotationBatch.tracingId)); @@ -708,6 +726,7 @@ export function* sendRequestToServer( let retryCount = 0; while (true) { + let exceptionDuringMarkBucketsAsNotDirty = false; try { const startTime = Date.now(); yield* call( @@ -735,11 +754,21 @@ yield* put(setLastSaveTimestampAction(tracingType, tracingId)); yield* put(shiftSaveQueueAction(saveQueue.length, tracingType, tracingId)); if (tracingType === "volume") { - yield _call(markBucketsAsNotDirty, compactedSaveQueue, tracingId); + try { + yield _call(markBucketsAsNotDirty, compactedSaveQueue, tracingId); + } catch (error) { + // If markBucketsAsNotDirty fails for some reason, wk cannot recover from this error. + console.warn("Error when marking buckets as clean. No retry possible. Error:", error); + exceptionDuringMarkBucketsAsNotDirty = true; + throw error; + } } yield* call(toggleErrorHighlighting, false); return; } catch (error) { + if (exceptionDuringMarkBucketsAsNotDirty) { + throw error; + } console.warn("Error during saving. Will retry. Error:", error); const controlMode = yield* select(state => state.temporaryConfiguration.controlMode); const isViewOrSandboxMode = @@ -801,14 +830,15 @@ function* markBucketsAsNotDirty(saveQueue: Array<SaveQueueEntry>, tracingId: string) { } } -export function toggleErrorHighlighting(state: boolean): void { +export function toggleErrorHighlighting(state: boolean, permanentError: boolean = false): void { if (document.body != null) { document.body.classList.toggle("save-error", state); } + const message = permanentError ? messages["save.failed.permanent"] : messages["save.failed"]; if (state) { - Toast.error(messages["save.failed"], { sticky: true }); + Toast.error(message, { sticky: true }); } else { - Toast.close(messages["save.failed"]); + Toast.close(message); } } diff --git a/frontend/javascripts/oxalis/model/sagas/volumetracing_saga.js b/frontend/javascripts/oxalis/model/sagas/volumetracing_saga.js index 10cb3325ad..cd9c48ca7b 100644 --- a/frontend/javascripts/oxalis/model/sagas/volumetracing_saga.js +++ b/frontend/javascripts/oxalis/model/sagas/volumetracing_saga.js @@ -67,6 +67,7 @@ import { isBrushTool, isTraceTool, } from "oxalis/model/accessors/tool_accessor"; +import { markVolumeTransactionEnd } from "oxalis/model/bucket_data_handling/bucket"; import { setToolAction, setBusyBlockingInfoAction } from "oxalis/model/actions/ui_actions"; import { updateTemporarySettingAction, @@ -95,7 +96,7 @@ import Dimensions, { type DimensionMap } from "oxalis/model/dimensions"; import Model from "oxalis/model"; import Toast from "libs/toast"; import * as Utils from "libs/utils"; -import VolumeLayer from "oxalis/model/volumetracing/volumelayer"; +import VolumeLayer, { getFast3DCoordinateHelper } from "oxalis/model/volumetracing/volumelayer"; import createProgressCallback from "libs/progress_callback"; import getSceneController from "oxalis/controller/scene_controller_provider"; import inferSegmentInViewport, { @@ -137,6 +138,11 @@ export function* editVolumeLayerAsync(): Saga<void> { while (allowUpdate) { const startEditingAction = yield* take("START_EDITING"); + const busyBlockingInfo = yield* select(state => state.uiInformation.busyBlockingInfo); + if (busyBlockingInfo.isBusy) { + console.warn(`Ignoring brush request (reason: ${busyBlockingInfo.reason || "null"})`); + continue; + } if (startEditingAction.type !== "START_EDITING") { throw new Error("Unexpected action.
Satisfy flow."); } @@ -153,6 +159,11 @@ if (isZoomStepTooHighForAnnotating) { continue; } + if (activeTool === AnnotationToolEnum.MOVE) { + // This warning can be helpful when debugging tests. + console.warn("Volume actions are ignored since current tool is the move tool."); + continue; + } const maybeLabeledResolutionWithZoomStep = yield* select(state => getRenderableResolutionForSegmentationTracing(state, volumeTracing), ); @@ -398,6 +409,7 @@ function* labelWithVoxelBuffer2D( // thirdDimensionOfSlice needs to be provided in global coordinates const thirdDimensionOfSlice = topLeft3DCoord[dimensionIndices[2]] * labeledResolution[dimensionIndices[2]]; + applyLabeledVoxelMapToAllMissingResolutions( currentLabeledVoxelMap, labeledZoomStep, @@ -464,7 +476,6 @@ function* copySegmentationLayer(action: CopySegmentationLayerAction): Saga<void> // Do not overwrite already labelled voxels if (currentLabelValue === 0) { - cube.labelVoxelInResolution(voxelTargetAddress, templateLabelValue, labeledZoomStep); const bucket = cube.getOrCreateBucket( cube.positionToZoomedAddress(voxelTargetAddress, labeledZoomStep), ); @@ -505,6 +516,19 @@ ); } } + + const thirdDim = dimensionIndices[2]; + applyVoxelMap( + labeledVoxelMapOfCopiedVoxel, + cube, + activeCellId, + getFast3DCoordinateHelper(activeViewport, z), + 1, + thirdDim, + false, + 0, + ); + applyLabeledVoxelMapToAllMissingResolutions( labeledVoxelMapOfCopiedVoxel, labeledZoomStep, @@ -670,7 +694,7 @@ function applyLabeledVoxelMapToAllMissingResolutions( segmentationCube: DataCube, cellId: number, thirdDimensionOfSlice: number, // this value is specified in global (mag1) coords - // if shouldOverwrite is false, a voxel is only overwritten if + // If shouldOverwrite is false, a voxel is only overwritten if // its old value is equal to overwritableValue. shouldOverwrite: boolean, overwritableValue: number = 0, @@ -964,6 +988,17 @@ function* maintainContourGeometry(): Saga<void> { } } +function* maintainVolumeTransactionEnds(): Saga<void> { + // When FINISH_ANNOTATION_STROKE is dispatched, the current volume + // transaction has ended. All following UI actions which + // mutate buckets should operate on a fresh `bucketsAlreadyInUndoState` set. + // Therefore, `markVolumeTransactionEnd` should be called immediately + // when FINISH_ANNOTATION_STROKE is dispatched. There should be no waiting + // on other operations (such as pending compressions), as was previously the + // case. Otherwise, different undo states would "bleed" into each other.
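+ // + // Illustrative failure mode (hypothetical timeline) if the clearing waited for pending compressions, as the old code did: + // 1. Stroke A ends -> FINISH_ANNOTATION_STROKE is dispatched, but A's bucket snapshots are still being compressed. + // 2. Stroke B starts and touches one of the same buckets. + // 3. That bucket is still in bucketsAlreadyInUndoState, so B's old state is never snapshotted and undoing B incorrectly jumps back to A's state.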
+ yield _takeEvery("FINISH_ANNOTATION_STROKE", markVolumeTransactionEnd); +} + export default [ editVolumeLayerAsync, ensureToolIsAllowedInResolution, @@ -972,4 +1007,5 @@ export default [ maintainSegmentsMap, maintainHoveredSegmentId, maintainContourGeometry, + maintainVolumeTransactionEnds, ]; diff --git a/frontend/javascripts/oxalis/model/volumetracing/volume_annotation_sampling.js b/frontend/javascripts/oxalis/model/volumetracing/volume_annotation_sampling.js index 48e950c754..f0588b2703 100644 --- a/frontend/javascripts/oxalis/model/volumetracing/volume_annotation_sampling.js +++ b/frontend/javascripts/oxalis/model/volumetracing/volume_annotation_sampling.js @@ -285,7 +285,7 @@ export function applyVoxelMap( get3DAddress: (number, number, Vector3 | Float32Array) => void, numberOfSlicesToApply: number, thirdDimensionIndex: 0 | 1 | 2, - // if shouldOverwrite is false, a voxel is only overwritten if + // If shouldOverwrite is false, a voxel is only overwritten if // its old value is equal to overwritableValue. shouldOverwrite: boolean = true, overwritableValue: number = 0, @@ -294,15 +294,14 @@ export function applyVoxelMap( if (bucket.type === "null") { return; } - bucket.markAndAddBucketForUndo(); + bucket.startDataMutation(); } function postprocessBucket(bucket: Bucket) { if (bucket.type === "null") { return; } - dataCube.pushQueue.insert(bucket); - bucket.trigger("bucketLabeled"); + bucket.endDataMutation(); } for (const [labeledBucketZoomedAddress, voxelMap] of labeledVoxelMap) { @@ -332,24 +331,16 @@ export function applyVoxelMap( if (bucket.type === "null") { continue; } - const { data } = bucket.getOrCreateData(); - for (let firstDim = 0; firstDim < constants.BUCKET_WIDTH; firstDim++) { - for (let secondDim = 0; secondDim < constants.BUCKET_WIDTH; secondDim++) { - if (voxelMap[firstDim * constants.BUCKET_WIDTH + secondDim] === 1) { - get3DAddress(firstDim, secondDim, out); - const voxelToLabel = out; - voxelToLabel[thirdDimensionIndex] = - (voxelToLabel[thirdDimensionIndex] + sliceCount) % constants.BUCKET_WIDTH; - // The voxelToLabel is already within the bucket and in the correct resolution. 
- const voxelAddress = dataCube.getVoxelIndexByVoxelOffset(voxelToLabel); - const currentSegmentId = data[voxelAddress]; - if (shouldOverwrite || (!shouldOverwrite && currentSegmentId === overwritableValue)) { - data[voxelAddress] = cellId; - } - } - } - } + bucket.applyVoxelMap( + voxelMap, + cellId, + get3DAddress, + sliceCount, + thirdDimensionIndex, + shouldOverwrite, + overwritableValue, + ); } // Post-processing: add to pushQueue and notify about labeling postprocessBucket(bucket); diff --git a/frontend/javascripts/oxalis/model/volumetracing/volumelayer.js b/frontend/javascripts/oxalis/model/volumetracing/volumelayer.js index f29cc19c93..a56d67b071 100644 --- a/frontend/javascripts/oxalis/model/volumetracing/volumelayer.js +++ b/frontend/javascripts/oxalis/model/volumetracing/volumelayer.js @@ -484,29 +484,7 @@ class VolumeLayer { coordY: number, out: Vector3 | Float32Array, ) => void { - switch (this.plane) { - case OrthoViews.PLANE_XY: - return (coordX, coordY, out) => { - out[0] = coordX; - out[1] = coordY; - out[2] = this.thirdDimensionValue; - }; - case OrthoViews.PLANE_YZ: - return (coordX, coordY, out) => { - out[0] = this.thirdDimensionValue; - out[1] = coordY; - out[2] = coordX; - }; - case OrthoViews.PLANE_XZ: - return (coordX, coordY, out) => { - out[0] = coordX; - out[1] = this.thirdDimensionValue; - out[2] = coordY; - }; - default: { - throw new Error("Unknown plane id"); - } - } + return getFast3DCoordinateHelper(this.plane, this.thirdDimensionValue); } getUnzoomedCentroid(): Vector3 { @@ -535,4 +513,33 @@ class VolumeLayer { } } +export function getFast3DCoordinateHelper( + plane: OrthoView, + thirdDimensionValue: number, +): (coordX: number, coordY: number, out: Vector3 | Float32Array) => void { + switch (plane) { + case OrthoViews.PLANE_XY: + return (coordX, coordY, out) => { + out[0] = coordX; + out[1] = coordY; + out[2] = thirdDimensionValue; + }; + case OrthoViews.PLANE_YZ: + return (coordX, coordY, out) => { + out[0] = thirdDimensionValue; + out[1] = coordY; + out[2] = coordX; + }; + case OrthoViews.PLANE_XZ: + return (coordX, coordY, out) => { + out[0] = coordX; + out[1] = thirdDimensionValue; + out[2] = coordY; + }; + default: { + throw new Error("Unknown plane id"); + } + } +} + export default VolumeLayer; diff --git a/frontend/javascripts/oxalis/workers/byte_array_lz4_compression.worker.js b/frontend/javascripts/oxalis/workers/byte_array_lz4_compression.worker.js index ddf47b9624..f47da522bd 100644 --- a/frontend/javascripts/oxalis/workers/byte_array_lz4_compression.worker.js +++ b/frontend/javascripts/oxalis/workers/byte_array_lz4_compression.worker.js @@ -10,4 +10,10 @@ function compressLz4Block(data: Uint8Array, compress: boolean): Uint8Array { return lz4.decompress(data); } +// This function is only exposed for slow_byte_array_lz4_compression.worker.js +// which is only used for some automated tests. 
+export function __compressLz4BlockHelper(data: Uint8Array, compress: boolean): Uint8Array { + return compressLz4Block(data, compress); +} + export default expose(compressLz4Block); diff --git a/frontend/javascripts/oxalis/workers/comlink_wrapper.js b/frontend/javascripts/oxalis/workers/comlink_wrapper.js index 0a13f89231..594c0cc313 100644 --- a/frontend/javascripts/oxalis/workers/comlink_wrapper.js +++ b/frontend/javascripts/oxalis/workers/comlink_wrapper.js @@ -55,12 +55,17 @@ export function createWorker<T>(WorkerClass: UseCreateWorkerToUseMe<T>): T { } export function expose<T>(fn: T): UseCreateWorkerToUseMe<T> { - // In a node context (e.g., when executing tests), we don't create web workers if (_expose != null) { _expose(fn, self); } - // $FlowExpectedError[incompatible-return] - return fn; + + // In a node context (e.g., when executing tests), we don't create web workers. + // Therefore, we simply return the passed function, with the only change being + // that the return value is wrapped in a promise. That way, the worker and + // non-worker versions both return promises. + // $FlowExpectedError[not-a-function] + // $FlowExpectedError[prop-missing] + return (...args) => Promise.resolve(fn(...args)); } export function pretendPromise<T>(t: T): Promise<T> { diff --git a/frontend/javascripts/oxalis/workers/slow_byte_array_lz4_compression.worker.js b/frontend/javascripts/oxalis/workers/slow_byte_array_lz4_compression.worker.js new file mode 100644 index 0000000000..e5ba7ad791 --- /dev/null +++ b/frontend/javascripts/oxalis/workers/slow_byte_array_lz4_compression.worker.js @@ -0,0 +1,24 @@ +// @flow +// NOTE: This is a mirror of byte_array_lz4_compression.worker.js +// and is ONLY meant for mocking during tests. This implementation +// makes it possible to introduce an artificial delay for compression/decompression. + +import { __compressLz4BlockHelper } from "oxalis/workers/byte_array_lz4_compression.worker"; +import { sleep } from "libs/utils"; + +import { expose } from "./comlink_wrapper"; + +let isSleepEnabled = false; + +export function setSlowCompression(isEnabled: boolean) { + isSleepEnabled = isEnabled; +} + +async function slowCompressLz4Block(data: Uint8Array, compress: boolean): Promise<Uint8Array> { + if (isSleepEnabled) { + await sleep(400); + } + return __compressLz4BlockHelper(data, compress); +} + +export default expose(slowCompressLz4Block); diff --git a/frontend/javascripts/test/helpers/apiHelpers.js b/frontend/javascripts/test/helpers/apiHelpers.js index 285c3444a7..6b753c588f 100644 --- a/frontend/javascripts/test/helpers/apiHelpers.js +++ b/frontend/javascripts/test/helpers/apiHelpers.js @@ -91,6 +91,16 @@ mockRequire("libs/error_handling", ErrorHandling); mockRequire("app", app); mockRequire("oxalis/model/helpers/proto_helpers", protoHelpers); + +// Replace byte_array_lz4_compression.worker with a mock which supports +// intentional slowness.
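+// A test can then make the (de)compression artificially slow. Hypothetical usage sketch (setSlowCompression is re-exported onto t.context below; the mocked worker sleeps ~400 ms per call while enabled): +// +// t.context.setSlowCompression(true); +// // ... annotate and undo while compression is still pending ... +// t.context.setSlowCompression(false);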
diff --git a/frontend/javascripts/test/helpers/apiHelpers.js b/frontend/javascripts/test/helpers/apiHelpers.js
index 285c3444a7..6b753c588f 100644
--- a/frontend/javascripts/test/helpers/apiHelpers.js
+++ b/frontend/javascripts/test/helpers/apiHelpers.js
@@ -91,6 +91,16 @@
 mockRequire("libs/error_handling", ErrorHandling);
 mockRequire("app", app);
 mockRequire("oxalis/model/helpers/proto_helpers", protoHelpers);
 
+// Replace byte_array_lz4_compression.worker with a mock which supports
+// intentional slowness.
+mockRequire(
+  "oxalis/workers/byte_array_lz4_compression.worker",
+  "oxalis/workers/slow_byte_array_lz4_compression.worker",
+);
+const { setSlowCompression } = mockRequire.reRequire(
+  "oxalis/workers/byte_array_lz4_compression.worker",
+);
+
 // Avoid node caching and make sure all mockRequires are applied
 const UrlManager = mockRequire.reRequire("oxalis/controller/url_manager").default;
 const wkstoreAdapter = mockRequire.reRequire("oxalis/model/bucket_data_handling/wkstore_adapter");
@@ -139,6 +149,7 @@ export function __setupOxalis(t, mode, apiVersion) {
   };
   t.context.model = Model;
   t.context.mocks = { Request };
+  t.context.setSlowCompression = setSlowCompression;
 
   const webknossos = new OxalisApi(Model);
   const organizationName = "Connectomics Department";
diff --git a/frontend/javascripts/test/model/binary/cube.spec.js b/frontend/javascripts/test/model/binary/cube.spec.js
index c0bfbc2a51..7f8918b79c 100644
--- a/frontend/javascripts/test/model/binary/cube.spec.js
+++ b/frontend/javascripts/test/model/binary/cube.spec.js
@@ -1,16 +1,14 @@
-/*
- * cube.spec.js
- * @flow
- */
+// @flow
 import _ from "lodash";
+import { ResolutionInfo } from "oxalis/model/accessors/dataset_accessor";
 import { tracing as skeletontracingServerObject } from "test/fixtures/skeletontracing_server_objects";
+import { sleep } from "libs/utils";
 import anyTest, { type TestInterface } from "ava";
 import datasetServerObject from "test/fixtures/dataset_server_object";
 import mockRequire from "mock-require";
 import runAsync from "test/helpers/run-async";
 import sinon from "sinon";
-import { ResolutionInfo } from "oxalis/model/accessors/dataset_accessor";
 
 mockRequire.stopAll();
 
@@ -58,10 +56,33 @@ test.beforeEach(t => {
   };
   const resolutionInfo = new ResolutionInfo(mockedLayer.resolutions);
   const cube = new Cube([100, 100, 100], resolutionInfo, "uint32", false);
-  const pullQueue = {
-    add: sinon.stub(),
-    pull: sinon.stub(),
-  };
+
+  class PullQueueMock {
+    queue = [];
+    processedQueue = [];
+    add(item) {
+      this.queue.push(item);
+    }
+
+    async pull() {
+      // If the pull happens synchronously, the bucketLoaded promise
+      // in Bucket.ensureLoaded() is created too late. Therefore,
+      // we put a small sleep in here (this mirrors the behavior when
+      // actually downloading data).
+ await sleep(10); + for (const item of this.queue) { + const bucket = cube.getBucket(item.bucket, true); + if (bucket.type === "data") { + bucket.markAsPulled(); + bucket.receiveData(new Uint8Array(4 * 32 ** 3)); + } + } + this.processedQueue = this.queue; + this.queue = []; + } + } + + const pullQueue = new PullQueueMock(); const pushQueue = { insert: sinon.stub(), push: sinon.stub(), @@ -101,51 +122,41 @@ test("GetBucket should only create one bucket on getOrCreateBucket()", t => { test("Voxel Labeling should request buckets when temporal buckets are created", t => { const { cube, pullQueue } = t.context; - cube.labelVoxelInResolution([1, 1, 1], 42, 0); - t.plan(2); + cube._labelVoxelInResolution_DEPRECATED([1, 1, 1], 42, 0); + + t.plan(1); return runAsync([ () => { - t.true( - pullQueue.add.calledWith({ - bucket: [0, 0, 0, 0], - priority: -1, - }), - ); - t.true(pullQueue.pull.called); + t.deepEqual(pullQueue.processedQueue[0], { + bucket: [0, 0, 0, 0], + priority: -1, + }); }, ]); }); -test("Voxel Labeling should push buckets after they were pulled", t => { +test("Voxel Labeling should push buckets after they were pulled", async t => { const { cube, pushQueue } = t.context; - cube.labelVoxelInResolution([1, 1, 1], 42, 0); + await cube._labelVoxelInResolution_DEPRECATED([1, 1, 1], 42, 0); + + t.plan(1); + const bucket = cube.getBucket([0, 0, 0, 0]); - t.plan(3); - let bucket; return runAsync([ - () => { - t.is(pushQueue.insert.called, false); - }, - () => { - bucket = cube.getBucket([0, 0, 0, 0]); - bucket.markAsPulled(); - bucket.receiveData(new Uint8Array(32 * 32 * 32 * 3)); - t.pass(); - }, () => { t.true(pushQueue.insert.calledWith(bucket)); }, ]); }); -test("Voxel Labeling should push buckets immediately if they are pulled already", t => { +test("Voxel Labeling should push buckets immediately if they are pulled already", async t => { const { cube, pushQueue } = t.context; const bucket = cube.getOrCreateBucket([0, 0, 0, 0]); bucket.markAsPulled(); - bucket.receiveData(new Uint8Array(32 * 32 * 32 * 3)); + bucket.receiveData(new Uint8Array(4 * 32 ** 3)); - cube.labelVoxelInResolution([0, 0, 0], 42, 0); + await cube._labelVoxelInResolution_DEPRECATED([0, 0, 0], 42, 0); t.plan(1); return runAsync([ @@ -155,12 +166,12 @@ test("Voxel Labeling should push buckets immediately if they are pulled already" ]); }); -test("Voxel Labeling should only create one temporal bucket", t => { +test("Voxel Labeling should only instantiate one bucket when labelling the same bucket twice", async t => { const { cube } = t.context; - // Creates temporal bucket - cube.labelVoxelInResolution([0, 0, 0], 42, 0); - // Uses existing temporal bucket - cube.labelVoxelInResolution([1, 0, 0], 43, 0); + // Creates bucket + await cube._labelVoxelInResolution_DEPRECATED([0, 0, 0], 42, 0); + // Uses existing bucket + await cube._labelVoxelInResolution_DEPRECATED([1, 0, 0], 43, 0); const data = cube.getBucket([0, 0, 0, 0]).getData(); @@ -168,38 +179,18 @@ test("Voxel Labeling should only create one temporal bucket", t => { t.is(data[1], 43); }); -test("Voxel Labeling should merge incoming buckets", t => { - const { cube } = t.context; - const bucket = cube.getOrCreateBucket([0, 0, 0, 0]); - - const oldData = new Uint32Array(32 * 32 * 32); - // First voxel should be overwritten by new data - oldData[0] = 12345; - // Second voxel should be merged into new data - oldData[1] = 67890; - - cube.labelVoxelInResolution([0, 0, 0], 424242, 0); - - bucket.markAsPulled(); - bucket.receiveData(new Uint8Array(oldData.buffer)); - - 
const newData = bucket.getData(); - t.is(newData[0], 424242); - t.is(newData[1], oldData[1]); -}); - -test("getDataValue() should return the raw value without a mapping", t => { +test("getDataValue() should return the raw value without a mapping", async t => { const { cube } = t.context; const value = 1 * (1 << 16) + 2 * (1 << 8) + 3; - cube.labelVoxelInResolution([0, 0, 0], value, 0); + await cube._labelVoxelInResolution_DEPRECATED([0, 0, 0], value, 0); t.is(cube.getDataValue([0, 0, 0]), value); }); -test("getDataValue() should return the mapping value if available", t => { +test("getDataValue() should return the mapping value if available", async t => { const { cube } = t.context; - cube.labelVoxelInResolution([0, 0, 0], 42, 0); - cube.labelVoxelInResolution([1, 1, 1], 43, 0); + await cube._labelVoxelInResolution_DEPRECATED([0, 0, 0], 42, 0); + await cube._labelVoxelInResolution_DEPRECATED([1, 1, 1], 43, 0); const mapping = []; mapping[42] = 1; diff --git a/frontend/javascripts/test/model/binary/temporal_bucket_manager.spec.js b/frontend/javascripts/test/model/binary/temporal_bucket_manager.spec.js index b4f861506f..ce162c946a 100644 --- a/frontend/javascripts/test/model/binary/temporal_bucket_manager.spec.js +++ b/frontend/javascripts/test/model/binary/temporal_bucket_manager.spec.js @@ -1,10 +1,8 @@ -// @noflow -import _ from "lodash"; - +// @flow import mockRequire from "mock-require"; import runAsync from "test/helpers/run-async"; import sinon from "sinon"; -import test from "ava"; +import anyTest, { type TestInterface } from "ava"; mockRequire("oxalis/model/sagas/root_saga", function*() { yield; @@ -16,70 +14,88 @@ const TemporalBucketManager = mockRequire.reRequire( "oxalis/model/bucket_data_handling/temporal_bucket_manager", ).default; -const mockedCube = { - isSegmentation: true, -}; +// Ava's recommendation for Flow types +// https://github.com/avajs/ava/blob/master/docs/recipes/flow.md#typing-tcontext +const test: TestInterface<{ + cube: { isSegmentation: boolean, pushQueue: any, pullQueue: any }, + manager: typeof TemporalBucketManager, +}> = (anyTest: any); test.beforeEach(t => { const pullQueue = { add: sinon.stub(), pull: sinon.stub(), }; - const pushQueue = { insert: sinon.stub(), push: sinon.stub(), }; + const mockedCube = { + isSegmentation: true, + pushQueue, + pullQueue, + }; + const manager = new TemporalBucketManager(pullQueue, pushQueue); + t.context.cube = mockedCube; t.context.manager = manager; }); +function fakeLabel(bucket) { + // To simulate some labeling on the bucket's data, + // we simply use the start and end mutation methods + // without any action in between. 
+ bucket.startDataMutation(); + bucket.endDataMutation(); +} + test("Add / Remove should be added when bucket has not been requested", t => { const { manager } = t.context; - const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, mockedCube); - bucket.label(_.noop); + const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, t.context.cube); + fakeLabel(bucket); t.is(manager.getCount(), 1); }); test("Add / Remove should be added when bucket has not been received", t => { const { manager } = t.context; - const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, mockedCube); + const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, t.context.cube); bucket.markAsPulled(); t.is(bucket.needsRequest(), false); - bucket.label(_.noop); + fakeLabel(bucket); t.is(manager.getCount(), 1); }); test("Add / Remove should not be added when bucket has been received", t => { const { manager } = t.context; - const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, mockedCube); + const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, t.context.cube); bucket.markAsPulled(); bucket.receiveData(new Uint8Array(1 << 15)); t.is(bucket.isLoaded(), true); - bucket.label(_.noop); + fakeLabel(bucket); t.is(manager.getCount(), 0); }); test("Add / Remove should be removed once it is loaded", t => { const { manager } = t.context; - const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, mockedCube); - bucket.label(_.noop); + const bucket = new DataBucket("uint8", [0, 0, 0, 0], manager, t.context.cube); + fakeLabel(bucket); bucket.markAsPulled(); bucket.receiveData(new Uint8Array(1 << 15)); t.is(manager.getCount(), 0); }); -function prepareBuckets(manager) { +function prepareBuckets(manager, cube) { // Insert two buckets into manager - const bucket1 = new DataBucket("uint8", [0, 0, 0, 0], manager, mockedCube); - const bucket2 = new DataBucket("uint8", [1, 0, 0, 0], manager, mockedCube); + const bucket1 = new DataBucket("uint8", [0, 0, 0, 0], manager, cube); + const bucket2 = new DataBucket("uint8", [1, 0, 0, 0], manager, cube); for (const bucket of [bucket1, bucket2]) { - bucket.label(_.noop); + bucket.startDataMutation(); + bucket.endDataMutation(); bucket.markAsPulled(); } return { bucket1, bucket2 }; @@ -87,7 +103,7 @@ function prepareBuckets(manager) { test("Make Loaded Promise should be initially unresolved", t => { const { manager } = t.context; - prepareBuckets(manager); + prepareBuckets(manager, t.context.cube); let resolved = false; manager.getAllLoadedPromise().then(() => { resolved = true; @@ -101,7 +117,7 @@ test("Make Loaded Promise should be initially unresolved", t => { test("Make Loaded Promise should be unresolved when only one bucket is loaded", t => { const { manager } = t.context; - const { bucket1 } = prepareBuckets(manager); + const { bucket1 } = prepareBuckets(manager, t.context.cube); let resolved = false; manager.getAllLoadedPromise().then(() => { resolved = true; @@ -117,7 +133,7 @@ test("Make Loaded Promise should be unresolved when only one bucket is loaded", test("Make Loaded Promise should be resolved when both buckets are loaded", t => { const { manager } = t.context; - const { bucket1, bucket2 } = prepareBuckets(manager); + const { bucket1, bucket2 } = prepareBuckets(manager, t.context.cube); let resolved = false; manager.getAllLoadedPromise().then(() => { resolved = true; diff --git a/frontend/javascripts/test/model/volumetracing/volume_annotation_sampling.spec.js b/frontend/javascripts/test/model/volumetracing/volume_annotation_sampling.spec.js index 
dba267af6d..945f1ff888 100644
--- a/frontend/javascripts/test/model/volumetracing/volume_annotation_sampling.spec.js
+++ b/frontend/javascripts/test/model/volumetracing/volume_annotation_sampling.spec.js
@@ -473,7 +473,7 @@ test("A labeledVoxelMap should be applied correctly", t => {
     expectedBucketData[addr] = 1;
   });
   applyVoxelMap(labeledVoxelsMap, cube, 1, get3DAddress, 1, 2, true);
-  const { data: labeledBucketData } = bucket.getOrCreateData();
+  const labeledBucketData = bucket.getOrCreateData();
   for (let firstDim = 0; firstDim < Constants.BUCKET_WIDTH; firstDim++) {
     for (let secondDim = 0; secondDim < Constants.BUCKET_WIDTH; secondDim++) {
       const addr = cube.getVoxelIndex([firstDim, secondDim, 5], 0);
diff --git a/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga.spec.js b/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga.spec.js
index 4da990344c..00705e28b5 100644
--- a/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga.spec.js
+++ b/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga.spec.js
@@ -153,6 +153,7 @@ test("VolumeTracingSaga should create a volume layer (saga test)", t => {
   saga.next();
   expectValueDeepEqual(t, saga.next(true), take("START_EDITING"));
   saga.next(startEditingAction);
+  saga.next({ isBusy: false });
   saga.next(volumeTracing);
   saga.next(OverwriteModeEnum.OVERWRITE_ALL);
   saga.next(AnnotationToolEnum.BRUSH);
@@ -186,6 +187,7 @@ test("VolumeTracingSaga should add values to volume layer (saga test)", t => {
   saga.next();
   expectValueDeepEqual(t, saga.next(true), take("START_EDITING"));
   saga.next(startEditingAction);
+  saga.next({ isBusy: false });
   saga.next(volumeTracing);
   saga.next(OverwriteModeEnum.OVERWRITE_ALL);
   saga.next(AnnotationToolEnum.TRACE);
@@ -225,6 +227,7 @@ test("VolumeTracingSaga should finish a volume layer (saga test)", t => {
   saga.next();
   expectValueDeepEqual(t, saga.next(true), take("START_EDITING"));
   saga.next(startEditingAction);
+  saga.next({ isBusy: false });
   saga.next(volumeTracing);
   saga.next(OverwriteModeEnum.OVERWRITE_ALL);
   saga.next(AnnotationToolEnum.TRACE);
@@ -271,6 +274,7 @@ test("VolumeTracingSaga should finish a volume layer in delete mode (saga test)"
   saga.next();
   expectValueDeepEqual(t, saga.next(true), take("START_EDITING"));
   saga.next(startEditingAction);
+  saga.next({ isBusy: false });
   saga.next({ ...volumeTracing, contourTracingMode: ContourModeEnum.DELETE });
   saga.next(OverwriteModeEnum.OVERWRITE_ALL);
   saga.next(AnnotationToolEnum.TRACE);
@@ -310,3 +314,15 @@
     ),
   );
 });
+
+test("VolumeTracingSaga should ignore brush action when busy (saga test)", t => {
+  const saga = editVolumeLayerAsync();
+  saga.next();
+  saga.next();
+  expectValueDeepEqual(t, saga.next(true), take("START_EDITING"));
+  saga.next(startEditingAction);
+
+  // When isBusy is true, the saga should wait for a new START_EDITING action
+  // (thus, other actions, such as finishLayer, will be ignored).
+  expectValueDeepEqual(t, saga.next({ isBusy: true }), take("START_EDITING"));
+});
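The extra `saga.next({ isBusy: false })` steps above account for a new select effect in `editVolumeLayerAsync`. Roughly, the control flow under test looks like the following sketch; the selector path is an assumption, since the tests only pin down that an object with `isBusy` is selected right after `START_EDITING`:

```js
// Sketch of the busy guard, not the verbatim saga: while webKnossos is busy
// (e.g., with undo/redo or a floodfill), a new brush stroke is ignored and the
// saga simply waits for the next START_EDITING action.
import { take, select } from "redux-saga/effects";

export function* editVolumeLayerAsyncSketch(): Generator<*, *, *> {
  while (true) {
    const startEditingAction = yield take("START_EDITING");
    // busyBlockingInfo path is assumed; the tests only feed { isBusy } here.
    const busyBlockingInfo = yield select(state => state.uiInformation.busyBlockingInfo);
    if (busyBlockingInfo.isBusy) {
      // Ignore this stroke entirely and wait for the next START_EDITING.
      continue;
    }
    // ... use startEditingAction: read volumeTracing, overwrite mode, tool ...
  }
}
```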
diff --git a/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga_integration.spec.js b/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga_integration.spec.js
index 090f3fb957..fdcce1a048 100644
--- a/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga_integration.spec.js
+++ b/frontend/javascripts/test/sagas/volumetracing/volumetracing_saga_integration.spec.js
@@ -1,26 +1,33 @@
 // @flow
 /* eslint-disable no-await-in-loop */
-import test from "ava";
-import mockRequire from "mock-require";
-
 import "test/sagas/saga_integration.mock";
+
+import _ from "lodash";
+
+import {
+  AnnotationToolEnum,
+  ContourModeEnum,
+  FillModeEnum,
+  OrthoViews,
+  OverwriteModeEnum,
+} from "oxalis/constants";
 import {
   __setupOxalis,
   createBucketResponseFunction,
   getFirstVolumeTracingOrFail,
 } from "test/helpers/apiHelpers";
-import { OrthoViews, FillModeEnum, AnnotationToolEnum } from "oxalis/constants";
+import { hasRootSagaCrashed } from "oxalis/model/sagas/root_saga";
 import { restartSagaAction, wkReadyAction } from "oxalis/model/actions/actions";
-import Store from "oxalis/store";
 import { updateUserSettingAction } from "oxalis/model/actions/settings_actions";
-import { hasRootSagaCrashed } from "oxalis/model/sagas/root_saga";
+import Store from "oxalis/store";
+import mockRequire from "mock-require";
+import test from "ava";
+import { V3 } from "libs/mjs";
 
-const { setToolAction } = mockRequire.reRequire("oxalis/model/actions/ui_actions");
-const { setPositionAction, setZoomStepAction } = mockRequire.reRequire(
-  "oxalis/model/actions/flycam_actions",
+const { dispatchUndoAsync, dispatchRedoAsync, discardSaveQueuesAction } = mockRequire.reRequire(
+  "oxalis/model/actions/save_actions",
 );
-
 const {
   setActiveCellAction,
   addToLayerAction,
@@ -28,10 +35,12 @@ const {
   copySegmentationLayerAction,
   startEditingAction,
   finishEditingAction,
+  setContourTracingModeAction,
 } = mockRequire.reRequire("oxalis/model/actions/volumetracing_actions");
-const { dispatchUndoAsync, dispatchRedoAsync, discardSaveQueuesAction } = mockRequire.reRequire(
-  "oxalis/model/actions/save_actions",
+const { setPositionAction, setZoomStepAction } = mockRequire.reRequire(
+  "oxalis/model/actions/flycam_actions",
 );
+const { setToolAction } = mockRequire.reRequire("oxalis/model/actions/ui_actions");
 
 test.beforeEach(async t => {
   // Setup oxalis, this will execute model.fetch(...) and initialize the store with the tracing, etc.
@@ -40,10 +49,22 @@
   await __setupOxalis(t, "volume");
 
+  // Ensure the slow compression is disabled by default. Tests may change
+  // this individually.
+  t.context.setSlowCompression(false);
+
   // Dispatch the wkReadyAction, so the sagas are started
   Store.dispatch(wkReadyAction());
 });
 
+test.afterEach(async t => {
+  // Saving after each test and checking that the root saga didn't crash
+  // ensures that each test exits cleanly. Without it, weird output can
+  // occur (e.g., a promise gets resolved which interferes with the next test).
+  await t.context.api.tracing.save();
+  t.false(hasRootSagaCrashed());
+});
+
 test.serial("Executing a floodfill in mag 1", async t => {
   t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction(
     Uint16Array,
@@ -332,7 +353,7 @@ test.serial("Executing a floodfill in mag 1 (long operation)", async t => {
 
 test.serial(
   "Executing copySegmentationLayer with a new segment id should update the maxCellId",
-  t => {
+  async t => {
     const newCellId = 13371338;
     Store.dispatch(setActiveCellAction(newCellId));
     Store.dispatch(copySegmentationLayerAction());
@@ -476,7 +497,19 @@ test.serial("Brushing/Tracing with already existing backend data", async t => {
   );
 });
 
-test.serial("Brushing/Tracing with undo (I)", async t => {
+// The binary parameters control whether the test will assert additional
+// constraints in between. Since getDataValue() has the side effect of awaiting
+// the loaded bucket, the test hits different execution paths. For example,
+// older code failed for tests ii and iv.
+test.serial("Brushing/Tracing with undo (Ia i)", undoTestHelper, false, false);
+test.serial("Brushing/Tracing with undo (Ia ii)", undoTestHelper, true, false);
+test.serial("Brushing/Tracing with undo (Ia iii)", undoTestHelper, false, true);
+test.serial("Brushing/Tracing with undo (Ia iv)", undoTestHelper, true, true);
+
+test.serial("Brushing/Tracing with undo (Ib)", testBrushingWithUndo, true);
+test.serial("Brushing/Tracing with undo (Ic)", testBrushingWithUndo, false);
+
+async function undoTestHelper(t, assertBeforeUndo, assertAfterUndo) {
   const oldCellId = 11;
   t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction(
     Uint16Array,
     oldCellId,
     500,
   );
   // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders
   // function.
   await t.context.api.data.reloadAllBuckets();
 
   const paintCenter = [0, 0, 0];
   const brushSize = 10;
 
   const newCellId = 2;
+  const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0];
 
   Store.dispatch(updateUserSettingAction("brushSize", brushSize));
   Store.dispatch(setPositionAction([0, 0, 0]));
   Store.dispatch(setToolAction(AnnotationToolEnum.BRUSH));
 
+  // Brush with ${newCellId}
   Store.dispatch(setActiveCellAction(newCellId));
   Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY));
   Store.dispatch(addToLayerAction(paintCenter));
   Store.dispatch(finishEditingAction());
 
+  // Brush with ${newCellId + 1}
   Store.dispatch(setActiveCellAction(newCellId + 1));
   Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY));
   Store.dispatch(addToLayerAction(paintCenter));
   Store.dispatch(finishEditingAction());
 
+  if (assertBeforeUndo) {
+    t.is(
+      await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter),
+      newCellId + 1,
+      "Before undo, there should be newCellId + 1",
+    );
+    t.is(
+      await t.context.api.data.getDataValue(volumeTracingLayerName, [1, 0, 0]),
+      newCellId + 1,
+      "Before undo, there should be newCellId + 1",
+    );
+    t.is(
+      await t.context.api.data.getDataValue(volumeTracingLayerName, [5, 0, 0]),
+      oldCellId,
+      "Before undo, there should be oldCellId",
+    );
+  }
+
   await dispatchUndoAsync(Store.dispatch);
 
-  const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0];
-  t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter), newCellId);
-  t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [1, 0, 0]), newCellId);
+  if (assertAfterUndo) {
+    t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter), newCellId);
+    t.is(await t.context.api.data.getDataValue(volumeTracingLayerName,
[1, 0, 0]), newCellId); + t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [5, 0, 0]), oldCellId); + } + + await dispatchRedoAsync(Store.dispatch); + + t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter), newCellId + 1); + t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [1, 0, 0]), newCellId + 1); t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [5, 0, 0]), oldCellId); -}); +} + +async function testBrushingWithUndo(t, assertBeforeRedo) { + const oldCellId = 11; + t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction( + Uint16Array, + oldCellId, + 500, + ); + // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders + // function. + await t.context.api.data.reloadAllBuckets(); + + const paintCenter = [3000, 0, 0]; + const brushSize = 10; + + const newCellId = 2; + const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0]; + + Store.dispatch(updateUserSettingAction("overwriteMode", OverwriteModeEnum.OVERWRITE_ALL)); + Store.dispatch(updateUserSettingAction("brushSize", brushSize)); + Store.dispatch(setPositionAction([0, 0, 0])); + Store.dispatch(setToolAction(AnnotationToolEnum.BRUSH)); + + // Brush with ${newCellId} + Store.dispatch(setActiveCellAction(newCellId)); + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + // Brush with ${newCellId + 1} + Store.dispatch(setActiveCellAction(newCellId + 1)); + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + // Erase everything + Store.dispatch(setContourTracingModeAction(ContourModeEnum.DELETE)); + Store.dispatch(setToolAction(AnnotationToolEnum.ERASE_BRUSH)); + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + // Undo erasure + await dispatchUndoAsync(Store.dispatch); + + const cube = t.context.api.data.model.getCubeByLayerName(volumeTracingLayerName); + const problematicBucket = cube.getOrCreateBucket([93, 0, 0, 0]); + t.true(problematicBucket.needsBackendData()); + + if (assertBeforeRedo) { + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter), + newCellId + 1, + "After erase + undo", + ); + + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, V3.add(paintCenter, [1, 0, 0])), + newCellId + 1, + "After erase + undo", + ); + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, V3.add(paintCenter, [5, 0, 0])), + oldCellId, + "After erase + undo", + ); + } + + // Redo erasure + await dispatchRedoAsync(Store.dispatch); + if (assertBeforeRedo) { + t.false(problematicBucket.needsBackendData()); + } else { + t.true(problematicBucket.needsBackendData()); + } + + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, paintCenter), + 0, + "After erase + undo + redo", + ); + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, V3.add(paintCenter, [1, 0, 0])), + 0, + "After erase + undo + redo", + ); + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, V3.add(paintCenter, [5, 0, 0])), + oldCellId, + "After erase + undo + redo", + ); +} test.serial("Brushing/Tracing with undo (II)", async t => { const oldCellId 
= 11; @@ -556,3 +709,222 @@ test.serial("Brushing/Tracing with undo (II)", async t => { t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [1, 0, 0]), newCellId + 1); t.is(await t.context.api.data.getDataValue(volumeTracingLayerName, [5, 0, 0]), oldCellId); }); + +test.serial("Brushing/Tracing with upsampling to unloaded data", async t => { + const oldCellId = 11; + t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction( + Uint16Array, + oldCellId, + 500, + ); + + Store.dispatch(setZoomStepAction(4)); + + // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders + // function. + await t.context.api.data.reloadAllBuckets(); + const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0]; + + const paintCenter = [0, 0, 0]; + const brushSize = 16; + const newCellId = 2; + + Store.dispatch(updateUserSettingAction("overwriteMode", OverwriteModeEnum.OVERWRITE_EMPTY)); + + Store.dispatch(updateUserSettingAction("brushSize", brushSize)); + Store.dispatch(setPositionAction([0, 0, 0])); + Store.dispatch(setToolAction(AnnotationToolEnum.BRUSH)); + + Store.dispatch(setActiveCellAction(newCellId)); + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + await t.context.api.tracing.save(); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, [0, 0, 0], zoomStep), + oldCellId, + `Center should still have old value at zoomstep=${zoomStep}`, + ); + } +}); + +test.serial("Erasing on mag 4 where mag 1 is unloaded", eraseInMag4Helper, false); +test.serial("Erasing on mag 4 where mag 1 is loaded", eraseInMag4Helper, true); + +async function eraseInMag4Helper(t, loadDataAtBeginning) { + const oldCellId = 11; + t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction( + Uint16Array, + oldCellId, + 500, + ); + + Store.dispatch(setZoomStepAction(4)); + + // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders + // function. + await t.context.api.data.reloadAllBuckets(); + const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0]; + + const paintCenter = [0, 0, 0]; + // This particular brushSize used to trigger a bug. It should not be changed. 
+ const brushSize = 263; + + if (loadDataAtBeginning) { + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + t.is( + await t.context.api.data.getDataValue(volumeTracingLayerName, [0, 0, 0], zoomStep), + oldCellId, + `Center should have old value at zoomstep=${zoomStep}`, + ); + } + } + + Store.dispatch(setContourTracingModeAction(ContourModeEnum.DELETE)); + Store.dispatch(updateUserSettingAction("overwriteMode", OverwriteModeEnum.OVERWRITE_ALL)); + + Store.dispatch(updateUserSettingAction("brushSize", brushSize)); + Store.dispatch(setPositionAction([0, 0, 0])); + Store.dispatch(setToolAction(AnnotationToolEnum.ERASE_BRUSH)); + + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + await t.context.api.tracing.save(); + + const data = await t.context.api.data.getDataFor2DBoundingBox(volumeTracingLayerName, { + min: [0, 0, 0], + max: [35, 1, 1], + }); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [32, 0, 0], + zoomStep, + ); + t.is(readValue, 0, `Voxel should be erased at zoomstep=${zoomStep}`); + } + t.is(_.max(data), 0, "All the data should be 0 (== erased)."); +} + +test.serial("Undo erasing in mag 4 (load before undo)", undoEraseInMag4Helper, false); +test.serial("Undo erasing in mag 4 (load after undo)", undoEraseInMag4Helper, true); + +async function undoEraseInMag4Helper(t, loadBeforeUndo) { + const oldCellId = 11; + t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction( + Uint16Array, + oldCellId, + 500, + ); + + Store.dispatch(setZoomStepAction(4)); + + // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders + // function. 
+ await t.context.api.data.reloadAllBuckets(); + const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0]; + + const paintCenter = [0, 0, 0]; + const brushSize = 10; + + Store.dispatch(setContourTracingModeAction(ContourModeEnum.DELETE)); + Store.dispatch(updateUserSettingAction("overwriteMode", OverwriteModeEnum.OVERWRITE_ALL)); + + Store.dispatch(updateUserSettingAction("brushSize", brushSize)); + Store.dispatch(setPositionAction([0, 0, 0])); + Store.dispatch(setToolAction(AnnotationToolEnum.ERASE_BRUSH)); + + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + if (loadBeforeUndo) { + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [0, 0, 0], + zoomStep, + ); + t.is(readValue, 0, `Voxel should be erased at zoomstep=${zoomStep}`); + } + } + + await dispatchUndoAsync(Store.dispatch); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [0, 0, 0], + zoomStep, + ); + t.is(readValue, oldCellId, `After undo, voxel should have old value at zoomstep=${zoomStep}`); + } +} + +test.serial("Provoke race condition when bucket compression is very slow", async t => { + t.context.setSlowCompression(true); + const oldCellId = 11; + t.context.mocks.Request.sendJSONReceiveArraybufferWithHeaders = createBucketResponseFunction( + Uint16Array, + oldCellId, + 500, + ); + + Store.dispatch(setZoomStepAction(4)); + + // Reload buckets which might have already been loaded before swapping the sendJSONReceiveArraybufferWithHeaders + // function. + await t.context.api.data.reloadAllBuckets(); + const volumeTracingLayerName = t.context.api.data.getVolumeTracingLayerIds()[0]; + + const paintCenter = [0, 0, 0]; + const brushSize = 10; + + Store.dispatch(setContourTracingModeAction(ContourModeEnum.DELETE)); + Store.dispatch(updateUserSettingAction("overwriteMode", OverwriteModeEnum.OVERWRITE_ALL)); + + Store.dispatch(updateUserSettingAction("brushSize", brushSize)); + Store.dispatch(setPositionAction([0, 0, 0])); + Store.dispatch(setToolAction(AnnotationToolEnum.ERASE_BRUSH)); + + Store.dispatch(startEditingAction(paintCenter, OrthoViews.PLANE_XY)); + Store.dispatch(addToLayerAction(paintCenter)); + Store.dispatch(finishEditingAction()); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [0, 0, 0], + zoomStep, + ); + t.is(readValue, 0, `Voxel should be erased at zoomstep=${zoomStep}`); + } + + await dispatchUndoAsync(Store.dispatch); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [0, 0, 0], + zoomStep, + ); + t.is(readValue, oldCellId, `After undo, voxel should have old value at zoomstep=${zoomStep}`); + } + + await dispatchRedoAsync(Store.dispatch); + + for (let zoomStep = 0; zoomStep <= 5; zoomStep++) { + const readValue = await t.context.api.data.getDataValue( + volumeTracingLayerName, + [0, 0, 0], + zoomStep, + ); + t.is(readValue, 0, `Voxel should be erased at zoomstep=${zoomStep}`); + } +}); diff --git a/package.json b/package.json index 33a0a0989c..48a8a1b95a 100644 --- a/package.json +++ b/package.json @@ -115,6 +115,7 @@ "refresh-schema": "./tools/postgres/refresh_schema.sh && rm -f 
target/scala-2.12/src_managed/schema/com/scalableminds/webknossos/schema/Tables.scala", "enable-jobs": "sed -i -e 's/jobsEnabled = false/jobsEnabled = true/g' ./conf/application.conf; ./tools/postgres/set_jobs.sh true", "disable-jobs": "sed -i -e 's/jobsEnabled = true/jobsEnabled = false/g' ./conf/application.conf; ./tools/postgres/set_jobs.sh false", + "coverage-local": "nyc report --reporter=html && echo Success! Open coverage/index.html", "coverage": "nyc report --reporter=text-lcov | coveralls", "postcheckout": "echo 'Deleting auto-generated flow files...' && rm -f frontend/javascripts/test/snapshots/flow-check/*.js", "apply-evolutions": "tools/postgres/apply_evolutions.sh", From 3ba276c462b09ee2c59f7be7de61ea5ec3c311fd Mon Sep 17 00:00:00 2001 From: Florian M Date: Mon, 31 Jan 2022 13:01:41 +0100 Subject: [PATCH 2/7] Store layer visibility with annotation (#5967) * [WIP] store layer visibility with annotation * add annotation viewconfiguration to backend code * Add evolution * adapt test db + snapshots * persist annotation-specific view configuration and merge it in place when loading an annotation * fix schema version after merge * fix flow * fix read-only check * changelog Co-authored-by: Philipp Otto --- CHANGELOG.unreleased.md | 1 + MIGRATIONS.unreleased.md | 1 + app/controllers/AnnotationController.scala | 3 ++ app/models/annotation/Annotation.scala | 21 +++++++++++-- app/models/annotation/AnnotationService.scala | 1 + .../081-annotation-viewconfiguration.sql | 11 +++++++ .../081-annotation-viewconfiguration.sql | 11 +++++++ frontend/javascripts/admin/admin_rest_api.js | 4 ++- .../oxalis/model/sagas/annotation_saga.js | 16 ++++++++++ .../oxalis/model_initialization.js | 29 ++++++++++++++++-- .../annotations.e2e.js.md | 8 +++++ .../annotations.e2e.js.snap | Bin 10485 -> 10501 bytes .../backend-snapshot-tests/tasks.e2e.js.md | 2 ++ .../backend-snapshot-tests/tasks.e2e.js.snap | Bin 4772 -> 4779 bytes frontend/javascripts/types/api_flow_types.js | 9 ++++++ test/db/annotations.csv | 22 ++++++------- tools/postgres/schema.sql | 3 +- 17 files changed, 125 insertions(+), 17 deletions(-) create mode 100644 conf/evolutions/081-annotation-viewconfiguration.sql create mode 100644 conf/evolutions/reversions/081-annotation-viewconfiguration.sql diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index 87b945e06f..66b92c4729 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -16,6 +16,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released ### Changed - Upgraded webpack build tool to v5 and all other webpack related dependencies to their latest version. Enabled persistent caching which speeds up server restarts during development as well as production builds. [#5969](https://github.com/scalableminds/webknossos/pull/5969) - The front-end API `labelVoxels` returns a promise now which fulfills as soon as the label operation was carried out. [#5955](https://github.com/scalableminds/webknossos/pull/5955) +- When changing which layers are visible in an annotation, this setting is persisted in the annotation, so when you share it, viewers will see the same visibility configuration. [#5967](https://github.com/scalableminds/webknossos/pull/5967) ### Fixed - Fixed volume-related bugs which could corrupt the volume data in certain scenarios. 
[#5955](https://github.com/scalableminds/webknossos/pull/5955) diff --git a/MIGRATIONS.unreleased.md b/MIGRATIONS.unreleased.md index 3e9adbc296..787aa1daaa 100644 --- a/MIGRATIONS.unreleased.md +++ b/MIGRATIONS.unreleased.md @@ -9,3 +9,4 @@ User-facing changes are documented in the [changelog](CHANGELOG.released.md). [Commits](https://github.com/scalableminds/webknossos/compare/22.02.0...HEAD) ### Postgres Evolutions: +- [081-annotation-viewconfiguration.sql](conf/evolutions/081-annotation-viewconfiguration.sql) diff --git a/app/controllers/AnnotationController.scala b/app/controllers/AnnotationController.scala index f23305bd4e..0f283b3ba2 100755 --- a/app/controllers/AnnotationController.scala +++ b/app/controllers/AnnotationController.scala @@ -332,11 +332,14 @@ class AnnotationController @Inject()( annotationService.updateTeamsForSharedAnnotation(annotation._id, List.empty) else Fox.successful(()) tags = (request.body \ "tags").asOpt[List[String]] + viewConfiguration = (request.body \ "viewConfiguration").asOpt[JsObject] _ <- Fox.runOptional(name)(annotationDAO.updateName(annotation._id, _)) ?~> "annotation.edit.failed" _ <- Fox .runOptional(description)(annotationDAO.updateDescription(annotation._id, _)) ?~> "annotation.edit.failed" _ <- Fox.runOptional(visibility)(annotationDAO.updateVisibility(annotation._id, _)) ?~> "annotation.edit.failed" _ <- Fox.runOptional(tags)(annotationDAO.updateTags(annotation._id, _)) ?~> "annotation.edit.failed" + _ <- Fox + .runOptional(viewConfiguration)(vc => annotationDAO.updateViewConfiguration(annotation._id, Some(vc))) ?~> "annotation.edit.failed" } yield JsonOk(Messages("annotation.edit.success")) } diff --git a/app/models/annotation/Annotation.scala b/app/models/annotation/Annotation.scala index f661962db5..58c39236fb 100755 --- a/app/models/annotation/Annotation.scala +++ b/app/models/annotation/Annotation.scala @@ -1,7 +1,7 @@ package models.annotation import com.scalableminds.util.accesscontext.DBAccessContext -import com.scalableminds.util.tools.{Fox, FoxImplicits} +import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} import com.scalableminds.webknossos.schema.Tables._ import com.scalableminds.webknossos.tracingstore.tracings.TracingType import javax.inject.Inject @@ -27,6 +27,7 @@ case class Annotation( description: String = "", visibility: AnnotationVisibility.Value = AnnotationVisibility.Internal, name: String = "", + viewConfiguration: Option[JsObject] = None, state: AnnotationState.Value = Active, statistics: JsObject = Json.obj(), tags: Set[String] = Set.empty, @@ -147,6 +148,7 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati for { state <- AnnotationState.fromString(r.state).toFox typ <- AnnotationType.fromString(r.typ).toFox + viewconfigurationOpt <- Fox.runOptional(r.viewconfiguration)(JsonHelper.parseJsonToFox[JsObject](_)) visibility <- AnnotationVisibility.fromString(r.visibility).toFox annotationLayers <- annotationLayerDAO.findAnnotationLayersFor(ObjectId(r._Id)) } yield { @@ -160,6 +162,7 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati r.description, visibility, r.name, + viewconfigurationOpt, state, Json.parse(r.statistics).as[JsObject], parseArrayTuple(r.tags).toSet, @@ -319,11 +322,13 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati // update operations def insertOne(a: Annotation): Fox[Unit] = { + val viewConfigurationStr: Option[String] = a.viewConfiguration.map(Json.toJson(_).toString) val 
insertAnnotationQuery = sqlu""" insert into webknossos.annotations(_id, _dataSet, _task, _team, _user, description, visibility, - name, state, statistics, tags, tracingTime, typ, created, modified, isDeleted) + name, viewConfiguration, state, statistics, tags, tracingTime, typ, created, modified, isDeleted) values(${a._id.id}, ${a._dataSet.id}, ${a._task.map(_.id)}, ${a._team.id}, ${a._user.id}, ${a.description}, '#${a.visibility.toString}', ${a.name}, + #${optionLiteral(viewConfigurationStr.map(sanitize))}, '#${a.state.toString}', '#${sanitize(a.statistics.toString)}', '#${writeArrayTuple(a.tags.toList.map(sanitize))}', ${a.tracingTime}, '#${a.typ.toString}', ${new java.sql.Timestamp(a.created)}, ${new java.sql.Timestamp(a.modified)}, ${a.isDeleted}) @@ -336,6 +341,7 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati // Task only, thus hard replacing tracing ids def updateInitialized(a: Annotation): Fox[Unit] = { + val viewConfigurationStr: Option[String] = a.viewConfiguration.map(Json.toJson(_).toString) val updateAnnotationQuery = sqlu""" update webknossos.annotations set @@ -345,6 +351,7 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati description = ${a.description}, visibility = '#${a.visibility.toString}', name = ${a.name}, + viewConfiguration = #${optionLiteral(viewConfigurationStr.map(sanitize))}, state = '#${a.state.toString}', statistics = '#${sanitize(a.statistics.toString)}', tags = '#${writeArrayTuple(a.tags.toList.map(sanitize))}', @@ -444,6 +451,16 @@ class AnnotationDAO @Inject()(sqlClient: SQLClient, annotationLayerDAO: Annotati def updateUser(id: ObjectId, userId: ObjectId)(implicit ctx: DBAccessContext): Fox[Unit] = updateObjectIdCol(id, _._User, userId) + + def updateViewConfiguration(id: ObjectId, viewConfiguration: Option[JsObject])( + implicit ctx: DBAccessContext): Fox[Unit] = { + val viewConfigurationStr: Option[String] = viewConfiguration.map(Json.toJson(_).toString) + for { + _ <- assertUpdateAccess(id) + _ <- run(sqlu"update webknossos.annotations set viewConfiguration = #${optionLiteral( + viewConfigurationStr.map(sanitize))} where _id = ${id.id}") + } yield () + } } class SharedAnnotationsDAO @Inject()(annotationDAO: AnnotationDAO, sqlClient: SQLClient)(implicit ec: ExecutionContext) diff --git a/app/models/annotation/AnnotationService.scala b/app/models/annotation/AnnotationService.scala index b2cd0a8f36..dc0041a434 100755 --- a/app/models/annotation/AnnotationService.scala +++ b/app/models/annotation/AnnotationService.scala @@ -792,6 +792,7 @@ class AnnotationService @Inject()( "id" -> annotation.id, "name" -> annotation.name, "description" -> annotation.description, + "viewConfiguration" -> annotation.viewConfiguration, "typ" -> annotation.typ, "task" -> taskJson, "stats" -> annotation.statistics, diff --git a/conf/evolutions/081-annotation-viewconfiguration.sql b/conf/evolutions/081-annotation-viewconfiguration.sql new file mode 100644 index 0000000000..be2332e3d3 --- /dev/null +++ b/conf/evolutions/081-annotation-viewconfiguration.sql @@ -0,0 +1,11 @@ +START TRANSACTION; + +DROP VIEW webknossos.annotations_; + +ALTER TABLE webknossos.annotations ADD COLUMN viewConfiguration JSONB; + +CREATE VIEW webknossos.annotations_ AS SELECT * FROM webknossos.annotations WHERE NOT isDeleted; + +UPDATE webknossos.releaseInformation SET schemaVersion = 81; + +COMMIT TRANSACTION; diff --git a/conf/evolutions/reversions/081-annotation-viewconfiguration.sql 
b/conf/evolutions/reversions/081-annotation-viewconfiguration.sql
new file mode 100644
index 0000000000..df4b36edf7
--- /dev/null
+++ b/conf/evolutions/reversions/081-annotation-viewconfiguration.sql
@@ -0,0 +1,11 @@
+START TRANSACTION;
+
+DROP VIEW webknossos.annotations_;
+
+ALTER TABLE webknossos.annotations DROP COLUMN viewConfiguration;
+
+CREATE VIEW webknossos.annotations_ AS SELECT * FROM webknossos.annotations WHERE NOT isDeleted;
+
+UPDATE webknossos.releaseInformation SET schemaVersion = 80;
+
+COMMIT TRANSACTION;
diff --git a/frontend/javascripts/admin/admin_rest_api.js b/frontend/javascripts/admin/admin_rest_api.js
index 1f6d866586..5177f7d105 100644
--- a/frontend/javascripts/admin/admin_rest_api.js
+++ b/frontend/javascripts/admin/admin_rest_api.js
@@ -25,6 +25,7 @@ import {
   type APIJobState,
   type APIMapping,
   type APIMaybeUnimportedDataset,
+  type APIMeshFile,
   type APIOpenTasksReport,
   type APIOrganization,
   type APIProject,
@@ -48,6 +49,7 @@ import {
   type APIUserLoggedTime,
   type APIUserTheme,
   type AnnotationLayerDescriptor,
+  type AnnotationViewConfiguration,
   type EditableLayerProperties,
   type ExperienceDomainList,
   type MeshMetaData,
@@ -55,7 +57,6 @@ import {
   type ServerTracing,
   type TracingType,
   type WkConnectDatasetConfig,
-  type APIMeshFile,
 } from "types/api_flow_types";
 import { ControlModeEnum, type Vector3, type Vector6, MappingStatusEnum } from "oxalis/constants";
 import type {
@@ -600,6 +601,7 @@ export type EditableAnnotation = {
   description: string,
   visibility: APIAnnotationVisibility,
   tags: Array<string>,
+  viewConfiguration?: AnnotationViewConfiguration,
 };
 
 export function editAnnotation(
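For illustration, a hypothetical `editAnnotation` payload with the new optional field could look as follows (layer names and values are invented):

```js
// Hypothetical payload sketch; layer names/values are illustrative only.
const editObject = {
  name: "my-annotation",
  description: "",
  visibility: "Internal",
  tags: [],
  viewConfiguration: {
    layers: {
      color: { isDisabled: false },
      segmentation: { isDisabled: true },
    },
  },
};
// Passing an object of this shape to editAnnotation(...) persists the layer
// visibility in the annotation's new viewConfiguration column on the server.
```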
diff --git a/frontend/javascripts/oxalis/model/sagas/annotation_saga.js b/frontend/javascripts/oxalis/model/sagas/annotation_saga.js
index a1add2bcea..9256c57a05 100644
--- a/frontend/javascripts/oxalis/model/sagas/annotation_saga.js
+++ b/frontend/javascripts/oxalis/model/sagas/annotation_saga.js
@@ -1,4 +1,5 @@
 // @flow
+import _ from "lodash";
 import type { EditAnnotationLayerAction } from "oxalis/model/actions/annotation_actions";
 import {
   type EditableAnnotation,
@@ -31,12 +32,23 @@ const MAX_MAG_FOR_AGGLOMERATE_MAPPING = 16;
 
 export function* pushAnnotationUpdateAsync(): Saga<void> {
   const tracing = yield* select(state => state.tracing);
+  if (!tracing.restrictions.allowUpdate) {
+    return;
+  }
+
+  // Persist the visibility of each layer within the annotation-specific
+  // viewConfiguration.
+  const { layers } = yield* select(state => state.datasetConfiguration);
+  const viewConfiguration = {
+    layers: _.mapValues(layers, layer => ({ isDisabled: layer.isDisabled })),
+  };
 
   // The extra type annotation is needed here for flow
   const editObject: $Shape<EditableAnnotation> = {
     name: tracing.name,
     visibility: tracing.visibility,
     description: tracing.description,
+    viewConfiguration,
   };
   yield* retry(
     SETTINGS_MAX_RETRY_COUNT,
@@ -147,6 +159,10 @@ export function* watchAnnotationAsync(): Saga<void> {
   yield _takeLatest("SET_ANNOTATION_NAME", pushAnnotationUpdateAsync);
   yield _takeLatest("SET_ANNOTATION_VISIBILITY", pushAnnotationUpdateAsync);
   yield _takeLatest("SET_ANNOTATION_DESCRIPTION", pushAnnotationUpdateAsync);
+  yield _takeLatest(
+    action => action.type === "UPDATE_LAYER_SETTING" && action.propertyName === "isDisabled",
+    pushAnnotationUpdateAsync,
+  );
   yield _takeLatest("EDIT_ANNOTATION_LAYER", pushAnnotationLayerUpdateAsync);
 }
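To make the `_.mapValues` step above concrete, here is a toy run with invented layer settings; everything except `isDisabled` is intentionally dropped before the payload is sent:

```js
import _ from "lodash";

// Input: per-layer settings as stored in the dataset configuration.
const layers = {
  color: { isDisabled: false, alpha: 100 },
  segmentation: { isDisabled: true, alpha: 20 },
};

// Only the visibility flag survives; this mirrors the saga above.
const viewConfiguration = {
  layers: _.mapValues(layers, layer => ({ isDisabled: layer.isDisabled })),
};
// -> { layers: { color: { isDisabled: false }, segmentation: { isDisabled: true } } }
```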
diff --git a/frontend/javascripts/oxalis/model_initialization.js b/frontend/javascripts/oxalis/model_initialization.js
index 411717ccb4..eb6bf6635c 100644
--- a/frontend/javascripts/oxalis/model_initialization.js
+++ b/frontend/javascripts/oxalis/model_initialization.js
@@ -30,6 +30,7 @@ import {
   getSegmentationLayerByNameOrFallbackName,
 } from "oxalis/model/accessors/dataset_accessor";
 import { getNullableSkeletonTracing } from "oxalis/model/accessors/skeletontracing_accessor";
+import { getServerVolumeTracings } from "oxalis/model/accessors/volumetracing_accessor";
 import { getSomeServerTracing } from "oxalis/model/accessors/tracing_accessor";
 import {
   getTracingsForAnnotation,
@@ -49,7 +50,6 @@ import {
   setMappingAction,
 } from "oxalis/model/actions/settings_actions";
 import { initializeVolumeTracingAction } from "oxalis/model/actions/volumetracing_actions";
-import { getServerVolumeTracings } from "oxalis/model/accessors/volumetracing_accessor";
 import {
   setActiveNodeAction,
   initializeSkeletonTracingAction,
@@ -67,7 +67,11 @@ import { setupGlobalMappingsObject } from "oxalis/model/bucket_data_handling/map
 import ConnectionInfo from "oxalis/model/data_connection_info";
 import DataLayer from "oxalis/model/data_layer";
 import ErrorHandling from "libs/error_handling";
-import Store, { type AnnotationType, type TraceOrViewCommand } from "oxalis/store";
+import Store, {
+  type AnnotationType,
+  type DatasetConfiguration,
+  type TraceOrViewCommand,
+} from "oxalis/store";
 import Toast from "libs/toast";
 import UrlManager, {
   type PartialUrlManagerState,
@@ -143,6 +147,8 @@ export async function initialize(
     displayedVolumeTracings,
     getSharingToken(),
   );
+  applyAnnotationSpecificViewConfigurationInplace(annotation, initialDatasetSettings);
+
   initializeSettings(initialUserSettings, initialDatasetSettings);
 
   let initializationInformation = null;
@@ -652,3 +658,22 @@ function applyLayerState(stateByLayer: UrlStateByLayer) {
     }
   }
 }
+
+function applyAnnotationSpecificViewConfigurationInplace(
+  annotation: ?APIAnnotation,
+  initialDatasetSettings: DatasetConfiguration,
+) {
+  /*
+    Apply annotation-specific view configurations to the dataset settings, which are persisted
+    per user per dataset. The AnnotationViewConfiguration currently only holds the "isDisabled"
+    information per layer, which should override the isDisabled information in DatasetConfiguration.
+  */
+  if (annotation && annotation.viewConfiguration) {
+    for (const layerName of Object.keys(annotation.viewConfiguration.layers)) {
+      _.merge(
+        initialDatasetSettings.layers[layerName],
+        annotation.viewConfiguration.layers[layerName],
+      );
+    }
+  }
+}
diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.md b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.md
index 1c96c9468c..5cff11bee7 100644
--- a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.md
+++ b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.md
@@ -135,6 +135,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
@@ -199,6 +200,7 @@ Generated by [AVA](https://avajs.dev).
     tracingTime: null,
     typ: 'Explorational',
     user: null,
+    viewConfiguration: null,
     visibility: 'Public',
   }
 
@@ -338,6 +340,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
@@ -477,6 +480,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
@@ -571,6 +575,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
@@ -665,6 +670,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
@@ -754,6 +760,7 @@ Generated by [AVA](https://avajs.dev).
         },
      ],
     },
+    viewConfiguration: null,
     visibility: 'Public',
   }
 
@@ -839,6 +846,7 @@ Generated by [AVA](https://avajs.dev).
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
 
diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.snap b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/annotations.e2e.js.snap
index 56cd565272f51da1bacf3ab4ec1a6eb9ba5dc815..b2f25d84f92abfe2915409d6ee8c9714a40948b1 100644
GIT binary patch
[base85 data for annotations.e2e.js.snap (10485 -> 10501 bytes) omitted]

;e=3JV$!g-(~oADSwu-z`hvC&4Pf>*!L;w2V^iy)e$D#YZVpMLUW4BY?=AysWV(Mbw7IBhwb_2;-U^G1V}1Nvp$N{7+K57 zX*#-wey=DfsBfa@d-~sW*7!fk131J0Hdp z&NG?hr9V9O6%FzTH0O$U ztSyvGaHv1s+kSU50zM!dg)5w|+eRigE@em`eq1{O>m4@3Jxes*>!anXUS5i4G|#v< z|6AHl_0CI>k)5!)@843qi|C_(D3Q18X8{Is@?C8#niy&TjM55O-S&VqCJ+fm_Yz%e zetP~a!68d=)M38X5ZtB$Qu$h7(h1pF0elXf*B_JUoVLR-1w|zVj;+e7@390 zcfKkX=+U550ClEHwn+@TAF6^JOPa{Xz>9Y)Hg==`{E2{Q;%$`eRu1L(nT^)Kh6l!B zh6nm^1z*l}U<0Xf^d}YCoiMYVRhFRA7M^)1&U&ij!G0=U6i5eZ)&L{?7S3WNHHVdm zV!mwFFp}KGo}vB*7tH^H9^nEtZ-__^2K)i3YuA}!9lmsZk|j}|QG1ZjUcQf$H$ky4 zIyf=6!9FkjR)%>T8-&h>h!hBlE5;ZoNx=u!^KYcxZqM2e<|GnbENaGMZ17e6?`EoE zvbt|)So$Xk{9f!HyyMipiGNxz4PNxy3{DHI?=zz;L4>f`h%!h^j5kb|g(q`9#k!t` z3+_cT$F#^1Xeaa6-;UNvvVs5hg7f7Uf`jaMWgAd|$d!m!#uO?7&@{-xW=a%{>B1QN z$Z?C;6tyD^cn=jg74pZ)LEk&=2n2Ch;kUsyQo2zz!9ur`%L?XT$@avR_mva&+$8%u z@L{L8DKmjZCoj=Y#}>~;F`lp%VR*;Jg>Tn2j7KD~c1q>F;1&#)N$=KnIrhSW&;N)nb9+@@ZEkL0xC;l^SXwm9w`6uWh>KrNE!vx4gw9 z!AZ4NF&{6vhdNf%lB-d&q00JyY9F4s{|ZN?JLBAo)I8vQP1kWFIr~8;v5$$ZKzqY4 z3~w77CEBt(#N|7_aQvO{;9xfxg1x^PI``N~*Lr7u)!Bbix%$n$_WsCd)+3L`3bAdq zA?c^!PUnvd1bmb;^}fQGcF8F-xi=`5-H=pgXd{!vTEUP3(Cw(oEl*IBq7)A*`xn(# zg~~*Nv>@~BcMP=Mr>Z|%3h3A|TEmYJNVjxtq_m%*5Zt^gZ^pz3e<9m+49hDC@>EqT zol}EeEroO7tx|H)(J%^=9NBw33rwGxla=13%GJi7Yp?h#b4jK-%&I~M-_+b+HIpt> zu5!*B^b$nA49X^I+`lP1t-0?mcbqvvzsebTs=M#Th@J({M%3L);lp5vs!$0wcQX+_ zN_h2Rlz}az%w`94U0#Kw`PwL zb@pc`pR@XVA5M4VPH}eUaGxpa(JUW3u~U7lXhcba&+3ZA1{FbZkYq$$7}=Q$xC9QG0X0I^srzM|Iwm6B0{PPrcp z@(8NwPB+bR!!PnC6C;QCw9hdF2~+pWmp7S=xz8(Lg9tLg+6i=W>NCpV$42 z)N9L0woma|7Avck0?cc^`O!A2lf3~UP-f0DT8sQy*`pKTQ#z%uv3Yj^HDnj4 zOFwNuJ7`~R3U?w+R_Kp@#R4+90`MxqNA}~yyfdjLRdT403whncww-J_1py&rZBslLljI9xRb$)ghG#$&OuzFG=@-d)30FZBWnq;!o>Eg-!T(m8$7rzl;DI)7Bc#AQoxCQU3qg)Hh9h4h>aKM02;}1zFo@i)RI5&DD1V9?|5tf$ZarX@X45{Q9knDe_5aF zXrVTct&RV;&bE-W&;z^?PUNM4a5gKfMcC?|C9#6)iZZ^h8Bnh*dex z#Pn|7?}|pxNFpECL5|`=Vhz^5LawI`#tp@l7Dz^0RdD5@7 z?sLv&mP$HXr7U;GaVL*kl_vZByX&inUX@_*=$9%5dI77_QQ zI=477*#A!NlKM`ZZ*^|z++*|Q1jkCRl5sKmIBT8334L7OZe$w_w-(>0uK7p# zN}HMN@uzJI(a)EEcNG z@WQ-ixxT^WH$e-z_7gxqtS3SUOME6YIUcD|%Jy@g2--EsP0kEg+`HggA_=|?{Pi_1 zS1raORC+AKnr*BAk$?yl|LkA~_m{B@8WQwn4=(oQ%RnXvQ9v>5fJ6i_y@tZjCozMw z{x^mh+^iB>2L2iP?w#oUwZkq4i8gbZDpj6eC_ zXnYZMY%yDVJ%C*P?j27s{o6p6HD|kNi}-nd9ZKKU>G*5(o-zEc8&i6(yVRWO!$0=9dCQlZ!B!ODVD-`RiHl8vy})lIno(#W&yBT$gT&rxJ7wp((gD) z9LqdF=Pm?ju6baa$a6OFO75H&s(Q*rxZS=7L!s)JW~&rNJCZXR#~}r+Avja{47LY& zN<<95{3b&Ly3^gV(dO)O`q?h;COl!lR?#saI_|`eYUk;yZW>eZ8#Z#-LrCUwyxP-n z5$jrb0$!=e8({1pcpd?8m!YMgV)V0B*h_G^uJ$DS{C)-bMLM;YYcC-;62azTelPLIIKtC{#dNd&Yw9vI$DkP6>S0<%F4PpSQ*H|ug%=8!QH z!#5B@gm2`hST+Ev4!ybqdhL0|e&0dUxpJ0cE9NbBlkH}Gu%)bXfRlC*3GNcWFhUvf z)vC&kJWXU$U#Cv)jIN#n2pEJqx+nhV>K7^bbC9vC+m)zp{bkYPj0CPviQ?@^R)TC+ zdX6-Bq-!*MByJER)+V9oook^+WL~zxQFQPvXWuv1yA~@{bX;U6+t;`<0xaszSXie+g?a8G-j+^V(sozW2??}8PxZc5!x*az)Embc{6P!3oY3Fq{g51>k zkJz~TC64Cy`6y#Wr{W3j_IxBQ$33&8rfMqrMY=L zD|`2sI9_F|KZ3ln7nI;GIyC&P-{07{8nrU`brdIvxM-O2AMLi-EX=s{H$^Xg@mr`^ zj%wFamvD2PZ6fjNY|PDA)I0jURQfSlf;M&SU8jJ8MyHH|M)rh-hUtVw*VMbCWk2c~ z?SJtyW8fxyVEQV9gIO3cvmhk6K5IBrwtGK7#R56H;#(&4$Nfw4uG|z)8!;NEeFd^Z zVlUCPKlNJ3n_tMl1E%2Slo%3edG^TIOcBj zynoSCp>BsKSW(;p9K?%vP+5_KcB~FKeEj4Z0H4rTBO-Z#>8PJ0oS_O#*^`WmCh1T3 z2}2peimc&^>-jRJUOYD4|p0PKWk%=9PY(|iWK?hou zReP1^qK*80lSr`t#S%>BB#=nCHzku4HVlh!`KwB_;N+iVZ&V}QJxnDNNf|!{>^o#rv6R9;Aav?+f&p1%Ygi~uaWR=< z@l;&Qn(hStG&x9EPh!V5d{kz5$46RlQ7XYPxdvJw8mu3fj>n2b<4<{rlut?`ctI)H zt55`lc+f)hU$*?HrVt}~d-_2ipbGr89h_7_fHVWj^b$nTZ&AJJetQ%{tL1*fo8d`; z|LxB7-d38aJ^4pDi3kNjI4aEtoJau0$+CZ}in?QG5)0?IG#{*BKD>Y(y;4+UToR zSb;PGtwQi=<8QQi^P$W^Mt{ZRN*GK0!x)`Hm=j9nY>0yjA(+R=rjVdtvAIF-CCUoK 
zP<55vm)Nb4xu8lfrD0@~cxGS2M&I+ob`%YaDv13l(d{d5N_00QIHlrYHJ%V=yMulW z27`eo$k`_>u29}4%;o4vFM7%^S+71nWg|=q{L9cqByg6v=>y|hpN87Wv!aW9{kYO! z`9Mta$q(F3_)vLgW1N~7n%ZA^^%LzcCq?19jb5^&OY*)u6&JZMl1)9fK5=JjP(+)( z{9YbYm*PFCLG0In;FA$ozDy~u&S{-5_Ve>%%U(EVy-VOeivaY1c69HSkkRSctQY{# zb;+fT??11IqMfX}0E0K3jjUVw1(aWvqkmGyDU1BIX^L*7{P))(?yTFLe4|~-G=6z7 z@+W0kl4Es$8)Hs?+f!GZPEU>2SC<3RNUOrA>ZobL*FT0$RIs4(UtWrQgs+)LOdOue z{OPqKFF>EfT7LG-Dhz!7Nx37uV6#N{$7X5PAx=k%C%!*JqV^}bt)pn%*`F7by2wUK ztvo;e;nu5ByT$t-FM41mkb5({ABUO5U_ntr^n8 zrgq_HH6t6a=~}ORLHPc%K2I+ItGFNepC@0i*Hx`w4^*)VYvf0Vx-7TW%tl(tF*(c8 zg3<34?oG6#tmIgn<*2~u+to=?GJ~k?uX6}Czh8vXGj&rYy-ZS%?Pc|WLSpg#qOB9; z8=qg)N580V)_XCeyHx0}&1;;G{I%k}HkVI?pw+sEhgEZte)uU|c%b>X$ zdKePA)$6pz)n9q&s$^~G)n%T(ro-v<^H|czAIl|kQ`j^V96`rcLqd#hBSY#86-aSV z6Hd3mp?r`X*Bok3$@z7>nMCf!Iz+Kx%$xB9GQ=h?fCoqCs(55n>LE&fhgd~;ai07h z3X6)&hI-VCbS-4P@|;$E@<{T#7UZfrK6 z7^qOxpu+kVkRc5GPrZ zcmtmN8S|k-WwHV!a11}40IZkM4Om@RiMs8el>Df-<^vvk1x8dU=?yc)%o`xG*JYkG zOkrrLga1Wf&N`ba7L`!I=l{=63{3?Rrs z+MjK`p?nE@1NLQNfg|P+>$=3UHLWM+mk<*(qrJZ5v*lT4E7 zqGIT))#54^CcY%gDKc`R4CTby{1}KNAhgacDdWF}i7g4VdN2gDMcjyco>Sg-C+7Sg z^1BnCsUTX_fKP#SH22*-+&2FY_x}@m(>?nl!MyczuNz-DDRp^Omtt*|1`2Q|%t7WR z1BwvBF}R>p2yS=sA6#O!=^x8x#SpBgnEvR&_%;T>*-TfYfn&n-b!sU(miagOZuKTo z8#}-7aQr2f$NE+c-Brbm=fyIOy9K4bN0xnSv#gMy(u zNxtK&#iQ&Afa};iHROW+Q|Zp?XV=>L=(u?s-!IvX&dNsVCUB)SDczoM_%Z!rFXx^w zb|=FeYACU9>5RLXbQ`vry5Gm*&Z+y1nekD^Jo)pF&}f8FvZkNo)FgXn3js5#EXRA~ zJhTV!u|p+M3)@V=`7S^STv_mtSa}?2Cp<^KWh!d7BORqS2Rud+Rdz|0+Wu{R-cV$o z_Fi%jPU!14e4G(Zw?Jq$c=f)qc+S4Lubdk~zRqtyNH6(m+tV3xu@Uc|7*F;*_7L37 zH^6xK-q_k}T*}X2#y9Tfv{Na?ooX&U!3H56N)7iRObW?}!*@@@c= zMpE?;m&9lxwzVK@MG;Ohp|5=^dt~hLF|A&(h}X?*M~c*PG3|OwL1<~x>?%pjtQTr1 zm<>YUeWK)>y*W)eEk{ompLiB>^z_H)BI}FwJr>-xB(Ed1d~U(Q4sp?N(RdML|9va{ zyZttm=Y;@H1J1eNyQqPs$vorvfObHHMiBkHvBA#%hr4G?anj(Znx$pKK9bL6vhE z;9GplnfRkPr3dS@*z@ixdf9Ti^UmR}V61{PZx!Pp%`2x$Rv2w~l1F>`dAcMQ6DfMO%#NE*@u8QWB4#PyyhBiL7>)+a zQvMqka!6S7RE<37Vr|#_VRl$ilNv z;=W=~=|Kt$CvS?T9vNz6fMFENilqq#WjWVH*m1O>R-=buWEX`n&2>3&`4KqfoMNOM80YhnOVjOQDGd;dHBRJq08`cKaV6hwjb_>SH;6|)*PbxLBu>k@73q5{qLXH Mw)#E{4H@Zw0Wmi&Z~y=R diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.md b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.md index 44f83f9bec..328e0bb132 100644 --- a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.md +++ b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.md @@ -515,6 +515,7 @@ Generated by [AVA](https://avajs.dev). }, ], }, + viewConfiguration: null, visibility: 'Internal', }, ] @@ -769,5 +770,6 @@ Generated by [AVA](https://avajs.dev). 
         },
       ],
     },
+    viewConfiguration: null,
     visibility: 'Internal',
   }
diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.snap b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/tasks.e2e.js.snap
index 35fd95f78eca06a6bf1c849b4a001d4123740447..309b9facf831b61b3192008883d93f33b36c9cc3 100644
GIT binary patch
[binary patch data omitted: "literal 4779" and "literal 4772" sections of the compressed snapshot, not human-readable]

diff --git a/frontend/javascripts/types/api_flow_types.js b/frontend/javascripts/types/api_flow_types.js
index a3c572278a..42ca344c2f 100644
--- a/frontend/javascripts/types/api_flow_types.js
+++ b/frontend/javascripts/types/api_flow_types.js
@@ -415,10 +415,19 @@ export type MeshMetaData = {|
   ...RemoteMeshMetaData,
 |};
 
+export type AnnotationViewConfiguration = {
+  layers: {
+    [layerName: string]: {
+      visibility: boolean,
+    },
+  },
+};
+
 type APIAnnotationBase = APIAnnotationCompact & {
   +dataStore: APIDataStore,
   +tracingStore: APITracingStore,
   +restrictions: APIRestrictions,
+  +viewConfiguration?: ?AnnotationViewConfiguration,
   +settings: APISettings,
   +user?: APIUserBase,
   +meshes: Array<MeshMetaData>,
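For orientation, the new `viewConfiguration` field stores exactly the shape declared by the `AnnotationViewConfiguration` Flow type above, serialized into the JSONB column added below. A minimal TypeScript sketch of such a value; the layer names here are made up for illustration:

```typescript
// Mirrors the AnnotationViewConfiguration Flow type above; "color" and
// "segmentation" are hypothetical layer names.
type AnnotationViewConfiguration = {
  layers: {
    [layerName: string]: { visibility: boolean };
  };
};

const example: AnnotationViewConfiguration = {
  layers: {
    color: { visibility: true },
    segmentation: { visibility: false },
  },
};

// This JSON string is what would end up in the annotations.viewConfiguration column:
console.log(JSON.stringify(example));
// {"layers":{"color":{"visibility":true},"segmentation":{"visibility":false}}}
```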
diff --git a/test/db/annotations.csv b/test/db/annotations.csv
index 1aa0356dc7..17e27238df 100644
--- a/test/db/annotations.csv
+++ b/test/db/annotations.csv
@@ -1,11 +1,11 @@
-_id,_dataSet,_task,_team,_user,description,visibility,name,state,statistics,tags,tracingTime,typ,created,modified,isDeleted
-'570b9ff12a7c0e980056fe8f','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5352','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'570ba0092a7c0e980056fe9b','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'58135c192faeb34c0081c05c','59e9cfbdba632ac2ab8b23b3','58135c192faeb34c0081c058','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'58135c402faeb34e0081c068','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5352','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'58135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'68135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'68135c192faeb34c0081c05e','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'78135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'78135c192faeb34c0081c05e','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','','Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
-'88135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Public','','Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+_id,_dataSet,_task,_team,_user,description,visibility,name,viewConfiguration,state,statistics,tags,tracingTime,typ,created,modified,isDeleted
+'570b9ff12a7c0e980056fe8f','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5352','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'570ba0092a7c0e980056fe9b','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'58135c192faeb34c0081c05c','59e9cfbdba632ac2ab8b23b3','58135c192faeb34c0081c058','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'58135c402faeb34e0081c068','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5352','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'58135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711','581367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'68135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'68135c192faeb34c0081c05e','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'TracingBase','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'78135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'78135c192faeb34c0081c05e','570b9f4e4bb848d0885ee711','681367a82faeb37a008a5354','570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Internal','',,'Active','{}','{}',,'Task','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f
+'88135c192faeb34c0081c05d','570b9f4e4bb848d0885ee711',,'570b9f4b2a7c0e3b008da6ec','570b9f4d2a7c0e4d008da6ef','','Public','',,'Active','{"treeCount":2,"nodeCount":28967,"edgeCount":28965,"branchPointCount":0}','{}',,'Explorational','1970-01-01T00:00:00.000Z','1970-01-01T00:00:00.000Z',f

diff --git a/tools/postgres/schema.sql b/tools/postgres/schema.sql
index ec43c53986..68d5509710 100644
--- a/tools/postgres/schema.sql
+++ b/tools/postgres/schema.sql
@@ -21,7 +21,7 @@ START TRANSACTION;
 CREATE TABLE webknossos.releaseInformation (
   schemaVersion BIGINT NOT NULL
 );
-INSERT INTO webknossos.releaseInformation(schemaVersion) values(80);
+INSERT INTO webknossos.releaseInformation(schemaVersion) values(81);
 COMMIT TRANSACTION;
 
@@ -37,6 +37,7 @@ CREATE TABLE webknossos.annotations(
   description TEXT NOT NULL DEFAULT '',
   visibility webknossos.ANNOTATION_VISIBILITY NOT NULL DEFAULT 'Internal',
   name VARCHAR(256) NOT NULL DEFAULT '',
+  viewConfiguration JSONB,
   state webknossos.ANNOTATION_STATE NOT NULL DEFAULT 'Active',
   statistics JSONB NOT NULL,
   tags VARCHAR(256)[] NOT NULL DEFAULT '{}',

From f6fe9da94be508baf67506841598b925e70144d0 Mon Sep 17 00:00:00 2001
From: Youri K
Date: Mon, 31 Jan 2022 14:23:42 +0100
Subject: [PATCH 3/7] Allow cancelling uploads (#5958)

* closable upload modal to cancel running uploads
* fix formatting
* unreserve names also for failed jobs and deleted datasets
* modified frontend to confirm cancel
* apply pr feedback in frontend
* remove unnecessary import
* Update app/controllers/WKRemoteDataStoreController.scala
Co-authored-by: Florian M
* Update webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/UploadService.scala
Co-authored-by: Florian M
* Update webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala
Co-authored-by: Florian M
* Update webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
Co-authored-by: Florian M
* Update app/models/job/Job.scala
Co-authored-by: Florian M
* Update frontend/javascripts/admin/dataset/dataset_upload_view.js
Co-authored-by: Florian M
* backend pr feedback
* merge save-upload-info
* use saved uploadinfo for cancel request too
* more backend pr feedback
* Update CHANGELOG.unreleased.md
* implement backend pr feedback
* better name
* Update frontend/javascripts/admin/admin_rest_api.js
Co-authored-by: Philipp Otto

Co-authored-by: Florian M
Co-authored-by: Philipp Otto
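Taken together, this patch changes the upload protocol so that `reserveUpload` persists all dataset metadata server-side and later requests carry only the `uploadId`. A hedged TypeScript sketch of the resulting client contract (types abridged from the diffs below; `fetch` stands in for the app's `Request` wrapper, and URL/token handling is simplified):

```typescript
// Field lists abridged from the diffs below; treat as illustrative, not exhaustive.
interface ReserveUploadInformation {
  uploadId: string;
  organization: string;
  name: string;
  totalFileCount: number;
  initialTeams: Array<string>;
  layersToLink?: Array<unknown>;
}

interface CancelUploadInformation {
  uploadId: string;
}

async function cancelDatasetUploadSketch(
  datastoreUrl: string,
  token: string,
  info: CancelUploadInformation,
): Promise<void> {
  // After reserveUpload, the datastore can resolve organization and dataset
  // name from Redis via the uploadId, so cancelling only needs the uploadId.
  await fetch(`${datastoreUrl}/data/datasets/cancelUpload?token=${token}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(info),
  });
}
```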
---
 CHANGELOG.unreleased.md                       |   1 +
 .../WKRemoteDataStoreController.scala         |   2 +-
 .../WKRemoteWorkerController.scala            |   1 +
 app/models/job/Job.scala                      |  17 ++-
 conf/messages                                 |   3 +
 conf/webknossos.latest.routes                 |   2 +-
 frontend/javascripts/admin/admin_rest_api.js  |  23 +--
 .../admin/dataset/dataset_upload_view.js      |  56 +++++--
 .../dataset/import_delete_component.js        |   6 +-
 frontend/javascripts/messages.js              |   1 +
 .../controllers/DataSourceController.scala    |  62 ++++++--
 .../datastore/helpers/DataSetDeleter.scala    |  38 ++---
 .../helpers/DirectoryConstants.scala          |   7 +
 .../services/DSRemoteWebKnossosClient.scala   |   4 +-
 .../services/DataSourceRepository.scala       |   2 +-
 .../services/SampleDataSourceService.scala    |   9 +-
 .../datastore/services/UploadService.scala    | 138 ++++++++++++------
 ....scalableminds.webknossos.datastore.routes |   1 +
 18 files changed, 254 insertions(+), 119 deletions(-)
 create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DirectoryConstants.scala

diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index 66b92c4729..e0dc9c1a68 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -12,6 +12,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released
 
 ### Added
 - Added a button next to the histogram which adapts the contrast and brightness to the currently visible data. [#5961](https://github.com/scalableminds/webknossos/pull/5961)
+- Running uploads can now be cancelled. [#5958](https://github.com/scalableminds/webknossos/pull/5958)
 
 ### Changed
 - Upgraded webpack build tool to v5 and all other webpack related dependencies to their latest version. Enabled persistent caching which speeds up server restarts during development as well as production builds. [#5969](https://github.com/scalableminds/webknossos/pull/5969)

diff --git a/app/controllers/WKRemoteDataStoreController.scala b/app/controllers/WKRemoteDataStoreController.scala
index ef2f0c927b..b15ea33cb7 100644
--- a/app/controllers/WKRemoteDataStoreController.scala
+++ b/app/controllers/WKRemoteDataStoreController.scala
@@ -145,7 +145,7 @@ class WKRemoteDataStoreController @Inject()(
     }
   }
 
-  def deleteErroneous(name: String, key: String): Action[JsValue] = Action.async(parse.json) { implicit request =>
+  def deleteDataset(name: String, key: String): Action[JsValue] = Action.async(parse.json) { implicit request =>
     dataStoreService.validateAccess(name, key) { _ =>
       for {
         datasourceId <- request.body.validate[DataSourceId].asOpt.toFox ?~> "dataStore.upload.invalid"

diff --git a/app/controllers/WKRemoteWorkerController.scala b/app/controllers/WKRemoteWorkerController.scala
index 9c46e60727..c59247c0b3 100644
--- a/app/controllers/WKRemoteWorkerController.scala
+++ b/app/controllers/WKRemoteWorkerController.scala
@@ -51,6 +51,7 @@ class WKRemoteWorkerController @Inject()(jobDAO: JobDAO, jobService: JobService,
       _ <- jobDAO.updateStatus(jobIdParsed, request.body)
       jobAfterChange <- jobDAO.findOne(jobIdParsed)(GlobalAccessContext)
       _ = jobService.trackStatusChange(jobBeforeChange, jobAfterChange)
+      _ <- jobService.cleanUpIfFailed(jobAfterChange)
     } yield Ok
   }

diff --git a/app/models/job/Job.scala b/app/models/job/Job.scala
index a06d4f7ae9..37e79b7f07 100644
--- a/app/models/job/Job.scala
+++ b/app/models/job/Job.scala
@@ -1,7 +1,6 @@
 package models.job
 
 import java.sql.Timestamp
-
 import akka.actor.ActorSystem
 import com.scalableminds.util.accesscontext.{DBAccessContext, GlobalAccessContext}
 import com.scalableminds.util.geometry.BoundingBox
@@ -9,9 +8,10 @@ import com.scalableminds.util.mvc.Formatter
 import com.scalableminds.util.tools.{Fox, FoxImplicits}
 import com.scalableminds.webknossos.schema.Tables._
 import com.typesafe.scalalogging.LazyLogging
+
 import javax.inject.Inject
 import models.analytics.{AnalyticsService, FailedJobEvent, RunJobEvent}
-import models.binary.DataStoreDAO
+import models.binary.{DataSetDAO, DataStoreDAO}
 import models.job.JobState.JobState
 import models.organization.OrganizationDAO
 import models.user.{MultiUserDAO, User, UserDAO}
@@ -257,6 +257,7 @@ class JobService @Inject()(wkConf: WkConf,
                            jobDAO: JobDAO,
                            dataStoreDAO: DataStoreDAO,
                            organizationDAO: OrganizationDAO,
+                           dataSetDAO: DataSetDAO,
                            analyticsService: AnalyticsService,
                            slackNotificationService: SlackNotificationService,
                            val lifecycle: ApplicationLifecycle,
@@ -309,6 +310,18 @@ class JobService @Inject()(wkConf: WkConf,
       ()
     }
 
+  def cleanUpIfFailed(job: Job): Fox[Unit] =
+    if (job.state == JobState.FAILURE && job.command == "convert_to_wkw") {
+      logger.info(s"WKW conversion job ${job._id} failed. Deleting dataset from the database, freeing the name...")
+      val commandArgs = job.commandArgs.value
+      for {
+        datasetName <- commandArgs.get("dataset_name").map(_.as[String]).toFox
+        organizationName <- commandArgs.get("organization_name").map(_.as[String]).toFox
+        dataset <- dataSetDAO.findOneByNameAndOrganizationName(datasetName, organizationName)(GlobalAccessContext)
+        _ <- dataSetDAO.deleteDataset(dataset._id)
+      } yield ()
+    } else Fox.successful(())
+
   def publicWrites(job: Job)(implicit ctx: DBAccessContext): Fox[JsObject] =
     for {
       owner <- userDAO.findOne(job._owner) ?~> "user.notFound"

diff --git a/conf/messages b/conf/messages
index 68bb4d3fb1..1e18f301a5 100644
--- a/conf/messages
+++ b/conf/messages
@@ -129,10 +129,13 @@ dataset.initialTeams.invalidTeams=Can only assign teams of user
 dataset.initialTeams.timeout=Timeout while setting initial teams. Was the request sent manually? Received at {0}, dataset created at {1}
 dataSet.uploader.notEmpty=Dataset already has non-empty uploader
 dataset.delete.disabled=Dataset deletion is disabled for this webKnossos instance
+dataSet.delete.webknossos.failed=Could not delete dataset from webKnossos database
+dataSet.delete.failed=Could not delete the dataset on disk.
 dataSet.upload.Datastore.restricted=Your organization is not allowed to upload datasets to this datastore. Please choose another datastore.
 dataSet.upload.validation.failed=Failed to validate Dataset information for upload.
 dataSet.upload.linkPublicOnly=Only layers of existing public datasets can be linked
 dataSet.upload.invalidLinkedLayers=Could not link all requested layers
+dataSet.upload.cancel.failed=Could not cancel the upload.
 dataSource.notFound=Datasource not found on datastore server

diff --git a/conf/webknossos.latest.routes b/conf/webknossos.latest.routes
index b9cd98ba28..7a7e82c097 100644
--- a/conf/webknossos.latest.routes
+++ b/conf/webknossos.latest.routes
@@ -87,7 +87,7 @@ PUT /datastores/:name/datasources c
 PATCH /datastores/:name/status                 controllers.WKRemoteDataStoreController.statusUpdate(name: String, key: String)
 POST  /datastores/:name/verifyUpload           controllers.WKRemoteDataStoreController.validateDataSetUpload(name: String, key: String, token: String)
 POST  /datastores/:name/reportDatasetUpload    controllers.WKRemoteDataStoreController.reportDatasetUpload(name: String, key: String, token: String, dataSetName: String, dataSetSizeBytes: Long)
-POST  /datastores/:name/deleteErroneous        controllers.WKRemoteDataStoreController.deleteErroneous(name: String, key: String)
+POST  /datastores/:name/deleteDataset          controllers.WKRemoteDataStoreController.deleteDataset(name: String, key: String)
 GET   /datastores/:name/jobExportProperties    controllers.WKRemoteDataStoreController.jobExportProperties(name: String, key: String, jobId: String)
 POST  /datastores/:name/validateUserAccess     controllers.UserTokenController.validateAccessViaDatastore(name: String, key: String, token: Option[String])
 POST  /datastores                              controllers.DataStoreController.create

diff --git a/frontend/javascripts/admin/admin_rest_api.js b/frontend/javascripts/admin/admin_rest_api.js
index 5177f7d105..dd557f3602 100644
--- a/frontend/javascripts/admin/admin_rest_api.js
+++ b/frontend/javascripts/admin/admin_rest_api.js
@@ -1175,11 +1175,7 @@ export function getDatasetAccessList(datasetId: APIDatasetId): Promise<Array<APIUser>> {
 }
 
-export function createResumableUpload(
-  datasetId: APIDatasetId,
-  datastoreUrl: string,
-  uploadId: string,
-): Promise<*> {
+export function createResumableUpload(datastoreUrl: string, uploadId: string): Promise<*> {
   const generateUniqueIdentifier = file => {
     if (file.path == null) {
       // file.path should be set by react-dropzone (which uses file-selector::toFileWithPath).
@@ -1190,16 +1186,11 @@ export function createResumableUpload(datastoreUrl: string, uploadId: string): 
     return `${uploadId}/${file.path || file.name}`;
   };
 
-  const additionalParameters = {
-    ...datasetId,
-  };
-
   return doWithToken(
    token =>
      new ResumableJS({
        testChunks: false,
        target: `${datastoreUrl}/data/datasets?token=${token}`,
-       query: additionalParameters,
        chunkSize: 10 * 1024 * 1024, // set chunk size to 10MB
        permanentErrors: [400, 403, 404, 409, 415, 500, 501],
        simultaneousUploads: 3,
@@ -1239,6 +1230,18 @@ export function finishDatasetUpload(datastoreHost: string, uploadInformation: {
   );
 }
 
+export function cancelDatasetUpload(
+  datastoreHost: string,
+  cancelUploadInformation: { uploadId: string },
+): Promise<void> {
+  return doWithToken(token =>
+    Request.sendJSONReceiveJSON(`/data/datasets/cancelUpload?token=${token}`, {
+      data: cancelUploadInformation,
+      host: datastoreHost,
+    }),
+  );
+}
+
 export function addWkConnectDataset(
   datastoreHost: string,
   datasetConfig: WkConnectDatasetConfig,
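Each resumable chunk is now identified solely by `${uploadId}/${filePath}`, which is why the per-chunk `query` parameters (dataset name and organization) could be removed: the datastore recovers both the upload and the target file from the identifier and looks the rest up in Redis. A TypeScript sketch of the round trip; the helper names and the example path are illustrative, while the server side uses `extractDatasetUploadId` and `split("/").tail`:

```typescript
// What createResumableUpload's generateUniqueIdentifier produces per file:
function buildResumableIdentifier(uploadId: string, filePath: string): string {
  return `${uploadId}/${filePath}`;
}

// Mirror of the server-side parsing: the prefix is the uploadId, the rest is
// the file path inside the upload directory.
function parseResumableIdentifier(identifier: string): { uploadId: string; filePath: string } {
  const parts = identifier.split("/");
  return { uploadId: parts[0], filePath: parts.slice(1).join("/") };
}

const id = buildResumableIdentifier("upload-123", "color/1/z0/y0/x0.wkw"); // made-up values
console.log(parseResumableIdentifier(id));
// { uploadId: "upload-123", filePath: "color/1/z0/y0/x0.wkw" }
```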
diff --git a/frontend/javascripts/admin/dataset/dataset_upload_view.js b/frontend/javascripts/admin/dataset/dataset_upload_view.js
index 600ee4f10c..fa94792a6e 100644
--- a/frontend/javascripts/admin/dataset/dataset_upload_view.js
+++ b/frontend/javascripts/admin/dataset/dataset_upload_view.js
@@ -28,6 +28,7 @@ import type { OxalisState } from "oxalis/store";
 import {
   reserveDatasetUpload,
   finishDatasetUpload,
+  cancelDatasetUpload,
   createResumableUpload,
   startConvertToWkwJob,
   sendAnalyticsEvent,
@@ -48,7 +49,7 @@ import TeamSelectionComponent from "dashboard/dataset/team_selection_component";
 import features from "features";
 import { syncValidator } from "types/validation";
 import { FormInstance } from "antd/lib/form";
-import { FormItemWithInfo } from "../../dashboard/dataset/helper_components";
+import { FormItemWithInfo, confirmAsync } from "../../dashboard/dataset/helper_components";
 
 const FormItem = Form.Item;
 
@@ -79,6 +80,9 @@ type State = {
   isRetrying: boolean,
   uploadProgress: number,
   selectedTeams: APITeam | Array<APITeam>,
+  uploadId: string,
+  resumableUpload: any,
+  datastoreUrl: string,
 };
 
 function WkwExample() {
@@ -151,6 +155,9 @@ class DatasetUploadView extends React.Component {
     isRetrying: false,
     uploadProgress: 0,
     selectedTeams: [],
+    uploadId: "",
+    resumableUpload: {},
+    datastoreUrl: "",
   };
 
   unblock: ?Function;
@@ -251,13 +258,12 @@ class DatasetUploadView extends React.Component {
       initialTeams: formValues.initialTeams.map(team => team.id),
     };
 
-    await reserveDatasetUpload(formValues.datastoreUrl, reserveUploadInformation);
+    const datastoreUrl = formValues.datastoreUrl;
+    await reserveDatasetUpload(datastoreUrl, reserveUploadInformation);
 
-    const resumableUpload = await createResumableUpload(
-      datasetId,
-      formValues.datastoreUrl,
-      uploadId,
-    );
+    const resumableUpload = await createResumableUpload(datastoreUrl, uploadId);
+
+    this.setState({ uploadId, resumableUpload, datastoreUrl });
 
     resumableUpload.on("complete", () => {
       const newestForm = this.formRef.current;
@@ -267,15 +273,12 @@ class DatasetUploadView extends React.Component {
 
       const uploadInfo = {
        uploadId,
-       organization: datasetId.owningOrganization,
-       name: datasetId.name,
-       layersToLink: [],
        needsConversion: this.state.needsConversion,
      };
 
      this.setState({ isFinishing: true });
 
-     finishDatasetUpload(formValues.datastoreUrl, uploadInfo).then(
+     finishDatasetUpload(datastoreUrl, uploadInfo).then(
        async () => {
          trackAction("Upload dataset");
          await Utils.sleep(3000); // wait for 3 seconds so the server can catch up / do its thing
@@ -283,7 +286,7 @@ class DatasetUploadView extends React.Component {
          let maybeError;
          if (this.state.needsConversion) {
            try {
-             const datastore = this.getDatastoreForUrl(formValues.datastoreUrl);
+             const datastore = this.getDatastoreForUrl(datastoreUrl);
              if (!datastore) {
                throw new Error("Selected datastore does not match available datastores");
              }
@@ -362,6 +365,31 @@ class DatasetUploadView extends React.Component {
     }
   };
 
+  cancelUpload = async () => {
+    const { uploadId, resumableUpload, datastoreUrl } = this.state;
+    resumableUpload.pause();
+    const shouldCancel = await confirmAsync({
+      title:
+        "Cancelling the running upload will delete already uploaded files on the server and cannot be undone. Are you sure you want to cancel the upload?",
+      okText: "Yes, cancel the upload",
+      cancelText: "No, keep it running",
+    });
+    if (!shouldCancel) {
+      resumableUpload.upload();
+      return;
+    }
+
+    resumableUpload.cancel();
+    await cancelDatasetUpload(datastoreUrl, { uploadId });
+    this.setState({
+      isUploading: false,
+      isFinishing: false,
+      isRetrying: false,
+      uploadProgress: 0,
+    });
+    Toast.success(messages["dataset.upload_cancel"]);
+  };
+
   getUploadModal = () => {
     const form = this.formRef.current;
     if (!form) {
@@ -372,12 +400,12 @@ class DatasetUploadView extends React.Component {
     return (
       {/* modal JSX not preserved in this excerpt */}
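`confirmAsync` is imported from `helper_components` but its implementation is not part of this patch. A plausible sketch of such a helper, assuming it wraps antd's callback-style `Modal.confirm` into an awaitable `Promise<boolean>` (the real implementation may differ):

```typescript
import { Modal } from "antd";

// Hypothetical confirmAsync: resolve true on OK, false on cancel, so callers
// can simply `await` the user's decision.
type ConfirmOptions = { title: string; okText?: string; cancelText?: string };

function confirmAsyncSketch(opts: ConfirmOptions): Promise<boolean> {
  return new Promise(resolve => {
    Modal.confirm({
      ...opts,
      onOk: () => resolve(true),
      onCancel: () => resolve(false),
    });
  });
}
```

Note the ordering in `cancelUpload` above: the resumable upload is paused before awaiting the confirmation, so no further chunks are sent while the dialog is open, and `resumableUpload.upload()` resumes seamlessly if the user declines.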
diff --git a/frontend/javascripts/dashboard/dataset/import_delete_component.js b/frontend/javascripts/dashboard/dataset/import_delete_component.js
index 53eb262e35..3e23df452d 100644
--- a/frontend/javascripts/dashboard/dataset/import_delete_component.js
+++ b/frontend/javascripts/dashboard/dataset/import_delete_component.js
@@ -2,7 +2,6 @@
 
 import { Button } from "antd";
 import React, { useState, useEffect, useContext } from "react";
-import * as Utils from "libs/utils";
 
 import type { APIDataset, APIDatasetId } from "types/api_flow_types";
 import { getDataset, deleteDatasetOnDisk } from "admin/admin_rest_api";
@@ -53,9 +52,8 @@ const ImportDeleteComponent = ({ datasetId, history }: Props) => {
       }),
     );
 
     setIsDeleting(false);
-    // Trigger dataset check to make sure the dataset list is up-to-date
-    // but also make sure that the toast can be read
-    await Promise.all([datasetContext.checkDatasets(), Utils.sleep(2000)]);
+    // Refresh the dataset list to exclude the deleted dataset
+    await datasetContext.fetchDatasets();
     history.push("/dashboard");
   }

diff --git a/frontend/javascripts/messages.js b/frontend/javascripts/messages.js
index bfc9a18352..a08b30f7ac 100644
--- a/frontend/javascripts/messages.js
+++ b/frontend/javascripts/messages.js
@@ -247,6 +247,7 @@ instead. Only enable this option if you understand its effect. All layers will n
   "task.no_tasks_to_download": "There are no tasks available to download.",
   "dataset.upload_success": "The dataset was uploaded successfully.",
   "dataset.upload_failed": "The dataset upload failed.",
+  "dataset.upload_cancel": "The dataset upload was cancelled.",
   "dataset.unsupported_file_type":
     "It looks like the selected file is not supported. WebKnossos only supports uploading zipped WKW datasets or image files.",
   "dataset.upload_invalid_zip":

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
index 85e954efb3..7320f98111 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/DataSourceController.scala
@@ -155,13 +155,11 @@ Expects:
     Action.async(parse.multipartFormData) { implicit request =>
       val uploadForm = Form(
         tuple(
-          "name" -> nonEmptyText.verifying("dataSet.name.invalid", n => n.matches("[A-Za-z0-9_\\-]*")),
-          "owningOrganization" -> nonEmptyText,
           "resumableChunkNumber" -> number,
           "resumableChunkSize" -> number,
           "resumableTotalChunks" -> longNumber,
           "resumableIdentifier" -> nonEmptyText
-        )).fill(("", "", -1, -1, -1, ""))
+        )).fill((-1, -1, -1, ""))
 
       accessTokenService.validateAccess(UserAccessRequest.administrateDataSources, Some(token)) {
         AllowRemoteOrigin {
@@ -170,14 +168,12 @@ Expects:
             .fold(
               hasErrors = formWithErrors => Fox.successful(JsonBadRequest(formWithErrors.errors.head.message)),
               success = {
-                case (name, organization, chunkNumber, chunkSize, totalChunkCount, uploadId) =>
-                  val id = DataSourceId(name, organization)
+                case (chunkNumber, chunkSize, totalChunkCount, uploadId) =>
                   for {
                     isKnownUpload <- uploadService.isKnownUploadByFileId(uploadId)
                     _ <- bool2Fox(isKnownUpload) ?~> "dataSet.upload.validation.failed"
                     chunkFile <- request.body.file("file") ?~> "zip.file.notFound"
                     _ <- uploadService.handleUploadChunk(uploadId,
-                                                         id,
                                                          chunkSize,
                                                          totalChunkCount,
                                                          chunkNumber,
@@ -213,7 +209,7 @@ Expects:
       paramType = "body")))
   @ApiResponses(
     Array(
-      new ApiResponse(code = 200, message = "Empty body, chunk was saved on the server"),
+      new ApiResponse(code = 200, message = "Empty body, upload was successfully finished"),
       new ApiResponse(code = 400, message = "Operation could not be performed. See JSON body for more information.")
     ))
   def finishUpload(token: String): Action[UploadInformation] = Action.async(validateJson[UploadInformation]) {
@@ -229,6 +225,45 @@ Expects:
     }
+  @ApiOperation(
+    value = """Cancel a running dataset upload
+Expects:
+ - As JSON object body with keys:
+  - uploadId (string): upload id that was also used in chunk upload (this time without file paths)
+ - As GET parameter:
+  - token (string): datastore token identifying the uploading user
+""",
+    nickname = "datasetCancelUpload"
+  )
+  @ApiImplicitParams(
+    Array(
+      new ApiImplicitParam(name = "cancelUploadInformation",
+                           required = true,
+                           dataTypeClass = classOf[CancelUploadInformation],
+                           paramType = "body")))
+  @ApiResponses(
+    Array(
+      new ApiResponse(code = 200, message = "Empty body, upload was cancelled"),
+      new ApiResponse(code = 400, message = "Operation could not be performed. See JSON body for more information.")
+    ))
+  def cancelUpload(token: String): Action[CancelUploadInformation] =
+    Action.async(validateJson[CancelUploadInformation]) { implicit request =>
+      val dataSourceIdFox = uploadService.isKnownUpload(request.body.uploadId).flatMap {
+        case false => Fox.failure("dataSet.upload.validation.failed")
+        case true  => uploadService.getDataSourceIdByUploadId(request.body.uploadId)
+      }
+      dataSourceIdFox.flatMap { dataSourceId =>
+        accessTokenService.validateAccess(UserAccessRequest.deleteDataSource(dataSourceId), Some(token)) {
+          AllowRemoteOrigin {
+            for {
+              _ <- remoteWebKnossosClient.deleteDataSource(dataSourceId) ?~> "dataSet.delete.webknossos.failed"
+              _ <- uploadService.cancelUpload(request.body) ?~> "Could not cancel the upload."
+            } yield Ok
+          }
+        }
+      }
+    }
+
   @ApiOperation(hidden = true, value = "")
   def fetchSampleDataSource(token: Option[String], organizationName: String, dataSetName: String): Action[AnyContent] =
     Action.async { implicit request =>
@@ -463,14 +498,15 @@ Expects:
   @ApiOperation(hidden = true, value = "")
   def deleteOnDisk(token: Option[String], organizationName: String, dataSetName: String): Action[AnyContent] =
     Action.async { implicit request =>
-      accessTokenService.validateAccess(UserAccessRequest.deleteDataSource(DataSourceId(dataSetName, organizationName)),
-                                        token) {
+      val dataSourceId = DataSourceId(dataSetName, organizationName)
+      accessTokenService.validateAccess(UserAccessRequest.deleteDataSource(dataSourceId), token) {
         AllowRemoteOrigin {
           for {
-            _ <- binaryDataServiceHolder.binaryDataService.deleteOnDisk(organizationName,
-                                                                        dataSetName,
-                                                                        reason =
-                                                                          Some("the user wants to delete the dataset"))
+            _ <- binaryDataServiceHolder.binaryDataService.deleteOnDisk(
+              organizationName,
+              dataSetName,
+              reason = Some("the user wants to delete the dataset")) ?~> "dataSet.delete.failed"
+            _ <- dataSourceRepository.cleanUpDataSource(dataSourceId) // also frees the name in the wk-side database
           } yield Ok
         }
       }

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala
index 77142e93d0..4da04b0ae1 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala
@@ -5,38 +5,42 @@ import net.liftweb.common.Full
 
 import java.io.File
 import java.nio.file.{Files, Path}
+import scala.annotation.tailrec
 import scala.concurrent.ExecutionContext
 
-trait DataSetDeleter extends LazyLogging {
+trait DataSetDeleter extends LazyLogging with DirectoryConstants {
   def dataBaseDir: Path
 
   def deleteOnDisk(organizationName: String,
                    dataSetName: String,
                    isInConversion: Boolean = false,
                    reason: Option[String] = None)(implicit ec: ExecutionContext): Fox[Unit] = {
+    @tailrec
+    def deleteWithRetry(sourcePath: Path, targetPath: Path, retryCount: Int = 0): Fox[Unit] =
+      try {
+        val deduplicatedTargetPath =
+          if (retryCount == 0) targetPath else targetPath.resolveSibling(targetPath.getFileName + s"($retryCount)")
+        val path = Files.move(sourcePath, deduplicatedTargetPath)
+        if (path == null) {
+          throw new Exception("Deleting dataset failed")
+        }
+        logger.info(s"Successfully moved dataset from $sourcePath to $targetPath...")
+        Fox.successful(())
+      } catch {
+        case _: java.nio.file.FileAlreadyExistsException => deleteWithRetry(sourcePath, targetPath, retryCount + 1)
+        case e: Exception                                => Fox.failure(s"Deleting dataset failed: ${e.toString}", Full(e))
+      }
+
     val dataSourcePath =
-      if (isInConversion) dataBaseDir.resolve(organizationName).resolve(".forConversion").resolve(dataSetName)
+      if (isInConversion) dataBaseDir.resolve(organizationName).resolve(forConversionDir).resolve(dataSetName)
       else dataBaseDir.resolve(organizationName).resolve(dataSetName)
-    val trashPath: Path = dataBaseDir.resolve(organizationName).resolve(".trash")
+    val trashPath: Path = dataBaseDir.resolve(organizationName).resolve(trashDir)
     val targetPath = trashPath.resolve(dataSetName)
    new File(trashPath.toString).mkdirs()
 
    logger.info(
      s"Deleting dataset by moving it from $dataSourcePath to $targetPath${if (reason.isDefined) s" because ${reason.getOrElse("")}"
      else "..."}")
-
-    try {
-      val path = Files.move(
-        dataSourcePath,
-        targetPath
-      )
-      if (path == null) {
-        throw new Exception("Deleting dataset failed")
-      }
-      logger.info(s"Successfully moved dataset from $dataSourcePath to $targetPath...")
-      Fox.successful(())
-    } catch {
-      case e: Exception => Fox.failure(s"Deleting dataset failed: ${e.toString}", Full(e))
-    }
+    deleteWithRetry(dataSourcePath, targetPath)
   }
 }
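`deleteWithRetry` moves a dataset into the organization's `.trash` directory and, if a directory of that name is already there, retries with a numeric suffix instead of failing. The naming scheme in isolation (TypeScript sketch; `my-dataset` is a made-up name, and the real code appends the suffix to `targetPath.getFileName`):

```typescript
// On a name collision in .trash, retry with "(retryCount)" appended.
function deduplicatedTrashName(dataSetName: string, retryCount: number): string {
  return retryCount === 0 ? dataSetName : `${dataSetName}(${retryCount})`;
}

console.log(deduplicatedTrashName("my-dataset", 0)); // my-dataset
console.log(deduplicatedTrashName("my-dataset", 1)); // my-dataset(1)
console.log(deduplicatedTrashName("my-dataset", 2)); // my-dataset(2)
```

Retrying instead of overwriting preserves earlier trashed copies when the same dataset name is deleted repeatedly, which can plausibly happen now that cancelled or failed uploads free the name for re-use.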
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DirectoryConstants.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DirectoryConstants.scala
new file mode 100644
index 0000000000..bacbabe09d
--- /dev/null
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DirectoryConstants.scala
@@ -0,0 +1,7 @@
+package com.scalableminds.webknossos.datastore.helpers
+
+trait DirectoryConstants {
+  val forConversionDir = ".forConversion"
+  val trashDir = ".trash"
+  val uploadingDir: String = ".uploading"
+}

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebKnossosClient.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebKnossosClient.scala
index 2330524004..6ef33a3193 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebKnossosClient.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DSRemoteWebKnossosClient.scala
@@ -89,8 +89,8 @@ class DSRemoteWebKnossosClient @Inject()(
       .post(info)
   } yield ()
 
-  def deleteErroneousDataSource(id: DataSourceId): Fox[_] =
-    rpc(s"$webKnossosUri/api/datastores/$dataStoreName/deleteErroneous").addQueryString("key" -> dataStoreKey).post(id)
+  def deleteDataSource(id: DataSourceId): Fox[_] =
+    rpc(s"$webKnossosUri/api/datastores/$dataStoreName/deleteDataset").addQueryString("key" -> dataStoreKey).post(id)
 
   def getJobExportProperties(jobId: String): Fox[JobExportProperties] =
     rpc(s"$webKnossosUri/api/datastores/$dataStoreName/jobExportProperties")

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceRepository.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceRepository.scala
index 7264f60c52..e249883c2b 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceRepository.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/DataSourceRepository.scala
@@ -39,6 +39,6 @@ class DataSourceRepository @Inject()(
   def cleanUpDataSource(dataSourceId: DataSourceId): Fox[Unit] =
     for {
       _ <- Fox.successful(remove(dataSourceId))
-      _ <- remoteWebKnossosClient.deleteErroneousDataSource(dataSourceId)
+      _ <- remoteWebKnossosClient.deleteDataSource(dataSourceId)
     } yield ()
 }
diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/SampleDataSourceService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/SampleDataSourceService.scala
index 3daa06131f..664a29657d 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/SampleDataSourceService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/SampleDataSourceService.scala
@@ -76,12 +76,9 @@ class SampleDataSourceService @Inject()(rpc: RPC,
               tmpfile.write(bytes.toArray)
               tmpfile.close()
 
-              uploadService
-                .finishUpload(UploadInformation(downloadId, id.name, id.team, None, needsConversion = None),
-                              checkCompletion = false)
-                .map { _ =>
-                  runningDownloads.remove(id)
-                }
+              uploadService.finishUpload(UploadInformation(downloadId, None), checkCompletion = false).map { _ =>
+                runningDownloads.remove(id)
+              }
             case _ => runningDownloads.remove(id)
           }
         } yield ()

diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/UploadService.scala
index bf8ff845fb..97b5ede013 100644
--- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/UploadService.scala
+++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/UploadService.scala
@@ -2,20 +2,19 @@ package com.scalableminds.webknossos.datastore.services
 
 import java.io.{File, RandomAccessFile}
 import java.nio.file.{Files, Path}
-
 import com.google.inject.Inject
 import com.scalableminds.util.io.PathUtils.ensureDirectoryBox
 import com.scalableminds.util.io.{PathUtils, ZipIO}
 import com.scalableminds.util.tools.{Fox, FoxImplicits}
 import com.scalableminds.webknossos.datastore.dataformats.wkw.{WKWDataLayer, WKWSegmentationLayer}
-import com.scalableminds.webknossos.datastore.helpers.DataSetDeleter
+import com.scalableminds.webknossos.datastore.helpers.{DataSetDeleter, DirectoryConstants}
 import com.scalableminds.webknossos.datastore.models.datasource._
 import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore
 import com.typesafe.scalalogging.LazyLogging
 import net.liftweb.common._
 import net.liftweb.util.Helpers.tryo
 import org.apache.commons.io.FileUtils
-import play.api.libs.json.{Json, OFormat}
+import play.api.libs.json.{Json, OFormat, Reads}
 
 import scala.concurrent.ExecutionContext.Implicits.global
 
@@ -40,29 +39,37 @@ object LinkedLayerIdentifier {
   implicit val jsonFormat: OFormat[LinkedLayerIdentifier] = Json.format[LinkedLayerIdentifier]
 }
 
-case class UploadInformation(uploadId: String,
-                             name: String,
-                             organization: String,
-                             layersToLink: Option[List[LinkedLayerIdentifier]],
-                             needsConversion: Option[Boolean])
+case class LinkedLayerIdentifiers(layersToLink: Option[List[LinkedLayerIdentifier]])
+object LinkedLayerIdentifiers {
+  implicit val jsonFormat: OFormat[LinkedLayerIdentifiers] = Json.format[LinkedLayerIdentifiers]
+}
+
+case class UploadInformation(uploadId: String, needsConversion: Option[Boolean])
 object UploadInformation {
   implicit val jsonFormat: OFormat[UploadInformation] = Json.format[UploadInformation]
 }
 
+case class CancelUploadInformation(uploadId: String)
+object CancelUploadInformation {
+  implicit val jsonFormat: OFormat[CancelUploadInformation] = Json.format[CancelUploadInformation]
+}
+
 class UploadService @Inject()(dataSourceRepository: DataSourceRepository,
                               dataSourceService: DataSourceService,
                               runningUploadMetadataStore: DataStoreRedisStore)
     extends LazyLogging
     with DataSetDeleter
+    with DirectoryConstants
    with FoxImplicits {
 
  val dataBaseDir: Path = dataSourceService.dataBaseDir
-  private val uploadingDir: String = ".uploading"
 
  /* Redis stores different information for each upload, with different prefixes in the keys:
   *  uploadId -> fileCount
-   *  uploadId -> set(fileName),
+   *  uploadId -> set(fileName)
+   *  uploadId -> dataSourceId
+   *  uploadId -> linkedLayerIdentifier
   *  uploadId#fileName -> totalChunkCount
   *  uploadId#fileName -> set(chunkIndices)
   * Note that Redis synchronizes all db accesses, so we do not need to do it
@@ -71,6 +78,10 @@
     s"upload___${uploadId}___fileCount"
   private def redisKeyForFileNameSet(uploadId: String): String =
     s"upload___${uploadId}___fileNameSet"
+  private def redisKeyForDataSourceId(uploadId: String): String =
+    s"upload___${uploadId}___dataSourceId"
+  private def redisKeyForLinkedLayerIdentifier(uploadId: String): String =
+    s"upload___${uploadId}___linkedLayerIdentifier"
   private def redisKeyForFileChunkCount(uploadId: String, fileName: String): String =
     s"upload___${uploadId}___file___${fileName}___chunkCount"
   private def redisKeyForFileChunkSet(uploadId: String, fileName: String): String =
@@ -88,10 +99,21 @@
   def uploadDirectory(organizationName: String, uploadId: String): Path =
     dataBaseDir.resolve(organizationName).resolve(uploadingDir).resolve(uploadId)
 
+  def getDataSourceIdByUploadId(uploadId: String): Fox[DataSourceId] =
+    getObjectFromRedis[DataSourceId](redisKeyForDataSourceId(uploadId))
+
   def reserveUpload(reserveUploadInformation: ReserveUploadInformation): Fox[Unit] =
     for {
       _ <- runningUploadMetadataStore.insert(redisKeyForFileCount(reserveUploadInformation.uploadId),
                                              String.valueOf(reserveUploadInformation.totalFileCount))
+      _ <- runningUploadMetadataStore.insert(
+        redisKeyForDataSourceId(reserveUploadInformation.uploadId),
+        Json.stringify(Json.toJson(DataSourceId(reserveUploadInformation.name, reserveUploadInformation.organization)))
+      )
+      _ <- runningUploadMetadataStore.insert(
+        redisKeyForLinkedLayerIdentifier(reserveUploadInformation.uploadId),
+        Json.stringify(Json.toJson(LinkedLayerIdentifiers(reserveUploadInformation.layersToLink)))
+      )
       _ = logger.info(
         f"Reserving dataset upload of ${reserveUploadInformation.organization}/${reserveUploadInformation.name} with id ${reserveUploadInformation.uploadId}...")
     } yield ()
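After `reserveUpload`, everything later requests need is keyed by `uploadId` in Redis, which is why chunk, finish, and cancel requests no longer carry the dataset name or organization. The key scheme, mirroring the `redisKeyFor*` helpers above in TypeScript (`demo-upload` and the file path are made-up values):

```typescript
const uploadId = "demo-upload";

// Per-upload keys: file count, the set of file names, the DataSourceId JSON,
// and the LinkedLayerIdentifiers JSON.
const keys = {
  fileCount: `upload___${uploadId}___fileCount`,
  fileNameSet: `upload___${uploadId}___fileNameSet`,
  dataSourceId: `upload___${uploadId}___dataSourceId`,
  layersToLink: `upload___${uploadId}___linkedLayerIdentifier`,
};

// Per-file keys: total chunk count and the set of received chunk indices.
function chunkKeys(fileName: string) {
  return {
    chunkCount: `upload___${uploadId}___file___${fileName}___chunkCount`,
    chunkSet: `upload___${uploadId}___file___${fileName}___chunkSet`,
  };
}

console.log(keys.dataSourceId, chunkKeys("color/1/z0/y0/x0.wkw").chunkSet);
```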
                                                                  String.valueOf(currentChunkNumber))
-    } yield isNewChunk
-
-    isNewChunk map {
-      case true =>
+    } yield
+      if (isNewChunk) {
         try {
           val bytes = Files.readAllBytes(chunkFile.toPath)
           this.synchronized {
@@ -138,29 +154,43 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository,
             tempFile.write(bytes)
             tempFile.close()
           }
+          Fox.successful(())
         } catch {
           case e: Exception =>
             runningUploadMetadataStore.removeFromSet(redisKeyForFileChunkSet(uploadId, filePath),
                                                      String.valueOf(currentChunkNumber))
-            val errorMsg = s"Error receiving chunk $currentChunkNumber for upload ${datasourceId.name}: ${e.getMessage}"
+            val errorMsg = s"Error receiving chunk $currentChunkNumber for upload ${dataSourceId.name}: ${e.getMessage}"
             logger.warn(errorMsg)
-            return Fox.failure(errorMsg)
+            Fox.failure(errorMsg)
         }
-      case false => ()
-    }
+      } else Fox.successful(())
+  }
+
+  def cancelUpload(cancelUploadInformation: CancelUploadInformation): Fox[Unit] = {
+    val uploadId = cancelUploadInformation.uploadId
+    for {
+      dataSourceId <- getDataSourceIdByUploadId(uploadId)
+      knownUpload <- isKnownUpload(uploadId)
+    } yield
+      if (knownUpload) {
+        logger.info(f"Cancelling dataset upload of ${dataSourceId.team}/${dataSourceId.name} with id ${uploadId}...")
+        removeFromRedis(uploadId).flatMap(_ =>
+          PathUtils.deleteDirectoryRecursively(uploadDirectory(dataSourceId.team, uploadId)))
+      } else {
+        Fox.failure("Unknown upload")
+      }
   }

   def finishUpload(uploadInformation: UploadInformation, checkCompletion: Boolean = true): Fox[(DataSourceId, Long)] = {
     val uploadId = uploadInformation.uploadId

-    val dataSourceId = DataSourceId(uploadInformation.name, uploadInformation.organization)
-    val datasetNeedsConversion = uploadInformation.needsConversion.getOrElse(false)
-    val uploadDir = uploadDirectory(uploadInformation.organization, uploadId)
-    val unpackToDir = dataSourceDirFor(dataSourceId, datasetNeedsConversion)
-
-    logger.info(
-      s"Finishing dataset upload of ${uploadInformation.organization}/${uploadInformation.name} with id $uploadId...")
     for {
+      dataSourceId <- getDataSourceIdByUploadId(uploadId)
+      datasetNeedsConversion = uploadInformation.needsConversion.getOrElse(false)
+      uploadDir = uploadDirectory(dataSourceId.team, uploadId)
+      unpackToDir = dataSourceDirFor(dataSourceId, datasetNeedsConversion)
+
+      _ = logger.info(s"Finishing dataset upload of ${dataSourceId.team}/${dataSourceId.name} with id $uploadId...")
       _ <- Fox.runIf(checkCompletion)(ensureAllChunksUploaded(uploadId))
       _ <- ensureDirectoryBox(unpackToDir.getParent) ?~> "dataSet.import.fileAccessDenied"
       unpackResult <- unpackDataset(uploadDir, unpackToDir).futureBox
@@ -169,12 +199,16 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository,
                              dataSourceId,
                              datasetNeedsConversion,
                              label = s"unpacking dataset to $unpackToDir")
-      postProcessingResult <- postProcessUploadedDataSource(datasetNeedsConversion, unpackToDir, uploadInformation).futureBox
+      linkedLayerInfo <- getObjectFromRedis[LinkedLayerIdentifiers](redisKeyForLinkedLayerIdentifier(uploadId))
+      postProcessingResult <- postProcessUploadedDataSource(datasetNeedsConversion,
+                                                            unpackToDir,
+                                                            dataSourceId,
+                                                            linkedLayerInfo.layersToLink).futureBox
       _ <- cleanUpOnFailure(postProcessingResult,
                             dataSourceId,
                             datasetNeedsConversion,
                             label = s"processing dataset at $unpackToDir")
-      dataSource = dataSourceService.dataSourceFromFolder(unpackToDir, uploadInformation.organization)
+      dataSource = dataSourceService.dataSourceFromFolder(unpackToDir, dataSourceId.team)
      _ <- 
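      /* Sketch of the Redis round-trip that replaces the old UploadInformation
         fields: reserveUpload stores the DataSourceId as JSON, and finishUpload /
         cancelUpload read it back through the generic getObjectFromRedis helper
         further below. The key and values here are invented examples:

           _ <- runningUploadMetadataStore.insert(
                  "upload___42___dataSourceId",
                  Json.stringify(Json.toJson(DataSourceId("my-dataset", "sample-org"))))
           dataSourceId <- getObjectFromRedis[DataSourceId]("upload___42___dataSourceId")
           // dataSourceId.name == "my-dataset", dataSourceId.team == "sample-org"
      */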
dataSourceRepository.updateDataSource(dataSource) dataSetSizeBytes <- tryo(FileUtils.sizeOfDirectoryAsBigInteger(new File(unpackToDir.toString)).longValue) } yield (dataSourceId, dataSetSizeBytes) @@ -182,16 +216,15 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, private def postProcessUploadedDataSource(datasetNeedsConversion: Boolean, unpackToDir: Path, - uploadInformation: UploadInformation) = + dataSourceId: DataSourceId, + layersToLink: Option[List[LinkedLayerIdentifier]]) = if (datasetNeedsConversion) Fox.successful(()) else { for { _ <- tryo(addLayerAndResolutionDirIfMissing(unpackToDir)).toFox - _ <- addSymlinksToOtherDatasetLayers(unpackToDir, uploadInformation.layersToLink.getOrElse(List.empty)) - _ <- addLinkedLayersToDataSourceProperties(unpackToDir, - uploadInformation.organization, - uploadInformation.layersToLink.getOrElse(List.empty)) + _ <- addSymlinksToOtherDatasetLayers(unpackToDir, layersToLink.getOrElse(List.empty)) + _ <- addLinkedLayersToDataSourceProperties(unpackToDir, dataSourceId.team, layersToLink.getOrElse(List.empty)) } yield () } @@ -233,7 +266,7 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, private def dataSourceDirFor(dataSourceId: DataSourceId, datasetNeedsConversion: Boolean): Path = { val dataSourceDir = if (datasetNeedsConversion) - dataBaseDir.resolve(dataSourceId.team).resolve(".forConversion").resolve(dataSourceId.name) + dataBaseDir.resolve(dataSourceId.team).resolve(forConversionDir).resolve(dataSourceId.name) else dataBaseDir.resolve(dataSourceId.team).resolve(dataSourceId.name) dataSourceDir @@ -353,6 +386,10 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, this.synchronized { PathUtils.deleteDirectoryRecursively(uploadDir) } + removeFromRedis(uploadId) + } + + private def removeFromRedis(uploadId: String): Fox[Unit] = for { fileNames <- runningUploadMetadataStore.findSet(redisKeyForFileNameSet(uploadId)) _ <- Fox.serialCombined(fileNames.toList) { fileName => @@ -363,7 +400,6 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, _ <- runningUploadMetadataStore.remove(redisKeyForFileCount(uploadId)) _ <- runningUploadMetadataStore.remove(redisKeyForFileNameSet(uploadId)) } yield () - } private def cleanUpOrphanUploads(): Fox[Unit] = for { @@ -391,4 +427,10 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, } yield () } + private def getObjectFromRedis[T: Reads](key: String): Fox[T] = + for { + objectStringOption <- runningUploadMetadataStore.find(key) + obj <- objectStringOption.toFox.flatMap(o => Json.fromJson[T](Json.parse(o)).asOpt) + } yield obj + } diff --git a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes index 2a4b268930..2c1ec10503 100644 --- a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes +++ b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes @@ -41,6 +41,7 @@ GET /datasets POST /datasets @com.scalableminds.webknossos.datastore.controllers.DataSourceController.uploadChunk(token: String) POST /datasets/reserveUpload @com.scalableminds.webknossos.datastore.controllers.DataSourceController.reserveUpload(token: String) POST /datasets/finishUpload @com.scalableminds.webknossos.datastore.controllers.DataSourceController.finishUpload(token: String) +POST /datasets/cancelUpload 
@com.scalableminds.webknossos.datastore.controllers.DataSourceController.cancelUpload(token: String) GET /datasets/:organizationName/:dataSetName/readInboxDataSourceLike @com.scalableminds.webknossos.datastore.controllers.DataSourceController.read(token: Option[String], organizationName: String, dataSetName: String, returnFormatLike: Boolean ?= true) GET /datasets/:organizationName/:dataSetName/readInboxDataSource @com.scalableminds.webknossos.datastore.controllers.DataSourceController.read(token: Option[String], organizationName: String, dataSetName: String, returnFormatLike: Boolean ?= false) GET /datasets/sample/:organizationName @com.scalableminds.webknossos.datastore.controllers.DataSourceController.listSampleDataSources(token: Option[String], organizationName: String) From e498fd7cf8322b633dbf6a3b5679922976e86b51 Mon Sep 17 00:00:00 2001 From: Philipp Otto Date: Mon, 31 Jan 2022 15:02:54 +0100 Subject: [PATCH 4/7] Increase maximum bucket count limit for segmentation layers (#6000) * double maximum bucket count limit for segmentation layers * also send start time of session to airbrake * update changelog --- CHANGELOG.unreleased.md | 1 + frontend/javascripts/libs/error_handling.js | 6 +++++- .../oxalis/model/bucket_data_handling/data_cube.js | 3 +++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index e0dc9c1a68..2c2f0c67c8 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -16,6 +16,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released ### Changed - Upgraded webpack build tool to v5 and all other webpack related dependencies to their latest version. Enabled persistent caching which speeds up server restarts during development as well as production builds. [#5969](https://github.com/scalableminds/webknossos/pull/5969) +- Improved stability when quickly volume-annotating large structures. [#6000](https://github.com/scalableminds/webknossos/pull/6000) - The front-end API `labelVoxels` returns a promise now which fulfills as soon as the label operation was carried out. [#5955](https://github.com/scalableminds/webknossos/pull/5955) - When changing which layers are visible in an annotation, this setting is persisted in the annotation, so when you share it, viewers will see the same visibility configuration. [#5967](https://github.com/scalableminds/webknossos/pull/5967) diff --git a/frontend/javascripts/libs/error_handling.js b/frontend/javascripts/libs/error_handling.js index 7165ee1894..98149eacd6 100644 --- a/frontend/javascripts/libs/error_handling.js +++ b/frontend/javascripts/libs/error_handling.js @@ -63,6 +63,7 @@ class ErrorHandling { commitHash: ?string; airbrake: typeof Notifier; numberOfErrors: number = 0; + sessionStartTime: Date = new Date(); initialize(options: ErrorHandlingOptions) { if (options == null) { @@ -166,7 +167,10 @@ class ErrorHandling { const actionLog = getActionLog(); const error = maybeError instanceof Error ? 
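    /* Sketch of what the sessionStartTime parameter (attached to the notify call
       below) enables on the receiving side: every Airbrake notice carries when the
       session began, so reports can be bucketed by session age. This filter is a
       hypothetical consumer, not code from this patch:

         airbrake.addFilter(notice => {
           const ageMs = Date.now() - new Date(notice.params.sessionStartTime).getTime();
           notice.context.sessionAgeMinutes = Math.round(ageMs / 60000);
           return notice;
         });
    */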
maybeError : new Error(JSON.stringify(maybeError)); - this.airbrake.notify({ error, params: { ...optParams, actionLog } }); + this.airbrake.notify({ + error, + params: { ...optParams, actionLog, sessionStartTime: this.sessionStartTime }, + }); } assertExtendContext(additionalContext: Object) { diff --git a/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js b/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js index 884d6077c6..004bb9cc13 100644 --- a/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js +++ b/frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js @@ -111,6 +111,9 @@ class DataCube { _.extend(this, BackboneEvents); this.cubes = []; + if (isSegmentation) { + this.MAXIMUM_BUCKET_COUNT *= 2; + } this.buckets = new Array(this.MAXIMUM_BUCKET_COUNT); // Initializing the cube-arrays with boundaries From 573b1a20165d9d67f47566ef9ee806cc2fccd1ad Mon Sep 17 00:00:00 2001 From: Florian M Date: Mon, 31 Jan 2022 16:46:26 +0100 Subject: [PATCH 5/7] Allow Download of Public Annotations While Logged Out (#6001) --- CHANGELOG.unreleased.md | 1 + app/controllers/AnnotationIOController.scala | 24 ++++++++++++-------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index 2c2f0c67c8..efe90dd12d 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -19,6 +19,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released - Improved stability when quickly volume-annotating large structures. [#6000](https://github.com/scalableminds/webknossos/pull/6000) - The front-end API `labelVoxels` returns a promise now which fulfills as soon as the label operation was carried out. [#5955](https://github.com/scalableminds/webknossos/pull/5955) - When changing which layers are visible in an annotation, this setting is persisted in the annotation, so when you share it, viewers will see the same visibility configuration. [#5967](https://github.com/scalableminds/webknossos/pull/5967) +- Downloading public annotations is now also allowed without being authenticated. [#6001](https://github.com/scalableminds/webknossos/pull/6001) ### Fixed - Fixed volume-related bugs which could corrupt the volume data in certain scenarios. 
[#5955](https://github.com/scalableminds/webknossos/pull/5955) diff --git a/app/controllers/AnnotationIOController.scala b/app/controllers/AnnotationIOController.scala index 5049aec296..333e08c163 100755 --- a/app/controllers/AnnotationIOController.scala +++ b/app/controllers/AnnotationIOController.scala @@ -238,11 +238,11 @@ Expects: skeletonVersion: Option[Long], volumeVersion: Option[Long], skipVolumeData: Option[Boolean]): Action[AnyContent] = - sil.SecuredAction.async { implicit request => + sil.UserAwareAction.async { implicit request => logger.trace(s"Requested download for annotation: $typ/$id") for { identifier <- AnnotationIdentifier.parse(typ, id) - _ = analyticsService.track(DownloadAnnotationEvent(request.identity, id, typ)) + _ = request.identity.foreach(user => analyticsService.track(DownloadAnnotationEvent(user, id, typ))) result <- identifier.annotationType match { case AnnotationType.View => Fox.failure("Cannot download View annotation") case AnnotationType.CompoundProject => downloadProject(id, request.identity, skipVolumeData.getOrElse(false)) @@ -263,7 +263,7 @@ Expects: // TODO: select versions per layer private def downloadExplorational(annotationId: String, typ: String, - issuingUser: User, + issuingUser: Option[User], skeletonVersion: Option[Long], volumeVersion: Option[Long], skipVolumeData: Boolean)(implicit ctx: DBAccessContext) = { @@ -347,9 +347,11 @@ Expects: } } - private def downloadProject(projectId: String, user: User, skipVolumeData: Boolean)(implicit ctx: DBAccessContext, - m: MessagesProvider) = + private def downloadProject(projectId: String, userOpt: Option[User], skipVolumeData: Boolean)( + implicit ctx: DBAccessContext, + m: MessagesProvider) = for { + user <- userOpt.toFox ?~> Messages("notAllowed") ~> FORBIDDEN projectIdValidated <- ObjectId.parse(projectId) project <- projectDAO.findOne(projectIdValidated) ?~> Messages("project.notFound", projectId) ~> NOT_FOUND _ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, project._team)) ?~> "notAllowed" ~> FORBIDDEN @@ -360,8 +362,9 @@ Expects: Ok.sendFile(file, inline = false, fileName = _ => Some(TextUtils.normalize(project.name + "_nmls.zip"))) } - private def downloadTask(taskId: String, user: User, skipVolumeData: Boolean)(implicit ctx: DBAccessContext, - m: MessagesProvider) = { + private def downloadTask(taskId: String, userOpt: Option[User], skipVolumeData: Boolean)( + implicit ctx: DBAccessContext, + m: MessagesProvider) = { def createTaskZip(task: Task): Fox[TemporaryFile] = annotationService.annotationsFor(task._id).flatMap { annotations => val finished = annotations.filter(_.state == Finished) @@ -369,6 +372,7 @@ Expects: } for { + user <- userOpt.toFox ?~> Messages("notAllowed") ~> FORBIDDEN task <- taskDAO.findOne(ObjectId(taskId)).toFox ?~> Messages("task.notFound") ~> NOT_FOUND project <- projectDAO.findOne(task._project) ?~> Messages("project.notFound") ~> NOT_FOUND _ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, project._team)) ?~> Messages("notAllowed") ~> FORBIDDEN @@ -379,8 +383,9 @@ Expects: } } - private def downloadTaskType(taskTypeId: String, user: User, skipVolumeData: Boolean)( - implicit ctx: DBAccessContext) = { + private def downloadTaskType(taskTypeId: String, userOpt: Option[User], skipVolumeData: Boolean)( + implicit ctx: DBAccessContext, + m: MessagesProvider) = { def createTaskTypeZip(taskType: TaskType) = for { tasks <- taskDAO.findAllByTaskType(taskType._id) @@ -393,6 +398,7 @@ Expects: } yield zip for { + user <- userOpt.toFox ?~> 
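      /* The recurring pattern in this controller after the switch to UserAwareAction:
         request.identity is now an Option[User], and endpoints that must stay
         restricted lift it into Fox so that a missing user fails the chain with 403.
         A condensed sketch (method names invented):

           def restrictedDownload(userOpt: Option[User]): Fox[Unit] =
             for {
               user <- userOpt.toFox ?~> "notAllowed" ~> FORBIDDEN // anonymous -> 403
               _ <- doPrivilegedWork(user)
             } yield ()

         Public annotation downloads take the other branch: they never unwrap the Option. */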
Messages("notAllowed") ~> FORBIDDEN taskTypeIdValidated <- ObjectId.parse(taskTypeId) ?~> "taskType.id.invalid" taskType <- taskTypeDAO.findOne(taskTypeIdValidated) ?~> "taskType.notFound" ~> NOT_FOUND _ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, taskType._team)) ?~> "notAllowed" ~> FORBIDDEN From ccd10db8796046573a052802b8ddedbfe73035cc Mon Sep 17 00:00:00 2001 From: Jonathan Striebel Date: Tue, 1 Feb 2022 12:36:17 +0100 Subject: [PATCH 6/7] ship expanded testdata (#6004) * add test/db to docker image, add more timespans to testdata * add yarn pretty-backend to README * update e2e snapshots --- README.md | 3 ++ .../backend-snapshot-tests/users.e2e.js.md | 39 +++++++++++++++++- .../backend-snapshot-tests/users.e2e.js.snap | Bin 2193 -> 2404 bytes project/AssetCompilation.scala | 24 ++++++----- test/db/timeSpans.csv | 10 +++++ 5 files changed, 64 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index ddab0bb3cc..e9c32b680a 100644 --- a/README.md +++ b/README.md @@ -179,6 +179,9 @@ yarn run lint # Format frontend code yarn run pretty +# Format backend code +yarn pretty-backend + # Frontend type checking yarn flow diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.md b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.md index 87d2bb9b52..befdb1ae2b 100644 --- a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.md +++ b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.md @@ -403,6 +403,34 @@ Generated by [AVA](https://avajs.dev). ## users-loggedTimes [ + { + durationInSeconds: 265, + paymentInterval: { + month: 8, + year: 2018, + }, + }, + { + durationInSeconds: 158, + paymentInterval: { + month: 5, + year: 2021, + }, + }, + { + durationInSeconds: 12, + paymentInterval: { + month: 11, + year: 2021, + }, + }, + { + durationInSeconds: 510, + paymentInterval: { + month: 1, + year: 2021, + }, + }, { durationInSeconds: 58, paymentInterval: { @@ -411,10 +439,17 @@ Generated by [AVA](https://avajs.dev). 
},
   },
   {
-    durationInSeconds: 265,
+    durationInSeconds: 24,
+    paymentInterval: {
+      month: 3,
+      year: 2021,
+    },
+  },
+  {
+    durationInSeconds: 14,
     paymentInterval: {
       month: 8,
-      year: 2018,
+      year: 2021,
     },
   },
 ]
diff --git a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.snap b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.snap
index b7c67b5b54e57973819c1e6f610de8cf8f74d1c6..98dd52c76046cd366beed4cc84f7d522d4e6b50b 100644
Binary files a/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.snap and b/frontend/javascripts/test/snapshots/public/test-bundle/test/backend-snapshot-tests/users.e2e.js.snap differ
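The AssetCompilation hunk below replaces a hand-rolled, flat file-copy loop with `deleteRecursively`/`copyRecursively` helpers and clears the destination first, so stale files from earlier builds cannot linger in the stage directory. A minimal sketch of what such helpers can look like with java.nio (the real ones live elsewhere in AssetCompilation.scala and may differ):

    import java.nio.file.{Files, Path, StandardCopyOption}

    // Delete a directory tree, children before parents (reverse walk order).
    def deleteRecursively(dir: Path): Unit =
      if (Files.exists(dir))
        Files.walk(dir).sorted(java.util.Comparator.reverseOrder[Path]()).forEach(p => Files.delete(p))

    // Mirror src into dst, re-creating directories and overwriting files.
    def copyRecursively(src: Path, dst: Path): Unit =
      Files.walk(src).forEach { p =>
        val target = dst.resolve(src.relativize(p))
        if (Files.isDirectory(p)) Files.createDirectories(target)
        else Files.copy(p, target, StandardCopyOption.REPLACE_EXISTING)
      }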
diff --git a/project/AssetCompilation.scala b/project/AssetCompilation.scala
index 8d5fee5519..9b909d1057 100644
--- a/project/AssetCompilation.scala
+++ b/project/AssetCompilation.scala
@@ -68,22 +68,26 @@ object AssetCompilation {
     try {
       val destination = target.value / "universal" / "stage" / "tools" / "postgres"
       destination.mkdirs
-      (baseDirectory.value / "tools" / "postgres")
-        .listFiles()
-        .foreach(
-          file =>
-            Files.copy(
-              file.toPath,
-              (destination / file.name).toPath,
-              StandardCopyOption.REPLACE_EXISTING
-          )
-        )
+      deleteRecursively(destination)
+      copyRecursively(baseDirectory.value / "tools" / "postgres", destination)
     } catch {
       case e: Exception =>
         streams.value.log
           .error("Could not copy SQL schema to stage dir: " + e.getMessage)
     }

+    // copy test/db
+    try {
+      val destination = target.value / "universal" / "stage" / "test" / "db"
+      destination.mkdirs
+      deleteRecursively(destination)
+      copyRecursively(baseDirectory.value / "test" / "db", destination)
+    } catch {
+      case e: Exception =>
+        streams.value.log
+          .error("Could not copy test database entries to stage dir: " + e.getMessage)
+    }
+
     // copy node_modules for diff_schema.js
     {
       val nodeModules =
diff --git a/test/db/timeSpans.csv b/test/db/timeSpans.csv
index 661aff7de0..e12bb77572 100644
--- a/test/db/timeSpans.csv
+++ b/test/db/timeSpans.csv
@@ -25,3 +25,13 @@ _id,_user,_annotation,time,lastUpdate,numberOfUpdates,created,isDeleted
 '5b72df4b9400009e0000f110','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',33636,'2018-08-14 15:55:47.94+02',2,'2018-08-14 15:55:14.304+02',f
 '5b72df70940000eb0000f113','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',10826,'2018-08-14 15:55:58.766+02',1,'2018-08-14 15:55:47.94+02',f
 '5b741092940000190400f167','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',0,'2018-08-15 13:37:24.785+02',1,'2018-08-15 13:37:24.785+02',f
+'5b741092940000190400f180','770b9f4d2a7c0e4d008da6ef','68135c192faeb34c0081c05e',32908,'2021-01-15 13:37:24.785+02',1,'2021-01-15 13:38:24.785+02',f
+'5b741092940000190400f181','770b9f4d2a7c0e4d008da6ef','68135c192faeb34c0081c05e',210150,'2021-02-15 13:37:24.785+02',1,'2021-02-15 13:37:24.785+02',f
+'5b741092940000190400f182','970b9f4d2a7c0e4d008da6ef','68135c192faeb34c0081c05e',4213,'2021-03-15 13:37:24.785+02',1,'2021-03-15 13:37:24.785+02',f
+'5b741092940000190400f183','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',124034,'2021-05-15 13:37:24.785+02',1,'2021-05-15 13:38:24.785+02',f
+'5b741092940000190400f191','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',98421,'2021-01-15 13:37:24.785+02',1,'2021-01-15 13:37:24.785+02',f
+'5b741092940000190400f192','570b9f4d2a7c0e4d008da6ef','570ba0092a7c0e980056fe9b',24214,'2021-03-15 13:37:24.785+02',1,'2021-03-15 13:37:24.785+02',f
+'5b741092940000190400f193','570b9f4d2a7c0e4d008da6ef','570ba0092a7c0e980056fe9b',34250,'2021-05-15 13:37:24.785+02',1,'2021-05-15 13:37:24.785+02',f
+'5b741092940000190400f194','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',12087,'2021-11-15 13:37:24.785+02',1,'2021-11-15 13:37:24.785+02',f
+'5b741092940000190400f195','570b9f4d2a7c0e4d008da6ef','68135c192faeb34c0081c05e',14302,'2021-08-15 13:37:24.785+02',1,'2021-08-15 13:37:24.785+02',f
+'5b741092940000190400f196','570b9f4d2a7c0e4d008da6ef','570b9ff12a7c0e980056fe8f',411709,'2021-01-15 13:37:24.785+02',1,'2021-01-15 13:37:24.785+02',f
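The rows added above are what the expanded users-loggedTimes snapshot aggregates: each CSV `time` value is a duration in milliseconds, grouped per user by the month/year of the span and summed into `durationInSeconds` (for example, the two January 2021 spans of user `570b9f4d...`, 98421 + 411709 ms, become the snapshot's 510 seconds). A rough sketch of that aggregation, with invented case classes standing in for the backend's time span model:

    import java.time.OffsetDateTime

    case class Interval(year: Int, month: Int)
    case class TimeSpan(userId: String, durationMillis: Long, created: OffsetDateTime)

    // Group one user's spans by payment interval and sum them up in seconds,
    // which is the shape the users-loggedTimes snapshot asserts on.
    def loggedTimes(spans: Seq[TimeSpan]): Map[Interval, Long] =
      spans
        .groupBy(s => Interval(s.created.getYear, s.created.getMonthValue))
        .map { case (interval, ss) => interval -> ss.map(_.durationMillis).sum / 1000 }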
From 8a0ecac8ef5bb0f6b0dcd988a1e52935feade181 Mon Sep 17 00:00:00 2001
From: Arthur Hilbert <46814136+Dagobert42@users.noreply.github.com>
Date: Wed, 2 Feb 2022 09:58:16 +0100
Subject: [PATCH 7/7] Add option to change active segment (#6006)

* change active segment id from context menu

* yarn pretty

* edit changelog, edit menu item text
---
 CHANGELOG.unreleased.md               |  1 +
 .../segments_tab/segment_list_item.js | 23 +++++++++++++++++++
 .../segments_tab/segments_view.js     |  8 ++++++-
 3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index efe90dd12d..8552adad1a 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -11,6 +11,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released
 [Commits](https://github.com/scalableminds/webknossos/compare/22.02.0...HEAD)
 
 ### Added
+- Added the option to make a segment's ID active via the right-click context menu in the segments list. [#6006](https://github.com/scalableminds/webknossos/pull/6006)
 - Added a button next to the histogram which adapts the contrast and brightness to the currently visible data. [#5961](https://github.com/scalableminds/webknossos/pull/5961)
 - Running uploads can now be cancelled. [#5958](https://github.com/scalableminds/webknossos/pull/5958)
 
diff --git a/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segment_list_item.js b/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segment_list_item.js
index 9bc593d18b..41c1d6dc72 100644
--- a/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segment_list_item.js
+++ b/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segment_list_item.js
@@ -100,6 +100,26 @@ const getComputeMeshAdHocMenuItem = (
   );
 };
 
+const getMakeSegmentActiveMenuItem = (
+  segment,
+  setActiveCell,
+  activeCellId,
+  andCloseContextMenu,
+) => {
+  const disabled = segment.id === activeCellId;
+  const title = disabled
+    ? "This segment ID is already active."
+ : "Make this the active segment ID."; + return ( + andCloseContextMenu(setActiveCell(segment.id, segment.somePosition))} + disabled={disabled} + > + Activate Segment ID + + ); +}; + type Props = { segment: Segment, mapId: number => number, @@ -117,6 +137,7 @@ type Props = { onSelectSegment: Segment => void, visibleSegmentationLayer: ?APISegmentationLayer, changeActiveIsosurfaceId: (?number, Vector3, boolean) => void, + setActiveCell: (number, somePosition?: Vector3) => void, isosurface: ?IsosurfaceInformation, setPosition: (Vector3, boolean) => void, loadPrecomputedMeshForSegment: Segment => Promise, @@ -149,6 +170,7 @@ function _SegmentListItem({ onSelectSegment, visibleSegmentationLayer, changeActiveIsosurfaceId, + setActiveCell, isosurface, setPosition, loadPrecomputedMeshForSegment, @@ -175,6 +197,7 @@ function _SegmentListItem({ visibleSegmentationLayer != null, andCloseContextMenu, )} + {getMakeSegmentActiveMenuItem(segment, setActiveCell, activeCellId, andCloseContextMenu)} ); diff --git a/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segments_view.js b/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segments_view.js index 0d14b4f89d..b6bd5277ab 100644 --- a/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segments_view.js +++ b/frontend/javascripts/oxalis/view/right-border-tabs/segments_tab/segments_view.js @@ -44,7 +44,10 @@ import { updateDatasetSettingAction, updateTemporarySettingAction, } from "oxalis/model/actions/settings_actions"; -import { updateSegmentAction } from "oxalis/model/actions/volumetracing_actions"; +import { + updateSegmentAction, + setActiveCellAction, +} from "oxalis/model/actions/volumetracing_actions"; import DataLayer from "oxalis/model/data_layer"; import DomVisibilityObserver from "oxalis/view/components/dom_visibility_observer"; import Model from "oxalis/model"; @@ -164,6 +167,9 @@ const mapDispatchToProps = (dispatch: Dispatch<*>): * => ({ } dispatch(changeActiveIsosurfaceCellAction(cellId, seedPosition, shouldReload)); }, + setActiveCell(segmentId: number, somePosition?: Vector3) { + dispatch(setActiveCellAction(segmentId, somePosition)); + }, setCurrentMeshFile(layerName: string, fileName: string) { dispatch(updateCurrentMeshFileAction(layerName, fileName)); },