diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index a32b693c8e..93813a9245 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -11,6 +11,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released [Commits](https://github.com/scalableminds/webknossos/compare/24.02.0...HEAD) ### Added +- Added support for uploading N5 and Neuroglancer Precomputed datasets. [#7578](https://github.com/scalableminds/webknossos/pull/7578) ### Changed - Datasets stored in WKW format are no longer loaded with memory mapping, reducing memory demands. [#7528](https://github.com/scalableminds/webknossos/pull/7528) diff --git a/README.md b/README.md index edb53cae2f..d83d54b43a 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ WEBKNOSSOS is an open-source tool for annotating and exploring large 3D image da * Sharing and collaboration features * Proof-Reading tools for working with large (over)-segmentations * [Standalone datastore component](https://github.com/scalableminds/webknossos/tree/master/webknossos-datastore) for flexible deployments -* Supported dataset formats: [WKW](https://github.com/scalableminds/webknossos-wrap), [Neuroglancer Precomputed, and BossDB](https://github.com/scalableminds/webknossos-connect), [Zarr](https://zarr.dev), [N5](https://github.com/saalfeldlab/n5) +* Supported dataset formats: [WKW](https://github.com/scalableminds/webknossos-wrap), [Neuroglancer Precomputed](https://github.com/google/neuroglancer/tree/master/src/datasource/precomputed), [Zarr](https://zarr.dev), [N5](https://github.com/saalfeldlab/n5) * Supported image formats: Grayscale, Segmentation Maps, RGB, Multi-Channel * [Support for 3D mesh rendering and ad-hoc mesh generation](https://docs.webknossos.org/webknossos/mesh_visualization.html) * Export and streaming of any dataset and annotation as [Zarr](https://zarr.dev) to third-party tools diff --git a/app/models/dataset/explore/ExploreRemoteLayerService.scala 
b/app/models/dataset/explore/ExploreRemoteLayerService.scala index c905d5b1f3..29a85380cb 100644 --- a/app/models/dataset/explore/ExploreRemoteLayerService.scala +++ b/app/models/dataset/explore/ExploreRemoteLayerService.scala @@ -107,7 +107,7 @@ class ExploreRemoteLayerService @Inject()(credentialService: CredentialService, credentialIdentifier: Option[String], credentialSecret: Option[String], reportMutable: ListBuffer[String], - requestingUser: User)(implicit ec: ExecutionContext): Fox[List[(DataLayer, Vec3Double)]] = + requestingUser: User)(implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = for { uri <- tryo(new URI(exploreLayerService.removeHeaderFileNamesFromUriSuffix(layerUri))) ?~> s"Received invalid URI: $layerUri" _ <- bool2Fox(uri.getScheme != null) ?~> s"Received invalid URI: $layerUri" @@ -142,11 +142,11 @@ class ExploreRemoteLayerService @Inject()(credentialService: CredentialService, bool2Fox(wkConf.Datastore.localFolderWhitelist.exists(whitelistEntry => uri.getPath.startsWith(whitelistEntry))) ?~> s"Absolute path ${uri.getPath} in local file system is not in path whitelist. 
Consider adding it to datastore.pathWhitelist" } else Fox.successful(()) - private def exploreRemoteLayersForRemotePath( - remotePath: VaultPath, - credentialId: Option[String], - reportMutable: ListBuffer[String], - explorers: List[RemoteLayerExplorer])(implicit ec: ExecutionContext): Fox[List[(DataLayer, Vec3Double)]] = + private def exploreRemoteLayersForRemotePath(remotePath: VaultPath, + credentialId: Option[String], + reportMutable: ListBuffer[String], + explorers: List[RemoteLayerExplorer])( + implicit ec: ExecutionContext): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = explorers match { case Nil => Fox.empty case currentExplorer :: remainingExplorers => diff --git a/docs/animations.md b/docs/animations.md index 32294573c0..d6df720982 100644 --- a/docs/animations.md +++ b/docs/animations.md @@ -16,6 +16,6 @@ Creating an animation is easy: 6. Click the `Start animation` button to launch the animation creation. -Either periodically check the [background jobs page](./jobs.md) or wait for a an email confirmation to download the animation video file. Creating an animation may take a while, depending on the selected bounding box size and the number of included 3D meshes. +Either periodically check the [background jobs page](./jobs.md) or wait for an email confirmation to download the animation video file. Creating an animation may take a while, depending on the selected bounding box size and the number of included 3D meshes. WEBKNOSSOS Team plans and above have access to high definition (HD) resolution videos and more options. diff --git a/docs/automated_analysis.md b/docs/automated_analysis.md index 60e448b296..5454346ca1 100644 --- a/docs/automated_analysis.md +++ b/docs/automated_analysis.md @@ -14,7 +14,7 @@ We would love to integrate analysis solutions for more modalities and use cases. ## Neuron Segmentation As a first trial, WEBKNOSSOS includes neuron segmentation. 
+If you are interested in specialized, automated analysis, image segmentation, object detection etc. then feel free to [contact us](mailto:hello@webknossos.org). The WEBKNOSSOS development team offers [commercial analysis services](https://webknossos.org/services/automated-segmentation) for that.
+For each dataset, we store metadata in a `datasource-properties.json` file.
+Once the data is uploaded (and potentially converted), you can further configure a dataset's [Settings](#configuring-datasets) and double-check layer properties, fine-tune access rights & permissions, or set default values for rendering.
WEBKNOSSOS will launch the main annotation screen allowing you to navigate your dataset, place markers to reconstruct skeletons, or annotate segments as volume annotations. -You can perform various actions depending on the current tool - selectable in the tool bar at the top of the screen. +You can perform various actions depending on the current tool - selectable in the toolbar at the top of the screen. Note that the most important controls are always shown in the status bar at the bottom of your screen. The first tool is the _Move_ tool which allows navigating the dataset by moving the mouse while holding the left mouse button. With the _Skeleton_ tool, a left mouse click can be used to place markers in the data, called nodes. diff --git a/docs/jobs.md b/docs/jobs.md index a83656c867..93e4f59ee0 100644 --- a/docs/jobs.md +++ b/docs/jobs.md @@ -30,4 +30,4 @@ Depending on the job workflow you may: ![Overview of the Jobs page](./images/jobs.jpeg) -We constantly monitor job executions. In rare cases, jobs can fail and we aim to re-run them as quickly as possible. In case you run into any trouble please [contact us](mailto:hello@webknossos.org). \ No newline at end of file +We constantly monitor job executions. In rare cases, jobs can fail, and we aim to re-run them as quickly as possible. In case you run into any trouble please [contact us](mailto:hello@webknossos.org). diff --git a/docs/mesh_visualization.md b/docs/mesh_visualization.md index 6762280e4e..6781eddac4 100644 --- a/docs/mesh_visualization.md +++ b/docs/mesh_visualization.md @@ -37,6 +37,6 @@ Instead of having to slowly compute individual mesh every time you open a datase You can start mesh generation from the `Segments` tab in the right-hand side panel. Click on the little plus button to initiate the mesh generation. We recommend computing the meshes in the medium quality (default) to strike a good balance between visual fidelity, compute time, and GPU resource usage. !!! 
+N5 datasets can be uploaded to WEBKNOSSOS through the [web uploader](./datasets.md#uploading-through-the-web-browser) or [streamed from a remote server or the cloud](./datasets.md#streaming-from-remote-servers-and-the-cloud).
+Neuroglancer Precomputed datasets can be uploaded to WEBKNOSSOS through the [web uploader](./datasets.md#uploading-through-the-web-browser) or [streamed from a remote server or the cloud](./datasets.md#streaming-from-remote-servers-and-the-cloud).
+Note, manual assignments bypass the assignment criteria enforced by the automated system and allow for fine-grained and direct assignments to individual users.
Segments can be painted manually using the WEBKNOSSOS volume annotation tools or created through third-party programs typically resulting in larger segmentations of a dataset. ## Agglomerates An agglomerate is the combination of several (smaller) segments to reconstruct a larger biological structure. Typically an agglomerate combines the fragments of an over-segmentation created by some automated method, e.g. a machine learning system. -Sometimes this is also referred to as a super-voxel graph. \ No newline at end of file +Sometimes this is also referred to as a super-voxel graph. diff --git a/docs/today_i_learned.md b/docs/today_i_learned.md index 4441e03fba..a9c75394e6 100644 --- a/docs/today_i_learned.md +++ b/docs/today_i_learned.md @@ -1,6 +1,6 @@ # Today I learned -We reguarly publish tips and tricks videos for beginners and pros on YouTube to share new features, highlight efficient workflows, and show you hidden gems. +We regularly publish tips and tricks videos for beginners and pros on YouTube to share new features, highlight efficient workflows, and show you hidden gems. Subscribe to our YouTube channel [@webknossos](https://www.youtube.com/@webknossos) or [@webknossos](https://twitter.com/webknossos) on Twitter to stay up-to-date. diff --git a/docs/tracing_ui.md b/docs/tracing_ui.md index cff1a7be5c..9a4855eedb 100644 --- a/docs/tracing_ui.md +++ b/docs/tracing_ui.md @@ -25,7 +25,7 @@ The most common buttons are: - `Menu`: - `Archive`: Closes the annotation and archives it, removing it from a user's dashboard. Archived annotations can be found on a user's dashboard under "Annotations" and by clicking on "Show Archived Annotations". Use this to declutter your dashboard. (Not available for tasks) - `Download`: Starts a download of the current annotation including any skeleton and volume data. Skeleton annotations are downloaded as [NML](./data_formats.md#nml) files. 
Volume annotation downloads contain the raw segmentation data as [WKW](./data_formats.md#wkw) files. - - `Share`: Create a custumizable, shareable link to your dataset containing the current position, rotation, zoom level etc with fine-grained access controls. Use this to collaboratively work with colleagues. Read more about [data sharing](./sharing.md). + - `Share`: Create a customizable, shareable link to your dataset containing the current position, rotation, zoom level etc. with fine-grained access controls. Use this to collaboratively work with colleagues. Read more about [data sharing](./sharing.md). - `Duplicate`: Create a duplicate of this annotation. The duplicate will be created in your account, even if the original annotation belongs to somebody else. - `Screenshot`: Takes a screenshot of current datasets/annotation from each of the three viewports and downloads them as PNG files. - `Create Animation`: Creates an eye-catching animation of the dataset as a video clip. [Read more about animations](./animations.md). @@ -70,7 +70,7 @@ Each dataset consists of one or more data and annotation layers. A dataset typic #### Histogram & General Layer Properties - `Histogram`: The Histogram displays sampled color values of the dataset on a logarithmic scale. The slider below the Histogram can be used to adjust the dynamic range of the displayed values. In order to increase the contrast of data, reduce the dynamic range. To decrease the contrast, widen the range. In order to increase the brightness, move the range to the left. To decrease the brightness, move the range to the right. - Above the the histogram, there is a three-dots context menu with more options to further adjust the histogram or otherwise interact with the layer: + Above the histogram, there is a three-dots context menu with more options to further adjust the histogram or otherwise interact with the layer: - `Edit histogram range`: Manipulate the min/max value of the histogram range. 
+- `Visibility`: Use the eye icon on the left side of the layer name to enable/disable it. Toggling the visibility of a layer is often the quickest way to make information available in the dataset or hide it to get an overview.
+The built-in quick select tool allows you to draw a selection around a cell or object and WEBKNOSSOS will use machine learning to automatically do the segmentation for you.
+WEBKNOSSOS supports volumetric flood fills (3D) to relabel a segment with a new ID. Instead of having to relabel the segment slice-by-slice, WEBKNOSSOS can do this for you. This operation allows you to fix both split and merge errors:
Create a new segment ID from the toolbar and apply it to one of the partial segments that you just divided. diff --git a/docs/zarr.md b/docs/zarr.md index 4d321b16d0..b5b83e202c 100644 --- a/docs/zarr.md +++ b/docs/zarr.md @@ -2,9 +2,9 @@ WEBKNOSSOS works great with [OME-Zarr datasets](https://ngff.openmicroscopy.org/latest/index.html), sometimes called next-generation file format (NGFF). -We strongly believe in this community-driven, cloud-native data fromat for n-dimensional datasets. Zarr is a first-class citizen in WEBKNOSSOS and will likely replace [WKW](./wkw.md) long term. +We strongly believe in this community-driven, cloud-native data format for n-dimensional datasets. Zarr is a first-class citizen in WEBKNOSSOS and will likely replace [WKW](./wkw.md) long term. -Zarr datasets can both be uploaded to WEBKNOSSOS through the [web uploader](./datasets.md#uploading-through-the-web-browser) or [streamed from a remote server or the cloud](./datasets.md#streaming-from-remote-servers-and-the-cloud). For several layers, import the first Zarr group and then use the UI to add more URIs/groups. +Zarr datasets can both be uploaded to WEBKNOSSOS through the [web uploader](./datasets.md#uploading-through-the-web-browser) or [streamed from a remote server or the cloud](./datasets.md#streaming-from-remote-servers-and-the-cloud). When streaming and using several layers, import the first Zarr group and then use the UI to add more URIs/groups. ## Examples @@ -126,4 +126,4 @@ This feature in currently only supported for Zarr dataset due to their flexbile To get the best streaming performance for Zarr datasets consider the following settings. 
- Use chunk sizes of 32 - 128 voxels^3 -- Enable sharding (only available in Zarr 3+) \ No newline at end of file +- Enable sharding (only available in Zarr 3+) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5DataLayers.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5DataLayers.scala index 205567872a..af7b4b42a2 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5DataLayers.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5DataLayers.scala @@ -9,7 +9,7 @@ import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptorServ import play.api.libs.json.{Json, OFormat} import ucar.ma2.{Array => MultiArray} -trait N5Layer extends DataLayer { +trait N5Layer extends DataLayerWithMagLocators { val dataFormat: DataFormat.Value = DataFormat.n5 @@ -20,8 +20,6 @@ trait N5Layer extends DataLayer { def resolutions: List[Vec3Int] = mags.map(_.mag) - def mags: List[MagLocator] - def lengthOfUnderlyingCubes(resolution: Vec3Int): Int = Int.MaxValue // Prevents the wkw-shard-specific handle caching def numChannels: Option[Int] = Some(if (elementClass == ElementClass.uint24) 3 else 1) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedDataLayers.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedDataLayers.scala index 4c0fac10bd..744e0c82e3 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedDataLayers.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedDataLayers.scala @@ -9,7 +9,7 @@ import com.scalableminds.webknossos.datastore.models.datasource.{ Category, CoordinateTransformation, DataFormat, - DataLayer, + DataLayerWithMagLocators, DataSourceId, ElementClass, 
SegmentationLayer @@ -18,7 +18,7 @@ import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptorServ import play.api.libs.json.{Json, OFormat} import ucar.ma2.{Array => MultiArray} -trait PrecomputedLayer extends DataLayer { +trait PrecomputedLayer extends DataLayerWithMagLocators { val dataFormat: DataFormat.Value = DataFormat.neuroglancerPrecomputed @@ -29,8 +29,6 @@ trait PrecomputedLayer extends DataLayer { def resolutions: List[Vec3Int] = mags.map(_.mag) - def mags: List[MagLocator] - def lengthOfUnderlyingCubes(resolution: Vec3Int): Int = Int.MaxValue // Prevents the wkw-shard-specific handle caching def numChannels: Option[Int] = Some(if (elementClass == ElementClass.uint24) 3 else 1) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrDataLayers.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrDataLayers.scala index f4c81efe24..6bbbdc4861 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrDataLayers.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrDataLayers.scala @@ -9,7 +9,7 @@ import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptorServ import play.api.libs.json.{Json, OFormat} import ucar.ma2.{Array => MultiArray} -trait ZarrLayer extends DataLayer { +trait ZarrLayer extends DataLayerWithMagLocators { val dataFormat: DataFormat.Value = DataFormat.zarr @@ -20,8 +20,6 @@ trait ZarrLayer extends DataLayer { def resolutions: List[Vec3Int] = mags.map(_.mag) - def mags: List[MagLocator] - def lengthOfUnderlyingCubes(resolution: Vec3Int): Int = Int.MaxValue // Prevents the wkw-shard-specific handle caching def numChannels: Option[Int] = Some(if (elementClass == ElementClass.uint24) 3 else 1) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr3/Zarr3DataLayers.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr3/Zarr3DataLayers.scala index beac981fc8..241567040d 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr3/Zarr3DataLayers.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr3/Zarr3DataLayers.scala @@ -8,7 +8,7 @@ import com.scalableminds.webknossos.datastore.models.datasource.{ Category, CoordinateTransformation, DataFormat, - DataLayer, + DataLayerWithMagLocators, DataSourceId, ElementClass, SegmentationLayer @@ -18,7 +18,7 @@ import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptorServ import play.api.libs.json.{Json, OFormat} import ucar.ma2.{Array => MultiArray} -trait Zarr3Layer extends DataLayer { +trait Zarr3Layer extends DataLayerWithMagLocators { val dataFormat: DataFormat.Value = DataFormat.zarr3 @@ -29,8 +29,6 @@ trait Zarr3Layer extends DataLayer { def resolutions: List[Vec3Int] = mags.map(_.mag) - def mags: List[MagLocator] - def lengthOfUnderlyingCubes(resolution: Vec3Int): Int = Int.MaxValue // Prevents the wkw-shard-specific handle caching def numChannels: Option[Int] = Some(if (elementClass == ElementClass.uint24) 3 else 1) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLayerService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLayerService.scala index cefa7b7396..bbe4fac39d 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLayerService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLayerService.scala @@ -2,13 +2,6 @@ package com.scalableminds.webknossos.datastore.explore import com.scalableminds.util.geometry.{Vec3Double, Vec3Int} import com.scalableminds.util.tools.{Fox, FoxImplicits} -import com.scalableminds.webknossos.datastore.dataformats.n5.{N5DataLayer, N5SegmentationLayer} -import 
com.scalableminds.webknossos.datastore.dataformats.precomputed.{ - PrecomputedDataLayer, - PrecomputedSegmentationLayer -} -import com.scalableminds.webknossos.datastore.dataformats.zarr.{ZarrDataLayer, ZarrSegmentationLayer} -import com.scalableminds.webknossos.datastore.dataformats.zarr3.{Zarr3DataLayer, Zarr3SegmentationLayer} import com.scalableminds.webknossos.datastore.datareaders.n5.N5Header import com.scalableminds.webknossos.datastore.datareaders.precomputed.PrecomputedHeader import com.scalableminds.webknossos.datastore.datareaders.zarr.{NgffGroupHeader, NgffMetadata, ZarrHeader} @@ -16,16 +9,17 @@ import com.scalableminds.webknossos.datastore.datareaders.zarr3.Zarr3ArrayHeader import com.scalableminds.webknossos.datastore.models.datasource.{ CoordinateTransformation, CoordinateTransformationType, - DataLayer + DataLayerWithMagLocators } + import scala.concurrent.ExecutionContext import scala.util.Try class ExploreLayerService extends FoxImplicits { - def adaptLayersAndVoxelSize( - layersWithVoxelSizes: List[(DataLayer, Vec3Double)], - preferredVoxelSize: Option[Vec3Double])(implicit ec: ExecutionContext): Fox[(List[DataLayer], Vec3Double)] = + def adaptLayersAndVoxelSize(layersWithVoxelSizes: List[(DataLayerWithMagLocators, Vec3Double)], + preferredVoxelSize: Option[Vec3Double])( + implicit ec: ExecutionContext): Fox[(List[DataLayerWithMagLocators], Vec3Double)] = for { rescaledLayersAndVoxelSize <- rescaleLayersByCommonVoxelSize(layersWithVoxelSizes, preferredVoxelSize) ?~> "Could not extract common voxel size from layers" rescaledLayers = rescaledLayersAndVoxelSize._1 @@ -36,9 +30,9 @@ class ExploreLayerService extends FoxImplicits { voxelSize) } yield (layersWithCoordinateTransformations, voxelSize) - def makeLayerNamesUnique(layers: List[DataLayer]): List[DataLayer] = { + def makeLayerNamesUnique(layers: List[DataLayerWithMagLocators]): List[DataLayerWithMagLocators] = { val namesSetMutable = scala.collection.mutable.Set[String]() - layers.map { 
layer: DataLayer => + layers.map { layer => var nameCandidate = layer.name var index = 1 while (namesSetMutable.contains(nameCandidate)) { @@ -49,32 +43,16 @@ class ExploreLayerService extends FoxImplicits { if (nameCandidate == layer.name) { layer } else - layer match { - case l: ZarrDataLayer => l.copy(name = nameCandidate) - case l: ZarrSegmentationLayer => l.copy(name = nameCandidate) - case l: N5DataLayer => l.copy(name = nameCandidate) - case l: N5SegmentationLayer => l.copy(name = nameCandidate) - case _ => throw new Exception("Encountered unsupported layer format during explore remote") - } + layer.mapped(name = nameCandidate) } } - private def addCoordinateTransformationsToLayers(layers: List[DataLayer], + private def addCoordinateTransformationsToLayers(layers: List[DataLayerWithMagLocators], preferredVoxelSize: Option[Vec3Double], - voxelSize: Vec3Double): List[DataLayer] = + voxelSize: Vec3Double): List[DataLayerWithMagLocators] = layers.map(l => { val coordinateTransformations = coordinateTransformationForVoxelSize(voxelSize, preferredVoxelSize) - l match { - case l: ZarrDataLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: ZarrSegmentationLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: N5DataLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: N5SegmentationLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: PrecomputedDataLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: PrecomputedSegmentationLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: Zarr3DataLayer => l.copy(coordinateTransformations = coordinateTransformations) - case l: Zarr3SegmentationLayer => l.copy(coordinateTransformations = coordinateTransformations) - case _ => throw new Exception("Encountered unsupported layer format during explore remote") - } + l.mapped(coordinateTransformations = 
coordinateTransformations) }) private def isPowerOfTwo(x: Int): Boolean = @@ -135,9 +113,9 @@ class ExploreLayerService extends FoxImplicits { } } - private def rescaleLayersByCommonVoxelSize( - layersWithVoxelSizes: List[(DataLayer, Vec3Double)], - preferredVoxelSize: Option[Vec3Double])(implicit ec: ExecutionContext): Fox[(List[DataLayer], Vec3Double)] = { + private def rescaleLayersByCommonVoxelSize(layersWithVoxelSizes: List[(DataLayerWithMagLocators, Vec3Double)], + preferredVoxelSize: Option[Vec3Double])( + implicit ec: ExecutionContext): Fox[(List[DataLayerWithMagLocators], Vec3Double)] = { val allVoxelSizes = layersWithVoxelSizes .flatMap(layerWithVoxelSize => { val layer = layerWithVoxelSize._1 @@ -159,33 +137,7 @@ class ExploreLayerService extends FoxImplicits { val layer = layerWithVoxelSize._1 val layerVoxelSize = layerWithVoxelSize._2 val magFactors = (layerVoxelSize / baseVoxelSize).toVec3Int - layer match { - case l: ZarrDataLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: ZarrSegmentationLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: N5DataLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: N5SegmentationLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: PrecomputedDataLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: PrecomputedSegmentationLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: Zarr3DataLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case l: 
Zarr3SegmentationLayer => - l.copy(mags = l.mags.map(mag => mag.copy(mag = mag.mag * magFactors)), - boundingBox = l.boundingBox * magFactors) - case _ => throw new Exception("Encountered unsupported layer format during explore remote") - } + layer.mapped(boundingBoxMapping = _ * magFactors, magMapping = mag => mag.copy(mag = mag.mag * magFactors)) }) } yield (rescaledLayers, baseVoxelSize) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLocalLayerService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLocalLayerService.scala index a08a0010f6..0d8998763b 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLocalLayerService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/ExploreLocalLayerService.scala @@ -2,8 +2,13 @@ package com.scalableminds.webknossos.datastore.explore import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.util.tools.{Fox, FoxImplicits} -import com.scalableminds.webknossos.datastore.dataformats.zarr.ZarrDataLayer -import com.scalableminds.webknossos.datastore.models.datasource.{DataLayer, DataSource, DataSourceId, GenericDataSource} +import com.scalableminds.webknossos.datastore.models.datasource.{ + DataLayerWithMagLocators, + DataSource, + DataSourceId, + DataSourceWithMagLocators, + GenericDataSource +} import com.scalableminds.webknossos.datastore.storage.{DataVaultService, RemoteSourceDescriptor} import net.liftweb.common.Box.tryo import play.api.libs.json.Json @@ -18,19 +23,21 @@ class ExploreLocalLayerService @Inject()(dataVaultService: DataVaultService) extends ExploreLayerService with FoxImplicits { - def exploreLocal(path: Path, dataSourceId: DataSourceId, layerDirectory: String = "color")( - implicit ec: ExecutionContext): Fox[DataSource] = + def exploreLocal(path: Path, dataSourceId: DataSourceId, layerDirectory: String = "")( + implicit ec: ExecutionContext): 
Fox[DataSourceWithMagLocators] = for { _ <- Fox.successful(()) explored = Seq( exploreLocalNgffArray(path, dataSourceId), - exploreLocalZarrArray(path, dataSourceId, layerDirectory) + exploreLocalZarrArray(path, dataSourceId, layerDirectory), + exploreLocalNeuroglancerPrecomputed(path, dataSourceId, layerDirectory), + exploreLocalN5Multiscales(path, dataSourceId, layerDirectory) ) dataSource <- Fox.firstSuccess(explored) ?~> "Could not explore local data source" } yield dataSource private def exploreLocalZarrArray(path: Path, dataSourceId: DataSourceId, layerDirectory: String)( - implicit ec: ExecutionContext): Fox[DataSource] = + implicit ec: ExecutionContext): Fox[DataSourceWithMagLocators] = for { magDirectories <- tryo(Files.list(path.resolve(layerDirectory)).iterator().asScala.toList).toFox ?~> s"Could not resolve color directory as child of $path" layersWithVoxelSizes <- Fox.combined(magDirectories.map(dir => @@ -42,26 +49,51 @@ class ExploreLocalLayerService @Inject()(dataVaultService: DataVaultService) layersWithVoxelSizes <- (new ZarrArrayExplorer(mag, ec)).explore(vaultPath, None) } yield layersWithVoxelSizes)) (layers, voxelSize) <- adaptLayersAndVoxelSize(layersWithVoxelSizes.flatten, None) - zarrLayers = layers.map(_.asInstanceOf[ZarrDataLayer]) - relativeLayers = makePathsRelative(zarrLayers).toList - dataSource = new DataSource(dataSourceId, relativeLayers, voxelSize) + relativeLayers = layers.map(selectLastTwoDirectories) + dataSource = new DataSourceWithMagLocators(dataSourceId, relativeLayers, voxelSize) } yield dataSource private def exploreLocalNgffArray(path: Path, dataSourceId: DataSourceId)( - implicit ec: ExecutionContext): Fox[DataSource] = + implicit ec: ExecutionContext): Fox[DataSourceWithMagLocators] = + exploreLocalLayer( + layers => layers.map(selectLastTwoDirectories), + new NgffExplorer + )(path, dataSourceId, "") + + private def exploreLocalNeuroglancerPrecomputed(path: Path, dataSourceId: DataSourceId, layerDirectory: String)( + 
implicit ec: ExecutionContext): Fox[DataSourceWithMagLocators] = + exploreLocalLayer( + layers => layers.map(selectLastDirectory), + new PrecomputedExplorer + )(path, dataSourceId, layerDirectory) + + private def exploreLocalN5Multiscales(path: Path, dataSourceId: DataSourceId, layerDirectory: String)( + implicit ec: ExecutionContext): Fox[DataSourceWithMagLocators] = + exploreLocalLayer( + layers => layers.map(selectLastDirectory), + new N5MultiscalesExplorer + )(path, dataSourceId, layerDirectory) + + private def selectLastDirectory(l: DataLayerWithMagLocators) = + l.mapped(magMapping = m => m.copy(path = m.path.map(_.split("/").last))) + + private def selectLastTwoDirectories(l: DataLayerWithMagLocators) = + l.mapped(magMapping = m => m.copy(path = m.path.map(_.split("/").takeRight(2).mkString("/")))) + + private def exploreLocalLayer( + makeLayersRelative: List[DataLayerWithMagLocators] => List[DataLayerWithMagLocators], + explorer: RemoteLayerExplorer)(path: Path, dataSourceId: DataSourceId, layerDirectory: String)( + implicit ec: ExecutionContext): Fox[DataSourceWithMagLocators] = for { - remoteSourceDescriptor <- Fox.successful(RemoteSourceDescriptor(path.toUri, None)) + fullPath <- Fox.successful(path.resolve(layerDirectory)) + remoteSourceDescriptor <- Fox.successful(RemoteSourceDescriptor(fullPath.toUri, None)) vaultPath <- dataVaultService.getVaultPath(remoteSourceDescriptor) ?~> "dataVault.setup.failed" - layersWithVoxelSizes <- (new NgffExplorer).explore(vaultPath, None) + layersWithVoxelSizes <- explorer.explore(vaultPath, None) (layers, voxelSize) <- adaptLayersAndVoxelSize(layersWithVoxelSizes, None) - zarrLayers = layers.map(_.asInstanceOf[ZarrDataLayer]) - relativeLayers = makePathsRelative(zarrLayers).toList - dataSource = new DataSource(dataSourceId, relativeLayers, voxelSize) + relativeLayers = makeLayersRelative(layers) + dataSource = new DataSourceWithMagLocators(dataSourceId, relativeLayers, voxelSize) } yield dataSource - private def 
makePathsRelative(layers: Seq[ZarrDataLayer]): Seq[DataLayer] = - layers.map(l => l.copy(mags = l.mags.map(m => m.copy(path = m.path.map(_.split("/").takeRight(2).mkString("/")))))) - def writeLocalDatasourceProperties(dataSource: DataSource, path: Path)(implicit ec: ExecutionContext): Fox[Path] = tryo { val properties = Json.toJson(dataSource).toString().getBytes(StandardCharsets.UTF_8) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/NeuroglancerUriExplorer.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/NeuroglancerUriExplorer.scala index b072afe331..db1c7da4bf 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/NeuroglancerUriExplorer.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/NeuroglancerUriExplorer.scala @@ -2,16 +2,9 @@ package com.scalableminds.webknossos.datastore.explore import com.scalableminds.util.geometry.{Vec3Double, Vec3Int} import com.scalableminds.util.tools.Fox -import com.scalableminds.webknossos.datastore.dataformats.n5.{N5DataLayer, N5SegmentationLayer} -import com.scalableminds.webknossos.datastore.dataformats.precomputed.{ - PrecomputedDataLayer, - PrecomputedSegmentationLayer -} -import com.scalableminds.webknossos.datastore.dataformats.zarr.{ZarrDataLayer, ZarrSegmentationLayer} -import com.scalableminds.webknossos.datastore.dataformats.zarr3.{Zarr3DataLayer, Zarr3SegmentationLayer} import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.scalableminds.webknossos.datastore.models.datasource.LayerViewConfiguration.LayerViewConfiguration -import com.scalableminds.webknossos.datastore.models.datasource.{DataLayer, LayerViewConfiguration} +import com.scalableminds.webknossos.datastore.models.datasource.{DataLayerWithMagLocators, LayerViewConfiguration} import com.scalableminds.webknossos.datastore.storage.{DataVaultService, RemoteSourceDescriptor} import net.liftweb.common.Box.tryo 
import play.api.libs.json._ @@ -26,7 +19,8 @@ class NeuroglancerUriExplorer @Inject()(dataVaultService: DataVaultService, extends RemoteLayerExplorer { override def name: String = "Neuroglancer URI Explorer" - override def explore(remotePath: VaultPath, credentialId: Option[String]): Fox[List[(DataLayer, Vec3Double)]] = + override def explore(remotePath: VaultPath, + credentialId: Option[String]): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = for { _ <- Fox.successful(()) uriFragment <- tryo(remotePath.toUri.getFragment.drop(1)) ?~> "URI has no matching fragment part" @@ -39,7 +33,7 @@ class NeuroglancerUriExplorer @Inject()(dataVaultService: DataVaultService, renamedLayers = exploreLayerService.makeLayerNamesUnique(layers.map(_._1)) } yield renamedLayers.zip(layers.map(_._2)) - private def exploreNeuroglancerLayer(layerSpec: JsValue): Fox[List[(DataLayer, Vec3Double)]] = + private def exploreNeuroglancerLayer(layerSpec: JsValue): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = for { _ <- Fox.successful(()) obj <- layerSpec.validate[JsObject].toFox @@ -54,7 +48,9 @@ class NeuroglancerUriExplorer @Inject()(dataVaultService: DataVaultService, layerWithViewConfiguration <- assignViewConfiguration(layer, viewConfiguration) } yield layerWithViewConfiguration - private def exploreLayer(layerType: String, remotePath: VaultPath, name: String): Fox[List[(DataLayer, Vec3Double)]] = + private def exploreLayer(layerType: String, + remotePath: VaultPath, + name: String): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = layerType match { case "n5" => Fox.firstSuccess( @@ -76,22 +72,12 @@ class NeuroglancerUriExplorer @Inject()(dataVaultService: DataVaultService, } private def assignViewConfiguration( - value: List[(DataLayer, Vec3Double)], - configuration: LayerViewConfiguration.LayerViewConfiguration): Fox[List[(DataLayer, Vec3Double)]] = + value: List[(DataLayerWithMagLocators, Vec3Double)], + configuration: LayerViewConfiguration.LayerViewConfiguration): 
Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = for { _ <- Fox.successful(()) layers = value.map(_._1) - layersWithViewConfigs = layers.map { - case l: ZarrDataLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: ZarrSegmentationLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: N5DataLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: N5SegmentationLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: PrecomputedDataLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: PrecomputedSegmentationLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: Zarr3DataLayer => l.copy(defaultViewConfiguration = Some(configuration)) - case l: Zarr3SegmentationLayer => l.copy(defaultViewConfiguration = Some(configuration)) - - } + layersWithViewConfigs = layers.map(l => l.mapped(defaultViewConfigurationMapping = _ => Some(configuration))) } yield layersWithViewConfigs.zip(value.map(_._2)) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/RemoteLayerExplorer.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/RemoteLayerExplorer.scala index b8342619c2..cbd8ffd9e7 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/RemoteLayerExplorer.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/RemoteLayerExplorer.scala @@ -5,7 +5,7 @@ import com.scalableminds.util.tools.TextUtils.normalizeStrong import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} import com.scalableminds.webknossos.datastore.dataformats.MagLocator import com.scalableminds.webknossos.datastore.datavault.VaultPath -import com.scalableminds.webknossos.datastore.models.datasource.{AdditionalAxis, DataLayer, ElementClass} +import com.scalableminds.webknossos.datastore.models.datasource.{AdditionalAxis, DataLayerWithMagLocators, ElementClass} import 
net.liftweb.common.Box import net.liftweb.common.Box.tryo import play.api.libs.json.Reads @@ -22,7 +22,7 @@ trait RemoteLayerExplorer extends FoxImplicits { implicit def ec: ExecutionContext - def explore(remotePath: VaultPath, credentialId: Option[String]): Fox[List[(DataLayer, Vec3Double)]] + def explore(remotePath: VaultPath, credentialId: Option[String]): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] def name: String diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/WebknossosZarrExplorer.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/WebknossosZarrExplorer.scala index 3951cc3c94..8835ba7a53 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/WebknossosZarrExplorer.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/explore/WebknossosZarrExplorer.scala @@ -8,7 +8,11 @@ import com.scalableminds.webknossos.datastore.dataformats.zarr3.{Zarr3DataLayer, import com.scalableminds.webknossos.datastore.datareaders.zarr.ZarrHeader import com.scalableminds.webknossos.datastore.datareaders.zarr3.Zarr3ArrayHeader import com.scalableminds.webknossos.datastore.datavault.VaultPath -import com.scalableminds.webknossos.datastore.models.datasource.{DataLayer, DataSource, GenericDataSource} +import com.scalableminds.webknossos.datastore.models.datasource.{ + DataLayerWithMagLocators, + DataSource, + GenericDataSource +} import scala.concurrent.ExecutionContext @@ -16,7 +20,8 @@ class WebknossosZarrExplorer(implicit val ec: ExecutionContext) extends RemoteLa override def name: String = "WEBKNOSSOS-based Zarr" - override def explore(remotePath: VaultPath, credentialId: Option[String]): Fox[List[(DataLayer, Vec3Double)]] = + override def explore(remotePath: VaultPath, + credentialId: Option[String]): Fox[List[(DataLayerWithMagLocators, Vec3Double)]] = for { dataSourcePropertiesPath <- Fox.successful(remotePath / 
GenericDataSource.FILENAME_DATASOURCE_PROPERTIES_JSON) dataSource <- parseJsonFromPath[DataSource](dataSourcePropertiesPath) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayer.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayer.scala index 7a3c86df07..377c95f01b 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayer.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataLayer.scala @@ -3,7 +3,7 @@ package com.scalableminds.webknossos.datastore.models.datasource import com.scalableminds.util.cache.AlfuCache import com.scalableminds.util.enumeration.ExtendedEnumeration import com.scalableminds.webknossos.datastore.dataformats.wkw.{WKWDataLayer, WKWSegmentationLayer} -import com.scalableminds.webknossos.datastore.dataformats.{BucketProvider, MappingProvider} +import com.scalableminds.webknossos.datastore.dataformats.{BucketProvider, MagLocator, MappingProvider} import com.scalableminds.webknossos.datastore.models.BucketPosition import com.scalableminds.util.geometry.{BoundingBox, Vec3Int} import com.scalableminds.webknossos.datastore.dataformats.n5.{N5DataLayer, N5SegmentationLayer} @@ -277,6 +277,86 @@ object DataLayer { } } +trait DataLayerWithMagLocators extends DataLayer { + + def mags: List[MagLocator] + + def mapped(boundingBoxMapping: BoundingBox => BoundingBox = b => b, + defaultViewConfigurationMapping: Option[LayerViewConfiguration] => Option[LayerViewConfiguration] = l => l, + magMapping: MagLocator => MagLocator = m => m, + name: String = this.name, + coordinateTransformations: Option[List[CoordinateTransformation]] = this.coordinateTransformations) + : DataLayerWithMagLocators = + this match { + case l: ZarrDataLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = 
defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: ZarrSegmentationLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: N5DataLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: N5SegmentationLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: PrecomputedDataLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: PrecomputedSegmentationLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: Zarr3DataLayer => + l.copy( + boundingBox = boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case l: Zarr3SegmentationLayer => + l.copy( + boundingBox = 
boundingBoxMapping(l.boundingBox), + defaultViewConfiguration = defaultViewConfigurationMapping(l.defaultViewConfiguration), + mags = l.mags.map(magMapping), + name = name, + coordinateTransformations = coordinateTransformations + ) + case _ => throw new Exception("Encountered unsupported layer format") + } + +} + trait SegmentationLayer extends DataLayer with SegmentationLayerLike { val category: Category.Value = Category.segmentation lazy val mappingProvider: MappingProvider = new MappingProvider(this) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataSource.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataSource.scala index 6684871b57..fa18670fdb 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataSource.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/datasource/DataSource.scala @@ -63,4 +63,5 @@ package object datasource { type DataSource = GenericDataSource[DataLayer] type DataSourceLike = GenericDataSource[DataLayerLike] + type DataSourceWithMagLocators = GenericDataSource[DataLayerWithMagLocators] } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala index 2aee68b846..f05e812949 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/uploading/UploadService.scala @@ -3,9 +3,12 @@ package com.scalableminds.webknossos.datastore.services.uploading import com.google.inject.Inject import com.scalableminds.util.io.PathUtils.ensureDirectoryBox import com.scalableminds.util.io.{PathUtils, ZipIO} -import com.scalableminds.util.tools.{Fox, FoxImplicits} +import 
com.scalableminds.util.tools.{BoxImplicits, Fox, FoxImplicits} import com.scalableminds.webknossos.datastore.dataformats.wkw.WKWDataFormat.FILENAME_HEADER_WKW import com.scalableminds.webknossos.datastore.dataformats.wkw.{WKWDataLayer, WKWSegmentationLayer} +import com.scalableminds.webknossos.datastore.datareaders.n5.N5Header.FILENAME_ATTRIBUTES_JSON +import com.scalableminds.webknossos.datastore.datareaders.n5.N5Metadata +import com.scalableminds.webknossos.datastore.datareaders.precomputed.PrecomputedHeader.FILENAME_INFO import com.scalableminds.webknossos.datastore.datareaders.zarr.NgffMetadata.FILENAME_DOT_ZATTRS import com.scalableminds.webknossos.datastore.datareaders.zarr.ZarrHeader.FILENAME_DOT_ZARRAY import com.scalableminds.webknossos.datastore.explore.ExploreLocalLayerService @@ -71,6 +74,7 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, extends DatasetDeleter with DirectoryConstants with FoxImplicits + with BoxImplicits with LazyLogging { /* Redis stores different information for each upload, with different prefixes in the keys: @@ -235,34 +239,46 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, _ <- Fox.successful(()) uploadedDataSourceType = guessTypeOfUploadedDataSource(unpackToDir) _ <- uploadedDataSourceType match { - case UploadedDataSourceType.ZARR => exploreLocalDatasource(unpackToDir, dataSourceId) - case UploadedDataSourceType.EXPLORED => Fox.successful(()) - case UploadedDataSourceType.ZARR_MULTILAYER => tryExploringMultipleZarrLayers(unpackToDir, dataSourceId) - case UploadedDataSourceType.WKW => addLayerAndResolutionDirIfMissing(unpackToDir).toFox + case UploadedDataSourceType.ZARR | UploadedDataSourceType.NEUROGLANCER_PRECOMPUTED | + UploadedDataSourceType.N5 => + exploreLocalDatasource(unpackToDir, dataSourceId, uploadedDataSourceType) + case UploadedDataSourceType.EXPLORED => Fox.successful(()) + case UploadedDataSourceType.ZARR_MULTILAYER | 
UploadedDataSourceType.NEUROGLANCER_MULTILAYER | + UploadedDataSourceType.N5_MULTILAYER => + tryExploringMultipleLayers(unpackToDir, dataSourceId, uploadedDataSourceType) + case UploadedDataSourceType.WKW => addLayerAndResolutionDirIfMissing(unpackToDir).toFox } _ <- datasetSymlinkService.addSymlinksToOtherDatasetLayers(unpackToDir, layersToLink.getOrElse(List.empty)) _ <- addLinkedLayersToDataSourceProperties(unpackToDir, dataSourceId.team, layersToLink.getOrElse(List.empty)) } yield () } - private def exploreLocalDatasource(path: Path, dataSourceId: DataSourceId): Fox[Unit] = + private def exploreLocalDatasource(path: Path, + dataSourceId: DataSourceId, + typ: UploadedDataSourceType.Value): Fox[Unit] = for { - _ <- addLayerAndResolutionDirIfMissing(path, FILENAME_DOT_ZARRAY).toFox + _ <- Fox.runIf(typ == UploadedDataSourceType.ZARR)( + addLayerAndResolutionDirIfMissing(path, FILENAME_DOT_ZARRAY).toFox) explored <- exploreLocalLayerService.exploreLocal(path, dataSourceId) _ <- exploreLocalLayerService.writeLocalDatasourceProperties(explored, path) } yield () - private def tryExploringMultipleZarrLayers(path: Path, dataSourceId: DataSourceId): Fox[Option[Path]] = + private def tryExploringMultipleLayers(path: Path, + dataSourceId: DataSourceId, + typ: UploadedDataSourceType.Value): Fox[Option[Path]] = for { - layerDirs <- getZarrLayerDirectories(path) + layerDirs <- typ match { + case UploadedDataSourceType.ZARR_MULTILAYER => getZarrLayerDirectories(path) + case UploadedDataSourceType.NEUROGLANCER_MULTILAYER | UploadedDataSourceType.N5_MULTILAYER => + PathUtils.listDirectories(path, silent = false).toFox + } dataSources <- Fox.combined( layerDirs .map(layerDir => for { _ <- addLayerAndResolutionDirIfMissing(layerDir).toFox - explored: DataSource <- exploreLocalLayerService.exploreLocal(path, - dataSourceId, - layerDir.getFileName.toString) + explored: DataSourceWithMagLocators <- exploreLocalLayerService + .exploreLocal(path, dataSourceId, 
layerDir.getFileName.toString) } yield explored) .toList) combinedLayers = exploreLocalLayerService.makeLayerNamesUnique(dataSources.flatMap(_.dataLayers)) @@ -354,27 +370,51 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, UploadedDataSourceType.EXPLORED } else if (looksLikeZarrArray(dataSourceDir, maxDepth = 3).openOr(false)) { UploadedDataSourceType.ZARR_MULTILAYER + } else if (looksLikeNeuroglancerPrecomputed(dataSourceDir, 1).openOr(false)) { + UploadedDataSourceType.NEUROGLANCER_PRECOMPUTED + } else if (looksLikeNeuroglancerPrecomputed(dataSourceDir, 2).openOr(false)) { + UploadedDataSourceType.NEUROGLANCER_MULTILAYER + } else if (looksLikeN5Multilayer(dataSourceDir).openOr(false)) { + UploadedDataSourceType.N5_MULTILAYER + } else if (looksLikeN5Layer(dataSourceDir).openOr(false)) { + UploadedDataSourceType.N5 } else { UploadedDataSourceType.WKW } - private def looksLikeZarrArray(dataSourceDir: Path, maxDepth: Int): Box[Boolean] = + private def containsMatchingFile(fileNames: List[String], dataSourceDir: Path, maxDepth: Int): Box[Boolean] = for { - listing: Seq[Path] <- PathUtils.listFilesRecursive( - dataSourceDir, - maxDepth = maxDepth, - silent = false, - filters = p => p.getFileName.toString == FILENAME_DOT_ZARRAY || p.getFileName.toString == FILENAME_DOT_ZATTRS) + listing: Seq[Path] <- PathUtils.listFilesRecursive(dataSourceDir, + maxDepth = maxDepth, + silent = false, + filters = p => fileNames.contains(p.getFileName.toString)) } yield listing.nonEmpty - private def looksLikeExploredDataSource(dataSourceDir: Path): Box[Boolean] = + private def looksLikeZarrArray(dataSourceDir: Path, maxDepth: Int): Box[Boolean] = + containsMatchingFile(List(FILENAME_DOT_ZARRAY, FILENAME_DOT_ZATTRS), dataSourceDir, maxDepth) + + private def looksLikeNeuroglancerPrecomputed(dataSourceDir: Path, maxDepth: Int): Box[Boolean] = + containsMatchingFile(List(FILENAME_INFO), dataSourceDir, maxDepth) + + private def looksLikeN5Layer(dataSourceDir: 
Path): Box[Boolean] = for { - listing: Seq[Path] <- PathUtils.listFilesRecursive( - dataSourceDir, - maxDepth = 1, - silent = false, - filters = p => p.getFileName.toString == FILENAME_DATASOURCE_PROPERTIES_JSON) - } yield listing.nonEmpty + attributesFiles <- PathUtils.listFilesRecursive(dataSourceDir, + silent = false, + maxDepth = 1, + filters = p => p.getFileName.toString == FILENAME_ATTRIBUTES_JSON) + _ <- Json.parse(new String(Files.readAllBytes(attributesFiles.head))).validate[N5Metadata] + } yield true + + private def looksLikeN5Multilayer(dataSourceDir: Path): Box[Boolean] = + for { + _ <- containsMatchingFile(List(FILENAME_ATTRIBUTES_JSON), dataSourceDir, 1) // root attributes.json + directories <- PathUtils.listDirectories(dataSourceDir, silent = false) + detectedLayerBoxes = directories.map(looksLikeN5Layer) + _ <- bool2Box(detectedLayerBoxes.forall(_.openOr(false))) + } yield true + + private def looksLikeExploredDataSource(dataSourceDir: Path): Box[Boolean] = + containsMatchingFile(List(FILENAME_DATASOURCE_PROPERTIES_JSON), dataSourceDir, 1) private def getZarrLayerDirectories(dataSourceDir: Path): Fox[Seq[Path]] = for { @@ -501,5 +541,5 @@ class UploadService @Inject()(dataSourceRepository: DataSourceRepository, } object UploadedDataSourceType extends Enumeration { - val ZARR, EXPLORED, ZARR_MULTILAYER, WKW = Value + val ZARR, EXPLORED, ZARR_MULTILAYER, WKW, NEUROGLANCER_PRECOMPUTED, NEUROGLANCER_MULTILAYER, N5, N5_MULTILAYER = Value }