Install jhdf5 via maven (#5953)
* Install jhdf5 via maven

* remove copying lib from dockerfile

* also in datastore dockerfile...
fm3 authored Jan 13, 2022
1 parent 4908b5d commit f0396ab
Showing 23 changed files with 14 additions and 209 deletions.
1 change: 0 additions & 1 deletion Dockerfile
@@ -7,7 +7,6 @@ RUN mkdir -p /webknossos
WORKDIR /webknossos

COPY target/universal/stage .
-COPY webknossos-datastore/lib/native target/universal/stage/lib/native


RUN addgroup --system --gid 999 webknossos \
4 changes: 3 additions & 1 deletion project/Dependencies.scala
@@ -42,6 +42,7 @@ object Dependencies {
private val spire = "org.typelevel" %% "spire" % "0.14.1"
private val jgrapht = "org.jgrapht" % "jgrapht-core" % "1.4.0"
private val swagger = "io.swagger" %% "swagger-play2" % "1.7.1"
+private val jhdf = "cisd" % "jhdf5" % "19.04.0"

private val sql = Seq(
"com.typesafe.slick" %% "slick" % "3.2.3",
@@ -81,7 +82,8 @@ object Dependencies {
swagger,
spire,
akkaHttp,
-redis
+redis,
+jhdf
)

val webknossosTracingstoreDependencies: Seq[ModuleID] = Seq(
3 changes: 2 additions & 1 deletion project/DependencyResolvers.scala
@@ -2,12 +2,13 @@ import sbt._

object DependencyResolvers {
val atlassian = "Atlassian Releases" at "https://maven.atlassian.com/public/"
+val sciJava = "SciJava Public" at "https://maven.scijava.org/content/repositories/public/"

val dependencyResolvers = Seq(
Resolver.sonatypeRepo("releases"),
Resolver.sonatypeRepo("snapshots"),
Resolver.typesafeRepo("releases"),
-Resolver.bintrayRepo("scalaz", "releases"),
+sciJava,
atlassian
)
}
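
For illustration, here is how the two build changes above fit together in a minimal sbt sketch (not part of the commit): the cisd % jhdf5 artifact is presumably not on Maven Central, which is why the SciJava repository is added as a resolver in the same commit.

// minimal build.sbt sketch combining the new resolver and the new dependency
resolvers += "SciJava Public" at "https://maven.scijava.org/content/repositories/public/"
libraryDependencies += "cisd" % "jhdf5" % "19.04.0"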
1 change: 0 additions & 1 deletion webknossos-datastore/Dockerfile
@@ -9,7 +9,6 @@ WORKDIR /webknossos-datastore
VOLUME /webknossos-datastore/binaryData /tmp

COPY target/universal/stage .
-COPY lib/native target/universal/stage/lib/native

RUN chown -R webknossos . \
&& chmod go+x bin/webknossos-datastore \
@@ -62,6 +62,8 @@ class AgglomerateFileCache(val maxEntries: Int) extends LRUConcurrentCache[Agglo
}

class AgglomerateIdCache(val maxEntries: Int, val standardBlockSize: Int) extends LRUConcurrentCache[Long, Long] {
+// On cache miss, reads whole blocks of IDs (number of elements is standardBlockSize)

def withCache(segmentId: ULong, reader: IHDF5Reader, dataSet: HDF5DataSet)(
readFromFile: (IHDF5Reader, HDF5DataSet, Long, Long) => Array[Long]): Long = {

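The comment above describes block-wise reads on cache miss. A minimal sketch of that idea (class and parameter names assumed, not the actual webKnossos implementation):

// Sketch: a miss for one segment ID loads standardBlockSize consecutive
// mappings, so lookups of neighboring IDs become cache hits.
class BlockedIdCacheSketch(maxEntries: Int, standardBlockSize: Int) {
  private val cache = new java.util.LinkedHashMap[Long, Long](maxEntries, 0.75f, true) {
    override protected def removeEldestEntry(eldest: java.util.Map.Entry[Long, Long]): Boolean =
      size() > maxEntries // evict the least-recently-used entry once full
  }

  def withCache(segmentId: Long)(readBlock: (Long, Long) => Array[Long]): Long = {
    if (!cache.containsKey(segmentId)) {
      val blockStart = (segmentId / standardBlockSize) * standardBlockSize
      // on miss, read a whole block of IDs from the file, not just one element
      val block = readBlock(blockStart, standardBlockSize.toLong)
      for ((agglomerateId, i) <- block.zipWithIndex)
        cache.put(blockStart + i, agglomerateId)
    }
    cache.get(segmentId)
  }
}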
@@ -98,13 +100,14 @@ case class BoundingBoxFinder(
}

// This bounding box cache uses the cumsum.json to speed up the agglomerate mapping. The cumsum.json contains information about all bounding boxes of the dataset and the contained segment ids.
-// Thus, we can load the correct part from the AgglomerateFile without having to maintain a cache because we can translate the whole input array.
+// The typical query for the agglomerate file is to map a complete bucket/bbox, not to translate individual IDs.
+// Thus, we can load the correct part from the AgglomerateFile without having to maintain an ID cache because we can translate the whole input array.
// One special case is an input value of 0, which is automatically mapped to 0.

class BoundingBoxCache(
val cache: mutable.HashMap[(Long, Long, Long), BoundingBoxValues], // maps bounding box top left to range and bb dimensions
val boundingBoxFinder: BoundingBoxFinder, // saves the bb top left positions
-val maxReaderRange: ULong) // config value for maximum amount of elements which are allowed to be read as once
+val maxReaderRange: ULong) // config value for maximum number of elements that are allowed to be read at once
extends LazyLogging {
private def getGlobalCuboid(cuboid: Cuboid): Cuboid = {
val res = cuboid.resolution
@@ -115,6 +118,7 @@ class BoundingBoxCache(
cuboid.depth * res.z)
}

+// get the segment ID range for one cuboid
private def getReaderRange(request: DataServiceDataRequest): (ULong, ULong) = {
// convert cuboid to global coordinates (in res 1)
val globalCuboid = getGlobalCuboid(request.cuboid)
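To make the comments above concrete, a hypothetical sketch (all names assumed) of translating a whole bucket through a single contiguous read, including the special case for 0:

// Sketch: the cumsum data yields a segment ID range (from, to) that covers the
// requested cuboid; one read of that range translates the entire input array.
def mapBucketSketch(input: Array[Long],
                    range: (Long, Long),
                    readRange: (Long, Long) => Array[Long]): Array[Long] = {
  val (from, to) = range
  val agglomerateIds = readRange(from, to) // agglomerateIds(i) maps segment ID from + i
  input.map { segmentId =>
    if (segmentId == 0L) 0L // an input value of 0 is automatically mapped to 0
    else agglomerateIds((segmentId - from).toInt)
  }
}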
@@ -12,6 +12,9 @@ import scala.collection.mutable

object CumsumParser extends LazyLogging {

+// the cumsum JSON object contains "max_ids" and "cumsum"
+// the jsonReader can only go through the file in the forward direction, but we need max_ids first
+// if the JSON contains cumsum first, it is skipped on the first pass and parseImpl calls itself again to read it in a second pass
def parseImpl(f: File,
maxReaderRange: ULong,
initialBoundingBoxList: List[(Long, Long, Long, Long, Long, Long)],
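A rough sketch of the two-pass control flow these comments describe, using Jackson's streaming parser for illustration (the reader library is an assumption, and array handling is stubbed out):

import com.fasterxml.jackson.core.{JsonFactory, JsonToken}
import java.io.File

object TwoPassCumsumSketch {
  def parse(f: File): Unit = parseImpl(f, cumsumOnly = false)

  // the first pass reads max_ids; if cumsum precedes it, skip it and recurse
  private def parseImpl(f: File, cumsumOnly: Boolean): Unit = {
    val p = new JsonFactory().createParser(f)
    var maxIdsSeen = cumsumOnly // the second pass only wants cumsum
    var cumsumDeferred = false
    while (p.nextToken() != null)
      if (p.currentToken() == JsonToken.FIELD_NAME) {
        val name = p.getCurrentName
        p.nextToken() // advance onto the field's value
        name match {
          case "max_ids" if !cumsumOnly =>
            p.skipChildren() // stub: the real parser reads the max_ids values here
            maxIdsSeen = true
          case "cumsum" if maxIdsSeen =>
            p.skipChildren() // stub: the real parser reads the cumsum array here
          case "cumsum" =>
            p.skipChildren() // forward-only: cannot rewind, so defer to a second pass
            cumsumDeferred = true
          case _ => p.skipChildren()
        }
      }
    p.close()
    if (cumsumDeferred) parseImpl(f, cumsumOnly = true)
  }
}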
202 changes: 0 additions & 202 deletions webknossos-datastore/lib/LICENSE

This file was deleted.

1 change: 0 additions & 1 deletion webknossos-datastore/lib/README

This file was deleted.

Binary file removed webknossos-datastore/lib/args4j-2.33.jar
Binary file removed webknossos-datastore/lib/sis-base-18.09.0.jar
Binary file removed webknossos-datastore/lib/sis-jhdf5-19.04.0.jar