diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md
index 2a0df460a5..4ca2815109 100644
--- a/CHANGELOG.unreleased.md
+++ b/CHANGELOG.unreleased.md
@@ -14,7 +14,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released
 - Added a new bounding box tool that allows resizing and creating bounding boxes more easily. Additionally, the context menu now contains options to modify the bounding box close to the clicked position. [#5767](https://github.com/scalableminds/webknossos/pull/5767)
 
 ### Changed
--
+- The docker setup has been restructured, which requires changes to existing docker-compose setups. See the migration guide for details. [#5843](https://github.com/scalableminds/webknossos/pull/5843)
 
 ### Fixed
 - Fixed a bug where admins could not share annotations with teams they were not explicitly a member of. [#5845](https://github.com/scalableminds/webknossos/pull/5845)
diff --git a/Dockerfile b/Dockerfile
index f2ac88524a..ac2ce014a2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,10 @@
 FROM openjdk:8-jdk
 RUN apt-get update \
-    && apt-get -y install postgresql-client \
-    && rm -rf /var/lib/apt/lists/*
+  && apt-get -y install postgresql-client \
+  && rm -rf /var/lib/apt/lists/*
 
-RUN mkdir -p /srv/webknossos
-WORKDIR /srv/webknossos
+RUN mkdir -p /webknossos
+WORKDIR /webknossos
 
 COPY target/universal/stage .
 COPY webknossos-datastore/lib/native target/universal/stage/lib/native
@@ -17,6 +17,9 @@ RUN addgroup --system --gid 999 webknossos \
   && chmod go+x bin/webknossos \
   && chmod go+w .
 
+RUN echo '#!/bin/bash\numask 002\nbin/webknossos "$@"\n' > /docker-entrypoint.sh \
+  && chmod +x /docker-entrypoint.sh
+
 HEALTHCHECK \
   --interval=1m --timeout=5s --retries=10 \
   CMD curl --fail http://localhost:9000/api/buildinfo || exit 1
@@ -25,4 +28,4 @@ USER webknossos
 
 EXPOSE 9000
 
-ENTRYPOINT [ "bin/webknossos" ]
+ENTRYPOINT [ "/docker-entrypoint.sh" ]
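
The `RUN echo` line above relies on `/bin/sh` in the Debian-based `openjdk` images being `dash`, whose builtin `echo` expands `\n` escapes. For clarity, the wrapper it generates at build time (a sketch of the resulting file, which is not checked into the repository) is simply:

```bash
#!/bin/bash
# Make files created by the server group-writable before launching it.
umask 002
bin/webknossos "$@"
```

With `umask 002`, files the server creates (for example under `binaryData`) stay group-writable, presumably so that host users sharing the group can still manage them.
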
diff --git a/MIGRATIONS.unreleased.md b/MIGRATIONS.unreleased.md
index b7d958cbf9..96cf842afe 100644
--- a/MIGRATIONS.unreleased.md
+++ b/MIGRATIONS.unreleased.md
@@ -7,7 +7,8 @@ User-facing changes are documented in the [changelog](CHANGELOG.released.md).
 
 ## Unreleased
 
--
+- The docker files now place the webKnossos installation under `/webknossos` instead of `/srv/webknossos`. All mounts, most importantly `/srv/webknossos/binaryData`, need to be changed accordingly.
+- The entrypoints of the docker files have changed. Therefore, any existing `docker-compose.yml` setups need to be adapted. In most cases, only the `entrypoint: bin/webknossos` lines need to be removed (if present).
 
 ### Postgres Evolutions:
 
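
To illustrate the second migration note, a minimal before/after sketch of an affected `docker-compose.yml` (service name and paths modeled on this repository's compose files; existing setups may differ):

```yaml
# before
webknossos:
  image: scalableminds/webknossos:master
  entrypoint: bin/webknossos            # remove: the image now ships its own entrypoint
  volumes:
    - ./binaryData:/srv/webknossos/binaryData   # old mount path

# after
webknossos:
  image: scalableminds/webknossos:master
  volumes:
    - ./binaryData:/webknossos/binaryData       # new mount path
```
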
diff --git a/docker-compose.yml b/docker-compose.yml
index e53738251c..f6c12516c2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -32,7 +32,7 @@ services:
 #      - -Dplay.modules.enabled-="com.scalableminds.webknossos.datastore.DataStoreModule"
 #      - -Dplay.http.router="noDS.Routes"
     volumes:
-      - ./binaryData:/srv/webknossos/binaryData
+      - ./binaryData:/webknossos/binaryData
     environment:
       - POSTGRES_URL=jdbc:postgresql://postgres/webknossos
     user: ${USER_UID:-1000}:${USER_GID:-1000}
@@ -42,9 +42,8 @@
     image: scalableminds/webknossos-datastore:${DOCKER_TAG:-master}
     ports:
       - "9090:9090"
-    entrypoint: bin/webknossos-datastore
     volumes:
-      - ./binaryData:/srv/webknossos-datastore/binaryData
+      - ./binaryData:/webknossos-datastore/binaryData
     command:
       - -J-Xmx20G
       - -J-Xms1G
@@ -58,7 +57,6 @@
     image: scalableminds/webknossos-tracingstore:${DOCKER_TAG:-master}
     ports:
       - "9050:9050"
-    entrypoint: bin/webknossos-tracingstore
     command:
       - -J-Xmx20G
       - -J-Xms1G
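
Note the `user: ${USER_UID:-1000}:${USER_GID:-1000}` line above: together with the new `umask 002` entrypoint it keeps files written to the mounted `./binaryData` owned by, and writable for, the invoking host user, provided the variables are set accordingly. A hypothetical invocation pinning the container user to the current host user:

```bash
# Run webKnossos with the calling user's UID/GID so binaryData stays host-writable.
USER_UID=$(id -u) USER_GID=$(id -g) docker-compose up webknossos
```
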
diff --git a/docs/datasets.md b/docs/datasets.md
index 5112785ce7..a8f9cb135a 100644
--- a/docs/datasets.md
+++ b/docs/datasets.md
@@ -52,7 +52,7 @@ services:
   webknossos:
     ...
     volumes:
-      - ./data:/srv/webknossos/binaryData
+      - ./data:/webknossos/binaryData
       - /cluster:/cluster
     ...
 ```
diff --git a/tools/binary_data/download_knossos_data.py b/tools/binary_data/download_knossos_data.py
deleted file mode 100755
index f6eaf90e6e..0000000000
--- a/tools/binary_data/download_knossos_data.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/python
-
-user = ""
-dataset = "2012-09-28_ex145_07x2_segNew"
-filename = "2012-09-28_ex145_07x2"
-layer = "color"
-section = "."
-X = [28, 29, 30]
-Y = [21, 22, 23]
-Z = [12]
-R = [1]
-
-cmd = "mkdir -p ./%s/%s/%d/x%04d/y%04d/z%04d/ && scp %s@oxalis.at:data/%s/%s/%s/%d/x%04d/y%04d/z%04d/%s_mag%d_x%04d_y%04d_z%04d.raw ./%s/%s/%d/x%04d/y%04d/z%04d/%s_mag%d_x%04d_y%04d_z%04d.raw"
-
-for x in X:
-    for y in Y:
-        for z in Z:
-            for r in R:
-                x /= r
-                y /= r
-                z /= r
-                print cmd % (layer, section, r, x, y, z, user, dataset, layer, section, r, x, y, z, filename, r, x, y, z, layer, section, r, x, y, z, dataset, r, x, y, z)
diff --git a/tools/binary_data/generate_voronoi_cubes.py b/tools/binary_data/generate_voronoi_cubes.py
deleted file mode 100644
index b842292290..0000000000
--- a/tools/binary_data/generate_voronoi_cubes.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import np
-
-d = 128
-points = np.random.randint(0, d, (32, 3))
-hull_space=np.zeros([d,d,d], dtype=np.int8)
-for x in range(d):
-    for y in range(d):
-        for z in range(d):
-            coord = np.array([x,y,z])
-            diff = points - coord
-            dist = diff[:,0]**2 + diff[:,1]**2 + diff[:,2]**2
-            closest = np.argmin(dist)
-            hull_space[x][y][z] = closest * 8
diff --git a/tools/binary_data/grabData.sh b/tools/binary_data/grabData.sh
deleted file mode 100755
index 5f32cc15a8..0000000000
--- a/tools/binary_data/grabData.sh
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/bin/bash
-set -e
-if [ $# -lt 1 ]; then
-    echo "Usage: $0 "
-    exit 1
-fi
-
-USER=$1
-SERVER="dev.oxalis.at"
-CONTROL_PATH="/tmp/%r@%h:%p"
-CONTROL_PATH_LITERAL="/tmp/$USER@$SERVER:22"
-BINARY_DATA_DIR="/srv/binaryData/Structure\ of\ Neocortical\ Circuits\ Group"
-LOCAL_BINARY_DATA_DIR="../../binaryData"
-
-echo "Select a dataset to download"
-echo "cancel with CTRL+C"
-
-function download_segmentation_oriented() {
-    echo "Which section do you want to download?(including color data)"
-    select Section in `ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/segmentation | grep -P "^section"`;
-    do
-        ssh $USER@$SERVER -S $CONTROL_PATH "find $BINARY_DATA_DIR/$DataSet/segmentation/$Section -name '*.raw' -printf '+_%f\\n'" > sectionfiles
-        #loading json and directory structure
-        rsync -e "ssh -S $CONTROL_PATH" -rtvuczm --progress --filter=+_*.json --filter=-_*.raw "$USER@$SERVER:$BINARY_DATA_DIR/$DataSet" $LOCAL_BINARY_DATA_DIR
-        #loading color data
-        rsync -e "ssh -S $CONTROL_PATH" -rtvuczm --progress --filter=._sectionfiles --filter=-_*.raw "$USER@$SERVER:$BINARY_DATA_DIR/$DataSet/color" $LOCAL_BINARY_DATA_DIR/$DataSet
-        #loading segmentation
-        rsync -e "ssh -S $CONTROL_PATH" -rtvuczm --progress --filter=._sectionfiles --filter=-_*.raw "$USER@$SERVER:$BINARY_DATA_DIR/$DataSet/segmentation/$Section" $LOCAL_BINARY_DATA_DIR/$DataSet/segmentation
-        rm sectionfiles
-        break
-    done
-}
-
-function download_block_range() {
-    X_MIN=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/ | grep -P "x[0-9]{4}" | cut -c2- | head -n 1` #| sed 's/0*//' | sed 's/^$/0/'`
-    X_MAX=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/ | grep -P "x[0-9]{4}" | cut -c2- | tail -n 1` #| sed 's/0*//' | sed 's/^$/0/'`
-    echo "Dataset consists of x$X_MIN - x$X_MAX"
-    echo "Lower bound for x?"
-    read X_LOWER_B
-    if [ $X_LOWER_B -lt $X_MIN -o $X_LOWER_B -gt $X_MAX ]; then
-        echo "Bad lower bound..."
-        exit 1
-    fi
-    echo "Upper bound for x?"
-    read X_UPPER_B
-    if [ $X_UPPER_B -lt $X_LOWER_B -o $X_UPPER_B -gt $X_MAX ]; then
-        echo "Bad upper bound..."
-        exit 1
-    fi
-
-    Y_MIN=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/x$X_MIN/ | grep -P "y[0-9]{4}" | cut -c2- | head -n 1`
-    Y_MAX=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/x$X_MIN/ | grep -P "y[0-9]{4}" | cut -c2- | tail -n 1`
-    echo "Dataset consists of y$Y_MIN - y$Y_MAX"
-    echo "Lower bound for y?"
-    read Y_LOWER_B
-    if [ $Y_LOWER_B -lt $Y_MIN -o $Y_LOWER_B -gt $Y_MAX ]; then
-        echo "Bad lower bound..."
-        exit 1
-    fi
-    echo "Upper bound for y?"
-    read Y_UPPER_B
-    if [ $Y_UPPER_B -lt $Y_LOWER_B -o $Y_UPPER_B -gt $Y_MAX ]; then
-        echo "Bad upper bound..."
-        exit 1
-    fi
-
-    Z_MIN=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/x$X_MIN/y$Y_MIN/ | grep -P "z[0-9]{4}" | cut -c2- | head -n 1`
-    Z_MAX=`ssh $USER@$SERVER -S $CONTROL_PATH ls $BINARY_DATA_DIR/$DataSet/color/1/x$X_MIN/y$Y_MIN/ | grep -P "z[0-9]{4}" | cut -c2- | tail -n 1`
-    echo "Dataset consists of z$Z_MIN - z$Z_MAX"
-    echo "Lower bound for z?"
-    read Z_LOWER_B
-    if [ $Z_LOWER_B -lt $Z_MIN -o $Z_LOWER_B -gt $Z_MAX ]; then
-        echo "Bad lower bound..."
-        exit 1
-    fi
-    echo "Upper bound for z?"
-    read Z_UPPER_B
-    if [ $Z_UPPER_B -lt $Z_LOWER_B -o $Z_UPPER_B -gt $Z_MAX ]; then
-        echo "Bad upper bound..."
-        exit 1
-    fi
-    if [ -f tempfilter ]; then
-        rm tempfilter
-    fi
-    for x in `seq $X_LOWER_B $X_UPPER_B`; do
-        for y in `seq $Y_LOWER_B $Y_UPPER_B`; do
-            for z in `seq $Z_LOWER_B $Z_UPPER_B`; do
-                printf "+_%s_mag1_x%04d_y%04d_z%04d.raw\n" $DataSet $x $y $z >> tempfilter
-            done
-        done
-    done
-    rsync -e "ssh -S $CONTROL_PATH" -rtvuczm --progress --filter=._tempfilter --filter=+_*.json --filter=-_*.raw "$USER@$SERVER:$BINARY_DATA_DIR/$DataSet" $LOCAL_BINARY_DATA_DIR
-    rm tempfilter
-}
-
-cd `dirname $0`
-echo "setting up ssh connection"
-ssh $USER@$SERVER -fN -M -S $CONTROL_PATH
-PS3="Your choice: "
-select DataSet in `ssh -S $CONTROL_PATH $USER@$SERVER ls $BINARY_DATA_DIR`;
-do
-    echo "You picked $DataSet ($REPLY)"
-    echo "Download full or partial dataset?"
-    select Choice in `echo full; echo partial`;
-    do
-        case $Choice in
-            "full")
-                rsync -e "ssh -S $CONTROL_PATH" -rtvucz --progress "$USER@$SERVER:$BINARY_DATA_DIR/$DataSet" $LOCAL_BINARY_DATA_DIR
-                ;;
-            "partial")
-                RANGE=4
-                RESOLUTION=1
-                ssh $USER@$SERVER -S $CONTROL_PATH test -d $BINARY_DATA_DIR/$DataSet/segmentation
-
-                if [[ $? -eq 0 ]]; then
-                    echo "For $DataSet segmentation is available, do you want to download segmentation block oriented or do you want to give ranges for blocks?"
-                    select Choice in `echo "segmentation-oriented"; echo "range-oriented"`;
-                    do
-                        case $Choice in
-                            "segmentation-oriented")
-                                download_segmentation_oriented
-                                ;;
-                            "range-oriented")
-                                download_block_range
-                                ;;
-                        esac
-                        break
-                    done
-                else
-                    download_block_range
-                fi
-                ;;
-        esac
-        break
-    done
-    break
-done
-
-rm $CONTROL_PATH_LITERAL
\ No newline at end of file
diff --git a/tools/binary_data/knossos_to_wkw.py b/tools/binary_data/knossos_to_wkw.py
deleted file mode 100644
index 39b0b787ff..0000000000
--- a/tools/binary_data/knossos_to_wkw.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import itertools
-import json
-import math
-import os
-import re
-
-def mortonEncode(x, y, z):
-    p = 0
-    m = 0
-    while x + y + z > 0:
-        m |= ((x & 1) << p) | ((y & 1) << (p + 1)) | ((z & 1) << (p + 2))
-        x = x >> 1
-        y = y >> 1
-        z = z >> 1
-        p += 3
-    return m
-
-def mortonDecode(m):
-    p = 0
-    x = 0
-    y = 0
-    z = 0
-    while m > 0:
-        x |= (m & 1) << p
-        m = m >> 1
-        y |= (m & 1) << p
-        m = m >> 1
-        z |= (m & 1) << p
-        m = m >> 1
-        p += 1
-    return x, y, z
-
-def reorganizeInputCube(input, settings):
-    buckets = [bytearray() for _ in range(pow(settings['inputCubeSize'] / settings['bucketSize'], 3))]
-    bytesPerElement = len(input) / pow(settings['inputCubeSize'], 3)
-    offset = 0
-    c = 0
-    for z in range(settings['inputCubeSize']):
-        for y in range(settings['inputCubeSize']):
-            for x in range(settings['inputCubeSize'] / settings['bucketSize']):
-                data = input[offset:offset + settings['bucketSize'] * bytesPerElement]
-                c += len(data)
-                bucketIndex = mortonEncode(
-                    x,
-                    int(y / settings['bucketSize']),
-                    int(z / settings['bucketSize'])
-                )
-                buckets[bucketIndex] += data
-                offset += settings['bucketSize'] * bytesPerElement
-    return bytearray().join(buckets)
-
-def convertZoomstep(input, output, settings):
-    os.makedirs(output)
-
-    # Crawl all raw input files of the zoomStep to
-    # a) determinte the datasets size and
-    # b) keep a mapping from each file's voxel offset to its filename.
-    rawFiles = {}
-    rawFileNames = itertools.chain(*map(lambda x: map(lambda y: x[0] + '/' + y, x[2]), os.walk(input)))
-    for rawFileName in rawFileNames:
-        match = re.search('^.*x(\d+)_y(\d+)_z(\d+)\.raw$', rawFileName)
-        if match:
-            # Compute voxel offset from filename.
-            offset = (
-                int(match.group(1)) * settings['inputCubeSize'],
-                int(match.group(2)) * settings['inputCubeSize'],
-                int(match.group(3)) * settings['inputCubeSize']
-            )
-            rawFiles[offset] = rawFileName
-
-    # Determine size of the data in voxels.
-    inputSize = map(lambda i: (max(map(lambda key: key[i], rawFiles.keys())) + settings['inputCubeSize']), range(3))
-
-    inputCubesPerOutputCube = pow(settings['outputCubeSize'] / settings['inputCubeSize'], 3)
-    inputCubeSize = os.stat(rawFiles.values()[0]).st_size
-    emptyInputCube = bytearray(inputCubeSize)
-
-    for x in range(0, inputSize[0], settings['outputCubeSize']):
-        for y in range(0, inputSize[1], settings['outputCubeSize']):
-            for z in range(0, inputSize[2], settings['outputCubeSize']):
-
-                # Create new output cube.
-                outputCubeFileName = output + '/%d_%d_%d.raw' % (x, y, z)
-                print('Creating output file: "%s"' % outputCubeFileName)
-                with open(outputCubeFileName, 'wb') as outputCubeFile:
-
-                    # Loop through all input cubes for the current output cube in morton order.
-                    # Each input cube is opened, split into buckets and appended to the output cube.
-                    for inputCubeIndex in range(inputCubesPerOutputCube):
-                        print '%d/%d' % (inputCubeIndex, inputCubesPerOutputCube)
-                        inputCubeCoords = mortonDecode(inputCubeIndex)
-                        inputCubeOffset = (
-                            x + inputCubeCoords[0] * settings['inputCubeSize'],
-                            y + inputCubeCoords[1] * settings['inputCubeSize'],
-                            z + inputCubeCoords[2] * settings['inputCubeSize']
-                        )
-
-                        if rawFiles.has_key(inputCubeOffset):
-                            with open(rawFiles[inputCubeOffset], 'rb') as inputCubeFile:
-                                inputCube = inputCubeFile.read()
-                                outputSubcube = reorganizeInputCube(inputCube, settings)
-                                outputCubeFile.write(outputSubcube)
-                        else:
-                            # Input cube does not exist -> write an appropriate number of 0-bytes.
-                            outputCubeFile.write(emptyInputCube)
-
-settings = {}
-settings['bucketSize'] = 32
-settings['inputCubeSize'] = 128
-settings['outputCubeSize'] = 1024
-
-convertZoomstep('./dataset/color/1', './output/color/1', settings)
diff --git a/tools/binary_data/segmentation-deploy.sh b/tools/binary_data/segmentation-deploy.sh
deleted file mode 100644
index 1b803047ee..0000000000
--- a/tools/binary_data/segmentation-deploy.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-
-ZSTOR_SERVER="newton"
-SCM_SERVER="78.47.16.186"
-ZSTOR_BINARY_DATA_DIR="/zdata/thomas/binaryData"
-SCM_BINARY_DATA_DIR="/srv/binaryData"
-
-if [ $# -lt 3 ]; then
-    echo "Usage: $0 "
-    exit 1
-fi
-
-SYNCDIR="$1"
-USER_LEVELCREATOR="$2"
-USER_NEWTON="$3"
-
-function syncToRemote
-{
-    dataset_path=$1
-    remote=$2
-    rsync -rtucz --progress $dataset_path"/segmentation/" $remote/segmentation
-    rsync -tuc $dataset_path"/settings.json" $remote
-}
-
-read -p "Do you really want to distribute the segmentation data in ${SYNCDIR}? [y/N]" -n 1 -r
-if [[ $REPLY == "y" ]]
-then
-    for dataset in `find ${SYNCDIR} -follow -mindepth 2 -maxdepth 2 -type d -name segmentation | cut -d/ -f2`
-    do
-        syncToRemote ${SYNCDIR}/${dataset} "${USER_NEWTON}@${ZSTOR_SERVER}:${ZSTOR_BINARY_DATA_DIR}/${dataset}"
-        syncToRemote ${SYNCDIR}/${dataset} "${USER_LEVELCREATOR}@${SCM_SERVER}:${SCM_BINARY_DATA_DIR}/${dataset}"
-    done
-fi
[y/N]" -n 1 -r -if [[ $REPLY == "y" ]] -then - for dataset in `find ${SYNCDIR} -follow -mindepth 2 -maxdepth 2 -type d -name segmentation | cut -d/ -f2` - do - syncToRemote ${SYNCDIR}/${dataset} "${USER_NEWTON}@${ZSTOR_SERVER}:${ZSTOR_BINARY_DATA_DIR}/${dataset}" - syncToRemote ${SYNCDIR}/${dataset} "${USER_LEVELCREATOR}@${SCM_SERVER}:${SCM_BINARY_DATA_DIR}/${dataset}" - done -fi diff --git a/tools/dev_deployment/refresh_schema.sh b/tools/dev_deployment/refresh_schema.sh index 01c0092571..4d52a29343 100755 --- a/tools/dev_deployment/refresh_schema.sh +++ b/tools/dev_deployment/refresh_schema.sh @@ -20,5 +20,5 @@ POD=$( echo "Going to run 'refresh_schema.sh' in pod $POD" ssh -t kube.scm.io " - sudo kubectl exec $POD -n $NAMESPACE -- /srv/webknossos/tools/postgres/refresh_schema.sh + sudo kubectl exec $POD -n $NAMESPACE -- /webknossos/tools/postgres/refresh_schema.sh " diff --git a/tools/hosting/docker-compose.yml b/tools/hosting/docker-compose.yml index 779fad1844..09a54b4a85 100644 --- a/tools/hosting/docker-compose.yml +++ b/tools/hosting/docker-compose.yml @@ -22,7 +22,7 @@ services: - -Dtracingstore.publicUri=https://${PUBLIC_HOST} - -Ddatastore.publicUri=https://${PUBLIC_HOST} volumes: - - ./binaryData:/srv/webknossos/binaryData + - ./binaryData:/webknossos/binaryData environment: - POSTGRES_URL=jdbc:postgresql://postgres/webknossos - VIRTUAL_HOST=${PUBLIC_HOST} diff --git a/webknossos-datastore/Dockerfile b/webknossos-datastore/Dockerfile index b65e8113c6..bfc0c4aed7 100644 --- a/webknossos-datastore/Dockerfile +++ b/webknossos-datastore/Dockerfile @@ -1,12 +1,12 @@ FROM openjdk:8-jre -RUN mkdir -p /srv/webknossos-datastore \ +RUN mkdir -p /webknossos-datastore \ && groupadd -g 1000 -r webknossos \ && useradd -u 1000 -r -g webknossos webknossos -WORKDIR /srv/webknossos-datastore +WORKDIR /webknossos-datastore -VOLUME /srv/webknossos-datastore/binaryData /tmp +VOLUME /webknossos-datastore/binaryData /tmp COPY target/universal/stage . COPY lib/native target/universal/stage/lib/native @@ -15,6 +15,9 @@ RUN chown -R webknossos . \ && chmod go+x bin/webknossos-datastore \ && chmod go+w . 
diff --git a/webknossos-datastore/Dockerfile b/webknossos-datastore/Dockerfile
index b65e8113c6..bfc0c4aed7 100644
--- a/webknossos-datastore/Dockerfile
+++ b/webknossos-datastore/Dockerfile
@@ -1,12 +1,12 @@
 FROM openjdk:8-jre
 
-RUN mkdir -p /srv/webknossos-datastore \
+RUN mkdir -p /webknossos-datastore \
   && groupadd -g 1000 -r webknossos \
   && useradd -u 1000 -r -g webknossos webknossos
 
-WORKDIR /srv/webknossos-datastore
+WORKDIR /webknossos-datastore
 
-VOLUME /srv/webknossos-datastore/binaryData /tmp
+VOLUME /webknossos-datastore/binaryData /tmp
 
 COPY target/universal/stage .
 COPY lib/native target/universal/stage/lib/native
@@ -15,6 +15,9 @@ RUN chown -R webknossos . \
   && chmod go+x bin/webknossos-datastore \
   && chmod go+w .
 
+RUN echo '#!/bin/bash\numask 002\nbin/webknossos-datastore "$@"\n' > /docker-entrypoint.sh \
+  && chmod +x /docker-entrypoint.sh
+
 USER webknossos
 
 HEALTHCHECK \
@@ -23,5 +26,5 @@
 
 EXPOSE 9090
 
-ENTRYPOINT ["bin/webknossos-datastore"]
+ENTRYPOINT ["/docker-entrypoint.sh"]
 CMD ["-J-Xmx20G", "-J-Xms1G", "-Dconfig.file=conf/standalone-datastore.conf", "-Dlogger.file=conf/logback-docker.xml", "-Dlogback.configurationFile=conf/logback-docker.xml", "-Dhttp.port=9090", "-Dhttp.address=0.0.0.0"]
diff --git a/webknossos-datastore/deployment/docker-compose.yml b/webknossos-datastore/deployment/docker-compose.yml
index 5d8c5ebd74..108c6f955c 100644
--- a/webknossos-datastore/deployment/docker-compose.yml
+++ b/webknossos-datastore/deployment/docker-compose.yml
@@ -1,9 +1,8 @@
-version: '2.1'
+version: "2.1"
 
 services:
   webknossos-datastore:
     image: scalableminds/webknossos-datastore:${DATASTORE_TAG}
-    entrypoint: bin/webknossos-datastore
     command:
       - -J-Xmx20G
       - -J-Xms1G
@@ -21,9 +20,6 @@
       - $USER_GROUP_1
       - $USER_GROUP_2
     volumes:
-      - $BINARY_DATA_PATH:/srv/webknossos-datastore/binaryData
+      - $BINARY_DATA_PATH:/webknossos-datastore/binaryData
       - ./tmp:/tmp
-      - ./config/datastore-docker.conf:/srv/webknossos-datastore/conf/docker.conf
-    environment:
-      - NEW_RELIC_LICENSE_KEY
-      - NEW_RELIC_APP_NAME
+      - ./config/datastore-docker.conf:/webknossos-datastore/conf/docker.conf
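
The `$USER_GROUP_1`/`$USER_GROUP_2` context lines above (presumably a `group_add:` list, whose key lies outside the hunk context) interact with the new `umask 002` entrypoint: data the datastore writes stays writable for those host groups. A hypothetical invocation supplying the variables used in this file:

```bash
# Supply the tag, data path, and the host groups the container should join.
DATASTORE_TAG=master \
BINARY_DATA_PATH=/data/binaryData \
USER_GROUP_1=$(id -g) \
USER_GROUP_2=1000 \
docker-compose up -d webknossos-datastore
```
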
diff --git a/webknossos-tracingstore/Dockerfile b/webknossos-tracingstore/Dockerfile
index a79beb055e..53a498c8f4 100644
--- a/webknossos-tracingstore/Dockerfile
+++ b/webknossos-tracingstore/Dockerfile
@@ -1,13 +1,13 @@
 FROM openjdk:8-jre
 
-RUN mkdir -p /srv/webknossos-tracingstore \
+RUN mkdir -p /webknossos-tracingstore \
   && groupadd -g 1000 -r webknossos \
   && useradd -u 1000 -r -g webknossos webknossos \
-  && mkdir /srv/webknossos-tracingstore/tracingData
+  && mkdir /webknossos-tracingstore/tracingData
 
-WORKDIR /srv/webknossos-tracingstore
+WORKDIR /webknossos-tracingstore
 
-VOLUME /srv/webknossos-tracingstore/tracingData /tmp
+VOLUME /webknossos-tracingstore/tracingData /tmp
 
 COPY target/universal/stage .
 
@@ -15,6 +15,9 @@ RUN chown -R webknossos . \
  && chmod go+x bin/webknossos-tracingstore \
  && chmod go+w .
 
+RUN echo '#!/bin/bash\numask 002\nbin/webknossos-tracingstore "$@"\n' > /docker-entrypoint.sh \
+  && chmod +x /docker-entrypoint.sh
+
 USER webknossos
 
 HEALTHCHECK \
@@ -23,5 +26,5 @@
 
 EXPOSE 9050
 
-ENTRYPOINT ["bin/webknossos-tracingstore"]
+ENTRYPOINT ["/docker-entrypoint.sh"]
 CMD ["-J-Xmx20G", "-J-Xms1G", "-Dconfig.file=conf/standalone-tracingstore.conf", "-Dlogger.file=conf/logback-docker.xml", "-Dlogback.configurationFile=conf/logback-docker.xml", "-Dhttp.port=9090", "-Dhttp.address=0.0.0.0"]
diff --git a/webknossos-tracingstore/deployment/docker-compose.yml b/webknossos-tracingstore/deployment/docker-compose.yml
index 6fcea9bc59..9133df3c97 100644
--- a/webknossos-tracingstore/deployment/docker-compose.yml
+++ b/webknossos-tracingstore/deployment/docker-compose.yml
@@ -1,9 +1,8 @@
-version: '2.1'
+version: "2.1"
 
 services:
   webknossos-tracingstore:
     image: scalableminds/webknossos-tracingstore:${DATASTORE_TAG}
-    entrypoint: bin/webknossos-tracingstore
     command:
       - -J-Xmx20G
      - -J-Xms1G
@@ -25,10 +24,7 @@
       - $USER_GROUP_2
     volumes:
       - ./tmp:/tmp
-      - ./config/tracingstore-docker.conf:/srv/webknossos-tracingstore/conf/docker.conf
-    environment:
-      - NEW_RELIC_LICENSE_KEY
-      - NEW_RELIC_APP_NAME
+      - ./config/tracingstore-docker.conf:/webknossos-tracingstore/conf/docker.conf
 
   fossildb:
     image: scalableminds/fossildb:${FOSSILDB_TAG}
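
After adapting a setup, container health can be verified the same way the images' own `HEALTHCHECK`s do; for webKnossos itself (port as in the compose files above):

```bash
# Returns build info JSON once the server is up; fails non-zero otherwise.
curl --fail http://localhost:9000/api/buildinfo
```
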