From b3967ed3caedecc5cb15a2b43496575e18ed994c Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 24 Aug 2022 11:36:28 +0200 Subject: [PATCH 1/7] [WIP] Unsigned Access to Pubilc S3 Data --- .../java.nio.file.spi.FileSystemProvider | 2 +- project/Dependencies.scala | 10 +- .../datastore/storage/FileSystemsHolder.scala | 7 +- .../s3fsfork/AmazonS3ClientFactory.java | 15 + .../upplication/s3fsfork/AmazonS3Factory.java | 151 ++++ .../s3fsfork/S3AccessControlList.java | 64 ++ .../upplication/s3fsfork/S3FileChannel.java | 168 +++++ .../com/upplication/s3fsfork/S3FileStore.java | 146 ++++ .../s3fsfork/S3FileStoreAttributeView.java | 47 ++ .../upplication/s3fsfork/S3FileSystem.java | 177 +++++ .../S3FileSystemConfigurationException.java | 9 + .../s3fsfork/S3FileSystemProvider.java | 645 ++++++++++++++++++ .../com/upplication/s3fsfork/S3Iterator.java | 192 ++++++ .../app/com/upplication/s3fsfork/S3Path.java | 614 +++++++++++++++++ .../S3ReadOnlySeekableByteChannel.java | 170 +++++ .../s3fsfork/S3SeekableByteChannel.java | 147 ++++ .../attribute/S3BasicFileAttributeView.java | 33 + .../attribute/S3BasicFileAttributes.java | 85 +++ .../attribute/S3PosixFileAttributeView.java | 61 ++ .../attribute/S3PosixFileAttributes.java | 37 + .../s3fsfork/attribute/S3UserPrincipal.java | 17 + .../util/AnonymousAWSCredentialsProvider.java | 18 + .../s3fsfork/util/AttributesUtils.java | 100 +++ .../com/upplication/s3fsfork/util/Cache.java | 29 + .../upplication/s3fsfork/util/IOUtils.java | 32 + .../upplication/s3fsfork/util/S3Utils.java | 198 ++++++ .../java.nio.file.spi.FileSystemProvider | 2 +- 27 files changed, 3169 insertions(+), 7 deletions(-) create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java create mode 100644 
webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java create mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java diff --git a/conf/META-INF/services/java.nio.file.spi.FileSystemProvider 
b/conf/META-INF/services/java.nio.file.spi.FileSystemProvider index 3934e340ed..12cd6e4c29 100644 --- a/conf/META-INF/services/java.nio.file.spi.FileSystemProvider +++ b/conf/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -1,3 +1,3 @@ -com.upplication.s3fs.S3FileSystemProvider +com.upplication.s3fsfork.S3FileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpFileSystemProvider diff --git a/project/Dependencies.scala b/project/Dependencies.scala index e2c23f2a80..7eb753fb84 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -45,9 +45,13 @@ object Dependencies { private val swagger = "io.swagger" %% "swagger-play2" % "1.7.1" private val jhdf = "cisd" % "jhdf5" % "19.04.0" private val ucarCdm = "edu.ucar" % "cdm-core" % "5.3.3" - private val s3fs = "org.lasersonlab" % "s3fs" % "2.2.3" + // private val s3fs = "org.lasersonlab" % "s3fs" % "2.2.3" private val jblosc = "org.lasersonlab" % "jblosc" % "1.0.1" private val scalajHttp = "org.scalaj" %% "scalaj-http" % "2.4.2" + private val guava = "com.google.guava" % "guava" % "18.0" + private val awsS3 = "com.amazonaws" % "aws-java-sdk-s3" % "1.11.232" + private val tika = "org.apache.tika" % "tika-core" % "1.5" + private val sql = Seq( "com.typesafe.slick" %% "slick" % "3.2.3", @@ -91,7 +95,9 @@ object Dependencies { redis, jhdf, ucarCdm, - s3fs, + guava, + awsS3, + tika, jblosc, scalajHttp ) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala index 0091e7cece..d029ee01a0 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala @@ -5,11 +5,12 @@ import java.net.URI 
import java.nio.file.spi.FileSystemProvider import java.nio.file.{FileSystem, FileSystemAlreadyExistsException, FileSystems} import java.util.ServiceLoader - import com.google.common.collect.ImmutableMap import com.scalableminds.util.cache.LRUConcurrentCache import com.scalableminds.webknossos.datastore.dataformats.zarr.RemoteSourceDescriptor import com.typesafe.scalalogging.LazyLogging +import com.upplication.s3fsfork +import com.upplication.s3fsfork.AmazonS3Factory import scala.collection.JavaConverters._ @@ -77,8 +78,8 @@ object FileSystemsHolder extends LazyLogging { if (scheme == schemeS3) { ImmutableMap .builder[String, Any] - .put(com.upplication.s3fs.AmazonS3Factory.ACCESS_KEY, user) - .put(com.upplication.s3fs.AmazonS3Factory.SECRET_KEY, password) + .put(AmazonS3Factory.ACCESS_KEY, user) + .put(s3fsfork.AmazonS3Factory.SECRET_KEY, password) .build } else if (scheme == schemeHttps) { ImmutableMap.builder[String, Any].put("user", user).put("password", password).build diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java new file mode 100644 index 0000000000..2be52fc5f0 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java @@ -0,0 +1,15 @@ +package com.upplication.s3fsfork; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.metrics.RequestMetricCollector; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; + +public class AmazonS3ClientFactory extends AmazonS3Factory { + + @Override + protected AmazonS3 createAmazonS3(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricsCollector) { + return new AmazonS3Client(credentialsProvider, clientConfiguration, requestMetricsCollector); + } +} diff --git 
a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java new file mode 100644 index 0000000000..3640308164 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java @@ -0,0 +1,151 @@ +package com.upplication.s3fsfork; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.auth.AnonymousAWSCredentials; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.amazonaws.metrics.RequestMetricCollector; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.S3ClientOptions; +import com.upplication.s3fsfork.util.AnonymousAWSCredentialsProvider; + +import java.net.URI; +import java.util.Properties; + + +/** + * Factory base class to create a new AmazonS3 instance. 
+ */ +public abstract class AmazonS3Factory { + + public static final String ACCESS_KEY = "s3fs_access_key"; + public static final String SECRET_KEY = "s3fs_secret_key"; + public static final String REQUEST_METRIC_COLLECTOR_CLASS = "s3fs_request_metric_collector_class"; + public static final String CONNECTION_TIMEOUT = "s3fs_connection_timeout"; + public static final String MAX_CONNECTIONS = "s3fs_max_connections"; + public static final String MAX_ERROR_RETRY = "s3fs_max_retry_error"; + public static final String PROTOCOL = "s3fs_protocol"; + public static final String PROXY_DOMAIN = "s3fs_proxy_domain"; + public static final String PROXY_HOST = "s3fs_proxy_host"; + public static final String PROXY_PASSWORD = "s3fs_proxy_password"; + public static final String PROXY_PORT = "s3fs_proxy_port"; + public static final String PROXY_USERNAME = "s3fs_proxy_username"; + public static final String PROXY_WORKSTATION = "s3fs_proxy_workstation"; + public static final String SOCKET_SEND_BUFFER_SIZE_HINT = "s3fs_socket_send_buffer_size_hint"; + public static final String SOCKET_RECEIVE_BUFFER_SIZE_HINT = "s3fs_socket_receive_buffer_size_hint"; + public static final String SOCKET_TIMEOUT = "s3fs_socket_timeout"; + public static final String USER_AGENT = "s3fs_user_agent"; + public static final String SIGNER_OVERRIDE = "s3fs_signer_override"; + public static final String PATH_STYLE_ACCESS = "s3fs_path_style_access"; + + /** + * Build a new Amazon S3 instance with the URI and the properties provided + * @param uri URI mandatory + * @param props Properties with the credentials and others options + * @return AmazonS3 + */ + public AmazonS3 getAmazonS3(URI uri, Properties props) { + AmazonS3 client = createAmazonS3(getCredentialsProvider(props), getClientConfiguration(props), getRequestMetricsCollector(props)); + if (uri.getHost() != null) { + if (uri.getPort() != -1) + client.setEndpoint(uri.getHost() + ':' + uri.getPort()); + else + client.setEndpoint(uri.getHost()); + } + + 
client.setS3ClientOptions(getClientOptions(props)); + + return client; + } + + /** + * should return a new AmazonS3 + * + * @param credentialsProvider AWSCredentialsProvider mandatory + * @param clientConfiguration ClientConfiguration mandatory + * @param requestMetricsCollector RequestMetricCollector mandatory + * @return {@link com.amazonaws.services.s3.AmazonS3} + */ + protected abstract AmazonS3 createAmazonS3(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricsCollector); + + protected AWSCredentialsProvider getCredentialsProvider(Properties props) { + System.out.println("Hello from getCredentialsProvider"); + AWSCredentialsProvider credentialsProvider; + if (props.getProperty(ACCESS_KEY) == null && props.getProperty(SECRET_KEY) == null) { + System.out.println("No access key and secret key present, returning anonymous credentials provider"); + // credentialsProvider = new DefaultAWSCredentialsProviderChain(); + credentialsProvider = new AnonymousAWSCredentialsProvider(); + } + else { + System.out.println("Access key and secret key present, returning static credentials provider"); + credentialsProvider = new AWSStaticCredentialsProvider(getAWSCredentials(props)); + } + return credentialsProvider; + } + + protected RequestMetricCollector getRequestMetricsCollector(Properties props) { + RequestMetricCollector requestMetricCollector = null; + if (props.containsKey(REQUEST_METRIC_COLLECTOR_CLASS)) { + try { + requestMetricCollector = (RequestMetricCollector) Class.forName(props.getProperty(REQUEST_METRIC_COLLECTOR_CLASS)).newInstance(); + } catch (Throwable t) { + throw new IllegalArgumentException("Can't instantiate REQUEST_METRIC_COLLECTOR_CLASS " + props.getProperty(REQUEST_METRIC_COLLECTOR_CLASS), t); + } + } + return requestMetricCollector; + } + + protected S3ClientOptions getClientOptions(Properties props) { + S3ClientOptions.Builder builder = S3ClientOptions.builder(); + if 
(props.getProperty(PATH_STYLE_ACCESS) != null && + Boolean.parseBoolean(props.getProperty(PATH_STYLE_ACCESS))) + builder.setPathStyleAccess(true); + + return builder.build(); + } + + protected ClientConfiguration getClientConfiguration(Properties props) { + ClientConfiguration clientConfiguration = new ClientConfiguration(); + if (props.getProperty(CONNECTION_TIMEOUT) != null) + clientConfiguration.setConnectionTimeout(Integer.parseInt(props.getProperty(CONNECTION_TIMEOUT))); + if (props.getProperty(MAX_CONNECTIONS) != null) + clientConfiguration.setMaxConnections(Integer.parseInt(props.getProperty(MAX_CONNECTIONS))); + if (props.getProperty(MAX_ERROR_RETRY) != null) + clientConfiguration.setMaxErrorRetry(Integer.parseInt(props.getProperty(MAX_ERROR_RETRY))); + if (props.getProperty(PROTOCOL) != null) + clientConfiguration.setProtocol(Protocol.valueOf(props.getProperty(PROTOCOL))); + if (props.getProperty(PROXY_DOMAIN) != null) + clientConfiguration.setProxyDomain(props.getProperty(PROXY_DOMAIN)); + if (props.getProperty(PROXY_HOST) != null) + clientConfiguration.setProxyHost(props.getProperty(PROXY_HOST)); + if (props.getProperty(PROXY_PASSWORD) != null) + clientConfiguration.setProxyPassword(props.getProperty(PROXY_PASSWORD)); + if (props.getProperty(PROXY_PORT) != null) + clientConfiguration.setProxyPort(Integer.parseInt(props.getProperty(PROXY_PORT))); + if (props.getProperty(PROXY_USERNAME) != null) + clientConfiguration.setProxyUsername(props.getProperty(PROXY_USERNAME)); + if (props.getProperty(PROXY_WORKSTATION) != null) + clientConfiguration.setProxyWorkstation(props.getProperty(PROXY_WORKSTATION)); + int socketSendBufferSizeHint = 0; + if (props.getProperty(SOCKET_SEND_BUFFER_SIZE_HINT) != null) + socketSendBufferSizeHint = Integer.parseInt(props.getProperty(SOCKET_SEND_BUFFER_SIZE_HINT)); + int socketReceiveBufferSizeHint = 0; + if (props.getProperty(SOCKET_RECEIVE_BUFFER_SIZE_HINT) != null) + socketReceiveBufferSizeHint = 
Integer.parseInt(props.getProperty(SOCKET_RECEIVE_BUFFER_SIZE_HINT)); + clientConfiguration.setSocketBufferSizeHints(socketSendBufferSizeHint, socketReceiveBufferSizeHint); + if (props.getProperty(SOCKET_TIMEOUT) != null) + clientConfiguration.setSocketTimeout(Integer.parseInt(props.getProperty(SOCKET_TIMEOUT))); + if (props.getProperty(USER_AGENT) != null) + clientConfiguration.setUserAgentPrefix(props.getProperty(USER_AGENT)); + if (props.getProperty(SIGNER_OVERRIDE) != null) + clientConfiguration.setSignerOverride(props.getProperty(SIGNER_OVERRIDE)); + return clientConfiguration; + } + + protected BasicAWSCredentials getAWSCredentials(Properties props) { + return new BasicAWSCredentials(props.getProperty(ACCESS_KEY), props.getProperty(SECRET_KEY)); + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java new file mode 100644 index 0000000000..ce80910601 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java @@ -0,0 +1,64 @@ +package com.upplication.s3fsfork; + +import static java.lang.String.format; + +import java.nio.file.AccessDeniedException; +import java.nio.file.AccessMode; +import java.util.EnumSet; + +import com.amazonaws.services.s3.model.AccessControlList; +import com.amazonaws.services.s3.model.Grant; +import com.amazonaws.services.s3.model.Owner; +import com.amazonaws.services.s3.model.Permission; + +public class S3AccessControlList { + private String fileStoreName; + private String key; + private AccessControlList acl; + private Owner owner; + + public S3AccessControlList(String fileStoreName, String key, AccessControlList acl, Owner owner) { + this.fileStoreName = fileStoreName; + this.acl = acl; + this.key = key; + this.owner = owner; + } + + public String getKey() { + return key; + } + + /** + * have almost one of the permission set in the parameter permissions + * + * @param permissions almost 
one + * @return + */ + private boolean hasPermission(EnumSet permissions) { + for (Grant grant : acl.getGrantsAsList()) + if (grant.getGrantee().getIdentifier().equals(owner.getId()) && permissions.contains(grant.getPermission())) + return true; + return false; + } + + public void checkAccess(AccessMode[] modes) throws AccessDeniedException { + for (AccessMode accessMode : modes) { + switch (accessMode) { + case EXECUTE: + throw new AccessDeniedException(fileName(), null, "file is not executable"); + case READ: + if (!hasPermission(EnumSet.of(Permission.FullControl, Permission.Read))) + throw new AccessDeniedException(fileName(), null, "file is not readable"); + break; + case WRITE: + if (!hasPermission(EnumSet.of(Permission.FullControl, Permission.Write))) + throw new AccessDeniedException(fileName(), null, format("bucket '%s' is not writable", fileStoreName)); + break; + } + } + } + + private String fileName() { + return fileStoreName + S3Path.PATH_SEPARATOR + key; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java new file mode 100644 index 0000000000..41badbdc05 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java @@ -0,0 +1,168 @@ +package com.upplication.s3fsfork; + +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.S3Object; +import org.apache.tika.Tika; + +import java.io.*; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.file.*; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static java.lang.String.format; + +public class S3FileChannel extends FileChannel { + + private com.upplication.s3fsfork.S3Path path; + private Set options; 
+ private FileChannel filechannel; + private Path tempFile; + + public S3FileChannel(S3Path path, Set options) throws IOException { + this.path = path; + this.options = Collections.unmodifiableSet(new HashSet<>(options)); + String key = path.getKey(); + boolean exists = path.getFileSystem().provider().exists(path); + + if (exists && this.options.contains(StandardOpenOption.CREATE_NEW)) + throw new FileAlreadyExistsException(format("target already exists: %s", path)); + else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) && + !this.options.contains(StandardOpenOption.CREATE)) + throw new NoSuchFileException(format("target not exists: %s", path)); + + tempFile = Files.createTempFile("temp-s3-", key.replaceAll("/", "_")); + boolean removeTempFile = true; + try { + if (exists) { + try (S3Object object = path.getFileSystem() + .getClient() + .getObject(path.getFileStore().getBucket().getName(), key)) { + Files.copy(object.getObjectContent(), tempFile, StandardCopyOption.REPLACE_EXISTING); + } + } + + Set fileChannelOptions = new HashSet<>(this.options); + fileChannelOptions.remove(StandardOpenOption.CREATE_NEW); + filechannel = FileChannel.open(tempFile, fileChannelOptions); + removeTempFile = false; + } finally { + if (removeTempFile) { + Files.deleteIfExists(tempFile); + } + } + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return filechannel.read(dst); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + return filechannel.read(dsts, offset, length); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return filechannel.write(src); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return filechannel.write(srcs, offset, length); + } + + @Override + public long position() throws IOException { + return filechannel.position(); + } + + @Override + public FileChannel position(long newPosition) 
throws IOException { + return filechannel.position(newPosition); + } + + @Override + public long size() throws IOException { + return filechannel.size(); + } + + @Override + public FileChannel truncate(long size) throws IOException { + return filechannel.truncate(size); + } + + @Override + public void force(boolean metaData) throws IOException { + filechannel.force(metaData); + } + + @Override + public long transferTo(long position, long count, WritableByteChannel target) throws IOException { + return filechannel.transferTo(position, count, target); + } + + @Override + public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException { + return filechannel.transferFrom(src, position, count); + } + + @Override + public int read(ByteBuffer dst, long position) throws IOException { + return filechannel.read(dst, position); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + return filechannel.write(src, position); + } + + @Override + public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException { + return filechannel.map(mode, position, size); + } + + @Override + public FileLock lock(long position, long size, boolean shared) throws IOException { + return filechannel.lock(position, size, shared); + } + + @Override + public FileLock tryLock(long position, long size, boolean shared) throws IOException { + return filechannel.tryLock(position, size, shared); + } + + @Override + protected void implCloseChannel() throws IOException { + super.close(); + filechannel.close(); + if (!this.options.contains(StandardOpenOption.READ)) { + sync(); + } + Files.deleteIfExists(tempFile); + } + + /** + * try to sync the temp file with the remote s3 path. 
+ * + * @throws IOException if the tempFile fails to open a newInputStream + */ + protected void sync() throws IOException { + try (InputStream stream = new BufferedInputStream(Files.newInputStream(tempFile))) { + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setContentLength(Files.size(tempFile)); + metadata.setContentType(new Tika().detect(stream, path.getFileName().toString())); + + String bucket = path.getFileStore().name(); + String key = path.getKey(); + path.getFileSystem().getClient().putObject(bucket, key, stream, metadata); + } + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java new file mode 100644 index 0000000000..cc09dc74c6 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java @@ -0,0 +1,146 @@ +package com.upplication.s3fsfork; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.FileStoreAttributeView; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.Owner; + +public class S3FileStore extends FileStore implements Comparable { + + private com.upplication.s3fsfork.S3FileSystem fileSystem; + private String name; + + public S3FileStore(com.upplication.s3fsfork.S3FileSystem s3FileSystem, String name) { + this.fileSystem = s3FileSystem; + this.name = name; + } + + @Override + public String name() { + return name; + } + + @Override + public String type() { + return "S3Bucket"; + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public long getTotalSpace() throws IOException { + return Long.MAX_VALUE; + } + + @Override + public long getUsableSpace() throws IOException { + return Long.MAX_VALUE; + } + + @Override + public long getUnallocatedSpace() throws IOException { + return Long.MAX_VALUE; + } + + 
@Override + public boolean supportsFileAttributeView(Class type) { + return false; + } + + @Override + public boolean supportsFileAttributeView(String attributeViewName) { + return false; + } + + @SuppressWarnings("unchecked") + @Override + public V getFileStoreAttributeView(Class type) { + if (type != S3FileStoreAttributeView.class) + throw new IllegalArgumentException("FileStoreAttributeView of type '" + type.getName() + "' is not supported."); + Bucket buck = getBucket(); + Owner owner = buck.getOwner(); + return (V) new S3FileStoreAttributeView(buck.getCreationDate(), buck.getName(), owner.getId(), owner.getDisplayName()); + } + + @Override + public Object getAttribute(String attribute) throws IOException { + return getFileStoreAttributeView(S3FileStoreAttributeView.class).getAttribute(attribute); + } + + public S3FileSystem getFileSystem() { + return fileSystem; + } + + public Bucket getBucket() { + return getBucket(name); + } + + private Bucket getBucket(String bucketName) { + for (Bucket buck : getClient().listBuckets()) + if (buck.getName().equals(bucketName)) + return buck; + return null; + } + + public S3Path getRootDirectory() { + return new S3Path(fileSystem, "/" + this.name()); + } + + private AmazonS3 getClient() { + return fileSystem.getClient(); + } + + public Owner getOwner() { + Bucket buck = getBucket(); + if (buck != null) + return buck.getOwner(); + return fileSystem.getClient().getS3AccountOwner(); + } + + @Override + public int compareTo(S3FileStore o) { + if (this == o) + return 0; + return o.name().compareTo(name); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((fileSystem == null) ? 0 : fileSystem.hashCode()); + result = prime * result + ((name == null) ? 
0 : name.hashCode()); + return result; + } + + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (!(obj instanceof S3FileStore)) + return false; + S3FileStore other = (S3FileStore) obj; + + if (fileSystem == null) { + if (other.fileSystem != null) + return false; + } else if (!fileSystem.equals(other.fileSystem)) + return false; + if (name == null) { + if (other.name != null) + return false; + } else if (!name.equals(other.name)) + return false; + return true; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java new file mode 100644 index 0000000000..7da0be3886 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java @@ -0,0 +1,47 @@ +package com.upplication.s3fsfork; + +import java.nio.file.attribute.FileStoreAttributeView; +import java.util.Date; + +public class S3FileStoreAttributeView implements FileStoreAttributeView { + + public static final String ATTRIBUTE_VIEW_NAME = "S3FileStoreAttributeView"; + + private Date creationDate; + private String name; + private String ownerId; + private String ownerDisplayName; + + public static enum AttrID { + creationDate, name, ownerId, ownerDisplayName + } + + public S3FileStoreAttributeView(Date creationDate, String name, String ownerId, String ownerDisplayName) { + this.creationDate = creationDate; + this.name = name; + this.ownerId = ownerId; + this.ownerDisplayName = ownerDisplayName; + } + + @Override + public String name() { + return ATTRIBUTE_VIEW_NAME; + } + + public Object getAttribute(String attribute) { + return getAttribute(AttrID.valueOf(attribute)); + } + + private Object getAttribute(AttrID attrID) { + switch (attrID) { + case creationDate: + return creationDate; + case ownerDisplayName: + return ownerDisplayName; + case ownerId: + return ownerId; + default: + return 
name; + } + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java new file mode 100644 index 0000000000..319b998dc7 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java @@ -0,0 +1,177 @@ +package com.upplication.s3fsfork; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.FileSystem; +import java.nio.file.Path; +import java.nio.file.PathMatcher; +import java.nio.file.WatchService; +import java.nio.file.attribute.UserPrincipalLookupService; +import java.util.Set; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.Bucket; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; + +/** + * S3FileSystem with a concrete client configured and ready to use. + * + * @see AmazonS3 configured by {@link AmazonS3Factory} + */ +public class S3FileSystem extends FileSystem implements Comparable { + + private final com.upplication.s3fsfork.S3FileSystemProvider provider; + private final String key; + private final AmazonS3 client; + private final String endpoint; + private int cache; + + public S3FileSystem(com.upplication.s3fsfork.S3FileSystemProvider provider, String key, AmazonS3 client, String endpoint) { + this.provider = provider; + this.key = key; + this.client = client; + this.endpoint = endpoint; + this.cache = 60000; // 1 minute cache for the s3Path + } + + @Override + public S3FileSystemProvider provider() { + return provider; + } + + public String getKey() { + return key; + } + + @Override + public void close() throws IOException { + this.provider.close(this); + } + + @Override + public boolean isOpen() { + return this.provider.isOpen(this); + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public String getSeparator() { + return S3Path.PATH_SEPARATOR; + } + + @Override + public Iterable 
getRootDirectories() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (FileStore fileStore : getFileStores()) { + builder.add(((S3FileStore) fileStore).getRootDirectory()); + } + return builder.build(); + } + + @Override + public Iterable getFileStores() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (Bucket bucket : client.listBuckets()) { + builder.add(new S3FileStore(this, bucket.getName())); + } + return builder.build(); + } + + @Override + public Set supportedFileAttributeViews() { + return ImmutableSet.of("basic", "posix"); + } + + @Override + public S3Path getPath(String first, String... more) { + if (more.length == 0) { + return new S3Path(this, first); + } + + return new S3Path(this, first, more); + } + + @Override + public PathMatcher getPathMatcher(String syntaxAndPattern) { + throw new UnsupportedOperationException(); + } + + @Override + public UserPrincipalLookupService getUserPrincipalLookupService() { + throw new UnsupportedOperationException(); + } + + @Override + public WatchService newWatchService() throws IOException { + throw new UnsupportedOperationException(); + } + + public AmazonS3 getClient() { + return client; + } + + /** + * get the endpoint associated with this fileSystem. + * + * @return string + * @see http://docs.aws.amazon.com/general/latest/gr/rande.html + */ + public String getEndpoint() { + return endpoint; + } + + public String[] key2Parts(String keyParts) { + String[] parts = keyParts.split(S3Path.PATH_SEPARATOR); + String[] split = new String[parts.length]; + int i = 0; + for (String part : parts) { + split[i++] = part; + } + return split; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((endpoint == null) ? 0 : endpoint.hashCode()); + result = prime * result + ((key == null) ? 
0 : key.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (!(obj instanceof S3FileSystem)) + return false; + S3FileSystem other = (S3FileSystem) obj; + if (endpoint == null) { + if (other.endpoint != null) + return false; + } else if (!endpoint.equals(other.endpoint)) + return false; + if (key == null) { + if (other.key != null) + return false; + } else if (!key.equals(other.key)) + return false; + return true; + } + + @Override + public int compareTo(S3FileSystem o) { + return key.compareTo(o.getKey()); + } + + public int getCache() { + return cache; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java new file mode 100644 index 0000000000..f9c209dbd3 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java @@ -0,0 +1,9 @@ +package com.upplication.s3fsfork; + +public class S3FileSystemConfigurationException extends RuntimeException { + private static final long serialVersionUID = 1L; + + public S3FileSystemConfigurationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java new file mode 100644 index 0000000000..9e862c85a1 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java @@ -0,0 +1,645 @@ +package com.upplication.s3fsfork; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.internal.Constants; +import com.amazonaws.services.s3.model.AmazonS3Exception; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.ObjectMetadata; +import 
com.amazonaws.services.s3.model.S3Object; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.upplication.s3fsfork.attribute.S3BasicFileAttributeView; +import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; +import com.upplication.s3fsfork.attribute.S3PosixFileAttributeView; +import com.upplication.s3fsfork.attribute.S3PosixFileAttributes; +import com.upplication.s3fsfork.util.AttributesUtils; +import com.upplication.s3fsfork.util.Cache; +import com.upplication.s3fsfork.util.S3Utils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.channels.FileChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.*; +import java.nio.file.attribute.*; +import java.nio.file.spi.FileSystemProvider; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.google.common.collect.Sets.difference; +import static java.lang.String.format; + +/** + * Spec: + *

+ * URI: s3://[endpoint]/{bucket}/{key} If endpoint is missing, it's assumed to + * be the default S3 endpoint (s3.amazonaws.com) + *

+ *

+ * FileSystem roots: /{bucket}/ + *

+ *

+ * Treatment of S3 objects: - If a key ends in "/" it's considered a directory + * *and* a regular file. Otherwise, it's just a regular file. - It is legal for + * a key "xyz" and "xyz/" to exist at the same time. The latter is treated as a + * directory. - If a file "a/b/c" exists but there's no "a" or "a/b/", these are + * considered "implicit" directories. They can be listed, traversed and deleted. + *

+ *

+ * Deviations from FileSystem provider API: - Deleting a file or directory + * always succeeds, regardless of whether the file/directory existed before the + * operation was issued i.e. Files.delete() and Files.deleteIfExists() are + * equivalent. + *

+ *

+ * Future versions of this provider might allow for a strict mode that mimics + * the semantics of the FileSystem provider API on a best effort basis, at an + * increased processing cost. + *

+ */ +public class S3FileSystemProvider extends FileSystemProvider { + + public static final String CHARSET_KEY = "s3fs_charset"; + public static final String AMAZON_S3_FACTORY_CLASS = "s3fs_amazon_s3_factory"; + + private static final ConcurrentMap fileSystems = new ConcurrentHashMap<>(); + private static final List PROPS_TO_OVERLOAD = Arrays.asList(AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY, AmazonS3Factory.REQUEST_METRIC_COLLECTOR_CLASS, AmazonS3Factory.CONNECTION_TIMEOUT, AmazonS3Factory.MAX_CONNECTIONS, AmazonS3Factory.MAX_ERROR_RETRY, AmazonS3Factory.PROTOCOL, AmazonS3Factory.PROXY_DOMAIN, + AmazonS3Factory.PROXY_HOST, AmazonS3Factory.PROXY_PASSWORD, AmazonS3Factory.PROXY_PORT, AmazonS3Factory.PROXY_USERNAME, AmazonS3Factory.PROXY_WORKSTATION, AmazonS3Factory.SOCKET_SEND_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_RECEIVE_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_TIMEOUT, + AmazonS3Factory.USER_AGENT, AMAZON_S3_FACTORY_CLASS, AmazonS3Factory.SIGNER_OVERRIDE, AmazonS3Factory.PATH_STYLE_ACCESS); + + private com.upplication.s3fsfork.util.S3Utils s3Utils = new S3Utils(); + private com.upplication.s3fsfork.util.Cache cache = new com.upplication.s3fsfork.util.Cache(); + + @Override + public String getScheme() { + return "s3"; + } + + @Override + public FileSystem newFileSystem(URI uri, Map env) { + validateUri(uri); + // get properties for the env or properties or system + Properties props = getProperties(uri, env); + validateProperties(props); + // try to get the filesystem by the key + String key = getFileSystemKey(uri, props); + if (fileSystems.containsKey(key)) { + throw new FileSystemAlreadyExistsException("File system " + uri.getScheme() + ':' + key + " already exists"); + } + // create the filesystem with the final properties, store and return + S3FileSystem fileSystem = createFileSystem(uri, props); + fileSystems.put(fileSystem.getKey(), fileSystem); + return fileSystem; + } + + private void validateProperties(Properties props) { + 
Preconditions.checkArgument( + (props.getProperty(AmazonS3Factory.ACCESS_KEY) == null && props.getProperty(AmazonS3Factory.SECRET_KEY) == null) + || (props.getProperty(AmazonS3Factory.ACCESS_KEY) != null && props.getProperty(AmazonS3Factory.SECRET_KEY) != null), "%s and %s should both be provided or should both be omitted", + AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY); + } + + private Properties getProperties(URI uri, Map env) { + Properties props = loadAmazonProperties(); + addEnvProperties(props, env); + // and access key and secret key can be override + String userInfo = uri.getUserInfo(); + if (userInfo != null) { + String[] keys = userInfo.split(":"); + props.setProperty(AmazonS3Factory.ACCESS_KEY, keys[0]); + if (keys.length > 1) { + props.setProperty(AmazonS3Factory.SECRET_KEY, keys[1]); + } + } + return props; + } + + private String getFileSystemKey(URI uri) { + return getFileSystemKey(uri, getProperties(uri, null)); + } + + /** + * get the file system key represented by: the access key @ endpoint. 
+ * Example: access-key@s3.amazonaws.com + * If uri host is empty then s3.amazonaws.com are used as host + * + * @param uri URI with the endpoint + * @param props with the access key property + * @return String + */ + protected String getFileSystemKey(URI uri, Properties props) { + // we don`t use uri.getUserInfo and uri.getHost because secret key and access key have special chars + // and dont return the correct strings + String uriString = uri.toString().replace("s3://", ""); + String authority = null; + int authoritySeparator = uriString.indexOf("@"); + + if (authoritySeparator > 0) { + authority = uriString.substring(0, authoritySeparator); + } + + if (authority != null) { + String host = uriString.substring(uriString.indexOf("@") + 1, uriString.length()); + int lastPath = host.indexOf("/"); + if (lastPath > -1) { + host = host.substring(0, lastPath); + } + if (host.length() == 0) { + host = Constants.S3_HOSTNAME; + } + return authority + "@" + host; + } else { + String accessKey = (String) props.get(AmazonS3Factory.ACCESS_KEY); + return (accessKey != null ? accessKey + "@" : "") + + (uri.getHost() != null ? uri.getHost() : Constants.S3_HOSTNAME); + } + } + + protected void validateUri(URI uri) { + Preconditions.checkNotNull(uri, "uri is null"); + Preconditions.checkArgument(uri.getScheme().equals(getScheme()), "uri scheme must be 's3': '%s'", uri); + } + + protected void addEnvProperties(Properties props, Map env) { + if (env == null) + env = new HashMap<>(); + for (String key : PROPS_TO_OVERLOAD) { + // but can be overloaded by envs vars + overloadProperty(props, env, key); + } + + for (String key : env.keySet()) { + Object value = env.get(key); + if (!PROPS_TO_OVERLOAD.contains(key)) { + props.put(key, value); + } + } + } + + /** + * try to override the properties props with: + *
    + *
  1. the map or if not setted:
  2. + *
  3. the system property or if not setted:
  4. + *
  5. the system vars
  6. + *
+ * + * @param props Properties to override + * @param env Map the first option + * @param key String the key + */ + private void overloadProperty(Properties props, Map env, String key) { + boolean overloaded = overloadPropertiesWithEnv(props, env, key); + + if (!overloaded) { + overloaded = overloadPropertiesWithSystemProps(props, key); + } + + if (!overloaded) { + overloadPropertiesWithSystemEnv(props, key); + } + } + + /** + * @return true if the key are overloaded by the map parameter + */ + protected boolean overloadPropertiesWithEnv(Properties props, Map env, String key) { + if (env.get(key) != null && env.get(key) instanceof String) { + props.setProperty(key, (String) env.get(key)); + return true; + } + return false; + } + + /** + * @return true if the key are overloaded by a system property + */ + public boolean overloadPropertiesWithSystemProps(Properties props, String key) { + if (System.getProperty(key) != null) { + props.setProperty(key, System.getProperty(key)); + return true; + } + return false; + } + + /** + * The system envs have preference over the properties files. + * So we overload it + * @param props Properties + * @param key String + * @return true if the key are overloaded by a system property + */ + public boolean overloadPropertiesWithSystemEnv(Properties props, String key) { + if (systemGetEnv(key) != null) { + props.setProperty(key, systemGetEnv(key)); + return true; + } + return false; + } + + /** + * Get the system env with the key param + * @param key String + * @return String or null + */ + public String systemGetEnv(String key) { + return System.getenv(key); + } + + /** + * Get existing filesystem based on a combination of URI and env settings. Create new filesystem otherwise. + * + * @param uri URI of existing, or to be created filesystem. + * @param env environment settings. + * @return new or existing filesystem. 
+ */ + public FileSystem getFileSystem(URI uri, Map env) { + validateUri(uri); + Properties props = getProperties(uri, env); + String key = this.getFileSystemKey(uri, props); // s3fs_access_key is part of the key here. + if (fileSystems.containsKey(key)) + return fileSystems.get(key); + return newFileSystem(uri, env); + } + + @Override + public S3FileSystem getFileSystem(URI uri) { + validateUri(uri); + String key = this.getFileSystemKey(uri); + if (fileSystems.containsKey(key)) { + return fileSystems.get(key); + } else { + throw new FileSystemNotFoundException("S3 filesystem not yet created. Use newFileSystem() instead"); + } + } + + private S3Path toS3Path(Path path) { + Preconditions.checkArgument(path instanceof S3Path, "path must be an instance of %s", S3Path.class.getName()); + return (S3Path) path; + } + + /** + * Deviation from spec: throws FileSystemNotFoundException if FileSystem + * hasn't yet been initialized. Call newFileSystem() first. + * Need credentials. Maybe set credentials after? how? + * TODO: we can create a new one if the credentials are present by: + * s3://access-key:secret-key@endpoint.com/ + */ + @Override + public Path getPath(URI uri) { + FileSystem fileSystem = getFileSystem(uri); + /** + * TODO: set as a list. one s3FileSystem by region + */ + return fileSystem.getPath(uri.getPath()); + } + + @Override + public DirectoryStream newDirectoryStream(Path dir, DirectoryStream.Filter filter) throws IOException { + final S3Path s3Path = toS3Path(dir); + return new DirectoryStream() { + @Override + public void close() throws IOException { + // nothing to do here + } + + @Override + public Iterator iterator() { + return new S3Iterator(s3Path); + } + }; + } + + @Override + public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { + S3Path s3Path = toS3Path(path); + String key = s3Path.getKey(); + + Preconditions.checkArgument(options.length == 0, "OpenOptions not yet supported: %s", ImmutableList.copyOf(options)); // TODO + Preconditions.checkArgument(!key.equals(""), "cannot create InputStream for root directory: %s", path); + + try { + S3Object object = s3Path.getFileSystem().getClient().getObject(s3Path.getFileStore().name(), key); + InputStream res = object.getObjectContent(); + + if (res == null) + throw new IOException(String.format("The specified path is a directory: %s", path)); + + return res; + } catch (AmazonS3Exception e) { + if (e.getStatusCode() == 404) + throw new NoSuchFileException(path.toString()); + // otherwise throws a generic IO exception + throw new IOException(String.format("Cannot access file: %s", path), e); + } + } + + @Override + public SeekableByteChannel newByteChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + S3Path s3Path = toS3Path(path); + if (options.isEmpty() || options.contains(StandardOpenOption.READ)) { + if (options.contains(StandardOpenOption.WRITE)) + throw new UnsupportedOperationException( + "Can't read and write one on channel" + ); + return new S3ReadOnlySeekableByteChannel(s3Path, options); + } else { + return new S3SeekableByteChannel(s3Path, options); + } + } + + @Override + public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { + S3Path s3Path = toS3Path(path); + return new S3FileChannel(s3Path, options); + } + + /** + * Deviations from spec: Does not perform atomic check-and-create. Since a + * directory is just an S3 object, all directories in the hierarchy are + * created or it already existed. + */ + @Override + public void createDirectory(Path dir, FileAttribute... 
attrs) throws IOException { + S3Path s3Path = toS3Path(dir); + Preconditions.checkArgument(attrs.length == 0, "attrs not yet supported: %s", ImmutableList.copyOf(attrs)); // TODO + if (exists(s3Path)) + throw new FileAlreadyExistsException(format("target already exists: %s", s3Path)); + // create bucket if necesary + Bucket bucket = s3Path.getFileStore().getBucket(); + String bucketName = s3Path.getFileStore().name(); + if (bucket == null) { + s3Path.getFileSystem().getClient().createBucket(bucketName); + } + // create the object as directory + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setContentLength(0); + String directoryKey = s3Path.getKey().endsWith("/") ? s3Path.getKey() : s3Path.getKey() + "/"; + s3Path.getFileSystem().getClient().putObject(bucketName, directoryKey, new ByteArrayInputStream(new byte[0]), metadata); + } + + @Override + public void delete(Path path) throws IOException { + S3Path s3Path = toS3Path(path); + if (Files.notExists(s3Path)) + throw new NoSuchFileException("the path: " + this + " not exists"); + if (Files.isDirectory(s3Path) && Files.newDirectoryStream(s3Path).iterator().hasNext()) + throw new DirectoryNotEmptyException("the path: " + this + " is a directory and is not empty"); + + String key = s3Path.getKey(); + String bucketName = s3Path.getFileStore().name(); + s3Path.getFileSystem().getClient().deleteObject(bucketName, key); + // we delete the two objects (sometimes exists the key '/' and sometimes not) + s3Path.getFileSystem().getClient().deleteObject(bucketName, key + "/"); + } + + @Override + public void copy(Path source, Path target, CopyOption... 
options) throws IOException { + if (isSameFile(source, target)) + return; + + S3Path s3Source = toS3Path(source); + S3Path s3Target = toS3Path(target); + // TODO: implements support for copying directories + + Preconditions.checkArgument(!Files.isDirectory(source), "copying directories is not yet supported: %s", source); + Preconditions.checkArgument(!Files.isDirectory(target), "copying directories is not yet supported: %s", target); + + ImmutableSet actualOptions = ImmutableSet.copyOf(options); + verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions); + + if (exists(s3Target) && !actualOptions.contains(StandardCopyOption.REPLACE_EXISTING)) { + throw new FileAlreadyExistsException(format("target already exists: %s", target)); + } + + String bucketNameOrigin = s3Source.getFileStore().name(); + String keySource = s3Source.getKey(); + String bucketNameTarget = s3Target.getFileStore().name(); + String keyTarget = s3Target.getKey(); + s3Source.getFileSystem() + .getClient().copyObject( + bucketNameOrigin, + keySource, + bucketNameTarget, + keyTarget); + } + + @Override + public void move(Path source, Path target, CopyOption... options) throws IOException { + if (options != null && Arrays.asList(options).contains(StandardCopyOption.ATOMIC_MOVE)) + throw new AtomicMoveNotSupportedException(source.toString(), target.toString(), "Atomic not supported"); + copy(source, target, options); + delete(source); + } + + @Override + public boolean isSameFile(Path path1, Path path2) throws IOException { + return path1.isAbsolute() && path2.isAbsolute() && path1.equals(path2); + } + + @Override + public boolean isHidden(Path path) throws IOException { + return false; + } + + @Override + public FileStore getFileStore(Path path) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void checkAccess(Path path, AccessMode... 
modes) throws IOException { + S3Path s3Path = toS3Path(path); + Preconditions.checkArgument(s3Path.isAbsolute(), "path must be absolute: %s", s3Path); + if (modes.length == 0) { + if (exists(s3Path)) + return; + throw new NoSuchFileException(toString()); + } + + String key = s3Utils.getS3ObjectSummary(s3Path).getKey(); + S3AccessControlList accessControlList = + new S3AccessControlList(s3Path.getFileStore().name(), key, s3Path.getFileSystem().getClient().getObjectAcl(s3Path.getFileStore().name(), key), s3Path.getFileStore().getOwner()); + + accessControlList.checkAccess(modes); + } + + + @Override + public V getFileAttributeView(Path path, Class type, LinkOption... options) { + S3Path s3Path = toS3Path(path); + if (type == BasicFileAttributeView.class) { + return (V) new S3BasicFileAttributeView(s3Path); + } else if (type == PosixFileAttributeView.class) { + return (V) new S3PosixFileAttributeView(s3Path); + } else if (type == null) { + throw new NullPointerException("Type is mandatory"); + } else { + return null; + } + } + + @Override + public A readAttributes(Path path, Class type, LinkOption... 
options) throws IOException { + S3Path s3Path = toS3Path(path); + if (type == BasicFileAttributes.class) { + if (cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) { + A result = type.cast(s3Path.getFileAttributes()); + s3Path.setFileAttributes(null); + return result; + } else { + S3BasicFileAttributes attrs = s3Utils.getS3FileAttributes(s3Path); + s3Path.setFileAttributes(attrs); + return type.cast(attrs); + } + } else if (type == PosixFileAttributes.class) { + if (s3Path.getFileAttributes() instanceof PosixFileAttributes && + cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) { + A result = type.cast(s3Path.getFileAttributes()); + s3Path.setFileAttributes(null); + return result; + } + + S3PosixFileAttributes attrs = s3Utils.getS3PosixFileAttributes(s3Path); + s3Path.setFileAttributes(attrs); + return type.cast(attrs); + } + + throw new UnsupportedOperationException(format("only %s or %s supported", BasicFileAttributes.class, PosixFileAttributes.class)); + } + + @Override + public Map readAttributes(Path path, String attributes, LinkOption... 
options) throws IOException { + if (attributes == null) { + throw new IllegalArgumentException("Attributes null"); + } + + if (attributes.contains(":") && !attributes.contains("basic:") && !attributes.contains("posix:")) { + throw new UnsupportedOperationException(format("attributes %s are not supported, only basic / posix are supported", attributes)); + } + + if (attributes.equals("*") || attributes.equals("basic:*")) { + BasicFileAttributes attr = readAttributes(path, BasicFileAttributes.class, options); + return com.upplication.s3fsfork.util.AttributesUtils.fileAttributeToMap(attr); + } else if (attributes.equals("posix:*")) { + PosixFileAttributes attr = readAttributes(path, PosixFileAttributes.class, options); + return com.upplication.s3fsfork.util.AttributesUtils.fileAttributeToMap(attr); + } else { + String[] filters = new String[]{attributes}; + if (attributes.contains(",")) { + filters = attributes.split(","); + } + Class filter = BasicFileAttributes.class; + if (attributes.startsWith("posix:")) { + filter = PosixFileAttributes.class; + } + return AttributesUtils.fileAttributeToMap(readAttributes(path, filter, options), filters); + } + } + + @Override + public void setAttribute(Path path, String attribute, Object value, LinkOption... 
options) throws IOException { + throw new UnsupportedOperationException(); + } + + // ~~ + + /** + * Create the fileSystem + * + * @param uri URI + * @param props Properties + * @return S3FileSystem never null + */ + public S3FileSystem createFileSystem(URI uri, Properties props) { + return new S3FileSystem(this, getFileSystemKey(uri, props), getAmazonS3(uri, props), uri.getHost()); + } + + protected AmazonS3 getAmazonS3(URI uri, Properties props) { + return getAmazonS3Factory(props).getAmazonS3(uri, props); + } + + protected AmazonS3Factory getAmazonS3Factory(Properties props) { + if (props.containsKey(AMAZON_S3_FACTORY_CLASS)) { + String amazonS3FactoryClass = props.getProperty(AMAZON_S3_FACTORY_CLASS); + try { + return (AmazonS3Factory) Class.forName(amazonS3FactoryClass).newInstance(); + } catch (InstantiationException | IllegalAccessException | ClassNotFoundException | ClassCastException e) { + throw new S3FileSystemConfigurationException("Configuration problem, couldn't instantiate AmazonS3Factory (" + amazonS3FactoryClass + "): ", e); + } + } + return new AmazonS3ClientFactory(); + } + + /** + * find /amazon.properties in the classpath + * + * @return Properties amazon.properties + */ + public Properties loadAmazonProperties() { + Properties props = new Properties(); + // http://www.javaworld.com/javaworld/javaqa/2003-06/01-qa-0606-load.html + // http://www.javaworld.com/javaqa/2003-08/01-qa-0808-property.html + try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream("amazon.properties")) { + if (in != null) + props.load(in); + } catch (IOException e) { + // If amazon.properties can't be loaded that's ok. 
+ } + return props; + } + + // ~~~ + + private void verifySupportedOptions(Set allowedOptions, Set actualOptions) { + Sets.SetView unsupported = difference(actualOptions, allowedOptions); + Preconditions.checkArgument(unsupported.isEmpty(), "the following options are not supported: %s", unsupported); + } + + /** + * check that the paths exists or not + * + * @param path S3Path + * @return true if exists + */ + boolean exists(S3Path path) { + System.out.println("Exists check!"); + S3Path s3Path = toS3Path(path); + try { + s3Utils.getS3ObjectSummary(s3Path); + return true; + } catch (NoSuchFileException e) { + return false; + } + } + + public void close(S3FileSystem fileSystem) { + if (fileSystem.getKey() != null && fileSystems.containsKey(fileSystem.getKey())) + fileSystems.remove(fileSystem.getKey()); + } + + public boolean isOpen(S3FileSystem s3FileSystem) { + return fileSystems.containsKey(s3FileSystem.getKey()); + } + + /** + * only 4 testing + */ + + protected static ConcurrentMap getFilesystems() { + return fileSystems; + } + + public com.upplication.s3fsfork.util.Cache getCache() { + return cache; + } + + public void setCache(Cache cache) { + this.cache = cache; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java new file mode 100644 index 0000000000..6b00ef3068 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java @@ -0,0 +1,192 @@ +package com.upplication.s3fsfork; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Set; + +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.google.common.collect.Lists; +import 
com.google.common.collect.Sets;
import com.upplication.s3fsfork.util.S3Utils;

/**
 * S3 iterator over folders at first level.
 * Listing is paged: when the current ObjectListing is truncated, the next
 * batch is fetched lazily from within hasNext()/next().
 * Future versions of this class should return the elements in an incremental
 * way when the #next() method is called.
 */
public class S3Iterator implements Iterator<S3Path> {
    private S3FileSystem fileSystem;
    private S3FileStore fileStore;
    private String key;
    private List<S3Path> items = Lists.newArrayList();
    private Set<S3Path> addedVirtualDirectories = Sets.newHashSet();
    private ObjectListing current;
    private int cursor; // index of next element to return
    private int size;   // number of entries materialized from the pages seen so far
    private boolean incremental;

    private S3Utils s3Utils = new S3Utils();

    public S3Iterator(S3Path path) {
        this(path, false);
    }

    public S3Iterator(S3Path path, boolean incremental) {
        // non-incremental listings need a trailing '/' so S3 treats the key as a prefix
        this(path.getFileStore(), path.getKey() + (!incremental && !path.getKey().isEmpty() && !path.getKey().endsWith("/") ? "/" : ""), incremental);
    }

    public S3Iterator(S3FileStore fileStore, String key, boolean incremental) {
        ListObjectsRequest listObjectsRequest = buildRequest(fileStore.name(), key, incremental);

        this.fileStore = fileStore;
        this.fileSystem = fileStore.getFileSystem();
        this.key = key;
        this.current = fileSystem.getClient().listObjects(listObjectsRequest);
        this.incremental = incremental;
        loadObjects();
    }

    @Override
    public boolean hasNext() {
        return cursor != size || current.isTruncated();
    }

    @Override
    public S3Path next() {
        if (cursor == size && current.isTruncated()) {
            this.current = fileSystem.getClient().listNextBatchOfObjects(current);
            loadObjects();
        }
        if (cursor == size)
            throw new NoSuchElementException();
        return items.get(cursor++);
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }

    /** Rebuilds the materialized item list from the current listing page. */
    private void loadObjects() {
        this.items.clear();
        if (incremental)
            parseObjects();
        else
            parseObjectListing(key, items, current);
        this.size = items.size();
        this.cursor = 0;
    }

    /** Incremental mode: every key in the page becomes a path, plus virtual parent dirs. */
    private void parseObjects() {
        for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) {
            final String objectSummaryKey = objectSummary.getKey();
            String[] keyParts = fileSystem.key2Parts(objectSummaryKey);
            addParentPaths(keyParts);
            S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), keyParts);
            if (!items.contains(path)) {
                items.add(path);
            }
        }
    }

    /**
     * S3 has no real directories, so synthesize ("virtual") parent directory
     * entries for each key, stopping at the request prefix.
     */
    private void addParentPaths(String[] keyParts) {
        if (keyParts.length <= 1)
            return;
        String[] subParts = Arrays.copyOf(keyParts, keyParts.length - 1);
        List<S3Path> parentPaths = new ArrayList<>();
        while (subParts.length > 0) {
            S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), subParts);
            String prefix = current.getPrefix();

            String parentKey = path.getKey();
            if (prefix.length() > parentKey.length() && prefix.contains(parentKey))
                break;
            if (items.contains(path) || addedVirtualDirectories.contains(path)) {
                subParts = Arrays.copyOf(subParts, subParts.length - 1);
                continue;
            }
            parentPaths.add(path);
            addedVirtualDirectories.add(path);
            subParts = Arrays.copyOf(subParts, subParts.length - 1);
        }
        Collections.reverse(parentPaths);
        items.addAll(parentPaths);
    }

    /**
     * add to the listPath the elements at the same level that s3Path
     *
     * @param key the uri to parse
     * @param listPath List not null list to add
     * @param current ObjectListing to walk
     */
    private void parseObjectListing(String key, List<S3Path> listPath, ObjectListing current) {
        for (String commonPrefix : current.getCommonPrefixes()) {
            if (!commonPrefix.equals("/")) {
                listPath.add(new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(commonPrefix)));
            }
        }
        // TODO: figure out a way to efficiently preprocess commonPrefix basicFileAttributes
        for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) {
            final String objectSummaryKey = objectSummary.getKey();
            // we only want the first level
            String immediateDescendantKey = getImmediateDescendant(key, objectSummaryKey);
            if (immediateDescendantKey != null) {
                S3Path descendentPart = new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(immediateDescendantKey));
                // cache the already-known attributes on the path to avoid extra requests later
                descendentPart.setFileAttributes(s3Utils.toS3FileAttributes(objectSummary, descendentPart.getKey()));
                if (!listPath.contains(descendentPart)) {
                    listPath.add(descendentPart);
                }
            }
        }
    }

    /**
     * The current #buildRequest() gets all subdirectories and their content.
     * This method filters the keyChild and checks whether it is an immediate
     * descendant of the keyParent parameter.
     *
     * @param keyParent String
     * @param keyChild String
     * @return String parsed,
     *         or null when the keyChild and keyParent are the same and do not have to be returned
     */
    private String getImmediateDescendant(String keyParent, String keyChild) {
        keyParent = deleteExtraPath(keyParent);
        keyChild = deleteExtraPath(keyChild);
        final int parentLen = keyParent.length();
        final String childWithoutParent = deleteExtraPath(keyChild.substring(parentLen));
        String[] parts = childWithoutParent.split("/");
        if (parts.length > 0 && !parts[0].isEmpty())
            return keyParent + "/" + parts[0];
        return null;
    }

    /** Strips a single leading and trailing slash. */
    private String deleteExtraPath(String keyChild) {
        if (keyChild.startsWith("/"))
            keyChild = keyChild.substring(1);
        if (keyChild.endsWith("/"))
            keyChild = keyChild.substring(0, keyChild.length() - 1);
        return keyChild;
    }

    ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental) {
        return buildRequest(bucketName, key, incremental, null);
    }

    /** Incremental: flat recursive listing; otherwise delimited ('/') one-level listing. */
    ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental, Integer maxKeys) {
        if (incremental)
            return new ListObjectsRequest(bucketName, key, null, null, maxKeys);
        return new ListObjectsRequest(bucketName, key, key, "/", maxKeys);
    }
}
diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java
b/webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java new file mode 100644 index 0000000000..64ad373b75 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java @@ -0,0 +1,614 @@ +package com.upplication.s3fsfork; + +import com.google.common.base.*; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; + +import java.io.File; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URL; +import java.net.URLDecoder; +import java.nio.file.*; +import java.util.Iterator; +import java.util.List; + +import static com.google.common.collect.Iterables.*; +import static java.lang.String.format; + +public class S3Path implements Path { + + public static final String PATH_SEPARATOR = "/"; + /** + * S3FileStore which represents the Bucket this path resides in. + */ + private final com.upplication.s3fsfork.S3FileStore fileStore; + + /** + * URI not encoded + * Is the key for AmazonS3 + */ + private String uri; + + /** + * actual filesystem + */ + private com.upplication.s3fsfork.S3FileSystem fileSystem; + + /** + * S3BasicFileAttributes cache + */ + private com.upplication.s3fsfork.attribute.S3BasicFileAttributes fileAttributes; + + /** + * Build an S3Path from path segments. '/' are stripped from each segment. + * + * @param fileSystem S3FileSystem + * @param first should be start with a '/' and is the bucket name + * @param more directories and files + */ + public S3Path(com.upplication.s3fsfork.S3FileSystem fileSystem, String first, String... more) { + + Preconditions.checkArgument(first != null, "first path must be not null"); + Preconditions.checkArgument(!first.startsWith("//"), "first path doesnt start with '//'. 
Miss bucket"); + // see tests com.upplication.s3fs.Path.EndsWithTest#endsWithRelativeBlankAbsolute() + // Preconditions.checkArgument(!first.isEmpty(), "first path must be not empty"); + + boolean hasBucket = first.startsWith("/"); + + + List pathsURI = Lists + .newArrayList(Splitter.on(PATH_SEPARATOR) + .omitEmptyStrings() + .split(first)); + + if (hasBucket) { // absolute path + + Preconditions.checkArgument(pathsURI.size() >= 1, "path must start with bucket name"); + Preconditions.checkArgument(!pathsURI.get(0).isEmpty(), "bucket name must be not empty"); + String bucket = pathsURI.get(0); + this.fileStore = new com.upplication.s3fsfork.S3FileStore(fileSystem, bucket); + // the filestore is not part of the uri + pathsURI.remove(0); + } + else { + // relative uri + this.fileStore = null; + } + + StringBuilder uriBuilder = new StringBuilder(); + if (hasBucket) { + uriBuilder.append(PATH_SEPARATOR); + } + for (String path : pathsURI) { + uriBuilder.append(path + PATH_SEPARATOR); + } + if (more != null) { + for (String path : more) { + uriBuilder.append(path + PATH_SEPARATOR); + } + } + this.uri = normalizeURI(uriBuilder.toString()); + // remove last PATH_SEPARATOR + if (!first.isEmpty() && + // only first param and not ended with PATH_SEPARATOR + ((!first.endsWith(PATH_SEPARATOR) && (more == null || more.length == 0)) + // we have more param and not ended with PATH_SEPARATOR + || more != null && more.length > 0 && !more[more.length-1].endsWith(PATH_SEPARATOR))) { + this.uri = this.uri.substring(0, this.uri.length() - 1); + } + + this.fileSystem = fileSystem; + } + + /** + * Remove duplicated slash + */ + private String normalizeURI(String uri) { + return uri.replace("//", "/"); + } + + public S3FileStore getFileStore() { + return fileStore; + } + + /** + * key for amazon without final slash. 
+ * note: the final slash need to be added to save a directory (Amazon s3 spec) + * + * @return the key for AmazonS3Client + */ + public String getKey() { + + String key = this.uri; + + if (key.startsWith("/")) { + key = key.substring(1, key.length()); + } + + return key; + } + + @Override + public S3FileSystem getFileSystem() { + return this.fileSystem; + } + + @Override + public boolean isAbsolute() { + return fileStore != null; + } + + @Override + public Path getRoot() { + if (isAbsolute()) { + return new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); + } + + return null; + } + + @Override + public Path getFileName() { + List paths = uriToList(); + if (paths.isEmpty()) { + // get FileName of root directory is null + return null; + } + String filename = paths.get(paths.size()-1); + return new S3Path(fileSystem, filename); + } + + @Override + public Path getParent() { + // bucket is not present in the parts + if (uri.isEmpty()) { + return null; + } + + String newUri = this.uri; + + if (this.uri.endsWith("/")) { + newUri = this.uri.substring(0, this.uri.length()-1); + } + int lastPathSeparatorPosition = newUri.lastIndexOf(PATH_SEPARATOR); + + if (lastPathSeparatorPosition == -1) { + return null; + } + + newUri = uri.substring(0, lastPathSeparatorPosition + 1); + + if (newUri.isEmpty()) + return null; + + String filestore = isAbsolute() ? 
PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR : ""; + + return new S3Path(fileSystem, filestore + newUri); + } + + @Override + public int getNameCount() { + return uriToList().size(); + } + + @Override + public Path getName(int index) { + + List paths = uriToList(); + + if (index < 0 || index >= paths.size()) { + throw new IllegalArgumentException("index out of range"); + } + + String path = paths.get(index); + StringBuilder pathsBuilder = new StringBuilder(); + if (isAbsolute() && index == 0) { + pathsBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); + } + pathsBuilder.append(path); + + if (index < paths.size() - 1) { + pathsBuilder.append(PATH_SEPARATOR); + } + + // if is the last path, check if end with path separator + if (index == paths.size() - 1 && this.uri.endsWith(PATH_SEPARATOR)) { + pathsBuilder.append(PATH_SEPARATOR); + } + + return new S3Path(fileSystem, pathsBuilder.toString()); + } + + private List uriToList() { + return Splitter.on(PATH_SEPARATOR).omitEmptyStrings().splitToList(this.uri); + } + + /** + * The bucket name not count + */ + @Override + public Path subpath(int beginIndex, int endIndex) { + + List paths = uriToList(); + + if (beginIndex < 0 || endIndex > paths.size()) { + throw new IllegalArgumentException("index out of range"); + } + + List pathSubList = paths.subList(beginIndex, endIndex); + StringBuilder pathsStringBuilder = new StringBuilder(); + + // build path string + + if (this.isAbsolute() && beginIndex == 0) { + pathsStringBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); + } + for (String path : pathSubList) { + pathsStringBuilder.append(path).append(PATH_SEPARATOR); + } + String pathsResult = pathsStringBuilder.toString(); + // if the uri doesnt have last PATH_SEPARATOR we must remove it. 
+ if (endIndex == paths.size() && !this.uri.endsWith(PATH_SEPARATOR)) { + pathsResult = pathsResult.substring(0, pathsResult.length() - 1); + } + + return new S3Path(fileSystem, pathsResult); + } + + @Override + public boolean startsWith(Path other) { + + if (other.getNameCount() > this.getNameCount()) { + return false; + } + + if (!(other instanceof S3Path)) { + return false; + } + + if (this.isAbsolute() && !other.isAbsolute()) { + return false; + } + + S3Path path = (S3Path) other; + + if (this.isAbsolute() && other.isAbsolute() && + !this.fileStore.name().equals(path.fileStore.name())) { + return false; + } + + if (path.uri.isEmpty() && !this.uri.isEmpty()) { + return false; + } + + List pathsOther = path.uriToList(); + List paths = this.uriToList(); + + + for (int i = 0; i < pathsOther.size(); i++) { + if (!pathsOther.get(i).equals(paths.get(i))) { + return false; + } + } + return true; + } + + @Override + public boolean startsWith(String path) { + S3Path other = new S3Path(this.fileSystem, path); + return this.startsWith(other); + } + + @Override + public boolean endsWith(Path other) { + if (other.getNameCount() > this.getNameCount()) { + return false; + } + // empty + if (other.getNameCount() == 0 && this.getNameCount() != 0) { + return false; + } + + if (!(other instanceof S3Path)) { + return false; + } + + S3Path path = (S3Path) other; + + if ((path.getFileStore() != null && !path.getFileStore().equals(this.getFileStore())) || (path.getFileStore() != null && this.getFileStore() == null)) { + return false; + } + + // check subkeys + + List pathsOther = path.uriToList(); + List paths = this.uriToList(); + + + int i = pathsOther.size() - 1; + int j = paths.size() - 1; + for (; i >= 0 && j >= 0; ) { + + if (!pathsOther.get(i).equals(paths.get(j))) { + return false; + } + i--; + j--; + } + return true; + } + + @Override + public boolean endsWith(String other) { + return this.endsWith(new S3Path(this.fileSystem, other)); + } + + @Override + public Path 
normalize() { + return this; + } + + @Override + public Path resolve(Path other) { + if (other.isAbsolute()) { + Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName()); + return other; + } + + S3Path otherS3Path = (S3Path) other; + StringBuilder pathBuilder = new StringBuilder(); + + if (this.isAbsolute()) { + pathBuilder.append(PATH_SEPARATOR + this.fileStore.name() + PATH_SEPARATOR); + } + pathBuilder.append(this.uri); + if (!otherS3Path.uri.isEmpty()) + pathBuilder.append(PATH_SEPARATOR + otherS3Path.uri); + + return new S3Path(this.fileSystem, pathBuilder.toString()); + } + + @Override + public Path resolve(String other) { + return resolve(new S3Path(this.getFileSystem(), other)); + } + + @Override + public Path resolveSibling(Path other) { + Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName()); + + S3Path s3Path = (S3Path) other; + + Path parent = getParent(); + + if (parent == null || s3Path.isAbsolute()) { + return s3Path; + } + + List othersPaths = s3Path.uriToList(); + + if (othersPaths.isEmpty()) { // other is relative and empty + return parent; + } + + List paths = this.uriToList(); + + StringBuilder pathBuilder = new StringBuilder(); + String lastPath = othersPaths.get(othersPaths.size() - 1); + if (isAbsolute()) { + pathBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); + } + for (String path : concat(paths.subList(0, paths.size() - 1), othersPaths)) { + pathBuilder.append(path); + if (!lastPath.equals(path) || s3Path.uri.endsWith(PATH_SEPARATOR)) { + pathBuilder.append(PATH_SEPARATOR); + } + } + + return new S3Path(fileSystem, pathBuilder.toString()); + } + + @Override + public Path resolveSibling(String other) { + return resolveSibling(new S3Path(this.getFileSystem(), other)); + } + + @Override + public Path relativize(Path other) { + Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of 
%s", S3Path.class.getName()); + S3Path s3Path = (S3Path) other; + + if (this.equals(other)) { + return new S3Path(this.getFileSystem(), ""); + } + + Preconditions.checkArgument(isAbsolute(), "Path is already relative: %s", this); + Preconditions.checkArgument(s3Path.isAbsolute(), "Cannot relativize against a relative path: %s", s3Path); + Preconditions.checkArgument(fileStore.equals(s3Path.getFileStore()), "Cannot relativize paths with different buckets: '%s', '%s'", this, other); + // Preconditions.checkArgument(parts.size() <= s3Path.parts.size(), "Cannot relativize against a parent path: '%s', '%s'", this, other); + + String uriPath = decode(URI.create(encode(this.uri)).relativize(URI.create(encode(s3Path.uri)))); + return new S3Path(fileSystem, uriPath); + } + + /** + * Examples: + * + * Relative: + * -------- + * NO use fileSystem and not used fileStore. + * - path/file + * + * Absolute: + * -------- + * Use the fileSystem to get the host and the filestore to get the first path (in the future the filestore can be attached to the host) + * http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html + * - s3://AMAZONACCESSKEY@s3.amazonaws.com/bucket/path/file + * - s3://AMAZONACCESSKEY@bucket.s3.amazonaws.com/path/file + * - s3://s3-aws-region.amazonaws.com/bucket/path/file + * + * @return URI never null + */ + @Override + public URI toUri() { + + String uri = encode(this.uri); + // absolute + if (this.isAbsolute()) { + StringBuilder builder = new StringBuilder(); + builder.append(fileSystem.getKey()); + builder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); + builder.append(uri); + return URI.create("s3://" + normalizeURI(builder.toString())); + } + else { + return URI.create(uri); + } + } + + /** + * Get the url for the s3Path. + * + * The url represents a Uniform Resource + * Locator, a pointer to a "resource" on the World + * Wide Web. 
+ * + * All S3Path has a URL if is absolute + * + * @see com.amazonaws.services.s3.AmazonS3#getUrl(String, String) + * @see S3Path#toUri() for unique resource identifier + * @return URL or null if is not absoulte + */ + public URL toURL() { + if (!this.isAbsolute()) + return null; + + return this.getFileSystem().getClient().getUrl(this.fileStore.name(), this.getKey()); + } + + @Override + public Path toAbsolutePath() { + if (isAbsolute()) { + return this; + } + + throw new IllegalStateException(format("Relative path cannot be made absolute: %s", this)); + } + + @Override + public Path toRealPath(LinkOption... options) throws IOException { + return toAbsolutePath(); + } + + @Override + public File toFile() { + throw new UnsupportedOperationException(); + } + + @Override + public WatchKey register(WatchService watcher, WatchEvent.Kind[] events, WatchEvent.Modifier... modifiers) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public WatchKey register(WatchService watcher, WatchEvent.Kind... 
events) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Iterator iterator() { + + ImmutableList.Builder builder = ImmutableList.builder(); + + if (isAbsolute()) { + builder.add(new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR)); + } + + List paths = uriToList(); + + if (uriToList().isEmpty()) + return builder.build().iterator(); + + String lastPath = paths.get(paths.size() - 1); + + for (String path : uriToList()) { + String pathFinal = path + PATH_SEPARATOR; + if (path.equals(lastPath) && !lastPath.endsWith(PATH_SEPARATOR)) { + pathFinal = pathFinal.substring(0, pathFinal.length() - 1); + } + builder.add(new S3Path(fileSystem, pathFinal)); + } + + return builder.build().iterator(); + } + + @Override + public int compareTo(Path other) { + return toString().compareTo(other.toString()); + } + + @Override + public String toString() { + return toUri().toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + S3Path path = (S3Path) o; + if (fileStore != null ? !fileStore.equals(path.fileStore) : path.fileStore != null) + return false; + if (!uri.equals(path.uri)) + return false; + return true; + } + + @Override + public int hashCode() { + int result = fileStore != null ? fileStore.name().hashCode() : 0; + result = 31 * result + uri.hashCode(); + return result; + } + + /** + * Encode special URI characters for path. 
+ * @param uri String the uri path + * @return String + */ + private String encode(String uri) { + // remove special case URI starting with // + uri = uri.replace("//", "/"); + uri = uri.replaceAll(" ", "%20"); + return uri; + } + + /** + * Decode uri special characters + * + * @param uri URI mandatory + * @return String decoded + */ + private String decode(URI uri) { + try { + return URLDecoder.decode(uri.toString(), "UTF-8"); + } + catch (UnsupportedEncodingException e) { + throw new IllegalStateException("Error decoding key: " + this.uri, e); + } + } + + public com.upplication.s3fsfork.attribute.S3BasicFileAttributes getFileAttributes() { + return fileAttributes; + } + + public void setFileAttributes(S3BasicFileAttributes fileAttributes) { + this.fileAttributes = fileAttributes; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java new file mode 100644 index 0000000000..370c1ac424 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java @@ -0,0 +1,170 @@ +package com.upplication.s3fsfork; + +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.S3Object; +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.NonWritableChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.NoSuchFileException; +import java.nio.file.OpenOption; +import java.nio.file.ReadOnlyFileSystemException; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static java.lang.String.format; + +public class S3ReadOnlySeekableByteChannel implements SeekableByteChannel { + + private 
static final int DEFAULT_BUFFER_SIZE = 64000; + + private S3Path path; + private Set options; + private long length; + private ExtBufferedInputStream bufferedStream; + private ReadableByteChannel rbc; + private long position = 0; + + /** + * Open or creates a file, returning a seekable byte channel + * + * @param path the path open or create + * @param options options specifying how the file is opened + * @throws IOException if an I/O error occurs + */ + public S3ReadOnlySeekableByteChannel(S3Path path, Set options) throws IOException { + this.path = path; + this.options = Collections.unmodifiableSet(new HashSet<>(options)); + + String key = path.getKey(); + //boolean exists = path.getFileSystem().provider().exists(path); + + //if (!exists) { + // throw new NoSuchFileException(format("target not exists: %s", path)); + //} else if ( + if ( + this.options.contains(StandardOpenOption.WRITE) || + this.options.contains(StandardOpenOption.CREATE) || + this.options.contains(StandardOpenOption.CREATE_NEW) || + this.options.contains(StandardOpenOption.APPEND) + ) { + throw new ReadOnlyFileSystemException(); + } + + + System.out.println("Determining length..."); + this.length = 10 ; +/* path + .getFileSystem() + .getClient() + .getObjectMetadata( + path + .getFileStore() + .name(), + key + ) + .getContentLength();*/ + + System.out.println("Opening Stream..."); + openStreamAt(0); + } + + private void openStreamAt(long position) throws IOException { + if (rbc != null) { + rbc.close(); + } + + GetObjectRequest rangeObjectRequest = + new GetObjectRequest( + path.getFileStore().name(), + path.getKey() + ) + .withRange(position); + + + System.out.println("Get object..."); + S3Object s3Object = + path + .getFileSystem() + .getClient() + .getObject(rangeObjectRequest); + + + System.out.println("Get object content..."); + bufferedStream = + new ExtBufferedInputStream( + s3Object.getObjectContent(), + DEFAULT_BUFFER_SIZE + ); + + + System.out.println("Wrap in stream..."); + rbc = 
Channels.newChannel(bufferedStream); + this.position = position; + } + + public boolean isOpen() { + return rbc.isOpen(); + } + + public long position() { return position; } + + public SeekableByteChannel position(long targetPosition) + throws IOException + { + long offset = targetPosition - position(); + if (offset > 0 && offset < bufferedStream.getBytesInBufferAvailable()) { + long skipped = bufferedStream.skip(offset); + if (skipped != offset) { + // shouldn't happen since we are within the buffer + throw new IOException("Could not seek to " + targetPosition); + } + position += offset; + } else if (offset != 0) { + openStreamAt(targetPosition); + } + return this; + } + + public int read(ByteBuffer dst) throws IOException { + int n = rbc.read(dst); + if (n > 0) { + position += n; + } + return n; + } + + public SeekableByteChannel truncate(long size) { + throw new NonWritableChannelException(); + } + + public int write (ByteBuffer src) { + throw new NonWritableChannelException(); + } + + public long size() { + return length; + } + + public void close() throws IOException { + rbc.close(); + } + + private class ExtBufferedInputStream extends BufferedInputStream { + private ExtBufferedInputStream(final InputStream inputStream, final int size) { + super(inputStream, size); + } + + /** Returns the number of bytes that can be read from the buffer without reading more into the buffer. 
*/ + int getBytesInBufferAvailable() { + if (this.count == this.pos) return 0; + else return this.buf.length - this.pos; + } + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java b/webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java new file mode 100644 index 0000000000..fdc28bba7f --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java @@ -0,0 +1,147 @@ +package com.upplication.s3fsfork; + +import static java.lang.String.format; + +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.*; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import org.apache.tika.Tika; + +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.S3Object; + +public class S3SeekableByteChannel implements SeekableByteChannel { + + private com.upplication.s3fsfork.S3Path path; + private Set options; + private SeekableByteChannel seekable; + private Path tempFile; + + /** + * Open or creates a file, returning a seekable byte channel + * + * @param path the path open or create + * @param options options specifying how the file is opened + * @throws IOException if an I/O error occurs + */ + public S3SeekableByteChannel(S3Path path, Set options) throws IOException { + System.out.println("Hello from S3SeekableByteChannel"); + this.path = path; + this.options = Collections.unmodifiableSet(new HashSet<>(options)); + String key = path.getKey(); + boolean exists = path.getFileSystem().provider().exists(path); + + if (exists && this.options.contains(StandardOpenOption.CREATE_NEW)) + throw new FileAlreadyExistsException(format("target already exists: %s", path)); + else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) && + 
!this.options.contains(StandardOpenOption.CREATE)) + throw new NoSuchFileException(format("target not exists: %s", path)); + + tempFile = Files.createTempFile("temp-s3-", key.replaceAll("/", "_")); + boolean removeTempFile = true; + try { + if (exists) { + System.out.println("Exists! Let's call getObject!"); + try (S3Object object = path.getFileSystem() + .getClient() + .getObject(path.getFileStore().getBucket().getName(), key)) { + Files.copy(object.getObjectContent(), tempFile, StandardCopyOption.REPLACE_EXISTING); + } + } + + Set seekOptions = new HashSet<>(this.options); + seekOptions.remove(StandardOpenOption.CREATE_NEW); + seekable = Files.newByteChannel(tempFile, seekOptions); + removeTempFile = false; + } finally { + if (removeTempFile) { + Files.deleteIfExists(tempFile); + } + } + } + + @Override + public boolean isOpen() { + return seekable.isOpen(); + } + + @Override + public void close() throws IOException { + try { + if (!seekable.isOpen()) + return; + + seekable.close(); + + if (options.contains(StandardOpenOption.DELETE_ON_CLOSE)) { + path.getFileSystem().provider().delete(path); + return; + } + + if (options.contains(StandardOpenOption.READ) && options.size() == 1) { + return; + } + + sync(); + + } finally { + Files.deleteIfExists(tempFile); + } + } + + /** + * try to sync the temp file with the remote s3 path. 
+ * + * @throws IOException if the tempFile fails to open a newInputStream + */ + protected void sync() throws IOException { + try (InputStream stream = new BufferedInputStream(Files.newInputStream(tempFile))) { + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setContentLength(Files.size(tempFile)); + if (path.getFileName() != null) { + metadata.setContentType(new Tika().detect(stream, path.getFileName().toString())); + } + + String bucket = path.getFileStore().name(); + String key = path.getKey(); + path.getFileSystem().getClient().putObject(bucket, key, stream, metadata); + } + } + + @Override + public int write(ByteBuffer src) throws IOException { + return seekable.write(src); + } + + @Override + public SeekableByteChannel truncate(long size) throws IOException { + return seekable.truncate(size); + } + + @Override + public long size() throws IOException { + return seekable.size(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return seekable.read(dst); + } + + @Override + public SeekableByteChannel position(long newPosition) throws IOException { + return seekable.position(newPosition); + } + + @Override + public long position() throws IOException { + return seekable.position(); + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java new file mode 100644 index 0000000000..a2cd23b284 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java @@ -0,0 +1,33 @@ +package com.upplication.s3fsfork.attribute; + +import com.upplication.s3fsfork.S3Path; + +import java.io.IOException; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileTime; + + +public class S3BasicFileAttributeView implements BasicFileAttributeView { + + private S3Path s3Path; + + 
public S3BasicFileAttributeView(S3Path s3Path) { + this.s3Path = s3Path; + } + + @Override + public String name() { + return "basic"; + } + + @Override + public BasicFileAttributes readAttributes() throws IOException { + return s3Path.getFileSystem().provider().readAttributes(s3Path, BasicFileAttributes.class); + } + + @Override + public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { + // not implemented + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java new file mode 100644 index 0000000000..315d24fb9f --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java @@ -0,0 +1,85 @@ +package com.upplication.s3fsfork.attribute; + +import static java.lang.String.format; + +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileTime; + +public class S3BasicFileAttributes implements BasicFileAttributes { + private final FileTime lastModifiedTime; + private final long size; + private final boolean directory; + private final boolean regularFile; + private final String key; + private long cacheCreated; + + public S3BasicFileAttributes(String key, FileTime lastModifiedTime, long size, boolean isDirectory, boolean isRegularFile) { + this.key = key; + this.lastModifiedTime = lastModifiedTime; + this.size = size; + this.directory = isDirectory; + this.regularFile = isRegularFile; + + this.cacheCreated = System.currentTimeMillis(); + } + + @Override + public FileTime lastModifiedTime() { + return lastModifiedTime; + } + + @Override + public FileTime lastAccessTime() { + return lastModifiedTime; + } + + @Override + public FileTime creationTime() { + return lastModifiedTime; + } + + @Override + public boolean isRegularFile() { + return regularFile; + } + + @Override + public boolean isDirectory() { + 
return directory; + } + + @Override + public boolean isSymbolicLink() { + return false; + } + + @Override + public boolean isOther() { + return false; + } + + @Override + public long size() { + return size; + } + + @Override + public Object fileKey() { + return key; + } + + @Override + public String toString() { + return format("[%s: lastModified=%s, size=%s, isDirectory=%s, isRegularFile=%s]", key, lastModifiedTime, size, directory, regularFile); + } + + public long getCacheCreated() { + return cacheCreated; + } + + // for testing + + public void setCacheCreated(long time) { + this.cacheCreated = time; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java new file mode 100644 index 0000000000..8a8ca710c3 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java @@ -0,0 +1,61 @@ +package com.upplication.s3fsfork.attribute; + +import com.upplication.s3fsfork.S3Path; + +import java.io.IOException; +import java.nio.file.attribute.*; +import java.util.Set; + + +public class S3PosixFileAttributeView implements PosixFileAttributeView { + + private S3Path s3Path; + private PosixFileAttributes posixFileAttributes; + + public S3PosixFileAttributeView(S3Path s3Path) { + this.s3Path = s3Path; + } + + @Override + public String name() { + return "posix"; + } + + @Override + public PosixFileAttributes readAttributes() throws IOException { + return read(); + } + + @Override + public UserPrincipal getOwner() throws IOException { + return read().owner(); + } + + @Override + public void setOwner(UserPrincipal owner) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void setPermissions(Set perms) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void setGroup(GroupPrincipal group) throws IOException { + 
throw new UnsupportedOperationException(); + } + + @Override + public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { + // not implemented + } + + public PosixFileAttributes read() throws IOException { + if (posixFileAttributes == null) { + posixFileAttributes = s3Path.getFileSystem() + .provider().readAttributes(s3Path, PosixFileAttributes.class); + } + return posixFileAttributes; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java new file mode 100644 index 0000000000..9416063927 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java @@ -0,0 +1,37 @@ +package com.upplication.s3fsfork.attribute; + +import java.nio.file.attribute.*; +import java.util.Set; + +import static java.lang.String.format; + +public class S3PosixFileAttributes extends S3BasicFileAttributes implements PosixFileAttributes { + + private UserPrincipal userPrincipal; + private GroupPrincipal groupPrincipal; + private Set posixFilePermissions; + + public S3PosixFileAttributes(String key, FileTime lastModifiedTime, long size, boolean isDirectory, boolean isRegularFile, UserPrincipal userPrincipal, GroupPrincipal groupPrincipal, Set posixFilePermissionSet) { + + super(key, lastModifiedTime, size, isDirectory, isRegularFile); + + this.userPrincipal = userPrincipal; + this.groupPrincipal = groupPrincipal; + this.posixFilePermissions = posixFilePermissionSet; + } + + @Override + public UserPrincipal owner() { + return this.userPrincipal; + } + + @Override + public GroupPrincipal group() { + return this.groupPrincipal; + } + + @Override + public Set permissions() { + return this.posixFilePermissions; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java 
b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java new file mode 100644 index 0000000000..78607d345b --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java @@ -0,0 +1,17 @@ +package com.upplication.s3fsfork.attribute; + +import java.nio.file.attribute.UserPrincipal; + +public class S3UserPrincipal implements UserPrincipal { + + private String name; + + public S3UserPrincipal(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java b/webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java new file mode 100644 index 0000000000..f0314cfb56 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java @@ -0,0 +1,18 @@ +package com.upplication.s3fsfork.util; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AnonymousAWSCredentials; + +public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return new AnonymousAWSCredentials(); + } + + @Override + public void refresh() { + + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java b/webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java new file mode 100644 index 0000000000..50be7788a6 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java @@ -0,0 +1,100 @@ +package com.upplication.s3fsfork.util; + + +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributes; +import java.util.HashMap; +import java.util.Map; + +/** + * Utilities to help transforming BasicFileAttributes to Map + */ +public abstract class 
AttributesUtils { + + /** + * Given a BasicFileAttributes not null then return a Map + * with the keys as the fields of the BasicFileAttributes or PosixFileAttributes and the values + * with the content of the fields + * + * @param attr BasicFileAttributes + * @return Map String Object never null + */ + public static Map fileAttributeToMap(BasicFileAttributes attr) { + Map result = new HashMap<>(); + result.put("creationTime", attr.creationTime()); + result.put("fileKey", attr.fileKey()); + result.put("isDirectory", attr.isDirectory()); + result.put("isOther", attr.isOther()); + result.put("isRegularFile", attr.isRegularFile()); + result.put("isSymbolicLink", attr.isSymbolicLink()); + result.put("lastAccessTime", attr.lastAccessTime()); + result.put("lastModifiedTime", attr.lastModifiedTime()); + result.put("size", attr.size()); + + if (attr instanceof PosixFileAttributes) { + PosixFileAttributes posixAttr = (PosixFileAttributes) attr; + result.put("permissions", posixAttr.permissions()); + result.put("owner", posixAttr.owner()); + result.put("group", posixAttr.group()); + } + + return result; + } + + /** + * transform the java.nio.file.attribute.BasicFileAttributes to Map filtering by the keys + * given in the filters param + * + * @param attr BasicFileAttributes not null to tranform to map + * @param filters String[] filters + * @return Map String Object with the same keys as the filters + */ + public static Map fileAttributeToMap(BasicFileAttributes attr, String[] filters) { + Map result = new HashMap<>(); + + for (String filter : filters) { + filter = filter.replace("basic:", ""); + filter = filter.replace("posix:", ""); + switch (filter) { + case "creationTime": + result.put("creationTime", attr.creationTime()); + break; + case "fileKey": + result.put("fileKey", attr.fileKey()); + break; + case "isDirectory": + result.put("isDirectory", attr.isDirectory()); + break; + case "isOther": + result.put("isOther", attr.isOther()); + break; + case "isRegularFile": + 
result.put("isRegularFile", attr.isRegularFile()); + break; + case "isSymbolicLink": + result.put("isSymbolicLink", attr.isSymbolicLink()); + break; + case "lastAccessTime": + result.put("lastAccessTime", attr.lastAccessTime()); + break; + case "lastModifiedTime": + result.put("lastModifiedTime", attr.lastModifiedTime()); + break; + case "size": + result.put("size", attr.size()); + break; + case "permissions": + result.put("permissions", ((PosixFileAttributes)attr).permissions()); + break; + case "group": + result.put("group", ((PosixFileAttributes)attr).group()); + break; + case "owner": + result.put("owner", ((PosixFileAttributes)attr).owner()); + break; + } + } + + return result; + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java b/webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java new file mode 100644 index 0000000000..8f4b7adf6e --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java @@ -0,0 +1,29 @@ +package com.upplication.s3fsfork.util; + +import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; + +public class Cache { + + /** + * check if the cache of the S3FileAttributes is still valid + * + * @param cache int cache time of the fileAttributes in milliseconds + * @param fileAttributes S3FileAttributes to check if is still valid, can be null + * @return true or false, if cache are -1 and fileAttributes are not null then always return true + */ + public boolean isInTime(int cache, S3BasicFileAttributes fileAttributes) { + if (fileAttributes == null) { + return false; + } + + if (cache == -1) { + return true; + } + + return getCurrentTime() - cache <= fileAttributes.getCacheCreated(); + } + + public long getCurrentTime() { + return System.currentTimeMillis(); + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java b/webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java new file mode 100644 index 0000000000..893547d384 --- 
/dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java @@ -0,0 +1,32 @@ +package com.upplication.s3fsfork.util; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * Utilities for streams + */ +public abstract class IOUtils { + /** + * get the stream content and return as a byte array + * + * @param is InputStream + * @return byte array + * @throws IOException if the stream is closed + */ + public static byte[] toByteArray(InputStream is) throws IOException { + ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + + int nRead; + byte[] data = new byte[16384]; + + while ((nRead = is.read(data, 0, data.length)) != -1) { + buffer.write(data, 0, nRead); + } + + buffer.flush(); + + return buffer.toByteArray(); + } +} diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java b/webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java new file mode 100644 index 0000000000..5bbea222b6 --- /dev/null +++ b/webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java @@ -0,0 +1,198 @@ +package com.upplication.s3fsfork.util; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.*; +import com.google.common.collect.Sets; +import com.upplication.s3fsfork.S3Path; +import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; +import com.upplication.s3fsfork.attribute.S3PosixFileAttributes; +import com.upplication.s3fsfork.attribute.S3UserPrincipal; + +import java.nio.file.NoSuchFileException; +import java.nio.file.attribute.FileTime; +import java.nio.file.attribute.PosixFilePermission; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * Utilities to work with Amazon S3 Objects. 
+ */ +public class S3Utils { + + /** + * Get the {@link S3ObjectSummary} that represents this Path or its first child if this path does not exist + * + * @param s3Path {@link com.upplication.s3fsfork.S3Path} + * @return {@link S3ObjectSummary} + * @throws NoSuchFileException if neither the path nor any child is found + */ + public S3ObjectSummary getS3ObjectSummary(com.upplication.s3fsfork.S3Path s3Path) throws NoSuchFileException { + String key = s3Path.getKey(); + String bucketName = s3Path.getFileStore().name(); + AmazonS3 client = s3Path.getFileSystem().getClient(); + // try to find the element with the current key (maybe with end slash or maybe not.) + try { + ObjectMetadata metadata = client.getObjectMetadata(bucketName, key); + S3ObjectSummary result = new S3ObjectSummary(); + result.setBucketName(bucketName); + result.setETag(metadata.getETag()); + result.setKey(key); + result.setLastModified(metadata.getLastModified()); + result.setSize(metadata.getContentLength()); + try { + AccessControlList objectAcl = client.getObjectAcl(bucketName, key); + result.setOwner(objectAcl.getOwner()); + } catch (AmazonS3Exception e) { + // If we don't have permission to view the ACL, that's fine, we can leave `owner` empty + if (e.getStatusCode() != 403) + throw e; + } + return result; + } catch (AmazonS3Exception e) { + if (e.getStatusCode() != 404) + throw e; + } + + // if not found (404 err) with the original key. + // try to find the element as a directory.
+ try { + // is a virtual directory + ListObjectsRequest request = new ListObjectsRequest(); + request.setBucketName(bucketName); + String keyFolder = key; + if (!keyFolder.endsWith("/")) { + keyFolder += "/"; + } + request.setPrefix(keyFolder); + request.setMaxKeys(1); + ObjectListing current = client.listObjects(request); + if (!current.getObjectSummaries().isEmpty()) + return current.getObjectSummaries().get(0); + } catch (Exception e) { + // + } + throw new NoSuchFileException(bucketName + com.upplication.s3fsfork.S3Path.PATH_SEPARATOR + key); + } + + /** + * getS3FileAttributes for the s3Path + * + * @param s3Path S3Path mandatory not null + * @return S3FileAttributes never null + */ + public com.upplication.s3fsfork.attribute.S3BasicFileAttributes getS3FileAttributes(com.upplication.s3fsfork.S3Path s3Path) throws NoSuchFileException { + S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); + return toS3FileAttributes(objectSummary, s3Path.getKey()); + } + + /** + * get the S3PosixFileAttributes for a S3Path + * @param s3Path Path mandatory not null + * @return S3PosixFileAttributes never null + * @throws NoSuchFileException if the Path doesn't exist + */ + public com.upplication.s3fsfork.attribute.S3PosixFileAttributes getS3PosixFileAttributes(S3Path s3Path) throws NoSuchFileException { + S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); + + String key = s3Path.getKey(); + String bucketName = s3Path.getFileStore().name(); + + com.upplication.s3fsfork.attribute.S3BasicFileAttributes attrs = toS3FileAttributes(objectSummary, key); + com.upplication.s3fsfork.attribute.S3UserPrincipal userPrincipal = null; + Set permissions = null; + + if (!attrs.isDirectory()) { + AmazonS3 client = s3Path.getFileSystem().getClient(); + AccessControlList acl = client.getObjectAcl(bucketName, key); + Owner owner = acl.getOwner(); + + userPrincipal = new S3UserPrincipal(owner.getId() + ":" + owner.getDisplayName()); + permissions =
+ toPosixFilePermissions(acl.getGrantsAsList()); + } + + return new S3PosixFileAttributes((String)attrs.fileKey(), attrs.lastModifiedTime(), + attrs.size(), attrs.isDirectory(), attrs.isRegularFile(), userPrincipal, null, permissions); + } + + + /** + * transform com.amazonaws.services.s3.model.Grant to java.nio.file.attribute.PosixFilePermission + * @see #toPosixFilePermission(Permission) + * @param grants Set grants mandatory, must be not null + * @return Set PosixFilePermission never null + */ + public Set toPosixFilePermissions(List grants) { + Set filePermissions = new HashSet<>(); + for (Grant grant : grants) { + filePermissions.addAll(toPosixFilePermission(grant.getPermission())); + } + + return filePermissions; + } + + /** + * transform a com.amazonaws.services.s3.model.Permission to a java.nio.file.attribute.PosixFilePermission + * We use the following rules: + * - transform only to the Owner permission, S3 doesn't have concepts like owner, group or other so we map only to owner. + * - ACP is a special permission: WriteAcp is mapped to the Owner execute permission and ReadAcp is mapped to the owner read permission + * @param permission Permission to map, mandatory must be not null + * @return Set PosixFilePermission never null + */ + public Set toPosixFilePermission(Permission permission){ + switch (permission) { + case FullControl: + return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE, + PosixFilePermission.OWNER_READ, + PosixFilePermission.OWNER_WRITE); + case Write: + return Sets.newHashSet(PosixFilePermission.OWNER_WRITE); + case Read: + return Sets.newHashSet(PosixFilePermission.OWNER_READ); + case ReadAcp: + return Sets.newHashSet(PosixFilePermission.OWNER_READ); + case WriteAcp: + return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE); + } + throw new IllegalStateException("Unknown Permission: " + permission); + } + + /** + * transform S3ObjectSummary to S3FileAttributes + * + * @param objectSummary S3ObjectSummary mandatory not null, the real objectSummary with +
* exactly the same key than the key param or the immediate descendant + * if it is a virtual directory + * @param key String the real key that can be exactly equal than the objectSummary or + * @return S3FileAttributes + */ + public com.upplication.s3fsfork.attribute.S3BasicFileAttributes toS3FileAttributes(S3ObjectSummary objectSummary, String key) { + // parse the data to BasicFileAttributes. + FileTime lastModifiedTime = null; + if (objectSummary.getLastModified() != null) { + lastModifiedTime = FileTime.from(objectSummary.getLastModified().getTime(), TimeUnit.MILLISECONDS); + } + long size = objectSummary.getSize(); + boolean directory = false; + boolean regularFile = false; + String resolvedKey = objectSummary.getKey(); + // check if is a directory and exists the key of this directory at amazon s3 + if (key.endsWith("/") && resolvedKey.equals(key) || + resolvedKey.equals(key + "/")) { + directory = true; + } else if (key.isEmpty()) { // is a bucket (no key) + directory = true; + resolvedKey = "/"; + } else if (!resolvedKey.equals(key) && resolvedKey.startsWith(key)) { // is a directory but not exists at amazon s3 + directory = true; + // no metadata, we fake one + size = 0; + // delete extra part + resolvedKey = key + "/"; + } else { + regularFile = true; + } + return new S3BasicFileAttributes(resolvedKey, lastModifiedTime, size, directory, regularFile); + } +} diff --git a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider index 3934e340ed..12cd6e4c29 100644 --- a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider +++ b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -1,3 +1,3 @@ -com.upplication.s3fs.S3FileSystemProvider +com.upplication.s3fsfork.S3FileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystemProvider 
com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpFileSystemProvider From 427693afb9723d5c2eada471241160c1323445ad Mon Sep 17 00:00:00 2001 From: Florian M Date: Wed, 24 Aug 2022 16:52:58 +0200 Subject: [PATCH 2/7] try with normalized uri --- project/Dependencies.scala | 4 ++- .../datastore/controllers/Application.scala | 26 ++++++++++++++++++- .../datastore/jzarr/BytesConverter.scala | 7 ++--- .../datastore/jzarr/ZarrHeader.scala | 2 +- .../s3fsfork/AmazonS3ClientFactory.java | 9 +++++++ ....scalableminds.webknossos.datastore.routes | 1 + 6 files changed, 43 insertions(+), 6 deletions(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 7eb753fb84..a1c99a4d3b 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -39,6 +39,7 @@ object Dependencies { private val webknossosWrap = "com.scalableminds" %% "webknossos-wrap" % webknossosWrapVersion private val xmlWriter = "org.glassfish.jaxb" % "txw2" % "2.2.11" private val woodstoxXml = "org.codehaus.woodstox" % "wstx-asl" % "3.2.3" + private val jackson = "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.12.7" private val redis = "net.debasishg" %% "redisclient" % "3.9" private val spire = "org.typelevel" %% "spire" % "0.14.1" private val jgrapht = "org.jgrapht" % "jgrapht-core" % "1.4.0" @@ -49,7 +50,7 @@ object Dependencies { private val jblosc = "org.lasersonlab" % "jblosc" % "1.0.1" private val scalajHttp = "org.scalaj" %% "scalaj-http" % "2.4.2" private val guava = "com.google.guava" % "guava" % "18.0" - private val awsS3 = "com.amazonaws" % "aws-java-sdk-s3" % "1.11.232" + private val awsS3 = "com.amazonaws" % "aws-java-sdk-s3" % "1.12.288" // "1.11.232" private val tika = "org.apache.tika" % "tika-core" % "1.5" @@ -95,6 +96,7 @@ object Dependencies { redis, jhdf, ucarCdm, + jackson, guava, awsS3, tika, diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala index f506148509..d5c43ae2d8 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala @@ -1,10 +1,15 @@ package com.scalableminds.webknossos.datastore.controllers +import com.amazonaws.ClientConfiguration +import com.amazonaws.client.builder.AwsClientBuilder +import com.amazonaws.regions.Regions +import com.amazonaws.services.s3.AmazonS3ClientBuilder import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore -import javax.inject.Inject +import com.upplication.s3fsfork.util.AnonymousAWSCredentialsProvider import play.api.mvc.{Action, AnyContent} +import javax.inject.Inject import scala.concurrent.ExecutionContext class Application @Inject()(redisClient: DataStoreRedisStore)(implicit ec: ExecutionContext) extends Controller { @@ -22,4 +27,23 @@ class Application @Inject()(redisClient: DataStoreRedisStore)(implicit ec: Execu } } + def testS3: Action[AnyContent] = Action { implicit request => + val credentialsProvider = new AnonymousAWSCredentialsProvider + val clientConfiguration = new ClientConfiguration + val client = AmazonS3ClientBuilder + .standard() + .withCredentials(credentialsProvider) + .withClientConfiguration(clientConfiguration) + .withMetricsCollector(null) + .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("s3.amazonaws.com", Regions.US_WEST_2.toString)) + .build() + + val o = client.getObject("power-analysis-ready-datastore", "power_901_annual_meteorology_utc.zarr/.zattrs") + val c = o.getObjectContent() + + logger.info(client.toString) + + Ok + } + } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/BytesConverter.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/BytesConverter.scala index c0b3d22994..6191dbd4dc 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/BytesConverter.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/BytesConverter.scala @@ -38,17 +38,18 @@ object BytesConverter { case ZarrDataType.i8 | ZarrDataType.u8 => val arrayTyped = array.asInstanceOf[Array[Long]] val byteBuffer = makeByteBuffer(arrayTyped.length * bytesPerElement, byteOrder) - byteBuffer.asLongBuffer().put(arrayTyped).array() + byteBuffer.asLongBuffer().put(arrayTyped) byteBuffer.array() case ZarrDataType.f4 => val arrayTyped = array.asInstanceOf[Array[Float]] val byteBuffer = makeByteBuffer(arrayTyped.length * bytesPerElement, byteOrder) - byteBuffer.asFloatBuffer().put(arrayTyped).array() + val asFloat = byteBuffer.asFloatBuffer() + asFloat.put(arrayTyped) byteBuffer.array() case ZarrDataType.f8 => val arrayTyped = array.asInstanceOf[Array[Double]] val byteBuffer = makeByteBuffer(arrayTyped.length * bytesPerElement, byteOrder) - byteBuffer.asDoubleBuffer().put(arrayTyped).array() + byteBuffer.asDoubleBuffer().put(arrayTyped) byteBuffer.array() } } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/ZarrHeader.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/ZarrHeader.scala index bf19fc0c0c..4013a33741 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/ZarrHeader.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/jzarr/ZarrHeader.scala @@ -133,7 +133,7 @@ object ZarrHeader { implicit object ZarrHeaderFormat extends Format[ZarrHeader] { override def reads(json: JsValue): JsResult[ZarrHeader] = - Json.reads[ZarrHeader].reads(json) + Json.using[Json.WithDefaultValues].reads[ZarrHeader].reads(json) override def writes(zarrHeader: ZarrHeader): JsValue = Json.obj( diff --git 
a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java index 2be52fc5f0..f1c9a43bda 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java +++ b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java @@ -11,5 +11,14 @@ public class AmazonS3ClientFactory extends AmazonS3Factory { @Override protected AmazonS3 createAmazonS3(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricsCollector) { return new AmazonS3Client(credentialsProvider, clientConfiguration, requestMetricsCollector); + /* + return AmazonS3ClientBuilder + .standard() + .withCredentials(credentialsProvider) + .withClientConfiguration(clientConfiguration) + .withRegion(Regions.EU_WEST_1) + .withMetricsCollector(requestMetricsCollector) + .build(); + */ } } diff --git a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes index 0231cf11a9..d992688134 100644 --- a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes +++ b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes @@ -3,6 +3,7 @@ # Health endpoint GET /health @com.scalableminds.webknossos.datastore.controllers.Application.health +GET /testS3 @com.scalableminds.webknossos.datastore.controllers.Application.testS3 # Read image data POST /datasets/:organizationName/:dataSetName/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaWebKnossos(token: Option[String], organizationName: String, dataSetName: String, dataLayerName: String) From bd3abbb14ff7a1d5e040466d669e88bfd02f3f34 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 25 Aug 2022 09:29:03 +0200 Subject: [PATCH 3/7] normalize s3 host. get rid of javac warnings. 
clean up --- build.sbt | 5 ++ .../java.nio.file.spi.FileSystemProvider | 2 +- project/Dependencies.scala | 5 +- .../datastore/controllers/Application.scala | 26 +------ .../datastore/s3fs}/AmazonS3Factory.java | 71 ++++++------------- .../webknossos/datastore/s3fs/LICENSE | 31 ++++++++ .../datastore/s3fs}/S3AccessControlList.java | 2 +- .../datastore/s3fs}/S3FileChannel.java | 4 +- .../datastore/s3fs}/S3FileStore.java | 6 +- .../s3fs}/S3FileStoreAttributeView.java | 2 +- .../datastore/s3fs}/S3FileSystem.java | 6 +- .../S3FileSystemConfigurationException.java | 2 +- .../datastore/s3fs}/S3FileSystemProvider.java | 64 ++++++++--------- .../datastore/s3fs}/S3Iterator.java | 6 +- .../webknossos/datastore/s3fs}/S3Path.java | 16 ++--- .../s3fs}/S3ReadOnlySeekableByteChannel.java | 3 +- .../s3fs}/S3SeekableByteChannel.java | 4 +- .../attribute/S3BasicFileAttributeView.java | 4 +- .../attribute/S3BasicFileAttributes.java | 2 +- .../attribute/S3PosixFileAttributeView.java | 4 +- .../attribute/S3PosixFileAttributes.java | 2 +- .../s3fs}/attribute/S3UserPrincipal.java | 2 +- .../util/AnonymousAWSCredentialsProvider.java | 2 +- .../datastore/s3fs}/util/AttributesUtils.java | 2 +- .../datastore/s3fs}/util/Cache.java | 4 +- .../datastore/s3fs}/util/IOUtils.java | 2 +- .../datastore/s3fs}/util/S3Utils.java | 26 +++---- .../datastore/storage/FileSystemsHolder.scala | 7 +- .../s3fsfork/AmazonS3ClientFactory.java | 24 ------- .../java.nio.file.spi.FileSystemProvider | 2 +- ....scalableminds.webknossos.datastore.routes | 1 - 31 files changed, 144 insertions(+), 195 deletions(-) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/AmazonS3Factory.java (67%) create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3AccessControlList.java (97%) rename 
webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileChannel.java (98%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileStore.java (95%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileStoreAttributeView.java (95%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileSystem.java (95%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileSystemConfigurationException.java (82%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3FileSystemProvider.java (91%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3Iterator.java (97%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3Path.java (96%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3ReadOnlySeekableByteChannel.java (98%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/S3SeekableByteChannel.java (98%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/attribute/S3BasicFileAttributeView.java (86%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/attribute/S3BasicFileAttributes.java (96%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/attribute/S3PosixFileAttributeView.java (92%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/attribute/S3PosixFileAttributes.java (94%) rename webknossos-datastore/app/com/{upplication/s3fsfork => 
scalableminds/webknossos/datastore/s3fs}/attribute/S3UserPrincipal.java (81%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/util/AnonymousAWSCredentialsProvider.java (86%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/util/AttributesUtils.java (98%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/util/Cache.java (84%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/util/IOUtils.java (92%) rename webknossos-datastore/app/com/{upplication/s3fsfork => scalableminds/webknossos/datastore/s3fs}/util/S3Utils.java (86%) delete mode 100644 webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java diff --git a/build.sbt b/build.sbt index 0ff22ecd93..31b0df50e9 100644 --- a/build.sbt +++ b/build.sbt @@ -14,10 +14,15 @@ ThisBuild / scalacOptions ++= Seq( "-language:implicitConversions", "-language:postfixOps", "-Xlint:unused", + "-Xlint:deprecation", s"-Wconf:src=target/.*:s", s"-Wconf:src=webknossos-datastore/target/.*:s", s"-Wconf:src=webknossos-tracingstore/target/.*:s" ) +ThisBuild / javacOptions ++= Seq( + "-Xlint:unchecked", + "-Xlint:deprecation" +) ThisBuild / dependencyCheckAssemblyAnalyzerEnabled := Some(false) diff --git a/conf/META-INF/services/java.nio.file.spi.FileSystemProvider b/conf/META-INF/services/java.nio.file.spi.FileSystemProvider index 12cd6e4c29..6739b09b3c 100644 --- a/conf/META-INF/services/java.nio.file.spi.FileSystemProvider +++ b/conf/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -1,3 +1,3 @@ -com.upplication.s3fsfork.S3FileSystemProvider +com.scalableminds.webknossos.datastore.s3fs.S3FileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpFileSystemProvider diff --git 
a/project/Dependencies.scala b/project/Dependencies.scala index a1c99a4d3b..a042b882d2 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -39,19 +39,18 @@ object Dependencies { private val webknossosWrap = "com.scalableminds" %% "webknossos-wrap" % webknossosWrapVersion private val xmlWriter = "org.glassfish.jaxb" % "txw2" % "2.2.11" private val woodstoxXml = "org.codehaus.woodstox" % "wstx-asl" % "3.2.3" - private val jackson = "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.12.7" private val redis = "net.debasishg" %% "redisclient" % "3.9" private val spire = "org.typelevel" %% "spire" % "0.14.1" private val jgrapht = "org.jgrapht" % "jgrapht-core" % "1.4.0" private val swagger = "io.swagger" %% "swagger-play2" % "1.7.1" private val jhdf = "cisd" % "jhdf5" % "19.04.0" private val ucarCdm = "edu.ucar" % "cdm-core" % "5.3.3" - // private val s3fs = "org.lasersonlab" % "s3fs" % "2.2.3" private val jblosc = "org.lasersonlab" % "jblosc" % "1.0.1" private val scalajHttp = "org.scalaj" %% "scalaj-http" % "2.4.2" private val guava = "com.google.guava" % "guava" % "18.0" - private val awsS3 = "com.amazonaws" % "aws-java-sdk-s3" % "1.12.288" // "1.11.232" + private val awsS3 = "com.amazonaws" % "aws-java-sdk-s3" % "1.12.288" private val tika = "org.apache.tika" % "tika-core" % "1.5" + private val jackson = "com.fasterxml.jackson.module" %% "jackson-module-scala" % "2.12.7" private val sql = Seq( diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala index d5c43ae2d8..f506148509 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala @@ -1,15 +1,10 @@ package com.scalableminds.webknossos.datastore.controllers -import 
com.amazonaws.ClientConfiguration -import com.amazonaws.client.builder.AwsClientBuilder -import com.amazonaws.regions.Regions -import com.amazonaws.services.s3.AmazonS3ClientBuilder import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore -import com.upplication.s3fsfork.util.AnonymousAWSCredentialsProvider +import javax.inject.Inject import play.api.mvc.{Action, AnyContent} -import javax.inject.Inject import scala.concurrent.ExecutionContext class Application @Inject()(redisClient: DataStoreRedisStore)(implicit ec: ExecutionContext) extends Controller { @@ -27,23 +22,4 @@ class Application @Inject()(redisClient: DataStoreRedisStore)(implicit ec: Execu } } - def testS3: Action[AnyContent] = Action { implicit request => - val credentialsProvider = new AnonymousAWSCredentialsProvider - val clientConfiguration = new ClientConfiguration - val client = AmazonS3ClientBuilder - .standard() - .withCredentials(credentialsProvider) - .withClientConfiguration(clientConfiguration) - .withMetricsCollector(null) - .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("s3.amazonaws.com", Regions.US_WEST_2.toString)) - .build() - - val o = client.getObject("power-analysis-ready-datastore", "power_901_annual_meteorology_utc.zarr/.zattrs") - val c = o.getObjectContent() - - logger.info(client.toString) - - Ok - } - } diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java similarity index 67% rename from webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java index 3640308164..a76493b384 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3Factory.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java @@ -1,16 +1,17 
@@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.AnonymousAWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.metrics.RequestMetricCollector; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.regions.Regions; import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.S3ClientOptions; -import com.upplication.s3fsfork.util.AnonymousAWSCredentialsProvider; +import com.scalableminds.webknossos.datastore.s3fs.util.AnonymousAWSCredentialsProvider; import java.net.URI; import java.util.Properties; @@ -19,7 +20,7 @@ /** * Factory base class to create a new AmazonS3 instance. 
*/ -public abstract class AmazonS3Factory { +public class AmazonS3Factory { public static final String ACCESS_KEY = "s3fs_access_key"; public static final String SECRET_KEY = "s3fs_secret_key"; @@ -47,55 +48,27 @@ public abstract class AmazonS3Factory { * @param props Properties with the credentials and others options * @return AmazonS3 */ - public AmazonS3 getAmazonS3(URI uri, Properties props) { - AmazonS3 client = createAmazonS3(getCredentialsProvider(props), getClientConfiguration(props), getRequestMetricsCollector(props)); - if (uri.getHost() != null) { - if (uri.getPort() != -1) - client.setEndpoint(uri.getHost() + ':' + uri.getPort()); - else - client.setEndpoint(uri.getHost()); - } - - client.setS3ClientOptions(getClientOptions(props)); - - return client; + public AmazonS3 getAmazonS3Client(URI uri, Properties props) { + return AmazonS3ClientBuilder + .standard() + .withCredentials(getCredentialsProvider(props)) + .withClientConfiguration(getClientConfiguration(props)) + .withEndpointConfiguration(getEndpointConfiguration(uri)) + .build(); } - /** - * should return a new AmazonS3 - * - * @param credentialsProvider AWSCredentialsProvider mandatory - * @param clientConfiguration ClientConfiguration mandatory - * @param requestMetricsCollector RequestMetricCollector mandatory - * @return {@link com.amazonaws.services.s3.AmazonS3} - */ - protected abstract AmazonS3 createAmazonS3(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricsCollector); + protected AwsClientBuilder.EndpointConfiguration getEndpointConfiguration(URI uri) { + String endpoint = uri.getHost(); + if (uri.getPort() != -1) + endpoint = uri.getHost() + ':' + uri.getPort(); + return new AwsClientBuilder.EndpointConfiguration(endpoint, Regions.DEFAULT_REGION.toString()); + } protected AWSCredentialsProvider getCredentialsProvider(Properties props) { - System.out.println("Hello from getCredentialsProvider"); - 
AWSCredentialsProvider credentialsProvider; if (props.getProperty(ACCESS_KEY) == null && props.getProperty(SECRET_KEY) == null) { - System.out.println("No access key and secret key present, returning anonymous credentials provider"); - // credentialsProvider = new DefaultAWSCredentialsProviderChain(); - credentialsProvider = new AnonymousAWSCredentialsProvider(); - } - else { - System.out.println("Access key and secret key present, returning static credentials provider"); - credentialsProvider = new AWSStaticCredentialsProvider(getAWSCredentials(props)); - } - return credentialsProvider; - } - - protected RequestMetricCollector getRequestMetricsCollector(Properties props) { - RequestMetricCollector requestMetricCollector = null; - if (props.containsKey(REQUEST_METRIC_COLLECTOR_CLASS)) { - try { - requestMetricCollector = (RequestMetricCollector) Class.forName(props.getProperty(REQUEST_METRIC_COLLECTOR_CLASS)).newInstance(); - } catch (Throwable t) { - throw new IllegalArgumentException("Can't instantiate REQUEST_METRIC_COLLECTOR_CLASS " + props.getProperty(REQUEST_METRIC_COLLECTOR_CLASS), t); - } + return new AnonymousAWSCredentialsProvider(); } - return requestMetricCollector; + return new AWSStaticCredentialsProvider(getAWSCredentials(props)); } protected S3ClientOptions getClientOptions(Properties props) { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE new file mode 100644 index 0000000000..66de3f67b7 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE @@ -0,0 +1,31 @@ +The s3fs package is based on + +https://github.com/lasersonlab/Amazon-S3-FileSystem-NIO2 + +version 5117da7c5a75a455951d7a1788c1a4b7a0719692 +Aug 25, 2022 + +Published under: + +MIT License (MIT) + +Copyright (c) 2014 Javier Arnáiz @arnaix +Copyright (c) 2014 Better.be Application Services BV + +Permission is hereby granted, free of 
charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java similarity index 97% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java index ce80910601..4957aaa933 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3AccessControlList.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import static java.lang.String.format; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java similarity index 98% rename from 
webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java index 41badbdc05..1cd2ea5e8e 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileChannel.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.S3Object; @@ -20,7 +20,7 @@ public class S3FileChannel extends FileChannel { - private com.upplication.s3fsfork.S3Path path; + private S3Path path; private Set options; private FileChannel filechannel; private Path tempFile; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java similarity index 95% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java index cc09dc74c6..31f3852db0 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStore.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import java.io.IOException; import java.nio.file.FileStore; @@ -11,10 +11,10 @@ public class S3FileStore extends FileStore implements Comparable { - private com.upplication.s3fsfork.S3FileSystem fileSystem; + private S3FileSystem fileSystem; private String name; - public S3FileStore(com.upplication.s3fsfork.S3FileSystem s3FileSystem, String name) { + public S3FileStore(S3FileSystem s3FileSystem, String name) { this.fileSystem = s3FileSystem; this.name = name; } diff --git 
a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java similarity index 95% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java index 7da0be3886..7f202373e7 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileStoreAttributeView.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import java.nio.file.attribute.FileStoreAttributeView; import java.util.Date; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java similarity index 95% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java index 319b998dc7..348d9043e4 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystem.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import java.io.IOException; import java.nio.file.FileStore; @@ -21,13 +21,13 @@ */ public class S3FileSystem extends FileSystem implements Comparable { - private final com.upplication.s3fsfork.S3FileSystemProvider provider; + private final S3FileSystemProvider provider; private final String key; private final AmazonS3 client; private final String endpoint; private int cache; - public S3FileSystem(com.upplication.s3fsfork.S3FileSystemProvider provider, String key, AmazonS3 client, String endpoint) { + 
public S3FileSystem(S3FileSystemProvider provider, String key, AmazonS3 client, String endpoint) { this.provider = provider; this.key = key; this.client = client; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java similarity index 82% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java index f9c209dbd3..9b7e464df7 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemConfigurationException.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; public class S3FileSystemConfigurationException extends RuntimeException { private static final long serialVersionUID = 1L; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java similarity index 91% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java index 9e862c85a1..14e1b17dc6 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3FileSystemProvider.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.internal.Constants; @@ -10,18 +10,18 @@ import com.google.common.collect.ImmutableList; import 
com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import com.upplication.s3fsfork.attribute.S3BasicFileAttributeView; -import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; -import com.upplication.s3fsfork.attribute.S3PosixFileAttributeView; -import com.upplication.s3fsfork.attribute.S3PosixFileAttributes; -import com.upplication.s3fsfork.util.AttributesUtils; -import com.upplication.s3fsfork.util.Cache; -import com.upplication.s3fsfork.util.S3Utils; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes; +import com.scalableminds.webknossos.datastore.s3fs.util.AttributesUtils; +import com.scalableminds.webknossos.datastore.s3fs.util.Cache; +import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils; +import org.apache.commons.lang3.NotImplementedException; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; +import java.net.URISyntaxException; import java.nio.channels.FileChannel; import java.nio.channels.SeekableByteChannel; import java.nio.file.*; @@ -72,8 +72,8 @@ public class S3FileSystemProvider extends FileSystemProvider { AmazonS3Factory.PROXY_HOST, AmazonS3Factory.PROXY_PASSWORD, AmazonS3Factory.PROXY_PORT, AmazonS3Factory.PROXY_USERNAME, AmazonS3Factory.PROXY_WORKSTATION, AmazonS3Factory.SOCKET_SEND_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_RECEIVE_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_TIMEOUT, AmazonS3Factory.USER_AGENT, AMAZON_S3_FACTORY_CLASS, AmazonS3Factory.SIGNER_OVERRIDE, AmazonS3Factory.PATH_STYLE_ACCESS); - private com.upplication.s3fsfork.util.S3Utils s3Utils = new S3Utils(); - private com.upplication.s3fsfork.util.Cache cache = new com.upplication.s3fsfork.util.Cache(); + private S3Utils s3Utils = new S3Utils(); + private Cache cache = new Cache(); @Override public String getScheme() { @@ -472,16 +472,7 @@ public void 
checkAccess(Path path, AccessMode... modes) throws IOException { @Override public V getFileAttributeView(Path path, Class type, LinkOption... options) { - S3Path s3Path = toS3Path(path); - if (type == BasicFileAttributeView.class) { - return (V) new S3BasicFileAttributeView(s3Path); - } else if (type == PosixFileAttributeView.class) { - return (V) new S3PosixFileAttributeView(s3Path); - } else if (type == null) { - throw new NullPointerException("Type is mandatory"); - } else { - return null; - } + throw new NotImplementedException(); } @Override @@ -525,10 +516,10 @@ public Map readAttributes(Path path, String attributes, LinkOpti if (attributes.equals("*") || attributes.equals("basic:*")) { BasicFileAttributes attr = readAttributes(path, BasicFileAttributes.class, options); - return com.upplication.s3fsfork.util.AttributesUtils.fileAttributeToMap(attr); + return AttributesUtils.fileAttributeToMap(attr); } else if (attributes.equals("posix:*")) { PosixFileAttributes attr = readAttributes(path, PosixFileAttributes.class, options); - return com.upplication.s3fsfork.util.AttributesUtils.fileAttributeToMap(attr); + return AttributesUtils.fileAttributeToMap(attr); } else { String[] filters = new String[]{attributes}; if (attributes.contains(",")) { @@ -557,23 +548,24 @@ public void setAttribute(Path path, String attribute, Object value, LinkOption.. 
* @return S3FileSystem never null */ public S3FileSystem createFileSystem(URI uri, Properties props) { - return new S3FileSystem(this, getFileSystemKey(uri, props), getAmazonS3(uri, props), uri.getHost()); + URI uriWithNormalizedHost = resolveShortcutHost(uri); + return new S3FileSystem(this, getFileSystemKey(uriWithNormalizedHost, props), getAmazonS3(uriWithNormalizedHost, props), uriWithNormalizedHost.getHost()); } - protected AmazonS3 getAmazonS3(URI uri, Properties props) { - return getAmazonS3Factory(props).getAmazonS3(uri, props); + private URI resolveShortcutHost(URI uri) { + String host = uri.getHost(); + if (!uri.getHost().contains(".")) { + host += ".s3.amazonaws.com"; + } + try { + return new URI(uri.getScheme(), uri.getUserInfo(), host, uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment()); + } catch (URISyntaxException e) { + return uri; + } } - protected AmazonS3Factory getAmazonS3Factory(Properties props) { - if (props.containsKey(AMAZON_S3_FACTORY_CLASS)) { - String amazonS3FactoryClass = props.getProperty(AMAZON_S3_FACTORY_CLASS); - try { - return (AmazonS3Factory) Class.forName(amazonS3FactoryClass).newInstance(); - } catch (InstantiationException | IllegalAccessException | ClassNotFoundException | ClassCastException e) { - throw new S3FileSystemConfigurationException("Configuration problem, couldn't instantiate AmazonS3Factory (" + amazonS3FactoryClass + "): ", e); - } - } - return new AmazonS3ClientFactory(); + protected AmazonS3 getAmazonS3(URI uri, Properties props) { + return new AmazonS3Factory().getAmazonS3Client(uri, props); } /** @@ -635,7 +627,7 @@ protected static ConcurrentMap getFilesystems() { return fileSystems; } - public com.upplication.s3fsfork.util.Cache getCache() { + public Cache getCache() { return cache; } diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java similarity index 97% rename from 
webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java index 6b00ef3068..5538075597 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3Iterator.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import java.nio.file.Path; import java.util.ArrayList; @@ -14,7 +14,7 @@ import com.amazonaws.services.s3.model.S3ObjectSummary; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import com.upplication.s3fsfork.util.S3Utils; +import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils; /** * S3 iterator over folders at first level. @@ -32,7 +32,7 @@ public class S3Iterator implements Iterator { private int size; private boolean incremental; - private com.upplication.s3fsfork.util.S3Utils s3Utils = new S3Utils(); + private S3Utils s3Utils = new S3Utils(); public S3Iterator(S3Path path) { this(path, false); diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java similarity index 96% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java index 64ad373b75..37d1261e67 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3Path.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java @@ -1,9 +1,9 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import com.google.common.base.*; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; +import 
com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; import java.io.File; import java.io.IOException; @@ -24,7 +24,7 @@ public class S3Path implements Path { /** * S3FileStore which represents the Bucket this path resides in. */ - private final com.upplication.s3fsfork.S3FileStore fileStore; + private final S3FileStore fileStore; /** * URI not encoded @@ -35,12 +35,12 @@ public class S3Path implements Path { /** * actual filesystem */ - private com.upplication.s3fsfork.S3FileSystem fileSystem; + private S3FileSystem fileSystem; /** * S3BasicFileAttributes cache */ - private com.upplication.s3fsfork.attribute.S3BasicFileAttributes fileAttributes; + private S3BasicFileAttributes fileAttributes; /** * Build an S3Path from path segments. '/' are stripped from each segment. @@ -49,7 +49,7 @@ public class S3Path implements Path { * @param first should be start with a '/' and is the bucket name * @param more directories and files */ - public S3Path(com.upplication.s3fsfork.S3FileSystem fileSystem, String first, String... more) { + public S3Path(S3FileSystem fileSystem, String first, String... more) { Preconditions.checkArgument(first != null, "first path must be not null"); Preconditions.checkArgument(!first.startsWith("//"), "first path doesnt start with '//'. 
Miss bucket"); @@ -69,7 +69,7 @@ public S3Path(com.upplication.s3fsfork.S3FileSystem fileSystem, String first, St Preconditions.checkArgument(pathsURI.size() >= 1, "path must start with bucket name"); Preconditions.checkArgument(!pathsURI.get(0).isEmpty(), "bucket name must be not empty"); String bucket = pathsURI.get(0); - this.fileStore = new com.upplication.s3fsfork.S3FileStore(fileSystem, bucket); + this.fileStore = new S3FileStore(fileSystem, bucket); // the filestore is not part of the uri pathsURI.remove(0); } @@ -604,7 +604,7 @@ private String decode(URI uri) { } } - public com.upplication.s3fsfork.attribute.S3BasicFileAttributes getFileAttributes() { + public S3BasicFileAttributes getFileAttributes() { return fileAttributes; } diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java similarity index 98% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java index 370c1ac424..959b553a17 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3ReadOnlySeekableByteChannel.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.S3Object; @@ -10,7 +10,6 @@ import java.nio.channels.NonWritableChannelException; import java.nio.channels.ReadableByteChannel; import java.nio.channels.SeekableByteChannel; -import java.nio.file.NoSuchFileException; import java.nio.file.OpenOption; import java.nio.file.ReadOnlyFileSystemException; import java.nio.file.StandardOpenOption; diff --git 
a/webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java similarity index 98% rename from webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java index fdc28bba7f..fca98f59f5 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/S3SeekableByteChannel.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork; +package com.scalableminds.webknossos.datastore.s3fs; import static java.lang.String.format; @@ -19,7 +19,7 @@ public class S3SeekableByteChannel implements SeekableByteChannel { - private com.upplication.s3fsfork.S3Path path; + private S3Path path; private Set options; private SeekableByteChannel seekable; private Path tempFile; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java similarity index 86% rename from webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java index a2cd23b284..80d0b62240 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributeView.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java @@ -1,6 +1,6 @@ -package com.upplication.s3fsfork.attribute; +package com.scalableminds.webknossos.datastore.s3fs.attribute; -import com.upplication.s3fsfork.S3Path; +import com.scalableminds.webknossos.datastore.s3fs.S3Path; import java.io.IOException; import 
java.nio.file.attribute.BasicFileAttributeView; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java similarity index 96% rename from webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java index 315d24fb9f..99318debfb 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3BasicFileAttributes.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.attribute; +package com.scalableminds.webknossos.datastore.s3fs.attribute; import static java.lang.String.format; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java similarity index 92% rename from webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java index 8a8ca710c3..ae25ad465e 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributeView.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java @@ -1,6 +1,6 @@ -package com.upplication.s3fsfork.attribute; +package com.scalableminds.webknossos.datastore.s3fs.attribute; -import com.upplication.s3fsfork.S3Path; +import com.scalableminds.webknossos.datastore.s3fs.S3Path; import java.io.IOException; import java.nio.file.attribute.*; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java similarity index 94% rename from webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java index 9416063927..d00d332c0d 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3PosixFileAttributes.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.attribute; +package com.scalableminds.webknossos.datastore.s3fs.attribute; import java.nio.file.attribute.*; import java.util.Set; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java similarity index 81% rename from webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java index 78607d345b..5a0bacc178 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/attribute/S3UserPrincipal.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.attribute; +package com.scalableminds.webknossos.datastore.s3fs.attribute; import java.nio.file.attribute.UserPrincipal; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java similarity index 86% rename from webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java rename to 
webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java index f0314cfb56..513431a8ff 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/util/AnonymousAWSCredentialsProvider.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.util; +package com.scalableminds.webknossos.datastore.s3fs.util; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java similarity index 98% rename from webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java index 50be7788a6..12a4983023 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/util/AttributesUtils.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.util; +package com.scalableminds.webknossos.datastore.s3fs.util; import java.nio.file.attribute.BasicFileAttributes; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java similarity index 84% rename from webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java index 8f4b7adf6e..7ca13ab00a 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/util/Cache.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java @@ -1,6 +1,6 @@ -package com.upplication.s3fsfork.util; +package 
com.scalableminds.webknossos.datastore.s3fs.util; -import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; public class Cache { diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java similarity index 92% rename from webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java index 893547d384..65d15be9e5 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/util/IOUtils.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java @@ -1,4 +1,4 @@ -package com.upplication.s3fsfork.util; +package com.scalableminds.webknossos.datastore.s3fs.util; import java.io.ByteArrayOutputStream; import java.io.IOException; diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java similarity index 86% rename from webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java rename to webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java index 5bbea222b6..0716563ad8 100644 --- a/webknossos-datastore/app/com/upplication/s3fsfork/util/S3Utils.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java @@ -1,12 +1,12 @@ -package com.upplication.s3fsfork.util; +package com.scalableminds.webknossos.datastore.s3fs.util; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.*; import com.google.common.collect.Sets; -import com.upplication.s3fsfork.S3Path; -import com.upplication.s3fsfork.attribute.S3BasicFileAttributes; -import com.upplication.s3fsfork.attribute.S3PosixFileAttributes; -import 
com.upplication.s3fsfork.attribute.S3UserPrincipal; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes; +import com.scalableminds.webknossos.datastore.s3fs.attribute.S3UserPrincipal; +import com.scalableminds.webknossos.datastore.s3fs.S3Path; import java.nio.file.NoSuchFileException; import java.nio.file.attribute.FileTime; @@ -24,11 +24,11 @@ public class S3Utils { /** * Get the {@link S3ObjectSummary} that represent this Path or her first child if this path not exists * - * @param s3Path {@link com.upplication.s3fsfork.S3Path} + * @param s3Path {@link S3Path} * @return {@link S3ObjectSummary} * @throws NoSuchFileException if not found the path and any child */ - public S3ObjectSummary getS3ObjectSummary(com.upplication.s3fsfork.S3Path s3Path) throws NoSuchFileException { + public S3ObjectSummary getS3ObjectSummary(S3Path s3Path) throws NoSuchFileException { String key = s3Path.getKey(); String bucketName = s3Path.getFileStore().name(); AmazonS3 client = s3Path.getFileSystem().getClient(); @@ -73,7 +73,7 @@ public S3ObjectSummary getS3ObjectSummary(com.upplication.s3fsfork.S3Path s3Path } catch (Exception e) { // } - throw new NoSuchFileException(bucketName + com.upplication.s3fsfork.S3Path.PATH_SEPARATOR + key); + throw new NoSuchFileException(bucketName + S3Path.PATH_SEPARATOR + key); } /** @@ -82,7 +82,7 @@ public S3ObjectSummary getS3ObjectSummary(com.upplication.s3fsfork.S3Path s3Path * @param s3Path S3Path mandatory not null * @return S3FileAttributes never null */ - public com.upplication.s3fsfork.attribute.S3BasicFileAttributes getS3FileAttributes(com.upplication.s3fsfork.S3Path s3Path) throws NoSuchFileException { + public S3BasicFileAttributes getS3FileAttributes(S3Path s3Path) throws NoSuchFileException { S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); return toS3FileAttributes(objectSummary, s3Path.getKey()); } @@ -93,14 
+93,14 @@ public com.upplication.s3fsfork.attribute.S3BasicFileAttributes getS3FileAttribu * @return S3PosixFileAttributes never null * @throws NoSuchFileException if the Path doesnt exists */ - public com.upplication.s3fsfork.attribute.S3PosixFileAttributes getS3PosixFileAttributes(S3Path s3Path) throws NoSuchFileException { + public S3PosixFileAttributes getS3PosixFileAttributes(S3Path s3Path) throws NoSuchFileException { S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); String key = s3Path.getKey(); String bucketName = s3Path.getFileStore().name(); - com.upplication.s3fsfork.attribute.S3BasicFileAttributes attrs = toS3FileAttributes(objectSummary, key); - com.upplication.s3fsfork.attribute.S3UserPrincipal userPrincipal = null; + S3BasicFileAttributes attrs = toS3FileAttributes(objectSummary, key); + S3UserPrincipal userPrincipal = null; Set permissions = null; if (!attrs.isDirectory()) { @@ -167,7 +167,7 @@ public Set toPosixFilePermission(Permission permission){ * @param key String the real key that can be exactly equal than the objectSummary or * @return S3FileAttributes */ - public com.upplication.s3fsfork.attribute.S3BasicFileAttributes toS3FileAttributes(S3ObjectSummary objectSummary, String key) { + public S3BasicFileAttributes toS3FileAttributes(S3ObjectSummary objectSummary, String key) { // parse the data to BasicFileAttributes. 
FileTime lastModifiedTime = null; if (objectSummary.getLastModified() != null) { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala index d029ee01a0..ff41d7cf9b 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala @@ -8,9 +8,8 @@ import java.util.ServiceLoader import com.google.common.collect.ImmutableMap import com.scalableminds.util.cache.LRUConcurrentCache import com.scalableminds.webknossos.datastore.dataformats.zarr.RemoteSourceDescriptor +import com.scalableminds.webknossos.datastore.s3fs.AmazonS3Factory import com.typesafe.scalalogging.LazyLogging -import com.upplication.s3fsfork -import com.upplication.s3fsfork.AmazonS3Factory import scala.collection.JavaConverters._ @@ -79,9 +78,9 @@ object FileSystemsHolder extends LazyLogging { ImmutableMap .builder[String, Any] .put(AmazonS3Factory.ACCESS_KEY, user) - .put(s3fsfork.AmazonS3Factory.SECRET_KEY, password) + .put(AmazonS3Factory.SECRET_KEY, password) .build - } else if (scheme == schemeHttps) { + } else if (scheme == schemeHttps || scheme == schemeHttp) { ImmutableMap.builder[String, Any].put("user", user).put("password", password).build } else emptyEnv }).getOrElse(emptyEnv) diff --git a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java b/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java deleted file mode 100644 index f1c9a43bda..0000000000 --- a/webknossos-datastore/app/com/upplication/s3fsfork/AmazonS3ClientFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.upplication.s3fsfork; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.metrics.RequestMetricCollector; -import 
com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; - -public class AmazonS3ClientFactory extends AmazonS3Factory { - - @Override - protected AmazonS3 createAmazonS3(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration, RequestMetricCollector requestMetricsCollector) { - return new AmazonS3Client(credentialsProvider, clientConfiguration, requestMetricsCollector); - /* - return AmazonS3ClientBuilder - .standard() - .withCredentials(credentialsProvider) - .withClientConfiguration(clientConfiguration) - .withRegion(Regions.EU_WEST_1) - .withMetricsCollector(requestMetricsCollector) - .build(); - */ - } -} diff --git a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider index 12cd6e4c29..6739b09b3c 100644 --- a/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider +++ b/webknossos-datastore/conf/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -1,3 +1,3 @@ -com.upplication.s3fsfork.S3FileSystemProvider +com.scalableminds.webknossos.datastore.s3fs.S3FileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystemProvider com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpFileSystemProvider diff --git a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes index d992688134..0231cf11a9 100644 --- a/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes +++ b/webknossos-datastore/conf/com.scalableminds.webknossos.datastore.routes @@ -3,7 +3,6 @@ # Health endpoint GET /health @com.scalableminds.webknossos.datastore.controllers.Application.health -GET /testS3 @com.scalableminds.webknossos.datastore.controllers.Application.testS3 # Read image data POST 
/datasets/:organizationName/:dataSetName/layers/:dataLayerName/data @com.scalableminds.webknossos.datastore.controllers.BinaryDataController.requestViaWebKnossos(token: Option[String], organizationName: String, dataSetName: String, dataLayerName: String) From 78fd957aa58da1f9ef5cd5dcc306fcd42249614b Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 25 Aug 2022 09:30:41 +0200 Subject: [PATCH 4/7] un-hide radio button in frontend --- frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx b/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx index cf919ee6cc..ae4d8a57fd 100644 --- a/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx +++ b/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx @@ -206,7 +206,7 @@ function DatasetAddZarrView(props: Props) { value={showCredentialsFields ? "show" : "hide"} onChange={(e) => setShowCredentialsFields(e.target.value === "show")} > - + {selectedProtocol === "https" ? "None" : "Anonymous"} From 6bf73f1287edcd6bdee60a925aa2f4081508ab81 Mon Sep 17 00:00:00 2001 From: Florian M Date: Thu, 25 Aug 2022 09:37:53 +0200 Subject: [PATCH 5/7] changelog --- CHANGELOG.unreleased.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index 93254ac583..2e680ae0e2 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -23,6 +23,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released - Added new filter options for the dataset list api at `api/datasets`: `organizationName: String`, `onlyMyOrganization: Boolean`, `uploaderId: String` [#6377](https://github.com/scalableminds/webknossos/pull/6377) - The Add Dataset view now hosts an Add Zarr Dataset tab to explore, configure and import remote Zarr datasets. 
[#6335](https://github.com/scalableminds/webknossos/pull/6335) - Added a UI to manage Zarr links for an annotation so that the annotation can be streamed to 3rd party tools. This change also includes new backend API routes for using the (private) zarr links to annotations as well as creating them. [#6367](https://github.com/scalableminds/webknossos/pull/6367) +- Zarr-based remote dataset import now also works for public AWS S3 endpoints with no credentials. [#6421](https://github.com/scalableminds/webknossos/pull/6421) ### Changed - webKnossos uses WebGL 2 instead of WebGL 1 now. In case your browser/hardware does not support this, webKnossos will alert you and you need to upgrade your system. [#6350](https://github.com/scalableminds/webknossos/pull/6350) From 4925ff5f1aff73b0301bd4a2acfca39025382f30 Mon Sep 17 00:00:00 2001 From: Jonathan Striebel Date: Thu, 25 Aug 2022 14:04:42 +0200 Subject: [PATCH 6/7] pretty frontend --- frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx b/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx index ae4d8a57fd..db27e0c2d9 100644 --- a/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx +++ b/frontend/javascripts/admin/dataset/dataset_add_zarr_view.tsx @@ -206,9 +206,7 @@ function DatasetAddZarrView(props: Props) { value={showCredentialsFields ? "show" : "hide"} onChange={(e) => setShowCredentialsFields(e.target.value === "show")} > - - {selectedProtocol === "https" ? "None" : "Anonymous"} - + {selectedProtocol === "https" ? "None" : "Anonymous"} {selectedProtocol === "https" ? 
"Basic authentication" : "With credentials"} From fd30bc5a246c4d4bfcd528448a76f5a7080d2973 Mon Sep 17 00:00:00 2001 From: Florian M Date: Mon, 29 Aug 2022 10:14:22 +0200 Subject: [PATCH 7/7] remove debug output --- .../datastore/s3fs/S3FileSystemProvider.java | 1 - .../s3fs/S3ReadOnlySeekableByteChannel.java | 15 ++------------- .../datastore/s3fs/S3SeekableByteChannel.java | 2 -- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java index 14e1b17dc6..40591c9aeb 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java @@ -600,7 +600,6 @@ private void verifySupportedOptions(Set allowedOptions, Set opti throw new ReadOnlyFileSystemException(); } - - System.out.println("Determining length..."); - this.length = 10 ; -/* path + this.length = path .getFileSystem() .getClient() .getObjectMetadata( @@ -68,9 +65,7 @@ public S3ReadOnlySeekableByteChannel(S3Path path, Set opti .name(), key ) - .getContentLength();*/ - - System.out.println("Opening Stream..."); + .getContentLength(); openStreamAt(0); } @@ -86,24 +81,18 @@ private void openStreamAt(long position) throws IOException { ) .withRange(position); - - System.out.println("Get object..."); S3Object s3Object = path .getFileSystem() .getClient() .getObject(rangeObjectRequest); - - System.out.println("Get object content..."); bufferedStream = new ExtBufferedInputStream( s3Object.getObjectContent(), DEFAULT_BUFFER_SIZE ); - - System.out.println("Wrap in stream..."); rbc = Channels.newChannel(bufferedStream); this.position = position; } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java index fca98f59f5..4d4ff0afbb 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java @@ -32,7 +32,6 @@ public class S3SeekableByteChannel implements SeekableByteChannel { * @throws IOException if an I/O error occurs */ public S3SeekableByteChannel(S3Path path, Set options) throws IOException { - System.out.println("Hello from S3SeekableByteChannel"); this.path = path; this.options = Collections.unmodifiableSet(new HashSet<>(options)); String key = path.getKey(); @@ -48,7 +47,6 @@ else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) && boolean removeTempFile = true; try { if (exists) { - System.out.println("Exists! Let's call getObject!"); try (S3Object object = path.getFileSystem() .getClient() .getObject(path.getFileStore().getBucket().getName(), key)) {