From c35b49c7693a48cad334b815a077fbdcb916cddb Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Fri, 27 Apr 2018 16:49:06 +0200
Subject: [PATCH] Move repository-s3 fixture tests to QA test project (#29372)

This commit moves the repository-s3 fixture tests added in #29296 into a
new `repository-s3/qa/amazon-s3` project. This new project allows the REST
integration tests to be executed against the real S3 service when all the
required environment variables are provided. When no environment variables
are provided, the tests are executed against the fixture added in #29296.

The REST tests located in the `repository-s3` plugin project now only
verify that the plugin is correctly loaded.

The REST tests have been adapted to allow a bucket name and a base path to
be specified as environment variables. This way it is possible to run the
tests with different base paths (which could be anything, like a CI job
name or a branch name) without multiplying buckets.

Related to #29349
---
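[Note for reviewers, not part of the commit message: a sketch of how the new
QA project is meant to be exercised. The environment variable names below are
the ones read by the new plugins/repository-s3/qa/amazon-s3/build.gradle; the
exact Gradle task path is an assumption based on the project layout and the
`check` wiring added to plugins/repository-s3/build.gradle.

  # Run the REST tests against the real S3 service (placeholder values):
  export amazon_s3_access_key="<access key>"
  export amazon_s3_secret_key="<secret key>"
  export amazon_s3_bucket="<existing bucket>"
  export amazon_s3_base_path="<ci-job-or-branch-name>"
  ./gradlew :plugins:repository-s3:qa:amazon-s3:check

  # With none of the variables set, the same task starts the AmazonS3Fixture
  # and runs the tests against it instead:
  unset amazon_s3_access_key amazon_s3_secret_key amazon_s3_bucket amazon_s3_base_path
  ./gradlew :plugins:repository-s3:qa:amazon-s3:check
]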
 plugins/repository-s3/build.gradle            |   9 +-
 .../repository-s3/qa/amazon-s3/build.gradle   |  83 +++
 .../repositories/s3/AmazonS3Fixture.java      | 137 +++++
 ...azonS3RepositoryClientYamlTestSuiteIT.java |  37 ++
 .../repositories/s3/AmazonS3TestServer.java   | 542 ++++++++++++++++++
 .../test/repository_s3/10_repository.yml      | 183 ++++++
 plugins/repository-s3/qa/build.gradle         |   0
 .../repositories/s3/S3Repository.java         |   2 +-
 .../test/repository_s3/20_repository.yml      |  24 -
 qa/smoke-test-plugins/build.gradle            |   4 +-
 qa/vagrant/build.gradle                       |   2 +-
 x-pack/qa/smoke-test-plugins-ssl/build.gradle |   6 +-
 x-pack/qa/smoke-test-plugins/build.gradle     |   6 +-
 13 files changed, 1001 insertions(+), 34 deletions(-)
 create mode 100644 plugins/repository-s3/qa/amazon-s3/build.gradle
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml
 create mode 100644 plugins/repository-s3/qa/build.gradle
 delete mode 100644 plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml

diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index ae971cfe4e1ec..23252881cd75f 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -64,9 +64,14 @@ test {
   exclude '**/*CredentialsTests.class'
 }
 
+check {
+  // also execute the QA tests when testing the plugin
+  dependsOn 'qa:amazon-s3:check'
+}
+
 integTestCluster {
-  keystoreSetting 's3.client.default.access_key', 'myaccesskey'
-  keystoreSetting 's3.client.default.secret_key', 'mysecretkey'
+  keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key"
+  keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key"
 }
 
 thirdPartyAudit.excludes = [
diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle
new file mode 100644
index 0000000000000..5e288899021a1
--- /dev/null
+++ b/plugins/repository-s3/qa/amazon-s3/build.gradle
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.test.AntFixture
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+
+dependencies {
+  testCompile project(path: ':plugins:repository-s3', configuration: 'runtime')
+}
+
+integTestCluster {
+  plugin ':plugins:repository-s3'
+}
+
+forbiddenApisTest {
+  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+  bundledSignatures -= 'jdk-non-portable'
+  bundledSignatures += 'jdk-internal'
+}
+
+boolean useFixture = false
+
+String s3AccessKey = System.getenv("amazon_s3_access_key")
+String s3SecretKey = System.getenv("amazon_s3_secret_key")
+String s3Bucket = System.getenv("amazon_s3_bucket")
+String s3BasePath = System.getenv("amazon_s3_base_path")
+
+if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) {
+  s3AccessKey = 's3_integration_test_access_key'
+  s3SecretKey = 's3_integration_test_secret_key'
+  s3Bucket = 'bucket_test'
+  s3BasePath = 'integration_test'
+  useFixture = true
+}
+
+/** A task to start the AmazonS3Fixture which emulates an S3 service **/
+task s3Fixture(type: AntFixture) {
+  dependsOn compileTestJava
+  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
+  executable = new File(project.runtimeJavaHome, 'bin/java')
+  args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket
+}
+
+Map<String, Object> expansions = [
+  'bucket': s3Bucket,
+  'base_path': s3BasePath
+]
+processTestResources {
+  inputs.properties(expansions)
+  MavenFilteringHack.filter(it, expansions)
+}
+
+integTestCluster {
+  keystoreSetting 's3.client.integration_test.access_key', s3AccessKey
+  keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey
+
+  if (useFixture) {
+    dependsOn s3Fixture
+    /* Use a closure on the string to delay evaluation until tests are executed */
+    setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}"
+  } else {
+    println "Using an external service to test the repository-s3 plugin"
+  }
+}
\ No newline at end of file
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
new file mode 100644
index 0000000000000..c8321e83d1390
--- /dev/null
+++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.s3;
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.mocksocket.MockHttpServer;
+import org.elasticsearch.repositories.s3.AmazonS3TestServer.Response;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+
+/**
+ * {@link AmazonS3Fixture} is a fixture that emulates an S3 service.

+ * It starts an asynchronous socket server that binds to a random local port. The server parses
+ * HTTP requests and uses a {@link AmazonS3TestServer} to handle them before returning
+ * them to the client as HTTP responses.
+ */
+public class AmazonS3Fixture {
+
+    public static void main(String[] args) throws Exception {
+        if (args == null || args.length != 2) {
+            throw new IllegalArgumentException("AmazonS3Fixture <working directory> <bucket>");
+        }
+
+        final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+        final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+
+        try {
+            final Path workingDirectory = workingDir(args[0]);
+            // Writes the PID of the current Java process in a `pid` file located in the working directory
+            writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+
+            final String addressAndPort = addressToString(httpServer.getAddress());
+            // Writes the address and port of the http server in a `ports` file located in the working directory
+            writeFile(workingDirectory, "ports", addressAndPort);
+
+            // Emulates S3
+            final String storageUrl = "http://" + addressAndPort;
+            final AmazonS3TestServer storageTestServer = new AmazonS3TestServer(storageUrl);
+            storageTestServer.createBucket(args[1]);
+
+            httpServer.createContext("/", new ResponseHandler(storageTestServer));
+            httpServer.start();
+
+            // Wait to be killed
+            Thread.sleep(Long.MAX_VALUE);
+
+        } finally {
+            httpServer.stop(0);
+        }
+    }
+
+    @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
+    private static Path workingDir(final String dir) {
+        return Paths.get(dir);
+    }
+
+    private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
+        final Path tempPidFile = Files.createTempFile(dir, null, null);
+        Files.write(tempPidFile, singleton(content));
+        Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+    }
+
+    private static String addressToString(final SocketAddress address) {
+        final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
+        if (inetSocketAddress.getAddress() instanceof Inet6Address) {
+            return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+        } else {
+            return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+        }
+    }
+
+    static class ResponseHandler implements HttpHandler {
+
+        private final AmazonS3TestServer storageServer;
+
+        private ResponseHandler(final AmazonS3TestServer storageServer) {
+            this.storageServer = storageServer;
+        }
+
+        @Override
+        public void handle(HttpExchange exchange) throws IOException {
+            String method = exchange.getRequestMethod();
+            String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath();
+            String query = exchange.getRequestURI().getRawQuery();
+            Map<String, List<String>> headers = exchange.getRequestHeaders();
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            Streams.copy(exchange.getRequestBody(), out);
+
+            final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray());
+
+            Map<String, List<String>> responseHeaders = exchange.getResponseHeaders();
+            responseHeaders.put("Content-Type", singletonList(storageResponse.contentType));
+            storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
+            exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length);
+            if (storageResponse.body.length > 0) {
+                exchange.getResponseBody().write(storageResponse.body);
+            }
+            exchange.close();
+        }
+    }
+}
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..afcc0fa353482
--- /dev/null
+++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+
+public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return ESClientYamlSuiteTestCase.createParameters();
+    }
+}
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java
new file mode 100644
index 0000000000000..a3ea287b7f829
--- /dev/null
+++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.s3;
+
+import com.amazonaws.util.DateUtils;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
+
+/**
+ * {@link AmazonS3TestServer} emulates an S3 service through a {@link #handle(String, String, String, Map, byte[])}
+ * method that provides appropriate responses for specific requests like the real S3 platform would do.
+ * It is largely based on the official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/.
+ */
+public class AmazonS3TestServer {
+
+    private static byte[] EMPTY_BYTE = new byte[0];
+
+    /** List of the buckets stored on this test server **/
+    private final Map<String, Bucket> buckets = ConcurrentCollections.newConcurrentMap();
+
+    /** Request handlers for the requests made by the S3 client **/
+    private final PathTrie<RequestHandler> handlers;
+
+    /** Server endpoint **/
+    private final String endpoint;
+
+    /** Incremented to provide unique request ids **/
+    private final AtomicLong requests = new AtomicLong(0);
+
+    /**
+     * Creates a {@link AmazonS3TestServer} with a custom endpoint
+     */
+    AmazonS3TestServer(final String endpoint) {
+        this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
+        this.handlers = defaultHandlers(endpoint, buckets);
+    }
+
+    /** Creates a bucket in the test server **/
+    void createBucket(final String bucketName) {
+        buckets.put(bucketName, new Bucket(bucketName));
+    }
+
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    /**
+     * Returns a response for the given request
+     *
+     * @param method  the HTTP method of the request
+     * @param path    the path of the URL of the request
+     * @param query   the query string of the URL of the request
+     * @param headers the HTTP headers of the request
+     * @param body    the HTTP request body
+     * @return a {@link Response}
+     * @throws IOException if something goes wrong
+     */
+    public Response handle(final String method,
+                           final String path,
+                           final String query,
+                           final Map<String, List<String>> headers,
+                           byte[] body) throws IOException {
+
+        final long requestId = requests.incrementAndGet();
+
+        final Map<String, String> params = new HashMap<>();
+        if (query != null) {
+            RestUtils.decodeQueryString(query, 0, params);
+        }
+
+        final List<String> authorizations = headers.get("Authorization");
+        if (authorizations == null
+            || (authorizations.isEmpty() == false && authorizations.get(0).contains("s3_integration_test_access_key") == false)) {
+            return newError(requestId, RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", "");
+        }
+
+        final RequestHandler handler = handlers.retrieve(method + " " + path, params);
+        if (handler != null) {
+            return handler.execute(params, headers, body, requestId);
+        } else {
+            return newInternalError(requestId, "No handler defined for request [method: " + method + ", path: " + path + "]");
+        }
+    }
+
+    @FunctionalInterface
+    interface RequestHandler {
+
+        /**
+         * Simulates the execution of an S3 request and returns a corresponding response.
+         *
+         * @param params    the request's query string parameters
+         * @param headers   the request's headers
+         * @param body      the request body provided as a byte array
+         * @param requestId a unique id for the incoming request
+         * @return the corresponding response
+         *
+         * @throws IOException if something goes wrong
+         */
+        Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body, long requestId) throws IOException;
+    }
+
+    /** Builds the default request handlers **/
+    private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, final Map<String, Bucket> buckets) {
+        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
+
+        // HEAD Object
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
+        objectsPaths("HEAD " + endpoint + "/{bucket}").forEach(path ->
+            handlers.insert(path, (params, headers, body, id) -> {
+                final String bucketName = params.get("bucket");
+
+                final Bucket bucket = buckets.get(bucketName);
+                if (bucket == null) {
+                    return newBucketNotFoundError(id, bucketName);
+                }
+
+                final String objectName = objectName(params);
+                for (Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
+                    if (object.getKey().equals(objectName)) {
+                        return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+                    }
+                }
+                return newObjectNotFoundError(id, objectName);
+            })
+        );
+
+        // PUT Object & PUT Object Copy
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
+        objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path ->
+            handlers.insert(path, (params, headers, body, id) -> {
+                final String destBucketName = params.get("bucket");
+
+                final Bucket destBucket = buckets.get(destBucketName);
+                if (destBucket == null) {
+                    return newBucketNotFoundError(id, destBucketName);
+                }
+
+                final String destObjectName = objectName(params);
+
+                // Request is a copy request
+                List<String> headerCopySource = headers.getOrDefault("x-amz-copy-source", emptyList());
+                if (headerCopySource.isEmpty() == false) {
+                    String srcObjectName = headerCopySource.get(0);
+
+                    Bucket srcBucket = null;
+                    for (Bucket bucket : buckets.values()) {
+                        String prefix = "/" + bucket.name + "/";
+                        if (srcObjectName.startsWith(prefix)) {
+                            srcObjectName = srcObjectName.replaceFirst(prefix, "");
+                            srcBucket = bucket;
+                            break;
+                        }
+                    }
+
+                    if (srcBucket == null || srcBucket.objects.containsKey(srcObjectName) == false) {
+                        return newObjectNotFoundError(id, srcObjectName);
+                    }
+
+                    byte[] bytes = srcBucket.objects.get(srcObjectName);
+                    if (bytes != null) {
+                        destBucket.objects.put(destObjectName, bytes);
+                        return newCopyResultResponse(id);
+                    } else {
+                        return newObjectNotFoundError(id, srcObjectName);
+                    }
+                } else {
+                    // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip"
+                    // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here.
+                    //
+                    // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+                    //
+                    List<String> headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList());
+                    if (headerDecodedContentLength.size() == 1) {
+                        int contentLength = Integer.valueOf(headerDecodedContentLength.get(0));
+
+                        // Chunked requests have a payload like this:
+                        //
+                        // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8
+                        // ... bytes of data ....
+                        // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4
+                        //
+                        try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) {
+                            int b;
+                            // Moves to the end of the first signature line
+                            while ((b = inputStream.read()) != -1) {
+                                if (b == '\n') {
+                                    break;
+                                }
+                            }
+
+                            final byte[] bytes = new byte[contentLength];
+                            inputStream.read(bytes, 0, contentLength);
+
+                            destBucket.objects.put(destObjectName, bytes);
+                            return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+                        }
+                    }
+                }
+                return newInternalError(id, "Something is wrong with this PUT request");
+            })
+        );
+
+        // DELETE Object
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
+        objectsPaths("DELETE " + endpoint + "/{bucket}").forEach(path ->
+            handlers.insert(path, (params, headers, body, id) -> {
+                final String bucketName = params.get("bucket");
+
+                final Bucket bucket = buckets.get(bucketName);
+                if (bucket == null) {
+                    return newBucketNotFoundError(id, bucketName);
+                }
+
+                final String objectName = objectName(params);
+                if (bucket.objects.remove(objectName) != null) {
+                    return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+                }
+                return newObjectNotFoundError(id, objectName);
+            })
+        );
+
+        // GET Object
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
+        objectsPaths("GET " + endpoint + "/{bucket}").forEach(path ->
+            handlers.insert(path, (params, headers, body, id) -> {
+                final String bucketName = params.get("bucket");
+
+                final Bucket bucket = buckets.get(bucketName);
+                if (bucket == null) {
+                    return newBucketNotFoundError(id, bucketName);
+                }
+
+                final String objectName = objectName(params);
+                if (bucket.objects.containsKey(objectName)) {
+                    return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(objectName));
+                }
+                return newObjectNotFoundError(id, objectName);
+            })
+        );
+
+        // HEAD Bucket
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
+        handlers.insert("HEAD " + endpoint + "/{bucket}", (params, headers, body, id) -> {
+            String bucket = params.get("bucket");
+            if (Strings.hasText(bucket) && buckets.containsKey(bucket)) {
+                return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+            } else {
+                return newBucketNotFoundError(id, bucket);
+            }
+        });
+
+        // GET Bucket (List Objects) Version 1
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
+        handlers.insert("GET " + endpoint + "/{bucket}/", (params, headers, body, id) -> {
+            final String bucketName = params.get("bucket");
+
+            final Bucket bucket = buckets.get(bucketName);
+            if (bucket == null) {
+                return newBucketNotFoundError(id, bucketName);
+            }
+
+            String prefix = params.get("prefix");
+            if (prefix == null) {
+                List<String> prefixes = headers.get("Prefix");
+                if (prefixes != null && prefixes.size() == 1) {
+                    prefix = prefixes.get(0);
+                }
+            }
+            return newListBucketResultResponse(id, bucket, prefix);
+        });
+
+        // Delete Multiple Objects
+        //
+        // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
+        handlers.insert("POST " + endpoint + "/", (params, headers, body, id) -> {
+            final List<String> deletes = new ArrayList<>();
+            final List<String> errors = new ArrayList<>();
+
+            if (params.containsKey("delete")) {
+                // The request body is something like:
+                // <Delete><Object><Key>...</Key></Object><Object><Key>...</Key></Object></Delete>
+                String request = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(body), StandardCharsets.UTF_8));
+                if (request.startsWith("<Delete>")) {
+                    final String startMarker = "<Key>";
+                    final String endMarker = "</Key>";
+
+                    int offset = 0;
+                    while (offset != -1) {
+                        offset = request.indexOf(startMarker, offset);
+                        if (offset > 0) {
+                            int closingOffset = request.indexOf(endMarker, offset);
+                            if (closingOffset != -1) {
+                                offset = offset + startMarker.length();
+                                final String objectName = request.substring(offset, closingOffset);
+
+                                boolean found = false;
+                                for (Bucket bucket : buckets.values()) {
+                                    if (bucket.objects.remove(objectName) != null) {
+                                        found = true;
+                                    }
+                                }
+
+                                if (found) {
+                                    deletes.add(objectName);
+                                } else {
+                                    errors.add(objectName);
+                                }
+                            }
+                        }
+                    }
+                    return newDeleteResultResponse(id, deletes, errors);
+                }
+            }
+            return newInternalError(id, "Something is wrong with this POST multiple deletes request");
+        });
+
+        return handlers;
+    }
+
+    /**
+     * Represents an S3 bucket.
+     */
+    static class Bucket {
+
+        /** Bucket name **/
+        final String name;
+
+        /** Blobs contained in the bucket **/
+        final Map<String, byte[]> objects;
+
+        Bucket(final String name) {
+            this.name = Objects.requireNonNull(name);
+            this.objects = ConcurrentCollections.newConcurrentMap();
+        }
+    }
+
+    /**
+     * Represents an HTTP Response.
+     */
+    static class Response {
+
+        final RestStatus status;
+        final Map<String, String> headers;
+        final String contentType;
+        final byte[] body;
+
+        Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) {
+            this.status = Objects.requireNonNull(status);
+            this.headers = Objects.requireNonNull(headers);
+            this.contentType = Objects.requireNonNull(contentType);
+            this.body = Objects.requireNonNull(body);
+        }
+    }
+
+    /**
+     * Decomposes a path like "http://host:port/{bucket}" into 10 derived paths like:
+     * - http://host:port/{bucket}/{path0}
+     * - http://host:port/{bucket}/{path0}/{path1}
+     * - http://host:port/{bucket}/{path0}/{path1}/{path2}
+     * - etc
+     */
+    private static List<String> objectsPaths(final String path) {
+        final List<String> paths = new ArrayList<>();
+        String p = path;
+        for (int i = 0; i < 10; i++) {
+            p = p + "/{path" + i + "}";
+            paths.add(p);
+        }
+        return paths;
+    }
+
+    /**
+     * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
+     *
+     * This is the counterpart of {@link #objectsPaths(String)}
+     */
+    private static String objectName(final Map<String, String> params) {
+        final StringBuilder name = new StringBuilder();
+        for (int i = 0; i < 10; i++) {
+            String value = params.getOrDefault("path" + i, null);
+            if (value != null) {
+                if (name.length() > 0) {
+                    name.append('/');
+                }
+                name.append(value);
+            }
+        }
+        return name.toString();
+    }
+
+    /**
+     * S3 ListBucketResult Response
+     */
+    private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) {
+        final String id = Long.toString(requestId);
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">");
+        response.append("<Prefix>");
+        if (prefix != null) {
+            response.append(prefix);
+        }
+        response.append("</Prefix>");
+        response.append("<Marker/>");
+        response.append("<MaxKeys>1000</MaxKeys>");
+        response.append("<IsTruncated>false</IsTruncated>");
+
+        int count = 0;
+        for (Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
+            String objectName = object.getKey();
+            if (prefix == null || objectName.startsWith(prefix)) {
+                response.append("<Contents>");
+                response.append("<Key>").append(objectName).append("</Key>");
+                response.append("<LastModified>").append(DateUtils.formatISO8601Date(new Date())).append("</LastModified>");
+                response.append("<ETag>\"").append(count++).append("\"</ETag>");
+                response.append("<Size>").append(object.getValue().length).append("</Size>");
+                response.append("</Contents>");
+            }
+        }
+        response.append("</ListBucketResult>");
+        return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+    }
+
+    /**
+     * S3 Copy Result Response
+     */
+    private static Response newCopyResultResponse(final long requestId) {
+        final String id = Long.toString(requestId);
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<CopyObjectResult>");
+        response.append("<LastModified>").append(DateUtils.formatISO8601Date(new Date())).append("</LastModified>");
+        response.append("<ETag>").append(requestId).append("</ETag>");
+        response.append("</CopyObjectResult>");
+        return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+    }
+
+    /**
+     * S3 DeleteResult Response
+     */
+    private static Response newDeleteResultResponse(final long requestId,
+                                                    final List<String> deletedObjects,
+                                                    final List<String> ignoredObjects) {
+        final String id = Long.toString(requestId);
+
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<DeleteResult>");
+        for (String deletedObject : deletedObjects) {
+            response.append("<Deleted>");
+            response.append("<Key>").append(deletedObject).append("</Key>");
+            response.append("</Deleted>");
+        }
+        for (String ignoredObject : ignoredObjects) {
+            response.append("<Error>");
+            response.append("<Key>").append(ignoredObject).append("</Key>");
+            response.append("<Code>NoSuchKey</Code>");
+            response.append("</Error>");
+        }
+        response.append("</DeleteResult>");
+        return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+    }
+
+    private static Response newBucketNotFoundError(final long requestId, final String bucket) {
+        return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket);
+    }
+
+    private static Response newObjectNotFoundError(final long requestId, final String object) {
+        return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object);
+    }
+
+    private static Response newInternalError(final long requestId, final String resource) {
+        return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource);
+    }
+
+    /**
+     * S3 Error
+     *
+     * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+     */
+    private static Response newError(final long requestId,
+                                     final RestStatus status,
+                                     final String code,
+                                     final String message,
+                                     final String resource) {
+        final String id = Long.toString(requestId);
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<Error>");
+        response.append("<Code>").append(code).append("</Code>");
+        response.append("<Message>").append(message).append("</Message>");
+        response.append("<Resource>").append(resource).append("</Resource>");
+        response.append("<RequestId>").append(id).append("</RequestId>");
+        response.append("</Error>");
+        return new Response(status, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+    }
+}
diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml
new file mode 100644
index 0000000000000..8b3daccf0a2d7
--- /dev/null
+++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml
@@ -0,0 +1,183 @@
+# Integration tests for repository-s3
+---
+"Snapshot/Restore with repository-s3":
+
+  # Register repository
+  - do:
+      snapshot.create_repository:
+        repository: repository
+        body:
+          type: s3
+          settings:
+            bucket: ${bucket}
+            client: integration_test
+            base_path: ${base_path}
+            canned_acl: private
+            storage_class: standard
+
+  - match: { acknowledged: true }
+
+  # Get repository
+  - do:
+      snapshot.get_repository:
+        repository: repository
+
+  - match: { repository.settings.bucket : ${bucket} }
+  - match: { repository.settings.client : "integration_test" }
+  - match: { repository.settings.base_path : ${base_path} }
+  - match: { repository.settings.canned_acl : "private" }
+  - match: { repository.settings.storage_class : "standard" }
+  - is_false: repository.settings.access_key
+  - is_false: repository.settings.secret_key
+
+  # Index documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 1
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 2
+          - snapshot: one
+          - index:
+              _index: docs
+              _type: doc
+              _id: 3
+          - snapshot: one
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Create a first snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-one }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.include_global_state: true }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.status:
+        repository: repository
+        snapshot: snapshot-one
+
+  - is_true: snapshots
+  - match: { snapshots.0.snapshot: snapshot-one }
+  - match: { snapshots.0.state : SUCCESS }
+
+  # Index more documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type: doc
+              _id: 4
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 5
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 6
+          - snapshot: two
+          - index:
+              _index: docs
+              _type: doc
+              _id: 7
+          - snapshot: two
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Create a second snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-two }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.get:
+        repository: repository
+        snapshot: snapshot-one,snapshot-two
+
+  - is_true: snapshots
+  - match: { snapshots.0.state : SUCCESS }
+  - match: { snapshots.1.state : SUCCESS }
+
+  # Delete the index
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the second snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-two
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 7}
+
+  # Delete the index again
+  - do:
+      indices.delete:
+        index: docs
+
+  # Restore the first snapshot
+  - do:
+      snapshot.restore:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Remove the snapshots
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-two
+
+  - do:
+      snapshot.delete:
+        repository: repository
+        snapshot: snapshot-one
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: repository
diff --git a/plugins/repository-s3/qa/build.gradle b/plugins/repository-s3/qa/build.gradle
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
index 51bb6f2024cd4..e784415b8c999 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
@@ -157,7 +157,7 @@ class S3Repository extends BlobStoreRepository {
 
         String bucket = BUCKET_SETTING.get(metadata.settings());
         if (bucket == null) {
-            throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
+            throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
         }
 
         boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml
deleted file mode 100644
index 74cab3edcb705..0000000000000
--- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# Integration tests for Repository S3 component
-#
-"S3 repository can be registered":
-  - do:
-      snapshot.create_repository:
-        repository: test_repo_s3_1
-        verify: false
-        body:
-          type: s3
-          settings:
-            bucket: "my_bucket_name"
-            canned_acl: "public-read"
-            storage_class: "standard"
-
-  # Get repository
-  - do:
-      snapshot.get_repository:
-        repository: test_repo_s3_1
-
-  - is_true: test_repo_s3_1
-  - is_true: test_repo_s3_1.settings.bucket
-  - is_false: test_repo_s3_1.settings.access_key
-  - is_false: test_repo_s3_1.settings.secret_key
-  - match: {test_repo_s3_1.settings.canned_acl : "public-read"}
diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle
index d60216dad194f..602dfa2d6ea4f 100644
--- a/qa/smoke-test-plugins/build.gradle
+++ b/qa/smoke-test-plugins/build.gradle
@@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   integTestCluster {
-    plugin subproj.path
+    plugin pluginProject.path
   }
   pluginsCount += 1
 }
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 4086cf2205785..2b1ffb280819c 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -22,7 +22,7 @@ apply plugin: 'elasticsearch.vagrant'
 
 List<String> plugins = []
 for (Project subproj : project.rootProject.subprojects) {
-  if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) {
+  if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) {
     // add plugin as a dep
     dependencies {
       packaging project(path: "${subproj.path}", configuration: 'zip')
diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index bc7aa9fd39328..28fd4d2db49ed 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -1,5 +1,7 @@
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 import org.elasticsearch.gradle.test.NodeInfo
 
 import javax.net.ssl.HttpsURLConnection
@@ -160,9 +162,9 @@ integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCert
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }
diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index 8c232bc5f3a51..207fa8204db00 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -1,4 +1,6 @@
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
@@ -8,9 +10,9 @@ dependencies {
 }
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }