diff --git a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java index 361e483d4a840..27cde3b6e1bf7 100644 --- a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java @@ -37,7 +37,7 @@ * Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be - * executed with {@link Runtime#getRuntime()#exec}. + * executed with {@link Runtime#exec(String)}. * * The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index d958cb8a69bb8..346480b3d5ae5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -676,6 +676,10 @@ class BuildPlugin implements Plugin { */ (javadoc.options as CoreJavadocOptions).addBooleanOption('html5', true) } + // ensure javadoc task is run with 'check' + project.pluginManager.withPlugin('lifecycle-base') { + project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(project.tasks.withType(Javadoc)) + } configureJavadocJar(project) } @@ -889,7 +893,6 @@ class BuildPlugin implements Plugin { test.systemProperty('io.netty.noUnsafe', 'true') test.systemProperty('io.netty.noKeySetOptimization', 'true') test.systemProperty('io.netty.recycler.maxCapacityPerThread', '0') - test.systemProperty('io.netty.allocator.numDirectArenas', '0') test.testLogging { TestLoggingContainer logging -> logging.showExceptions = true diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java index c9152486a1c51..1fb22812683ee 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java @@ -42,7 +42,7 @@ public JarHellTask() { @TaskAction public void runJarHellCheck() { LoggedExec.javaexec(getProject(), spec -> { - spec.classpath(getClasspath()); + spec.environment("CLASSPATH", getClasspath().getAsPath()); spec.setMain("org.elasticsearch.bootstrap.JarHell"); }); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java index 8f4232cca6e2c..f53f125cbf788 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java @@ -133,7 +133,7 @@ public boolean equals(Object obj) { && Objects.equals(id, other.id) && docVersion == other.docVersion && found == other.found - && tookInMillis == tookInMillis + && tookInMillis == other.tookInMillis && Objects.equals(termVectorList, other.termVectorList); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java index 
ea0ea52e892bd..4e158fc532667 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/NamedPolicy.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -60,7 +59,7 @@ private static void declareParserOptions(ConstructingObjectParser parser) parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = XContentBuilder.builder(p.contentType().xContent()); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, QUERY_FIELD); parser.declareStringArray(ConstructingObjectParser.constructorArg(), INDICES_FIELD); parser.declareString(ConstructingObjectParser.constructorArg(), MATCH_FIELD_FIELD); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java index 11a401271fcac..b5e87b496f80d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ilm/IndexLifecycleExplainResponse.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -86,7 +85,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject { PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = JsonXContent.contentBuilder(); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, STEP_INFO_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), PHASE_EXECUTION_INFO); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java index 14e46bc9ef09f..96bae2d62152e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java @@ -58,6 +58,10 @@ public void cleanup() { public void testPutPolicy() throws Exception { RestHighLevelClient client = highLevelClient(); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("users") + .mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + // tag::enrich-put-policy-request PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), @@ -104,6 +108,10 @@ public void testDeletePolicy() throws Exception { RestHighLevelClient client = highLevelClient(); { + CreateIndexRequest createIndexRequest = new 
CreateIndexRequest("users") + .mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + // Add a policy, so that it can be deleted: PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), @@ -155,6 +163,10 @@ public void onFailure(Exception e) { public void testGetPolicy() throws Exception { RestHighLevelClient client = highLevelClient(); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("users") + .mapping(Map.of("properties", Map.of("email", Map.of("type", "keyword")))); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + PutPolicyRequest putPolicyRequest = new PutPolicyRequest( "users-policy", "match", List.of("users"), "email", List.of("address", "zip", "city", "state")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java index fc0cfb733390f..ee9b25cd6a6c0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/GetPolicyResponseTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.client.enrich; import org.elasticsearch.client.AbstractResponseTestCase; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -80,7 +79,7 @@ private static EnrichPolicy createRandomEnrichPolicy(XContentType xContentType){ try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { builder.startObject(); builder.endObject(); - BytesReference querySource = BytesArray.bytes(builder); + BytesReference querySource = BytesReference.bytes(builder); return new EnrichPolicy( randomAlphaOfLength(4), randomBoolean() ? 
new EnrichPolicy.QuerySource(querySource, xContentType) : null, diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index d0d5bef9cfcf4..4a0eab45fb6ee 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -55,14 +55,6 @@ static List<String> choose(final List<String> userDefinedJvmOptions) throws Inte final List<String> ergonomicChoices = new ArrayList<>(); final Map<String, Optional<String>> finalJvmOptions = finalJvmOptions(userDefinedJvmOptions); final long heapSize = extractHeapSize(finalJvmOptions); - final Map<String, String> systemProperties = extractSystemProperties(userDefinedJvmOptions); - if (systemProperties.containsKey("io.netty.allocator.type") == false) { - if (heapSize <= 1 << 30) { - ergonomicChoices.add("-Dio.netty.allocator.type=unpooled"); - } else { - ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); - } - } final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); if (maxDirectMemorySize == 0) { ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index 7fe5cd0cf98b0..ee049a57d8528 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -117,20 +117,6 @@ public void testExtractNoSystemProperties() { assertTrue(parsedSystemProperties.isEmpty()); } - public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException { - final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); - assertThat( - JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)), - hasItem("-Dio.netty.allocator.type=unpooled")); - } - - public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException { - final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); - assertThat( - JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)), - hasItem("-Dio.netty.allocator.type=pooled")); - } - public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { final Map<String, String> heapMaxDirectMemorySize = Map.of( "64M", Long.toString((64L << 20) / 2), diff --git a/docs/java-rest/high-level/document/index.asciidoc b/docs/java-rest/high-level/document/index.asciidoc index 9cdd8d3c557fa..5a201a02a88b5 100644 --- a/docs/java-rest/high-level/document/index.asciidoc +++ b/docs/java-rest/high-level/document/index.asciidoc @@ -85,7 +85,7 @@ include-tagged::{doc-tests-file}[{api}-request-version-type] include-tagged::{doc-tests-file}[{api}-request-op-type] -------------------------------------------------- <1> Operation type provided as a `DocWriteRequest.OpType` value -<2> Operation type provided as a `String`: can be `create` or `update` (default) +<2> Operation type provided as a `String`: can be `create` or `index` (default) ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc 
b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc index f0ebd40ff4112..3052e33cb5e65 100644 --- a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Deletes an existing enrich policy and its enrich index. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} PUT /_enrich/policy/my-policy { diff --git a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc index 2ddc4c8ffc951..4c41a31643654 100644 --- a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Returns information about an enrich policy. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} PUT /_enrich/policy/my-policy { diff --git a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc index 59054fd5aa6b8..df92ef265312c 100644 --- a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc @@ -12,6 +12,13 @@ Creates an enrich policy. [source,console] ---- PUT /users +{ + "mappings" : { + "properties" : { + "email" : { "type" : "keyword" } + } + } +} ---- //// diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index e85fe99944684..48526e4f66549 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -242,9 +242,31 @@ PUT _cluster/settings If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't include matching documents from that cluster in the final results. -[float] +[discrete] [[ccs-works]] == How {ccs} works + +include::./remote-clusters.asciidoc[tag=how-remote-clusters-work] + +[discrete] +[[ccs-gateway-seed-nodes]] +=== Selecting gateway and seed nodes + +Gateway and seed nodes need to be accessible from the local cluster via your +network. + +By default, any master-ineligible node can act as a gateway node. If desired, +you can define the gateway nodes for a cluster by setting +`cluster.remote.node.attr.gateway` to `true`. + +For {ccs}, we recommend you use gateway nodes that are capable of serving as +<> for search requests. If +desired, the seed nodes for a cluster can be a subset of these gateway nodes. + +[discrete] +[[ccs-network-delays]] +=== How {ccs} handles network delays + Because {ccs} involves sending requests to remote clusters, any network delays can impact search speed. To avoid slow searches, {ccs} offers two options for handling network delays: @@ -268,11 +290,9 @@ latency. + See <> to learn how this option works. - - [float] [[ccs-min-roundtrips]] -=== Minimize network roundtrips +==== Minimize network roundtrips Here's how {ccs} works when you minimize network roundtrips. @@ -297,7 +317,7 @@ image:images/ccs/ccs-min-roundtrip-client-response.png[] [float] [[ccs-unmin-roundtrips]] -=== Don't minimize network roundtrips +==== Don't minimize network roundtrips Here's how {ccs} works when you don't minimize network roundtrips. 
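The two request modes contrasted above are selectable per search via the `ccs_minimize_roundtrips` request parameter. A minimal illustration in the same style as the doc snippets above (the `cluster_two` alias and `my-index` index name are placeholders, not part of this change):

[source,console]
----
GET /cluster_two:my-index/_search?ccs_minimize_roundtrips=false
{
  "query": {
    "match_all": {}
  }
}
----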
diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 1ab58ac2bf54b..e7e820d71cf67 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -13,12 +13,16 @@ connections to a remote cluster. This functionality is used in <>. endif::[] +// tag::how-remote-clusters-work[] Remote cluster connections work by configuring a remote cluster and connecting only to a limited number of nodes in that remote cluster. Each remote cluster is referenced by a name and a list of seed nodes. When a remote cluster is registered, its cluster state is retrieved from one of the seed nodes and up to three _gateway nodes_ are selected to be connected to as part of remote -cluster requests. All the communication required between different clusters +cluster requests. +// end::how-remote-clusters-work[] + +All the communication required between different clusters goes through the <>. Remote cluster connections consist of uni-directional connections from the coordinating node to the selected remote _gateway nodes_ only. diff --git a/docs/reference/transform/limitations.asciidoc b/docs/reference/transform/limitations.asciidoc index 1d3d38c943691..463f66f24e3e5 100644 --- a/docs/reference/transform/limitations.asciidoc +++ b/docs/reference/transform/limitations.asciidoc @@ -33,6 +33,14 @@ upgrade from 7.2 to a newer version, and {transforms} have been created in 7.2, the {transforms} UI (earlier {dataframe} UI) will not work. Please wait until all nodes have been upgraded to the newer version before using the {transforms} UI. +[float] +[[transform-rolling-upgrade-limitation]] +==== {transforms-cap} reassignment suspended during a rolling upgrade from 7.2 or 7.3 + +If your cluster contains mixed version nodes, for example during a rolling +upgrade from 7.2 or 7.3 to a newer version, {transforms} whose nodes are stopped will +not be reassigned until the upgrade is complete. After the upgrade is done, {transforms} +resume automatically; no action is required. [float] [[transform-datatype-limitations]] @@ -181,9 +189,9 @@ for the {transform} checkpoint to complete. [float] [[transform-scheduling-limitations]] -==== {cdataframe-cap} scheduling limitations +==== {ctransform-cap} scheduling limitations -A {cdataframe} periodically checks for changes to source data. The functionality +A {ctransform} periodically checks for changes to source data. The functionality of the scheduler is currently limited to a basic periodic timer which can be within the `frequency` range from 1s to 1h. The default is 1m. This is designed to run little and often. When choosing a `frequency` for this timer consider @@ -206,7 +214,7 @@ When using the API to delete a failed {transform}, first stop it using [float] [[transform-availability-limitations]] -==== {cdataframes-cap} may give incorrect results if documents are not yet available to search +==== {ctransforms-cap} may give incorrect results if documents are not yet available to search After a document is indexed, there is a very small delay until it is available to search. 
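For context on the remote-clusters region tagged above: a remote cluster is registered through the cluster settings API before any {ccs} request can reference it. A minimal sketch (the `cluster_one` alias and seed address are placeholders, not part of this change):

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "cluster": {
      "remote": {
        "cluster_one": {
          "seeds": ["127.0.0.1:9300"]
        }
      }
    }
  }
}
----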
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 62e2d6aa2bf86..98cb23ae28515 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -66,7 +66,7 @@ integTestRunner { TaskProvider pooledTest = tasks.register("pooledTest", Test) { include '**/*Tests.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' - systemProperty 'io.netty.allocator.type', 'pooled' + systemProperty 'es.use_unpooled_allocator', 'false' } // TODO: we can't use task avoidance here because RestIntegTestTask does the testcluster creation RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTestTask) { @@ -75,7 +75,7 @@ RestIntegTestTask pooledIntegTest = tasks.create("pooledIntegTest", RestIntegTes } } testClusters.pooledIntegTest { - systemProperty 'io.netty.allocator.type', 'pooled' + systemProperty 'es.use_unpooled_allocator', 'false' } check.dependsOn(pooledTest, pooledIntegTest) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 6c1579bc28362..cefa589a103f7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -20,7 +20,6 @@ package org.elasticsearch.http.netty4; import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; @@ -32,7 +31,6 @@ import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; @@ -63,7 +61,7 @@ import org.elasticsearch.http.HttpServerChannel; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.CopyBytesServerSocketChannel; +import org.elasticsearch.transport.NettyAllocator; import org.elasticsearch.transport.netty4.Netty4Utils; import java.net.InetSocketAddress; @@ -186,14 +184,12 @@ protected void doStart() { serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX))); - // If direct buffer pooling is disabled, use the CopyBytesServerSocketChannel which will create child - // channels of type CopyBytesSocketChannel. CopyBytesSocketChannel pool a single direct buffer - // per-event-loop thread to be used for IO operations. 
- if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - serverBootstrap.channel(NioServerSocketChannel.class); - } else { - serverBootstrap.channel(CopyBytesServerSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configured allocator + serverBootstrap.channel(NettyAllocator.getServerChannelType()); + + // Set the allocators for both the server channel and the child channels created + serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); serverBootstrap.childHandler(configureServerChannelHandler()); serverBootstrap.handler(new ServerChannelExceptionHandler(this)); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java new file mode 100644 index 0000000000000..bfe0a92a9f2b8 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/NettyAllocator.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ServerChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.monitor.jvm.JvmInfo; + +public class NettyAllocator { + + private static final ByteBufAllocator ALLOCATOR; + + private static final String USE_UNPOOLED = "es.use_unpooled_allocator"; + private static final String USE_NETTY_DEFAULT = "es.unsafe.use_netty_default_allocator"; + + static { + if (Booleans.parseBoolean(System.getProperty(USE_NETTY_DEFAULT), false)) { + ALLOCATOR = ByteBufAllocator.DEFAULT; + } else { + ByteBufAllocator delegate; + if (useUnpooled()) { + delegate = new NoDirectBuffers(UnpooledByteBufAllocator.DEFAULT); + } else { + int nHeapArena = PooledByteBufAllocator.defaultNumHeapArena(); + int pageSize = PooledByteBufAllocator.defaultPageSize(); + int maxOrder = PooledByteBufAllocator.defaultMaxOrder(); + int tinyCacheSize = PooledByteBufAllocator.defaultTinyCacheSize(); + int smallCacheSize = PooledByteBufAllocator.defaultSmallCacheSize(); + int normalCacheSize = PooledByteBufAllocator.defaultNormalCacheSize(); + boolean useCacheForAllThreads = PooledByteBufAllocator.defaultUseCacheForAllThreads(); + delegate = new PooledByteBufAllocator(false, nHeapArena, 0, pageSize, maxOrder, tinyCacheSize, + smallCacheSize, normalCacheSize, useCacheForAllThreads); + } + ALLOCATOR = new NoDirectBuffers(delegate); + } + } + + public static boolean useCopySocket() { + return ALLOCATOR instanceof NoDirectBuffers; + } + + public static ByteBufAllocator getAllocator() { + return ALLOCATOR; + } + + public static Class<? extends Channel> getChannelType() { + if (ALLOCATOR instanceof NoDirectBuffers) { + return CopyBytesSocketChannel.class; + } else { + return NioSocketChannel.class; + } + } + + public static Class<? extends ServerChannel> getServerChannelType() { + if (ALLOCATOR instanceof NoDirectBuffers) { + return CopyBytesServerSocketChannel.class; + } else { + return NioServerSocketChannel.class; + } + } + + private static boolean useUnpooled() { + if (System.getProperty(USE_UNPOOLED) != null) { + return Booleans.parseBoolean(System.getProperty(USE_UNPOOLED)); + } else { + long heapSize = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes(); + return heapSize <= 1 << 30; + } + } + + private static class NoDirectBuffers implements ByteBufAllocator { + + private final ByteBufAllocator delegate; + + private NoDirectBuffers(ByteBufAllocator delegate) { + this.delegate = delegate; + } + + @Override + public ByteBuf buffer() { + return heapBuffer(); + } + + @Override + public ByteBuf buffer(int initialCapacity) { + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf buffer(int initialCapacity, int maxCapacity) { + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf ioBuffer() { + return heapBuffer(); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity) { + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) { + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf heapBuffer() { + return delegate.heapBuffer(); + } + + @Override + public ByteBuf heapBuffer(int 
initialCapacity) { + return delegate.heapBuffer(initialCapacity); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) { + return delegate.heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf directBuffer() { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(); + } + + @Override + public ByteBuf directBuffer(int initialCapacity) { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(initialCapacity); + } + + @Override + public ByteBuf directBuffer(int initialCapacity, int maxCapacity) { + // TODO: Currently the Netty SslHandler requests direct ByteBufs even when interacting with the + // JDK SSLEngine. This will be fixed in a future version of Netty. For now, return a heap + // ByteBuf. After a Netty upgrade, return to throwing UnsupportedOperationException + return heapBuffer(initialCapacity, maxCapacity); + } + + @Override + public CompositeByteBuf compositeBuffer() { + return compositeHeapBuffer(); + } + + @Override + public CompositeByteBuf compositeBuffer(int maxNumComponents) { + return compositeHeapBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeHeapBuffer() { + return delegate.compositeHeapBuffer(); + } + + @Override + public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) { + return delegate.compositeHeapBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeDirectBuffer() { + throw new UnsupportedOperationException("Direct buffers not supported."); + } + + @Override + public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) { + throw new UnsupportedOperationException("Direct buffers not supported."); + } + + @Override + public boolean isDirectBufferPooled() { + assert delegate.isDirectBufferPooled() == false; + return false; + } + + @Override + public int calculateNewCapacity(int minNewCapacity, int maxCapacity) { + return delegate.calculateNewCapacity(minNewCapacity, maxCapacity); + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java index d8af523bf17df..3f7ff0d6e2a0f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ByteBufBytesReference.java @@ -20,6 +20,7 @@ import io.netty.buffer.ByteBuf; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,7 +28,7 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; -final class ByteBufBytesReference extends BytesReference { +final class ByteBufBytesReference extends AbstractBytesReference { private final ByteBuf buffer; private final int length; diff --git 
a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index d3e43e16dd5f4..93f41285c5f3d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -21,7 +21,6 @@ import io.netty.bootstrap.Bootstrap; import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBufAllocator; import io.netty.channel.AdaptiveRecvByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -34,8 +33,6 @@ import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; import org.apache.logging.log4j.LogManager; @@ -59,8 +56,7 @@ import org.elasticsearch.core.internal.net.NetUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.CopyBytesServerSocketChannel; -import org.elasticsearch.transport.CopyBytesSocketChannel; +import org.elasticsearch.transport.NettyAllocator; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportSettings; @@ -152,13 +148,9 @@ private Bootstrap createClientBootstrap(NioEventLoopGroup eventLoopGroup) { final Bootstrap bootstrap = new Bootstrap(); bootstrap.group(eventLoopGroup); - // If direct buffer pooling is disabled, use the CopyBytesSocketChannel which will pool a single - // direct buffer per-event-loop thread which will be used for IO operations. - if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - bootstrap.channel(NioSocketChannel.class); - } else { - bootstrap.channel(CopyBytesSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configured allocator + bootstrap.channel(NettyAllocator.getChannelType()); + bootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); bootstrap.option(ChannelOption.TCP_NODELAY, TransportSettings.TCP_NO_DELAY.get(settings)); bootstrap.option(ChannelOption.SO_KEEPALIVE, TransportSettings.TCP_KEEP_ALIVE.get(settings)); @@ -216,14 +208,12 @@ private void createServerBootstrap(ProfileSettings profileSettings, NioEventLoop serverBootstrap.group(eventLoopGroup); - // If direct buffer pooling is disabled, use the CopyBytesServerSocketChannel which will create child - // channels of type CopyBytesSocketChannel. CopyBytesSocketChannel pool a single direct buffer - // per-event-loop thread to be used for IO operations. 
- if (ByteBufAllocator.DEFAULT.isDirectBufferPooled()) { - serverBootstrap.channel(NioServerSocketChannel.class); - } else { - serverBootstrap.channel(CopyBytesServerSocketChannel.class); - } + // NettyAllocator will return the channel type designed to work with the configured allocator + serverBootstrap.channel(NettyAllocator.getServerChannelType()); + + // Set the allocators for both the server channel and the child channels created + serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); serverBootstrap.childHandler(getServerChannelInitializer(name)); serverBootstrap.handler(new ServerChannelExceptionHandler()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java index 9d8baf9e3f871..d4a6706153a9b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch; +import io.netty.buffer.PooledByteBufAllocator; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -25,8 +26,8 @@ import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.transport.netty4.Netty4Transport; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; public abstract class ESNetty4IntegTestCase extends ESIntegTestCase { @@ -54,6 +55,13 @@ protected Settings nodeSettings(int nodeOrdinal) { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(Netty4Plugin.class); + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory()); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory()); + } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index a595de3a47ed9..558e833c74bed 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -25,10 +25,10 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -45,6 +45,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.NettyAllocator; import java.io.Closeable; import java.net.SocketAddress; @@ -84,7 +85,10 @@ static Collection<String> returnOpaqueIds(Collection<FullHttpResponse> responses private final Bootstrap clientBootstrap; Netty4HttpClient() { - clientBootstrap = new 
Bootstrap().channel(NioSocketChannel.class).group(new NioEventLoopGroup()); + clientBootstrap = new Bootstrap() + .channel(NettyAllocator.getChannelType()) + .option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .group(new NioEventLoopGroup(1)); } public Collection get(SocketAddress remoteAddress, String... uris) throws InterruptedException { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 0b210995795bf..884cf46a1688f 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.netty4; +import io.netty.buffer.PooledByteBufAllocator; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -45,6 +46,13 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase { + @Override + public void tearDown() throws Exception { + super.tearDown(); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory()); + assertEquals(0, PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory()); + } + @Override protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); @@ -73,5 +81,4 @@ public void testConnectException() throws UnknownHostException { assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); } } - } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index a43a96a438d03..28993bd475a06 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -265,10 +265,4 @@ protected String requestUniqueId(final HttpExchange exchange) { + (range != null ? 
" " + range : ""); } } - - @Override - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47948") - public void testIndicesDeletedFromRepository() throws Exception { - - } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java index f805773c46c0c..78bc8f3728209 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java @@ -23,6 +23,7 @@ import io.netty.buffer.Unpooled; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -72,7 +73,7 @@ static BytesReference toBytesReference(final ByteBuf buffer) { return new ByteBufBytesReference(buffer, buffer.readableBytes()); } - private static class ByteBufBytesReference extends BytesReference { + private static class ByteBufBytesReference extends AbstractBytesReference { private final ByteBuf buffer; private final int length; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java new file mode 100644 index 0000000000000..6bfeb023bf3aa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReference.java @@ -0,0 +1,264 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.function.ToIntBiFunction; + +public abstract class AbstractBytesReference implements BytesReference { + + private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculate it + + @Override + public int getInt(int index) { + return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; + } + + @Override + public int indexOf(byte marker, int from) { + final int to = length(); + for (int i = from; i < to; i++) { + if (get(i) == marker) { + return i; + } + } + return -1; + } + + @Override + public StreamInput streamInput() throws IOException { + return new MarkSupportingStreamInputWrapper(this); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + while ((ref = iterator.next()) != null) { + os.write(ref.bytes, ref.offset, ref.length); + } + } + + @Override + public String utf8ToString() { + return toBytesRef().utf8ToString(); + } + + @Override + public BytesRefIterator iterator() { + return new BytesRefIterator() { + BytesRef ref = length() == 0 ? null : toBytesRef(); + @Override + public BytesRef next() throws IOException { + BytesRef r = ref; + ref = null; // only return it once... + return r; + } + }; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof BytesReference) { + final BytesReference otherRef = (BytesReference) other; + if (length() != otherRef.length()) { + return false; + } + return compareIterators(this, otherRef, (a, b) -> + a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison + ) == 0; + } + return false; + } + + @Override + public int hashCode() { + if (hash == null) { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + int result = 1; + try { + while ((ref = iterator.next()) != null) { + for (int i = 0; i < ref.length; i++) { + result = 31 * result + ref.bytes[ref.offset + i]; + } + } + } catch (IOException ex) { + throw new AssertionError("won't happen", ex); + } + return hash = result; + } else { + return hash.intValue(); + } + } + + @Override + public int compareTo(final BytesReference other) { + return compareIterators(this, other, BytesRef::compareTo); + } + + /** + * Compares the two references using the given int function. + */ + private static int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction<BytesRef, BytesRef> f) { + try { + // we use the iterators since it's a 0-copy comparison where possible! + final long lengthToCompare = Math.min(a.length(), b.length()); + final BytesRefIterator aIter = a.iterator(); + final BytesRefIterator bIter = b.iterator(); + BytesRef aRef = aIter.next(); + BytesRef bRef = bIter.next(); + if (aRef != null && bRef != null) { // do we have any data? + aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below + bRef = bRef.clone(); + if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing? 
+ return f.applyAsInt(aRef, bRef); + } else { + for (int i = 0; i < lengthToCompare;) { + if (aRef.length == 0) { + aRef = aIter.next().clone(); // must be non null otherwise we have a bug + } + if (bRef.length == 0) { + bRef = bIter.next().clone(); // must be non null otherwise we have a bug + } + final int aLength = aRef.length; + final int bLength = bRef.length; + final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene + aRef.length = bRef.length = length; + // now we move to the fast comparison - this is the hot part of the loop + int diff = f.applyAsInt(aRef, bRef); + aRef.length = aLength; + bRef.length = bLength; + + if (diff != 0) { + return diff; + } + advance(aRef, length); + advance(bRef, length); + i += length; + } + } + } + // One is a prefix of the other, or, they are equal: + return a.length() - b.length(); + } catch (IOException ex) { + throw new AssertionError("cannot happen", ex); + } + } + + private static void advance(final BytesRef ref, final int length) { + assert ref.length >= length : " ref.length: " + ref.length + " length: " + length; + assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0) + : "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length; + ref.length -= length; + ref.offset += length; + } + + /** + * Instead of adding the complexity of {@link InputStream#reset()} etc to the actual impl + * this wrapper builds it on top of the BytesReferenceStreamInput which is much simpler + * that way. + */ + private static final class MarkSupportingStreamInputWrapper extends StreamInput { + // can't use FilterStreamInput it needs to reset the delegate + private final BytesReference reference; + private BytesReferenceStreamInput input; + private int mark = 0; + + private MarkSupportingStreamInputWrapper(BytesReference reference) throws IOException { + this.reference = reference; + this.input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + } + + @Override + public byte readByte() throws IOException { + return input.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + input.readBytes(b, offset, len); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return input.read(b, off, len); + } + + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public int read() throws IOException { + return input.read(); + } + + @Override + public int available() throws IOException { + return input.available(); + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + input.ensureCanReadBytes(length); + } + + @Override + public void reset() throws IOException { + input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + input.skip(mark); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public void mark(int readLimit) { + // readLimit is optional; it only guarantees that the stream remembers data up to this limit, but it can remember more, + // which we do in our case + this.mark = input.getOffset(); + } + + @Override + public long skip(long n) throws IOException { + return input.skip(n); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + BytesRef bytes = toBytesRef(); + return 
builder.value(bytes.bytes, bytes.offset, bytes.length); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java index d696f060802f3..07353ea67eced 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java @@ -31,7 +31,7 @@ * changed, those changes will not be reflected in this reference. Any changes to the underlying data in the * byte buffer will be reflected in this reference. */ -public class ByteBufferReference extends BytesReference { +public class ByteBufferReference extends AbstractBytesReference { private final ByteBuffer buffer; private final int length; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 9761ad0c42c67..b54617dec1248 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -23,7 +23,7 @@ import java.util.Objects; -public final class BytesArray extends BytesReference { +public final class BytesArray extends AbstractBytesReference { public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0); private final byte[] bytes; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index 2c4867cbdfed9..8e4ecd2d3d3ca 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; @@ -26,26 +27,22 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.ByteArrayOutputStream; -import java.io.EOFException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.function.ToIntBiFunction; + /** * A reference to bytes. */ -public abstract class BytesReference implements Comparable<BytesReference>, ToXContentFragment { - - private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it +public interface BytesReference extends Comparable<BytesReference>, ToXContentFragment { /** * Convert an {@link XContentBuilder} into a BytesReference. This method closes the builder, * so no further fields may be added. */ - public static BytesReference bytes(XContentBuilder xContentBuilder) { + static BytesReference bytes(XContentBuilder xContentBuilder) { xContentBuilder.close(); OutputStream stream = xContentBuilder.getOutputStream(); if (stream instanceof ByteArrayOutputStream) { @@ -55,139 +52,11 @@ public static BytesReference bytes(XContentBuilder xContentBuilder) { } } - /** - * Returns the byte at the specified index. Need to be between 0 and length. - */ - public abstract byte get(int index); - - /** - * Returns the integer read from the 4 bytes (BE) starting at the given index. 
- */ - public int getInt(int index) { - return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; - } - - /** - * Finds the index of the first occurrence of the given marker between within the given bounds. - * @param marker marker byte to search - * @param from lower bound for the index to check (inclusive) - * @return first index of the marker or {@code -1} if not found - */ - public int indexOf(byte marker, int from) { - final int to = length(); - for (int i = from; i < to; i++) { - if (get(i) == marker) { - return i; - } - } - return -1; - } - - /** - * The length. - */ - public abstract int length(); - - /** - * Slice the bytes from the {@code from} index up to {@code length}. - */ - public abstract BytesReference slice(int from, int length); - - /** - * The amount of memory used by this BytesReference - */ - public abstract long ramBytesUsed(); - - /** - * A stream input of the bytes. - */ - public StreamInput streamInput() throws IOException { - return new MarkSupportingStreamInputWrapper(this); - } - - /** - * Writes the bytes directly to the output stream. - */ - public void writeTo(OutputStream os) throws IOException { - final BytesRefIterator iterator = iterator(); - BytesRef ref; - while ((ref = iterator.next()) != null) { - os.write(ref.bytes, ref.offset, ref.length); - } - } - - /** - * Interprets the referenced bytes as UTF8 bytes, returning the resulting string - */ - public String utf8ToString() { - return toBytesRef().utf8ToString(); - } - - /** - * Converts to Lucene BytesRef. - */ - public abstract BytesRef toBytesRef(); - - /** - * Returns a BytesRefIterator for this BytesReference. This method allows - * access to the internal pages of this reference without copying them. Use with care! - * @see BytesRefIterator - */ - public BytesRefIterator iterator() { - return new BytesRefIterator() { - BytesRef ref = length() == 0 ? null : toBytesRef(); - @Override - public BytesRef next() throws IOException { - BytesRef r = ref; - ref = null; // only return it once... - return r; - } - }; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other instanceof BytesReference) { - final BytesReference otherRef = (BytesReference) other; - if (length() != otherRef.length()) { - return false; - } - return compareIterators(this, otherRef, (a, b) -> - a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison - ) == 0; - } - return false; - } - - @Override - public int hashCode() { - if (hash == null) { - final BytesRefIterator iterator = iterator(); - BytesRef ref; - int result = 1; - try { - while ((ref = iterator.next()) != null) { - for (int i = 0; i < ref.length; i++) { - result = 31 * result + ref.bytes[ref.offset + i]; - } - } - } catch (IOException ex) { - throw new AssertionError("wont happen", ex); - } - return hash = result; - } else { - return hash.intValue(); - } - } - /** * Returns a compact array from the given BytesReference. The returned array won't be copied unless necessary. 
If you need * to modify the returned array use {@code BytesRef.deepCopyOf(reference.toBytesRef())} instead */ - public static byte[] toBytes(BytesReference reference) { + static byte[] toBytes(BytesReference reference) { final BytesRef bytesRef = reference.toBytesRef(); if (bytesRef.offset == 0 && bytesRef.length == bytesRef.bytes.length) { return bytesRef.bytes; @@ -198,7 +67,7 @@ public static byte[] toBytes(BytesReference reference) { /** * Returns an array of byte buffers from the given BytesReference. */ - public static ByteBuffer[] toByteBuffers(BytesReference reference) { + static ByteBuffer[] toByteBuffers(BytesReference reference) { BytesRefIterator byteRefIterator = reference.iterator(); BytesRef r; try { @@ -217,7 +86,7 @@ public static ByteBuffer[] toByteBuffers(BytesReference reference) { /** * Returns BytesReference composed of the provided ByteBuffers. */ - public static BytesReference fromByteBuffers(ByteBuffer[] buffers) { + static BytesReference fromByteBuffers(ByteBuffer[] buffers) { int bufferCount = buffers.length; if (bufferCount == 0) { return BytesArray.EMPTY; @@ -233,146 +102,63 @@ public static BytesReference fromByteBuffers(ByteBuffer[] buffers) { } } - @Override - public int compareTo(final BytesReference other) { - return compareIterators(this, other, BytesRef::compareTo); - } - /** - * Compares the two references using the given int function. + * Returns the byte at the specified index. Needs to be between 0 and length. */ - private static int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction<BytesRef, BytesRef> f) { - try { - // we use the iterators since it's a 0-copy comparison where possible! - final long lengthToCompare = Math.min(a.length(), b.length()); - final BytesRefIterator aIter = a.iterator(); - final BytesRefIterator bIter = b.iterator(); - BytesRef aRef = aIter.next(); - BytesRef bRef = bIter.next(); - if (aRef != null && bRef != null) { // do we have any data? - aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below - bRef = bRef.clone(); - if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing? 
- return f.applyAsInt(aRef, bRef); - } else { - for (int i = 0; i < lengthToCompare;) { - if (aRef.length == 0) { - aRef = aIter.next().clone(); // must be non null otherwise we have a bug - } - if (bRef.length == 0) { - bRef = bIter.next().clone(); // must be non null otherwise we have a bug - } - final int aLength = aRef.length; - final int bLength = bRef.length; - final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene - aRef.length = bRef.length = length; - // now we move to the fast comparison - this is the hot part of the loop - int diff = f.applyAsInt(aRef, bRef); - aRef.length = aLength; - bRef.length = bLength; - - if (diff != 0) { - return diff; - } - advance(aRef, length); - advance(bRef, length); - i += length; - } - } - } - // One is a prefix of the other, or, they are equal: - return a.length() - b.length(); - } catch (IOException ex) { - throw new AssertionError("can not happen", ex); - } - } - - private static void advance(final BytesRef ref, final int length) { - assert ref.length >= length : " ref.length: " + ref.length + " length: " + length; - assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0) - : "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length; - ref.length -= length; - ref.offset += length; - } + byte get(int index); /** - * Instead of adding the complexity of {@link InputStream#reset()} etc to the actual impl - * this wrapper builds it on top of the BytesReferenceStreamInput which is much simpler - * that way. + * Returns the integer read from the 4 bytes (BE) starting at the given index. */ - private static final class MarkSupportingStreamInputWrapper extends StreamInput { - // can't use FilterStreamInput it needs to reset the delegate - private final BytesReference reference; - private BytesReferenceStreamInput input; - private int mark = 0; + int getInt(int index); - private MarkSupportingStreamInputWrapper(BytesReference reference) throws IOException { - this.reference = reference; - this.input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); - } - - @Override - public byte readByte() throws IOException { - return input.readByte(); - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - input.readBytes(b, offset, len); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return input.read(b, off, len); - } - - @Override - public void close() throws IOException { - input.close(); - } + /** + * Finds the index of the first occurrence of the given marker between within the given bounds. + * @param marker marker byte to search + * @param from lower bound for the index to check (inclusive) + * @return first index of the marker or {@code -1} if not found + */ + int indexOf(byte marker, int from); - @Override - public int read() throws IOException { - return input.read(); - } + /** + * The length. + */ + int length(); - @Override - public int available() throws IOException { - return input.available(); - } + /** + * Slice the bytes from the {@code from} index up to {@code length}. 
+ */ + BytesReference slice(int from, int length); - @Override - protected void ensureCanReadBytes(int length) throws EOFException { - input.ensureCanReadBytes(length); - } + /** + * The amount of memory used by this BytesReference + */ + long ramBytesUsed(); - @Override - public void reset() throws IOException { - input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); - input.skip(mark); - } + /** + * A stream input of the bytes. + */ + StreamInput streamInput() throws IOException; - @Override - public boolean markSupported() { - return true; - } + /** + * Writes the bytes directly to the output stream. + */ + void writeTo(OutputStream os) throws IOException; - @Override - public void mark(int readLimit) { - // readLimit is optional it only guarantees that the stream remembers data upto this limit but it can remember more - // which we do in our case - this.mark = input.getOffset(); - } + /** + * Interprets the referenced bytes as UTF8 bytes, returning the resulting string + */ + String utf8ToString(); - @Override - public long skip(long n) throws IOException { - return input.skip(n); - } - } + /** + * Converts to Lucene BytesRef. + */ + BytesRef toBytesRef(); - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - BytesRef bytes = toBytesRef(); - return builder.value(bytes.bytes, bytes.offset, bytes.length); - } + /** + * Returns a BytesRefIterator for this BytesReference. This method allows + * access to the internal pages of this reference without copying them. Use with care! + * @see BytesRefIterator + */ + BytesRefIterator iterator(); } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java index 4845102b89bcd..84a2b06eb801f 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java @@ -34,7 +34,7 @@ * * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. */ -public final class CompositeBytesReference extends BytesReference { +public final class CompositeBytesReference extends AbstractBytesReference { private final BytesReference[] references; private final int[] offsets; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 9f2619cd1aa72..cfb13608ac63e 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -31,7 +31,7 @@ * A page based bytes reference, internally holding the bytes in a paged * data structure. */ -public class PagedBytesReference extends BytesReference { +public class PagedBytesReference extends AbstractBytesReference { private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java new file mode 100644 index 0000000000000..ad23f8156bf58 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * An extension to {@link BytesReference} that requires releasing its content. This + * class exists to make it explicit when a bytes reference needs to be released, and when not. + */ +public final class ReleasableBytesReference implements Releasable, BytesReference { + + private final BytesReference delegate; + private final Releasable releasable; + + public ReleasableBytesReference(BytesReference delegate, Releasable releasable) { + this.delegate = delegate; + this.releasable = releasable; + } + + @Override + public void close() { + Releasables.close(releasable); + } + + @Override + public byte get(int index) { + return delegate.get(index); + } + + @Override + public int getInt(int index) { + return delegate.getInt(index); + } + + @Override + public int indexOf(byte marker, int from) { + return delegate.indexOf(marker, from); + } + + @Override + public int length() { + return delegate.length(); + } + + @Override + public BytesReference slice(int from, int length) { + return delegate.slice(from, length); + } + + @Override + public long ramBytesUsed() { + return delegate.ramBytesUsed(); + } + + @Override + public StreamInput streamInput() throws IOException { + return delegate.streamInput(); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + delegate.writeTo(os); + } + + @Override + public String utf8ToString() { + return delegate.utf8ToString(); + } + + @Override + public BytesRef toBytesRef() { + return delegate.toBytesRef(); + } + + @Override + public BytesRefIterator iterator() { + return delegate.iterator(); + } + + @Override + public int compareTo(BytesReference o) { + return delegate.compareTo(o); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return delegate.toXContent(builder, params); + } + + @Override + public boolean isFragment() { + return delegate.isFragment(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java deleted file mode 100644 index 209a6edc5696a..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java 
+++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.bytes; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.ByteArray; - -/** - * An extension to {@link PagedBytesReference} that requires releasing its content. This - * class exists to make it explicit when a bytes reference needs to be released, and when not. - */ -public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable { - - private final Releasable releasable; - - public ReleasablePagedBytesReference(ByteArray byteArray, int length, Releasable releasable) { - super(byteArray, length); - this.releasable = releasable; - } - - @Override - public void close() { - Releasables.close(releasable); - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java index 725ecd1c3cc4f..64a91e9cdd891 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java @@ -19,7 +19,8 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -31,7 +32,7 @@ * expecting it to require releasing its content ({@link #bytes()}) once done. *

* Please note, closing this stream will release the bytes that are in use by any - * {@link ReleasablePagedBytesReference} returned from {@link #bytes()}, so this + * {@link ReleasableBytesReference} returned from {@link #bytes()}, so this * stream should only be closed after the bytes have been output or copied * elsewhere. */ @@ -55,8 +56,8 @@ public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) { * the bytes in the stream. */ @Override - public ReleasablePagedBytesReference bytes() { - return new ReleasablePagedBytesReference(bytes, count, releasable); + public ReleasableBytesReference bytes() { + return new ReleasableBytesReference(new PagedBytesReference(bytes, count), releasable); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index e4d977c1ebb21..e38880797785b 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -535,7 +535,7 @@ public Location add(final Operation operation) throws IOException { out.seek(start); out.writeInt(operationSize); out.seek(end); - final ReleasablePagedBytesReference bytes = out.bytes(); + final ReleasableBytesReference bytes = out.bytes(); try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (operation.primaryTerm() > current.getPrimaryTerm()) { @@ -1593,7 +1593,7 @@ public static void writeOperations(StreamOutput outStream, List toWri out.seek(start); out.writeInt(operationSize); out.seek(end); - ReleasablePagedBytesReference bytes = out.bytes(); + ReleasableBytesReference bytes = out.bytes(); bytes.writeTo(outStream); } } finally { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java index 09366a38a9957..fedf601961476 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileTransfer.java @@ -57,7 +57,7 @@ * one of the networking threads which receive/handle the responses of the current pending file chunk requests. This process will continue * until all chunk requests are sent/responded. 
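 *
 * For orientation, a subclass is expected to look roughly like the sketch below (modeled
 * on the CCR restore implementation later in this change; {@code chunkSizeInBytes} is a
 * stand-in value and the required {@code handleError} override is omitted for brevity):
 * <pre>{@code
 * class FileChunkTransfer extends MultiFileTransfer<FileChunk> {
 *     private long offset = 0;
 *
 *     protected void onNewFile(StoreFileMetaData md) {
 *         offset = 0; // chunking restarts at the beginning of each file
 *     }
 *
 *     protected FileChunk nextChunkRequest(StoreFileMetaData md) {
 *         final int size = Math.toIntExact(Math.min(chunkSizeInBytes, md.length() - offset));
 *         offset += size;
 *         return new FileChunk(md, size, offset == md.length()); // last chunk once fully read
 *     }
 *
 *     protected void executeChunkRequest(FileChunk request, ActionListener<Void> listener) {
 *         // send or write the chunk and complete the listener once it has been processed
 *     }
 * }
 * }</pre>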
*/ -abstract class MultiFileTransfer implements Closeable { +public abstract class MultiFileTransfer implements Closeable { private Status status = Status.PROCESSING; private final Logger logger; private final ActionListener listener; @@ -121,7 +121,7 @@ private void handleItems(List>> return; } final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - sendChunkRequest(request.v2(), ActionListener.wrap( + executeChunkRequest(request.v2(), ActionListener.wrap( r -> addItem(requestSeqId, request.v1(), null), e -> addItem(requestSeqId, request.v1(), e))); } @@ -179,7 +179,7 @@ private Tuple getNextRequest() throws Exception { protected abstract Request nextChunkRequest(StoreFileMetaData md) throws IOException; - protected abstract void sendChunkRequest(Request request, ActionListener listener); + protected abstract void executeChunkRequest(Request request, ActionListener listener); protected abstract void handleError(StoreFileMetaData md, Exception e) throws Exception; @@ -195,7 +195,7 @@ private static class FileChunkResponseItem { } } - protected interface ChunkRequest { + public interface ChunkRequest { /** * @return {@code true} if this chunk request is the last chunk of the current file */ diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java index 87a6d18671a6f..b6a5d2c908842 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java @@ -27,9 +27,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.transport.Transports; import java.io.IOException; import java.util.Arrays; @@ -39,10 +41,12 @@ import java.util.Map; import java.util.PriorityQueue; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; -public class MultiFileWriter implements Releasable { +public class MultiFileWriter extends AbstractRefCounted implements Releasable { public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempFilePrefix, Logger logger, Runnable ensureOpen) { + super("multi_file_writer"); this.store = store; this.indexState = indexState; this.tempFilePrefix = tempFilePrefix; @@ -51,6 +55,7 @@ public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempF } private final Runnable ensureOpen; + private final AtomicBoolean closed = new AtomicBoolean(false); private final Logger logger; private final Store store; private final RecoveryState.Index indexState; @@ -64,6 +69,7 @@ public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempF public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean lastChunk) throws IOException { + assert Transports.assertNotTransportThread("multi_file_writer"); final FileChunkWriter writer = fileChunkWriters.computeIfAbsent(fileMetaData.name(), name -> new FileChunkWriter()); writer.writeChunk(new FileChunk(fileMetaData, content, position, lastChunk)); } @@ -138,6 +144,13 @@ private void innerWriteFileChunk(StoreFileMetaData fileMetaData, long position, 
@Override public void close() { + if (closed.compareAndSet(false, true)) { + decRef(); + } + } + + @Override + protected void closeInternal() { fileChunkWriters.clear(); // clean open index outputs Iterator> iterator = openIndexOutputs.entrySet().iterator(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index c14fbca308910..1d45d048c9ba4 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -890,7 +890,7 @@ protected FileChunk nextChunkRequest(StoreFileMetaData md) throws IOException { } @Override - protected void sendChunkRequest(FileChunk request, ActionListener listener) { + protected void executeChunkRequest(FileChunk request, ActionListener listener) { cancellableThreads.checkForCancel(); recoveryTarget.writeFileChunk( request.md, request.position, request.content, request.lastChunk, translogOps.getAsInt(), listener); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 82af167a04f6d..2ceca4da5f4e3 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -108,8 +108,10 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -957,11 +959,9 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { final ShardId shardId = store.shardId(); final long startTime = threadPool.absoluteTimeInMillis(); - final StepListener snapshotDoneListener = new StepListener<>(); - snapshotDoneListener.whenComplete(listener::onResponse, e -> { + final ActionListener snapshotDoneListener = ActionListener.wrap(listener::onResponse, e -> { snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.stackTrace(e)); - listener.onFailure(e instanceof IndexShardSnapshotFailedException ? (IndexShardSnapshotFailedException) e - : new IndexShardSnapshotFailedException(store.shardId(), e)); + listener.onFailure(e instanceof IndexShardSnapshotFailedException ? 
e : new IndexShardSnapshotFailedException(shardId, e)); }); try { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); @@ -984,7 +984,7 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s } final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>(); - ArrayList<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new ArrayList<>(); + final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>(); store.incRef(); final Collection<String> fileNames; final Store.MetadataSnapshot metadataFromStore; @@ -1103,42 +1103,29 @@ public void snapshotShard(Store store, MapperService mapperService, SnapshotId s allFilesUploadedListener.onResponse(Collections.emptyList()); return; } - final GroupedActionListener<Void> filesListener = - new GroupedActionListener<>(allFilesUploadedListener, indexIncrementalFileCount); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - // Flag to signal that the snapshot has been aborted/failed so we can stop any further blob uploads from starting - final AtomicBoolean alreadyFailed = new AtomicBoolean(); - for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { - executor.execute(new ActionRunnable<>(filesListener) { - @Override - protected void doRun() { + // Start at most as many workers as fit into the snapshot pool at once + final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), indexIncrementalFileCount); + final ActionListener<Void> filesListener = ActionListener.delegateResponse( + new GroupedActionListener<>(allFilesUploadedListener, workers), (l, e) -> { + filesToSnapshot.clear(); // Stop uploading the remaining files if we run into any exception + l.onFailure(e); + }); + for (int i = 0; i < workers; ++i) { + executor.execute(ActionRunnable.run(filesListener, () -> { + BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS); + if (snapshotFileInfo != null) { + store.incRef(); try { - if (alreadyFailed.get() == false) { - if (store.tryIncRef()) { - try { - snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); - } finally { - store.decRef(); - } - } else if (snapshotStatus.isAborted()) { - throw new IndexShardSnapshotFailedException(shardId, "Aborted"); - } else { - assert false : "Store was closed before aborting the snapshot"; - throw new IllegalStateException("Store is closed already"); - } - } - filesListener.onResponse(null); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); + do { + snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); + snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS); + } while (snapshotFileInfo != null); + } finally { + store.decRef(); } } - - @Override - public void onFailure(Exception e) { - alreadyFailed.set(true); - super.onFailure(e); - } - }); + })); } } catch (Exception e) { snapshotDoneListener.onFailure(e); diff --git a/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java b/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java index 40e02c560a9c0..64f960040aab7 100644 --- a/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java +++ b/server/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import 
org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.util.ByteArray; import org.hamcrest.Matchers; @@ -35,13 +34,12 @@ protected BytesReference newBytesReference(int length) throws IOException { @Override protected BytesReference newBytesReferenceWithOffsetOfZero(int length) throws IOException { - // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content - ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); + ByteArray byteArray = bigarrays.newByteArray(length); for (int i = 0; i < length; i++) { - out.writeByte((byte) random().nextInt(1 << 8)); + byteArray.set(i, (byte) random().nextInt(1 << 8)); } - assertThat(out.size(), Matchers.equalTo(length)); - BytesReference ref = out.bytes(); + assertThat(byteArray.size(), Matchers.equalTo((long) length)); + BytesReference ref = new PagedBytesReference(byteArray, length); assertThat(ref.length(), Matchers.equalTo(length)); assertThat(ref, Matchers.instanceOf(PagedBytesReference.class)); return ref; diff --git a/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java new file mode 100644 index 0000000000000..58818a83cca9a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.bytes; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.util.ByteArray; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class ReleasableBytesReferenceTests extends AbstractBytesReferenceTestCase { + + @Override + protected BytesReference newBytesReference(int length) throws IOException { + return newBytesReferenceWithOffsetOfZero(length); + } + + @Override + protected BytesReference newBytesReferenceWithOffsetOfZero(int length) throws IOException { + BytesReference delegate; + String composite = "composite"; + String paged = "paged"; + String array = "array"; + String type = randomFrom(composite, paged, array); + if (array.equals(type)) { + final BytesStreamOutput out = new BytesStreamOutput(length); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertThat(length, equalTo(out.size())); + BytesArray ref = new BytesArray(out.bytes().toBytesRef().bytes, 0, length); + assertThat(length, equalTo(ref.length())); + assertThat(ref.length(), Matchers.equalTo(length)); + delegate = ref; + } else if (paged.equals(type)) { + ByteArray byteArray = bigarrays.newByteArray(length); + for (int i = 0; i < length; i++) { + byteArray.set(i, (byte) random().nextInt(1 << 8)); + } + assertThat(byteArray.size(), Matchers.equalTo((long) length)); + BytesReference ref = new PagedBytesReference(byteArray, length); + assertThat(ref.length(), Matchers.equalTo(length)); + delegate = ref; + } else { + assert composite.equals(type); + List referenceList = new ArrayList<>(); + for (int i = 0; i < length; ) { + int remaining = length - i; + int sliceLength = randomIntBetween(1, remaining); + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(sliceLength, bigarrays); + for (int j = 0; j < sliceLength; j++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertThat(sliceLength, equalTo(out.size())); + referenceList.add(out.bytes()); + i += sliceLength; + } + BytesReference ref = new CompositeBytesReference(referenceList.toArray(new BytesReference[0])); + assertThat(length, equalTo(ref.length())); + delegate = ref; + } + return new ReleasableBytesReference(delegate, () -> { + }); + } + + @Override + public void testToBytesRefSharedPage() throws IOException { + // CompositeBytesReference doesn't share pages + } + + @Override + public void testSliceArrayOffset() throws IOException { + // the assertions in this test only work on no-composite buffers + } + + @Override + public void testSliceToBytesRef() throws IOException { + // CompositeBytesReference shifts offsets + } +} diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index 3bfc649820fee..85670e893b970 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import 
org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -331,7 +332,7 @@ public RestRequest.Method method() { // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released final BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); final ByteArray byteArray = bigArrays.newByteArray(0, false); - final BytesReference content = new ReleasablePagedBytesReference(byteArray, 0 , byteArray); + final BytesReference content = new ReleasableBytesReference(new PagedBytesReference(byteArray, 0) , byteArray); channel.sendResponse(new TestRestResponse(RestStatus.METHOD_NOT_ALLOWED, content)); Class> listenerClass = (Class>) (Class) ActionListener.class; @@ -368,7 +369,7 @@ public HttpResponse createResponse(RestStatus status, BytesReference content) { // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released final BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); final ByteArray byteArray = bigArrays.newByteArray(0, false); - final BytesReference content = new ReleasablePagedBytesReference(byteArray, 0 , byteArray); + final BytesReference content = new ReleasableBytesReference(new PagedBytesReference(byteArray, 0) , byteArray); expectThrows(IllegalArgumentException.class, () -> channel.sendResponse(new TestRestResponse(RestStatus.OK, content))); diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 9141481b88219..dbff49460015b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -104,12 +104,12 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { () -> IndexingSlowLogMessage.of(index, doc, 10, true, 3)); assertThat(e, hasToString(containsString("_failed_to_convert_[Unrecognized token 'invalid':" + " was expecting ('true', 'false' or 'null')\\n" - + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper"))); + + " at [Source: org.elasticsearch.common.bytes.AbstractBytesReference$MarkSupportingStreamInputWrapper"))); assertNotNull(e.getCause()); assertThat(e.getCause(), instanceOf(JsonParseException.class)); assertThat(e.getCause(), hasToString(containsString("Unrecognized token 'invalid':" + " was expecting ('true', 'false' or 'null')\n" - + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper"))); + + " at [Source: org.elasticsearch.common.bytes.AbstractBytesReference$MarkSupportingStreamInputWrapper"))); } public void testReformatSetting() { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index e27aefdf13fff..6575503341c4e 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import 
org.elasticsearch.common.bytes.AbstractBytesReference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -455,7 +456,7 @@ public void testEqualsKey() throws IOException { assertNotEquals(key1, key5); } - private class TestBytesReference extends BytesReference { + private class TestBytesReference extends AbstractBytesReference { int dummyValue; TestBytesReference(int dummyValue) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java index 9d94fc3ed4368..ee766ef7360b5 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java @@ -138,6 +138,8 @@ public void testOverwriteSnapshotInfoBlob() { MockEventuallyConsistentRepository.Context blobStoreContext = new MockEventuallyConsistentRepository.Context(); final ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.executor(ThreadPool.Names.SNAPSHOT)).thenReturn(new SameThreadExecutorService()); + when(threadPool.info(ThreadPool.Names.SNAPSHOT)).thenReturn( + new ThreadPool.Info(ThreadPool.Names.SNAPSHOT, ThreadPool.ThreadPoolType.FIXED, randomIntBetween(1, 10))); try (BlobStoreRepository repository = new MockEventuallyConsistentRepository( new RepositoryMetaData("testRepo", "mockEventuallyConsistent", Settings.EMPTY), xContentRegistry(), threadPool, blobStoreContext)) { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/DeterministicTaskQueue.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/DeterministicTaskQueue.java index 4567b97700604..0837f431fff9c 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/DeterministicTaskQueue.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/DeterministicTaskQueue.java @@ -30,7 +30,9 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.Delayed; @@ -288,6 +290,8 @@ public ThreadPool getThreadPool() { public ThreadPool getThreadPool(Function runnableWrapper) { return new ThreadPool(settings) { + private final Map infos = new HashMap<>(); + { stopCachedTimeThread(); } @@ -309,7 +313,7 @@ public ThreadPoolInfo info() { @Override public Info info(String name) { - throw new UnsupportedOperationException(); + return infos.computeIfAbsent(name, n -> new Info(n, ThreadPoolType.FIXED, random.nextInt(10) + 1)); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index 478ac149dd1f8..2e8a44faa0d06 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -591,7 +591,7 @@ public void testCompareTo() throws IOException { for (int j = crazyStream.size(); j < crazyLength; j++) { crazyStream.writeByte((byte) random().nextInt(1 << 8)); } - 
PagedBytesReference crazyReference = crazyStream.bytes(); + ReleasableBytesReference crazyReference = crazyStream.bytes(); assertFalse(crazyReference.compareTo(bytesReference) == 0); assertEquals(0, crazyReference.slice(offset, length).compareTo( diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index e47bdeee3c225..4876f64301204 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -157,7 +157,7 @@ public void handle(final HttpExchange exchange) throws IOException { final boolean canFailRequest = canFailRequest(exchange); final int count = requests.computeIfAbsent(requestId, req -> new AtomicInteger(0)).incrementAndGet(); - if (count >= maxErrorsPerRequest || canFailRequest == false || randomBoolean()) { + if (count >= maxErrorsPerRequest || canFailRequest == false) { requests.remove(requestId); delegate.handle(exchange); } else { diff --git a/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java index 2acb89befabf4..09524111fd6a8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/AbstractXContentTestCaseTests.java @@ -55,4 +55,4 @@ public void testInsertRandomFieldsAndShuffle() throws Exception { assertThat(mapOrdered.keySet().iterator().next(), not(equalTo("field"))); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index 9f348c5a470b4..58afee18ddbf2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -12,7 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.bytes.PagedBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +60,8 @@ protected void doExecute(Task task, GetCcrRestoreFileChunkRequest request, String sessionUUID = request.getSessionUUID(); // This is currently safe to do because calling `onResponse` will serialize the bytes to the network layer data // structure on the same thread. So the bytes will be copied before the reference is released. 
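        // Illustrative sketch only (consumeFully(...) is an invented placeholder, not code from
        // this change): the contract is the usual try-with-resources shape, where anything that
        // needs the bytes must run inside the block because close() releases the backing pages
        // through the wrapped Releasable:
        //
        //   BytesReference paged = new PagedBytesReference(array, bytesRequested);
        //   try (ReleasableBytesReference ref = new ReleasableBytesReference(paged, array)) {
        //       consumeFully(ref); // must copy or serialize the bytes before the block ends
        //   }
        //   // after this point the underlying ByteArray may have been recycled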
- try (ReleasablePagedBytesReference reference = new ReleasablePagedBytesReference(array, bytesRequested, array)) { + PagedBytesReference pagedBytesReference = new PagedBytesReference(array, bytesRequested); + try (ReleasableBytesReference reference = new ReleasableBytesReference(pagedBytesReference, array)) { try (CcrRestoreSourceService.SessionReader sessionReader = restoreSourceService.getSessionReader(sessionUUID)) { long offsetAfterRead = sessionReader.readFileBytes(fileName, reference); long offsetBeforeRead = offsetAfterRead - reference.length(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 8ab3b12b7a756..12c8e72a0cb02 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -21,6 +20,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ListenerTimeouts; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,18 +31,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; @@ -54,6 +52,7 @@ import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.indices.recovery.MultiFileTransfer; import org.elasticsearch.indices.recovery.MultiFileWriter; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; @@ -87,12 +86,11 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; import java.util.function.Supplier; +import java.util.stream.Collectors; import static 
org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; -import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncAddRetentionLease; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncRenewRetentionLease; @@ -473,97 +471,82 @@ void restoreFiles(Store store) { } @Override - protected void restoreFiles(List filesToRecover, Store store) throws IOException { + protected void restoreFiles(List filesToRecover, Store store) { logger.trace("[{}] starting CCR restore of {} files", shardId, filesToRecover); + final PlainActionFuture restoreFilesFuture = new PlainActionFuture<>(); + final List mds = filesToRecover.stream().map(FileInfo::metadata).collect(Collectors.toList()); + final MultiFileTransfer multiFileTransfer = new MultiFileTransfer<>( + logger, threadPool.getThreadContext(), restoreFilesFuture, ccrSettings.getMaxConcurrentFileChunks(), mds) { - try (MultiFileWriter multiFileWriter = new MultiFileWriter(store, recoveryState.getIndex(), "", logger, () -> { - })) { - final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); - final AtomicReference> error = new AtomicReference<>(); + final MultiFileWriter multiFileWriter = new MultiFileWriter(store, recoveryState.getIndex(), "", logger, () -> {}); + long offset = 0; - for (FileInfo fileInfo : filesToRecover) { - final long fileLength = fileInfo.length(); - long offset = 0; - while (offset < fileLength && error.get() == null) { - final long requestSeqId = requestSeqIdTracker.generateSeqNo(); - try { - requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); - - if (error.get() != null) { - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - break; - } - - final int bytesRequested = Math.toIntExact( - Math.min(ccrSettings.getChunkSize().getBytes(), fileLength - offset)); - offset += bytesRequested; - - final GetCcrRestoreFileChunkRequest request = - new GetCcrRestoreFileChunkRequest(node, sessionUUID, fileInfo.name(), bytesRequested); - logger.trace("[{}] [{}] fetching chunk for file [{}], expected offset: {}, size: {}", shardId, snapshotId, - fileInfo.name(), offset, bytesRequested); - - TimeValue timeout = ccrSettings.getRecoveryActionTimeout(); - ActionListener listener = - ListenerTimeouts.wrapWithTimeout(threadPool, ActionListener.wrap( - r -> threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - - @Override - protected void doRun() throws Exception { - final int actualChunkSize = r.getChunk().length(); - logger.trace("[{}] [{}] got response for file [{}], offset: {}, length: {}", shardId, - snapshotId, fileInfo.name(), r.getOffset(), actualChunkSize); - final long nanosPaused = ccrSettings.getRateLimiter().maybePause(actualChunkSize); - throttleListener.accept(nanosPaused); - final boolean lastChunk = r.getOffset() + actualChunkSize >= fileLength; - multiFileWriter.writeFileChunk(fileInfo.metadata(), r.getOffset(), r.getChunk(), lastChunk); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - }), - e -> { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - ), timeout, 
ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME); - remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request, listener); - } catch (Exception e) { - error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); - requestSeqIdTracker.markSeqNoAsProcessed(requestSeqId); - } - } + @Override + protected void onNewFile(StoreFileMetaData md) { + offset = 0; } - try { - requestSeqIdTracker.waitForProcessedOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new ElasticsearchException(e); + @Override + protected FileChunk nextChunkRequest(StoreFileMetaData md) { + final int bytesRequested = Math.toIntExact(Math.min(ccrSettings.getChunkSize().getBytes(), md.length() - offset)); + offset += bytesRequested; + return new FileChunk(md, bytesRequested, offset == md.length()); } - if (error.get() != null) { - handleError(store, error.get().v2()); + + @Override + protected void executeChunkRequest(FileChunk request, ActionListener listener) { + final ActionListener threadedListener + = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.GENERIC, ActionListener.wrap( + r -> { + writeFileChunk(request.md, r); + listener.onResponse(null); + }, listener::onFailure), false); + + remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, + new GetCcrRestoreFileChunkRequest(node, sessionUUID, request.md.name(), request.bytesRequested), + ListenerTimeouts.wrapWithTimeout(threadPool, threadedListener, ccrSettings.getRecoveryActionTimeout(), + ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME)); } - } - logger.trace("[{}] completed CCR restore", shardId); - } + private void writeFileChunk(StoreFileMetaData md, + GetCcrRestoreFileChunkAction.GetCcrRestoreFileChunkResponse r) throws Exception { + final int actualChunkSize = r.getChunk().length(); + logger.trace("[{}] [{}] got response for file [{}], offset: {}, length: {}", + shardId, snapshotId, md.name(), r.getOffset(), actualChunkSize); + final long nanosPaused = ccrSettings.getRateLimiter().maybePause(actualChunkSize); + throttleListener.accept(nanosPaused); + multiFileWriter.incRef(); + try (Releasable ignored = multiFileWriter::decRef) { + final boolean lastChunk = r.getOffset() + actualChunkSize >= md.length(); + multiFileWriter.writeFileChunk(md, r.getOffset(), r.getChunk(), lastChunk); + } catch (Exception e) { + handleError(md, e); + throw e; + } + } + + @Override + protected void handleError(StoreFileMetaData md, Exception e) throws Exception { + final IOException corruptIndexException; + if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { + try { + store.markStoreCorrupted(corruptIndexException); + } catch (IOException ioe) { + logger.warn("store cannot be marked as corrupted", e); + } + throw corruptIndexException; + } + throw e; + } - private void handleError(Store store, Exception e) throws IOException { - final IOException corruptIndexException; - if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { - try { - store.markStoreCorrupted(corruptIndexException); - } catch (IOException ioe) { - logger.warn("store cannot be marked as corrupted", e); + @Override + public void close() { + multiFileWriter.close(); } - throw corruptIndexException; - } else { - ExceptionsHelper.reThrowIfNotNull(e); - } + }; + multiFileTransfer.start(); + restoreFilesFuture.actionGet(); + logger.trace("[{}] completed CCR restore", shardId); } @Override @@ -572,5 +555,22 @@ public void close() { 
ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); } + + private static class FileChunk implements MultiFileTransfer.ChunkRequest { + final StoreFileMetaData md; + final int bytesRequested; + final boolean lastChunk; + + FileChunk(StoreFileMetaData md, int bytesRequested, boolean lastChunk) { + this.md = md; + this.bytesRequested = bytesRequested; + this.lastChunk = lastChunk; + } + + @Override + public boolean lastChunk() { + return lastChunk; + } + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 6adee04991500..69ff6dddcbe48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -16,10 +16,12 @@ import org.elasticsearch.xpack.core.monitoring.MonitoringField; import java.util.Collections; +import java.util.EnumSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiFunction; @@ -28,6 +30,9 @@ */ public class XPackLicenseState { + public static final Set FIPS_ALLOWED_LICENSE_OPERATION_MODES = + EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); + /** Messages for each feature which are printed when the license expires. */ static final Map EXPIRATION_MESSAGES; static { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index 4de7c51f8daac..61f2cbaccd0f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -50,6 +50,7 @@ public final class ClientHelper { public static final String DEPRECATION_ORIGIN = "deprecation"; public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; public static final String ROLLUP_ORIGIN = "rollup"; + public static final String ENRICH_ORIGIN = "enrich"; public static final String TRANSFORM_ORIGIN = "transform"; private ClientHelper() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java index e5c6ab5eb67ee..661cc4f9467b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java @@ -68,12 +68,6 @@ public ActionRequestValidationException validate() { return null; } - // This will be displayed in tasks api and allows stats api to figure out which policies are being executed. 
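        // (For context: TaskManager populates the "description" field shown by the tasks API from
        // TaskAwareRequest#getDescription(), so once this override is gone the policy-execution
        // task is listed without the policy name.)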
- @Override - public String getDescription() { - return name; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index f9f851a2af908..0a0b082e5bd1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,7 +77,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { XContentBuilder builder = JsonXContent.contentBuilder(); builder.copyCurrentStructure(p); - return BytesArray.bytes(builder); + return BytesReference.bytes(builder); }, STEP_INFO_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), PHASE_EXECUTION_INFO); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index ae31966a34712..48aa1d4024be5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -242,7 +242,7 @@ public static String getKeyStoreType(Setting> setting, Settings return setting.get(settings).orElseGet(() -> inferKeyStoreType(path)); } - private static String inferKeyStoreType(String path) { + public static String inferKeyStoreType(String path) { String name = path == null ? 
"" : path.toLowerCase(Locale.ROOT); if (name.endsWith(".p12") || name.endsWith(".pfx") || name.endsWith(".pkcs12")) { return PKCS12_KEYSTORE_TYPE; diff --git a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java index 06e15730788bf..75d7d18b45d30 100644 --- a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java +++ b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -38,6 +39,15 @@ public void deletePolicies() throws Exception { for (Map entry: policies) { client().performRequest(new Request("DELETE", "/_enrich/policy/" + XContentMapValues.extractValue("config.match.name", entry))); + + List sourceIndices = (List) XContentMapValues.extractValue("config.match.indices", entry); + for (Object sourceIndex : sourceIndices) { + try { + client().performRequest(new Request("DELETE", "/" + sourceIndex)); + } catch (ResponseException e) { + // and that is ok + } + } } } @@ -48,6 +58,8 @@ protected boolean preserveIndicesUponCompletion() { } private void setupGenericLifecycleTest(boolean deletePipeilne) throws Exception { + // Create source index: + createSourceIndex("my-source-index"); // Create the policy: Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); @@ -99,6 +111,7 @@ public void testBasicFlow() throws Exception { } public void testImmutablePolicy() throws IOException { + createSourceIndex("my-source-index"); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); assertOK(client().performRequest(putPolicyRequest)); @@ -108,6 +121,7 @@ public void testImmutablePolicy() throws IOException { } public void testDeleteIsCaseSensitive() throws Exception { + createSourceIndex("my-source-index"); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("my-source-index")); assertOK(client().performRequest(putPolicyRequest)); @@ -155,6 +169,20 @@ public static String generatePolicySource(String index) throws IOException { return Strings.toString(source); } + public static void createSourceIndex(String index) throws IOException { + String mapping = createSourceIndexMapping(); + createIndex(index, Settings.EMPTY, mapping); + } + + public static String createSourceIndexMapping() { + return "\"properties\":" + + "{\"host\": {\"type\":\"keyword\"}," + + "\"globalRank\":{\"type\":\"keyword\"}," + + "\"tldRank\":{\"type\":\"keyword\"}," + + "\"tld\":{\"type\":\"keyword\"}" + + "}"; + } + private static Map toMap(Response response) throws IOException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java 
b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java index 0f7838c4a45c5..7ea64a121c32b 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java +++ b/x-pack/plugin/enrich/qa/rest-with-security/src/test/java/org/elasticsearch/xpack/enrich/EnrichSecurityIT.java @@ -36,6 +36,9 @@ protected Settings restAdminSettings() { public void testInsufficientPermissionsOnNonExistentIndex() throws Exception { // This test is here because it requires a valid user that has permission to execute policy PUTs but should fail if the user // does not have access to read the backing indices used to enrich the data. + Request request = new Request("PUT", "/some-other-index"); + request.setJsonEntity("{\n \"mappings\" : {" + createSourceIndexMapping() + "} }"); + adminClient().performRequest(request); Request putPolicyRequest = new Request("PUT", "/_enrich/policy/my_policy"); putPolicyRequest.setJsonEntity(generatePolicySource("some-other-index")); ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(putPolicyRequest)); diff --git a/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml b/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml index 2a837d9c3b645..e580b188c9ba4 100644 --- a/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml +++ b/x-pack/plugin/enrich/qa/rest/src/test/resources/rest-api-spec/test/enrich/10_basic.yml @@ -1,6 +1,20 @@ --- "Test enrich crud apis": + - do: + indices.create: + index: bar + body: + mappings: + properties: + baz: + type: keyword + a: + type: keyword + b: + type: keyword + - is_true: acknowledged + - do: enrich.put_policy: name: policy-crud diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java index 361f4f6b285cb..916fe8afd491b 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java @@ -29,6 +29,8 @@ public class EnrichPolicyExecutor { + public static final String TASK_ACTION = "policy_execution"; + private final ClusterService clusterService; private final Client client; private final TaskManager taskManager; @@ -165,7 +167,7 @@ private Task runPolicy(ExecuteEnrichPolicyAction.Request request, EnrichPolicy p private Task runPolicyTask(final ExecuteEnrichPolicyAction.Request request, EnrichPolicy policy, BiConsumer onResponse, BiConsumer onFailure) { - Task asyncTask = taskManager.register("enrich", "policy_execution", new TaskAwareRequest() { + Task asyncTask = taskManager.register("enrich", TASK_ACTION, new TaskAwareRequest() { @Override public void setParentTask(TaskId taskId) { request.setParentTask(taskId); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java index 0c734083755b6..594f0a264c4f1 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java @@ -14,6 +14,7 @@ import 
org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -32,6 +33,8 @@ import java.util.Map; import java.util.concurrent.Semaphore; +import static org.elasticsearch.xpack.core.ClientHelper.ENRICH_ORIGIN; + public class EnrichPolicyMaintenanceService implements LocalNodeMasterListener { private static final Logger logger = LogManager.getLogger(EnrichPolicyMaintenanceService.class); @@ -52,7 +55,7 @@ public class EnrichPolicyMaintenanceService implements LocalNodeMasterListener { EnrichPolicyMaintenanceService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool, EnrichPolicyLocks enrichPolicyLocks) { this.settings = settings; - this.client = client; + this.client = new OriginSettingClient(client, ENRICH_ORIGIN); this.clusterService = clusterService; this.threadPool = threadPool; this.enrichPolicyLocks = enrichPolicyLocks; diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index cbd18755718cc..409438f7e1fed 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -134,27 +134,34 @@ private void validateMappings(final GetIndexResponse getIndexResponse) { logger.debug("Policy [{}]: Validating [{}] source mappings", policyName, sourceIndices); for (String sourceIndex : sourceIndices) { Map mapping = getMappings(getIndexResponse, sourceIndex); - // First ensure mapping is set - if (mapping.get("properties") == null) { - throw new ElasticsearchException( - "Enrich policy execution for [{}] failed. Could not read mapping for source [{}] included by pattern [{}]", - policyName, sourceIndex, policy.getIndices()); - } - // Validate the key and values - try { - validateField(mapping, policy.getMatchField(), true); - for (String valueFieldName : policy.getEnrichFields()) { - validateField(mapping, valueFieldName, false); - } - } catch (ElasticsearchException e) { - throw new ElasticsearchException( - "Enrich policy execution for [{}] failed while validating field mappings for index [{}]", - e, policyName, sourceIndex); + validateMappings(policyName, policy, sourceIndex, mapping); + } + } + + static void validateMappings(final String policyName, + final EnrichPolicy policy, + final String sourceIndex, + final Map mapping) { + // First ensure mapping is set + if (mapping.get("properties") == null) { + throw new ElasticsearchException( + "Enrich policy execution for [{}] failed. 
Could not read mapping for source [{}] included by pattern [{}]", + policyName, sourceIndex, policy.getIndices()); + } + // Validate the key and values + try { + validateField(mapping, policy.getMatchField(), true); + for (String valueFieldName : policy.getEnrichFields()) { + validateField(mapping, valueFieldName, false); } + } catch (ElasticsearchException e) { + throw new ElasticsearchException( + "Enrich policy execution for [{}] failed while validating field mappings for index [{}]", + e, policyName, sourceIndex); } } - private void validateField(Map properties, String fieldName, boolean fieldRequired) { + private static void validateField(Map properties, String fieldName, boolean fieldRequired) { assert Strings.isEmpty(fieldName) == false: "Field name cannot be null or empty"; String[] fieldParts = fieldName.split("\\."); StringBuilder parent = new StringBuilder(); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java index cab3f6994b490..ebc28b0be180c 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java @@ -8,8 +8,12 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; @@ -39,7 +43,11 @@ private EnrichStore() {} * @param policy The policy to store * @param handler The handler that gets invoked if policy has been stored or a failure has occurred. */ - public static void putPolicy(String name, EnrichPolicy policy, ClusterService clusterService, Consumer handler) { + public static void putPolicy(final String name, + final EnrichPolicy policy, + final ClusterService clusterService, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Consumer handler) { assert clusterService.localNode().isMasterNode(); if (Strings.isNullOrEmpty(name)) { @@ -75,6 +83,22 @@ public static void putPolicy(String name, EnrichPolicy policy, ClusterService cl finalPolicy = policy; } updateClusterState(clusterService, handler, current -> { + for (String indexExpression : finalPolicy.getIndices()) { + // indices field in policy can contain wildcards, aliases etc. 
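
For reference, the contract that validateMappings enforces can be stated in a few lines. The sketch below is a simplified, self-contained version of it: it checks only top-level fields under "properties", whereas the real validateField also walks dotted names like "a.b", and it swaps ElasticsearchException for a JDK exception type:

    import java.util.List;
    import java.util.Map;

    public class MappingPrecheckSketch {
        static void validate(String index, Map<String, Object> mapping,
                             String matchField, List<String> enrichFields) {
            Object properties = mapping.get("properties");
            if (properties instanceof Map == false) {
                // Mirrors "Could not read mapping for source [...]" above.
                throw new IllegalArgumentException("source index [" + index + "] has no mapping");
            }
            Map<?, ?> props = (Map<?, ?>) properties;
            if (props.containsKey(matchField) == false) {
                throw new IllegalArgumentException(
                    "match field [" + matchField + "] is missing in [" + index + "]");
            }
            for (String enrichField : enrichFields) {
                if (props.containsKey(enrichField) == false) {
                    throw new IllegalArgumentException(
                        "enrich field [" + enrichField + "] is missing in [" + index + "]");
                }
            }
        }
    }
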
+ String[] concreteIndices = + indexNameExpressionResolver.concreteIndexNames(current, IndicesOptions.strictExpandOpen(), indexExpression); + for (String concreteIndex : concreteIndices) { + IndexMetaData imd = current.getMetaData().index(concreteIndex); + assert imd != null; + MappingMetaData mapping = imd.mapping(); + if (mapping == null) { + throw new IllegalArgumentException("source index [" + concreteIndex + "] has no mapping"); + } + Map mappingSource = mapping.getSourceAsMap(); + EnrichPolicyRunner.validateMappings(name, finalPolicy, concreteIndex, mappingSource); + } + } + final Map policies = getPolicies(current); if (policies.get(name) != null) { throw new ResourceAlreadyExistsException("policy [{}] already exists", name); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java index 30c84a127fcf6..b57c231effcce 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction; import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction.Response.CoordinatorStats; import org.elasticsearch.xpack.core.enrich.action.EnrichStatsAction.Response.ExecutingPolicy; -import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPolicyExecutor; import java.io.IOException; import java.util.Comparator; @@ -80,7 +80,7 @@ protected void masterOperation(Task task, .sorted(Comparator.comparing(CoordinatorStats::getNodeId)) .collect(Collectors.toList()); List policyExecutionTasks = taskManager.getTasks().values().stream() - .filter(t -> t.getAction().equals(ExecuteEnrichPolicyAction.NAME)) + .filter(t -> t.getAction().equals(EnrichPolicyExecutor.TASK_ACTION)) .map(t -> t.taskInfo(clusterService.localNode().getId(), true)) .map(t -> new ExecutingPolicy(t.getDescription(), t)) .sorted(Comparator.comparing(ExecutingPolicy::getName)) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java index 7402c2daef6e7..23d756f3a83a4 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportExecuteEnrichPolicyAction.java @@ -62,6 +62,12 @@ protected ExecuteEnrichPolicyAction.Response read(StreamInput in) throws IOExcep @Override protected void masterOperation(Task task, ExecuteEnrichPolicyAction.Request request, ClusterState state, ActionListener listener) { + if (state.getNodes().getIngestNodes().isEmpty()) { + // if we don't fail here then reindex will fail with a more complicated error. 
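
The guard above makes the failure mode explicit before any work starts. A compact illustration of the same fail-fast pattern, with a plain list standing in for ClusterState#getNodes().getIngestNodes():

    import java.util.List;

    public class IngestNodePrecheck {
        // Stand-in for the ingest-node ids exposed by the cluster state.
        static void ensureIngestNodeExists(List<String> ingestNodeIds) {
            if (ingestNodeIds.isEmpty()) {
                throw new IllegalStateException("no ingest nodes in this cluster");
            }
        }

        public static void main(String[] args) {
            ensureIngestNodeExists(List.of("node-a")); // passes silently
            try {
                ensureIngestNodeExists(List.of());
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage()); // prints: no ingest nodes in this cluster
            }
        }
    }
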
+ // (EnrichPolicyRunner uses a pipeline with reindex) + throw new IllegalStateException("no ingest nodes in this cluster"); + } + if (request.isWaitForCompletion()) { executor.runPolicy(request, new ActionListener<>() { @Override diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java index ec1d80a355d5f..2753172469c6a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportPutEnrichPolicyAction.java @@ -103,7 +103,7 @@ protected void masterOperation(Task task, PutEnrichPolicyAction.Request request, } private void putPolicy(PutEnrichPolicyAction.Request request, ActionListener listener ) { - EnrichStore.putPolicy(request.getName(), request.getPolicy(), clusterService, e -> { + EnrichStore.putPolicy(request.getName(), request.getPolicy(), clusterService, indexNameExpressionResolver, e -> { if (e == null) { listener.onResponse(new AcknowledgedResponse(true)); } else { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java index 0b8ddd0288008..7a5a3aef8c883 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/AbstractEnrichTestCase.java @@ -5,6 +5,10 @@ */ package org.elasticsearch.xpack.enrich; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -24,9 +28,13 @@ protected Collection> getPlugins() { protected AtomicReference saveEnrichPolicy(String name, EnrichPolicy policy, ClusterService clusterService) throws InterruptedException { + if (policy != null) { + createSourceIndices(policy); + } + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); CountDownLatch latch = new CountDownLatch(1); AtomicReference error = new AtomicReference<>(); - EnrichStore.putPolicy(name, policy, clusterService, e -> { + EnrichStore.putPolicy(name, policy, clusterService, resolver, e -> { error.set(e); latch.countDown(); }); @@ -46,4 +54,20 @@ protected void deleteEnrichPolicy(String name, ClusterService clusterService) th throw error.get(); } } + + protected void createSourceIndices(EnrichPolicy policy) { + createSourceIndices(client(), policy); + } + + protected static void createSourceIndices(Client client, EnrichPolicy policy) { + for (String sourceIndex : policy.getIndices()) { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(sourceIndex); + createIndexRequest.mapping("_doc", policy.getMatchField(), "type=keyword"); + try { + client.admin().indices().create(createIndexRequest).actionGet(); + } catch (ResourceAlreadyExistsException e) { + // and that is okay + } + } + } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java 
b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java index 1b19958ee2ad5..63b92cea674b6 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java @@ -119,6 +119,19 @@ public void testEnrichDedicatedIngestNode() { enrich(keys, ingestOnlyNode); } + public void testEnrichNoIngestNodes() { + Settings settings = Settings.builder() + .put(Node.NODE_MASTER_SETTING.getKey(), true) + .put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_INGEST_SETTING.getKey(), false) + .build(); + internalCluster().startNode(settings); + + createSourceIndex(64); + Exception e = expectThrows(IllegalStateException.class, EnrichMultiNodeIT::createAndExecutePolicy); + assertThat(e.getMessage(), equalTo("no ingest nodes in this cluster")); + } + private static void enrich(List keys, String coordinatingNode) { int numDocs = 256; BulkRequest bulkRequest = new BulkRequest("my-index"); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java index fc5e77e377971..ad984f92f014b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceServiceTests.java @@ -10,6 +10,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; @@ -23,6 +24,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -33,6 +35,7 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.MATCH_TYPE; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -113,12 +116,15 @@ private EnrichPolicy randomPolicy() { for (int i = 0; i < randomIntBetween(1, 3); i++) { enrichKeys.add(randomAlphaOfLength(10)); } - return new EnrichPolicy(MATCH_TYPE, null, List.of(randomAlphaOfLength(10)), randomAlphaOfLength(10), enrichKeys); + String sourceIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + return new EnrichPolicy(MATCH_TYPE, null, List.of(sourceIndex), randomAlphaOfLength(10), enrichKeys); } private void addPolicy(String policyName, EnrichPolicy policy) throws InterruptedException { + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); + createSourceIndices(client(), policy); doSyncronously((clusterService, exceptionConsumer) -> - EnrichStore.putPolicy(policyName, policy, clusterService, exceptionConsumer)); + EnrichStore.putPolicy(policyName, policy, clusterService, resolver, exceptionConsumer)); } private void removePolicy(String policyName) throws InterruptedException { diff --git 
a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java index 3c87867f9dfe5..645bd0277de61 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyTests.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Locale; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -59,7 +61,9 @@ public static EnrichPolicy randomEnrichPolicy(XContentType xContentType) { return new EnrichPolicy( randomFrom(EnrichPolicy.SUPPORTED_POLICY_TYPES), randomBoolean() ? querySource : null, - Arrays.asList(generateRandomStringArray(8, 4, false, false)), + Arrays.stream(generateRandomStringArray(8, 4, false, false)) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.toList()), randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(8, 4, false, false)) ); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java index 91cb2776bad93..5fff3c12e2c3a 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java @@ -24,6 +24,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -40,6 +41,7 @@ public void testUpdatePolicyOnly() { EnrichPolicy instance1 = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index"), "key1", List.of("field1")); + createSourceIndices(client(), instance1); PutEnrichPolicyAction.Request putPolicyRequest = new PutEnrichPolicyAction.Request("my_policy", instance1); assertAcked(client().execute(PutEnrichPolicyAction.INSTANCE, putPolicyRequest).actionGet()); assertThat("Execute failed", client().execute(ExecuteEnrichPolicyAction.INSTANCE, @@ -53,7 +55,8 @@ public void testUpdatePolicyOnly() { assertThat(pipelineInstance1.getProcessors().get(0), instanceOf(MatchProcessor.class)); EnrichPolicy instance2 = - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index"), "key2", List.of("field2")); + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("index2"), "key2", List.of("field2")); + createSourceIndices(client(), instance2); ResourceAlreadyExistsException exc = expectThrows(ResourceAlreadyExistsException.class, () -> client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)).actionGet()); assertTrue(exc.getMessage().contains("policy [my_policy] already exists")); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java index 5abd83893a606..2462534308b38 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java @@ -16,6 
+16,7 @@ import java.util.List; import java.util.Optional; +import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.DECORATE_FIELDS; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.MATCH_FIELD; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.POLICY_NAME; @@ -37,6 +38,7 @@ public void testRestart() throws Exception { EnrichPolicy enrichPolicy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(SOURCE_INDEX_NAME), MATCH_FIELD, List.of(DECORATE_FIELDS)); + createSourceIndices(client(), enrichPolicy); for (int i = 0; i < numPolicies; i++) { String policyName = POLICY_NAME + i; PutEnrichPolicyAction.Request request = new PutEnrichPolicyAction.Request(policyName, enrichPolicy); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index 622c1fb164f4a..293fc7883e19b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -32,7 +32,7 @@ public class TransportDeleteEnrichPolicyActionTests extends AbstractEnrichTestCase { @After - private void cleanupPolicy() { + public void cleanupPolicy() { ClusterService clusterService = getInstanceFromNode(ClusterService.class); String name = "my-policy"; @@ -57,7 +57,7 @@ public void testDeletePolicyDoesNotExistUnlocksPolicy() throws InterruptedExcept final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(fakeId), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { fail(); @@ -91,7 +91,7 @@ public void testDeleteWithoutIndex() throws Exception { final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); @@ -132,7 +132,7 @@ public void testDeleteIsNotLocked() throws Exception { final TransportDeleteEnrichPolicyAction transportAction = node().injector().getInstance(TransportDeleteEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); @@ -179,7 +179,7 @@ public void testDeleteLocked() throws InterruptedException { final AtomicReference reference = new AtomicReference<>(); ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { fail(); @@ -205,7 +205,7 @@ public void onFailure(final Exception e) { ActionTestUtils.execute(transportAction, null, new DeleteEnrichPolicyAction.Request(name), - new 
ActionListener() { + new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { reference.set(acknowledgedResponse); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java index 31212218c771b..e6470b87f1225 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportGetEnrichPolicyActionTests.java @@ -27,7 +27,7 @@ public class TransportGetEnrichPolicyActionTests extends AbstractEnrichTestCase { @After - private void cleanupPolicies() throws InterruptedException { + public void cleanupPolicies() throws InterruptedException { ClusterService clusterService = getInstanceFromNode(ClusterService.class); final CountDownLatch latch = new CountDownLatch(1); @@ -35,7 +35,7 @@ private void cleanupPolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -108,7 +108,7 @@ public void testListEmptyPolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -144,7 +144,7 @@ public void testGetPolicy() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{name}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -187,7 +187,7 @@ public void testGetMultiplePolicies() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{name, anotherName}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); @@ -218,7 +218,7 @@ public void testGetPolicyThrowsError() throws InterruptedException { final TransportGetEnrichPolicyAction transportAction = node().injector().getInstance(TransportGetEnrichPolicyAction.class); ActionTestUtils.execute(transportAction, null, new GetEnrichPolicyAction.Request(new String[]{"non-exists"}), - new ActionListener() { + new ActionListener<>() { @Override public void onResponse(GetEnrichPolicyAction.Response response) { reference.set(response); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java 
b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java index ce344ec97d3db..a2fd3b194e29c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ClassificationIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.integration; import com.google.common.collect.Ordering; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -39,6 +40,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/48337") public class ClassificationIT extends MlNativeDataFrameAnalyticsIntegTestCase { private static final String BOOLEAN_FIELD = "boolean-field"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java deleted file mode 100644 index 6961c377f55e5..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.XPackSettings; - - -public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - - /** - * Test if the node fails the check. - * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(BootstrapContext context) { - - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - final Settings settings = context.settings(); - Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) - .filter(k -> settings.get(k).equalsIgnoreCase("jks")); - if (keystoreTypeSettings.isEmpty() == false) { - return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + - "revisit [" + keystoreTypeSettings.toDelimitedString(',') + "] settings"); - } - // Default Keystore type is JKS if not explicitly set - Settings keystorePathSettings = settings.filter(k -> k.endsWith("keystore.path")) - .filter(k -> settings.hasValue(k.replace(".path", ".type")) == false); - if (keystorePathSettings.isEmpty() == false) { - return BootstrapCheckResult.failure("JKS Keystores cannot be used in a FIPS 140 compliant JVM. 
Please " + - "revisit [" + keystorePathSettings.toDelimitedString(',') + "] settings"); - } - - } - return BootstrapCheckResult.success(); - } - - @Override - public boolean alwaysEnforce() { - return true; - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java deleted file mode 100644 index 4b0d9cd2f8c58..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.EnumSet; - -/** - * A bootstrap check which enforces the licensing of FIPS - */ -final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { - - static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = - EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - License license = LicenseService.getLicense(context.metaData()); - if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { - return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); - } - } - return BootstrapCheckResult.success(); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java deleted file mode 100644 index 8a754a2f25b93..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.Locale; - -public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - - /** - * Test if the node fails the check. 
- * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(final BootstrapContext context) { - if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings())) { - final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings()); - if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { - return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + - "appropriate value for [ " + XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey() + " ] setting."); - } - } - return BootstrapCheckResult.success(); - } - -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java deleted file mode 100644 index 82a58b94a83fe..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.io.IOException; -import java.io.UncheckedIOException; - -public class FIPS140SecureSettingsBootstrapCheck implements BootstrapCheck { - - private final boolean fipsModeEnabled; - private final Environment environment; - - FIPS140SecureSettingsBootstrapCheck(Settings settings, Environment environment) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - this.environment = environment; - } - - /** - * Test if the node fails the check. - * - * @param context the bootstrap context - * @return the result of the bootstrap check - */ - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { - try (KeyStoreWrapper secureSettings = KeyStoreWrapper.load(environment.configFile())) { - if (secureSettings != null && secureSettings.getFormatVersion() < 3) { - return BootstrapCheckResult.failure("Secure settings store is not of the appropriate version. 
Please use " + - "bin/elasticsearch-keystore create to generate a new secure settings store and migrate the secure settings there."); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - return BootstrapCheckResult.success(); - } - - @Override - public boolean alwaysEnforce() { - return fipsModeEnabled; - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index b2ebf9733c25b..9c9aea7111442 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -263,6 +263,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static org.elasticsearch.license.XPackLicenseState.FIPS_ALLOWED_LICENSE_OPERATION_MODES; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; @@ -311,11 +312,7 @@ public Security(Settings settings, final Path configPath) { new ApiKeySSLBootstrapCheck(), new TokenSSLBootstrapCheck(), new PkiRealmBootstrapCheck(getSslService()), - new TLSLicenseBootstrapCheck(), - new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(), - new FIPS140LicenseBootstrapCheck())); + new TLSLicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateConfiguration(settings); @@ -328,6 +325,9 @@ public Security(Settings settings, final Path configPath) { private static void runStartupChecks(Settings settings) { validateRealmSettings(settings); + if (XPackSettings.FIPS_MODE_ENABLED.get(settings)) { + validateForFips(settings); + } } // overridable by tests @@ -830,6 +830,37 @@ static void validateRealmSettings(Settings settings) { } } + static void validateForFips(Settings settings) { + final List validationErrors = new ArrayList<>(); + Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) + .filter(k -> settings.get(k).equalsIgnoreCase("jks")); + if (keystoreTypeSettings.isEmpty() == false) { + validationErrors.add("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + + "revisit [" + keystoreTypeSettings.toDelimitedString(',') + "] settings"); + } + Settings keystorePathSettings = settings.filter(k -> k.endsWith("keystore.path")) + .filter(k -> settings.hasValue(k.replace(".path", ".type")) == false); + if (keystorePathSettings.isEmpty() == false && SSLConfigurationSettings.inferKeyStoreType(null).equals("jks")) { + validationErrors.add("JKS Keystores cannot be used in a FIPS 140 compliant JVM. Please " + + "revisit [" + keystorePathSettings.toDelimitedString(',') + "] settings"); + } + final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings); + if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { + validationErrors.add("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM. 
Please set the " + + "appropriate value for [ " + XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey() + " ] setting."); + } + + if (validationErrors.isEmpty() == false) { + final StringBuilder sb = new StringBuilder(); + sb.append("Validation for FIPS 140 mode failed: \n"); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";\n"); + } + throw new IllegalArgumentException(sb.toString()); + } + } + @Override public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { if (enabled == false) { // don't register anything if we are not enabled @@ -998,7 +1029,7 @@ public void accept(DiscoveryNode node, ClusterState state) { if (inFipsMode) { License license = LicenseService.getLicense(state.metaData()); if (license != null && - FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { + FIPS_ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { throw new IllegalStateException("FIPS mode cannot be used with a [" + license.operationMode() + "] license. It is only allowed with a Platinum or Trial license."); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 2097c5176d1fb..cd33ff10f0f05 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -19,6 +19,7 @@ import java.util.function.Predicate; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.ENRICH_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; @@ -111,6 +112,7 @@ public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadC case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: case INDEX_LIFECYCLE_ORIGIN: + case ENRICH_ORIGIN: case TASKS_ORIGIN: // TODO use a more limited user for tasks securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT); break; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java deleted file mode 100644 index b35b8009f12ee..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; - -public class FIPS140JKSKeystoreBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testNoKeystoreIsAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true"); - assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testTransportSSLKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.transport.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testHttpSSLKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.http.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testRealmKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path") - .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } - - public void testImplicitRealmKeystoreTypeIsNotAllowed() { - final Settings.Builder settings = Settings.builder() - .put("xpack.security.fips_mode.enabled", "true") - .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(createTestContext(settings.build(), null)).isFailure()); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java deleted file mode 100644 index 9f3cc0ef951bf..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.license.License; -import org.elasticsearch.license.TestUtils; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; - -public class FIPS140LicenseBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testBootstrapCheck() throws Exception { - assertTrue(new FIPS140LicenseBootstrapCheck() - .check(emptyContext).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck() - .check(createTestContext(Settings.builder().put("xpack.security.fips_mode.enabled", randomBoolean()).build(), MetaData - .EMPTY_META_DATA)).isSuccess()); - - MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - TestUtils.putLicense(builder, license); - MetaData metaData = builder.build(); - - if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) { - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - } else { - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure()); - assertEquals("FIPS mode is only allowed with a Platinum or Trial license", - new FIPS140LicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage()); - } - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java deleted file mode 100644 index 0dcaf1128f988..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import org.elasticsearch.xpack.core.XPackSettings; - -import java.util.Arrays; - -import static org.hamcrest.Matchers.equalTo; - -public class FIPS140PasswordHashingAlgorithmBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - public void testPBKDF2AlgorithmIsAllowed() { - { - final Settings settings = Settings.builder() - .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) - .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000") - .build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertFalse(result.isFailure()); - } - - { - final Settings settings = Settings.builder() - .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) - .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2") - .build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertFalse(result.isFailure()); - } - } - - public void testBCRYPTAlgorithmDependsOnFipsMode() { - for (final Boolean fipsModeEnabled : Arrays.asList(true, false)) { - for (final String passwordHashingAlgorithm : Arrays.asList(null, "BCRYPT", "BCRYPT11")) { - runBCRYPTTest(fipsModeEnabled, passwordHashingAlgorithm); - } - } - } - - private void runBCRYPTTest(final boolean fipsModeEnabled, final String passwordHashingAlgorithm) { - final Settings.Builder builder = Settings.builder().put(XPackSettings.FIPS_MODE_ENABLED.getKey(), fipsModeEnabled); - if (passwordHashingAlgorithm != null) { - builder.put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), passwordHashingAlgorithm); - } - final Settings settings = builder.build(); - final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(createTestContext(settings, null)); - assertThat(result.isFailure(), equalTo(fipsModeEnabled)); - } - -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java deleted file mode 100644 index 5497dcfe46045..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheckTests.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */
-package org.elasticsearch.xpack.security;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
-import org.elasticsearch.common.settings.KeyStoreWrapper;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.TestEnvironment;
-import org.elasticsearch.test.AbstractBootstrapCheckTestCase;
-
-import javax.crypto.SecretKey;
-import javax.crypto.SecretKeyFactory;
-import javax.crypto.spec.PBEKeySpec;
-import java.io.ByteArrayOutputStream;
-import java.nio.file.Path;
-import java.security.AccessControlException;
-import java.security.KeyStore;
-import java.util.Base64;
-
-public class FIPS140SecureSettingsBootstrapCheckTests extends AbstractBootstrapCheckTestCase {
-
-    public void testLegacySecureSettingsIsNotAllowed() throws Exception {
-        assumeFalse("Can't run in a FIPS JVM, PBE is not available", inFipsJvm());
-        final Settings.Builder builder = Settings.builder()
-            .put("path.home", createTempDir())
-            .put("xpack.security.fips_mode.enabled", "true");
-        Environment env = TestEnvironment.newEnvironment(builder.build());
-        generateV2Keystore(env);
-        assertTrue(new FIPS140SecureSettingsBootstrapCheck(builder.build(), env).check(createTestContext(builder.build(),
-            null)).isFailure());
-    }
-
-    public void testCorrectSecureSettingsVersionIsAllowed() throws Exception {
-        final Settings.Builder builder = Settings.builder()
-            .put("path.home", createTempDir())
-            .put("xpack.security.fips_mode.enabled", "true");
-        Environment env = TestEnvironment.newEnvironment(builder.build());
-        final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create();
-        try {
-            keyStoreWrapper.save(env.configFile(), "password".toCharArray());
-        } catch (final AccessControlException e) {
-            if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) {
-                // this is expected, but we don't care in tests
-            } else {
-                throw e;
-            }
-        }
-        assertFalse(new FIPS140SecureSettingsBootstrapCheck(builder.build(), env).check(createTestContext(builder.build(),
-            null)).isFailure());
-    }
-
-    private void generateV2Keystore(Environment env) throws Exception {
-        Path configDir = env.configFile();
-        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
-        byte[] fileBytes = new byte[20];
-        random().nextBytes(fileBytes);
-        try (IndexOutput output = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) {
-
-            CodecUtil.writeHeader(output, "elasticsearch.keystore", 2);
-            output.writeByte((byte) 0); // hasPassword = false
-            output.writeString("PKCS12");
-            output.writeString("PBE"); // string algo
-            output.writeString("PBE"); // file algo
-
-            output.writeVInt(2); // num settings
-            output.writeString("string_setting");
-            output.writeString("STRING");
-            output.writeString("file_setting");
-            output.writeString("FILE");
-
-            SecretKeyFactory secretFactory = SecretKeyFactory.getInstance("PBE");
-            KeyStore keystore = KeyStore.getInstance("PKCS12");
-            keystore.load(null, null);
-            SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec("stringSecretValue".toCharArray()));
-            KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection(new char[0]);
-            keystore.setEntry("string_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter);
-
-            byte[] base64Bytes = Base64.getEncoder().encode(fileBytes);
-            char[] chars = new char[base64Bytes.length];
-            for (int i = 0; i < chars.length; ++i) {
-                chars[i] = (char) base64Bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok
-            }
-            secretKey = secretFactory.generateSecret(new PBEKeySpec(chars));
-            keystore.setEntry("file_setting", new KeyStore.SecretKeyEntry(secretKey), protectionParameter);
-
-            ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream();
-            keystore.store(keystoreBytesStream, new char[0]);
-            byte[] keystoreBytes = keystoreBytesStream.toByteArray();
-            output.writeInt(keystoreBytes.length);
-            output.writeBytes(keystoreBytes, keystoreBytes.length);
-            CodecUtil.writeFooter(output);
-        }
-    }
-}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
index 364b32f18b356..99d101744e656 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
@@ -38,6 +38,7 @@
 import org.elasticsearch.xpack.core.security.authc.Realm;
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
 import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions;
@@ -65,6 +66,7 @@
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING;
+import static org.elasticsearch.license.XPackLicenseState.FIPS_ALLOWED_LICENSE_OPERATION_MODES;
 import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS;
 import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT;
 import static org.hamcrest.Matchers.containsString;
@@ -243,24 +245,36 @@ public void testJoinValidatorOnDisabledSecurity() throws Exception {
         assertNull(joinValidator);
     }
 
-    public void testJoinValidatorForFIPSLicense() throws Exception {
+    public void testJoinValidatorForFIPSOnAllowedLicense() throws Exception {
         DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(),
             VersionUtils.randomVersionBetween(random(), null, Version.CURRENT));
         MetaData.Builder builder = MetaData.builder();
-        License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24));
+        License license =
+            TestUtils.generateSignedLicense(randomFrom(FIPS_ALLOWED_LICENSE_OPERATION_MODES).toString(), TimeValue.timeValueHours(24));
         TestUtils.putLicense(builder, license);
         ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build();
         new Security.ValidateLicenseForFIPS(false).accept(node, state);
+        // no exception thrown
+        new Security.ValidateLicenseForFIPS(true).accept(node, state);
+        // no exception thrown
+    }
+
+    public void testJoinValidatorForFIPSOnForbiddenLicense() throws Exception {
+        DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(),
+            VersionUtils.randomVersionBetween(random(), null, Version.CURRENT));
+        MetaData.Builder builder = MetaData.builder();
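+        // pick any license operation mode outside the FIPS-allowed set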
+        final String forbiddenLicenseType =
+            randomFrom(List.of(License.OperationMode.values()).stream()
+                .filter(l -> FIPS_ALLOWED_LICENSE_OPERATION_MODES.contains(l) == false).collect(Collectors.toList())).toString();
+        License license = TestUtils.generateSignedLicense(forbiddenLicenseType, TimeValue.timeValueHours(24));
+        TestUtils.putLicense(builder, license);
+        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build();
+        new Security.ValidateLicenseForFIPS(false).accept(node, state);
+        // no exception thrown
+        IllegalStateException e = expectThrows(IllegalStateException.class,
+            () -> new Security.ValidateLicenseForFIPS(true).accept(node, state));
+        assertThat(e.getMessage(), containsString("FIPS mode cannot be used"));
 
-        final boolean isLicenseValidForFips =
-            FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode());
-        if (isLicenseValidForFips) {
-            new Security.ValidateLicenseForFIPS(true).accept(node, state);
-        } else {
-            IllegalStateException e = expectThrows(IllegalStateException.class,
-                () -> new Security.ValidateLicenseForFIPS(true).accept(node, state));
-            assertThat(e.getMessage(), containsString("FIPS mode cannot be used"));
-        }
     }
 
     public void testIndexJoinValidator_FullyCurrentCluster() throws Exception {
@@ -377,4 +391,71 @@ public void testValidateRealmsWhenSettingsAreCorrect() {
         Security.validateRealmSettings(settings);
         // no-exception
     }
+
+    public void testValidateForFipsKeystoreWithImplicitJksType() {
+        final Settings settings = Settings.builder()
+            .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true)
+            .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore")
+            .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
+                randomFrom(Hasher.getAvailableAlgoStoredHash().stream()
+                    .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList())))
+            .build();
+        final IllegalArgumentException iae =
+            expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings));
+        assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM"));
+    }
+
+    public void testValidateForFipsKeystoreWithExplicitJksType() {
+        final Settings settings = Settings.builder()
+            .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true)
+            .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore")
+            .put("xpack.security.transport.ssl.keystore.type", "JKS")
+            .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
+                randomFrom(Hasher.getAvailableAlgoStoredHash().stream()
+                    .filter(alg -> alg.startsWith("pbkdf2")).collect(Collectors.toList())))
+            .build();
+        final IllegalArgumentException iae =
+            expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings));
+        assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM"));
+    }
+
+    public void testValidateForFipsInvalidPasswordHashingAlgorithm() {
+        final Settings settings = Settings.builder()
+            .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true)
+            .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
+                randomFrom(Hasher.getAvailableAlgoStoredHash().stream()
+                    .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList())))
+            .build();
+        final IllegalArgumentException iae =
+            expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings));
+        assertThat(iae.getMessage(), containsString("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM."));
+    }
+
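+    // when both a JKS keystore and a non-PBKDF2 hasher are configured, both violations
+    // should be collected into a single exception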
+    public void testValidateForFipsMultipleValidationErrors() {
+        final Settings settings = Settings.builder()
+            .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true)
+            .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore")
+            .put("xpack.security.transport.ssl.keystore.type", "JKS")
+            .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
+                randomFrom(Hasher.getAvailableAlgoStoredHash().stream()
+                    .filter(alg -> alg.startsWith("pbkdf2") == false).collect(Collectors.toList())))
+            .build();
+        final IllegalArgumentException iae =
+            expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings));
+        assertThat(iae.getMessage(), containsString("JKS Keystores cannot be used in a FIPS 140 compliant JVM"));
+        assertThat(iae.getMessage(), containsString("Only PBKDF2 is allowed for password hashing in a FIPS 140 JVM."));
+    }
+
+    public void testValidateForFipsNoErrors() {
+        final Settings settings = Settings.builder()
+            .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true)
+            .put("xpack.security.transport.ssl.keystore.path", "path/to/keystore")
+            .put("xpack.security.transport.ssl.keystore.type", "BCFKS")
+            .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
+                randomFrom(Hasher.getAvailableAlgoStoredHash().stream()
+                    .filter(alg -> alg.startsWith("pbkdf2")).collect(Collectors.toList())))
+            .build();
+        Security.validateForFips(settings);
+        // no exception thrown
+    }
 }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
index a3f1f385346e8..25ff9e9879797 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
@@ -108,11 +108,13 @@
 import org.elasticsearch.xpack.sql.tree.Source;
 import org.elasticsearch.xpack.sql.util.Check;
 import org.elasticsearch.xpack.sql.util.DateUtils;
+import org.elasticsearch.xpack.sql.util.Holder;
 import org.elasticsearch.xpack.sql.util.ReflectionUtils;
 
 import java.time.OffsetTime;
 import java.time.Period;
 import java.time.ZonedDateTime;
+import java.time.temporal.TemporalAccessor;
 import java.util.Arrays;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -821,9 +823,36 @@ protected QueryTranslation asQuery(Range r, boolean onAggs) {
         if (onAggs) {
             aggFilter = new AggFilter(at.id().toString(), r.asScript());
         } else {
+            Holder<Object> lower = new Holder<>(valueOf(r.lower()));
+            Holder<Object> upper = new Holder<>(valueOf(r.upper()));
+            Holder<String> format = new Holder<>(dateFormat(r.value()));
+
+            // for a date constant comparison, we need to use an explicit format for the date, to make sure that the
+            // rendered value is the same no matter the timezone provided by the user
+            if (format.get() == null) {
+                DateFormatter formatter = null;
+                if (lower.get() instanceof ZonedDateTime || upper.get() instanceof ZonedDateTime) {
+                    formatter = DateFormatter.forPattern(DATE_FORMAT);
+                } else if (lower.get() instanceof OffsetTime || upper.get() instanceof OffsetTime) {
+                    formatter = DateFormatter.forPattern(TIME_FORMAT);
+                }
+                if (formatter != null) {
+                    // RangeQueryBuilder accepts an Object as its parameter, but it will call .toString() on the ZonedDateTime
+                    // instance, which can have a slightly different format depending on the ZoneId used to create the ZonedDateTime.
+                    // Since RangeQueryBuilder can handle dates as String as well, we'll format it as a String and provide the format.
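+                    // (e.g. the same instant may print as "2019-01-01T01:00+01:00[Europe/Berlin]" or "2019-01-01T00:00Z"
+                    // via toString(), while an explicit pattern renders it identically for every zone)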
+                    if (lower.get() instanceof ZonedDateTime || lower.get() instanceof OffsetTime) {
+                        lower.set(formatter.format((TemporalAccessor) lower.get()));
+                    }
+                    if (upper.get() instanceof ZonedDateTime || upper.get() instanceof OffsetTime) {
+                        upper.set(formatter.format((TemporalAccessor) upper.get()));
+                    }
+                    format.set(formatter.pattern());
+                }
+            }
+
             query = handleQuery(r, r.value(),
-                () -> new RangeQuery(r.source(), nameOf(r.value()), valueOf(r.lower()), r.includeLower(),
-                    valueOf(r.upper()), r.includeUpper(), dateFormat(r.value())));
+                () -> new RangeQuery(r.source(), nameOf(r.value()), lower.get(), r.includeLower(),
+                    upper.get(), r.includeUpper(), format.get()));
         }
         return new QueryTranslation(query, aggFilter);
     } else {
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
index 9b78c4791095e..df500411926ae 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java
@@ -239,22 +239,37 @@ public void testDateRangeCast() {
 
     public void testDateRangeWithCurrentTimestamp() {
         testDateRangeWithCurrentFunctions("CURRENT_TIMESTAMP()", DATE_FORMAT, TestUtils.TEST_CFG.now());
+        testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_TIMESTAMP()", DATE_FORMAT,
+            TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L),
+            TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L));
     }
 
     public void testDateRangeWithCurrentDate() {
         testDateRangeWithCurrentFunctions("CURRENT_DATE()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now()));
+        testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_DATE()", DATE_FORMAT,
+            DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)),
+            DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L)));
     }
 
     public void testDateRangeWithToday() {
         testDateRangeWithCurrentFunctions("TODAY()", DATE_FORMAT, DateUtils.asDateOnly(TestUtils.TEST_CFG.now()));
+        testDateRangeWithCurrentFunctions_AndRangeOptimization("TODAY()", DATE_FORMAT,
+            DateUtils.asDateOnly(TestUtils.TEST_CFG.now().minusDays(2L)),
+            DateUtils.asDateOnly(TestUtils.TEST_CFG.now().plusDays(1L)));
     }
 
     public void testDateRangeWithNow() {
         testDateRangeWithCurrentFunctions("NOW()", DATE_FORMAT, TestUtils.TEST_CFG.now());
+        testDateRangeWithCurrentFunctions_AndRangeOptimization("NOW()", DATE_FORMAT,
+            TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L),
+            TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L));
     }
 
     public void testDateRangeWithCurrentTime() {
         testDateRangeWithCurrentFunctions("CURRENT_TIME()", TIME_FORMAT, TestUtils.TEST_CFG.now());
+        testDateRangeWithCurrentFunctions_AndRangeOptimization("CURRENT_TIME()", TIME_FORMAT,
+            TestUtils.TEST_CFG.now().minusDays(1L).minusSeconds(1L),
+            TestUtils.TEST_CFG.now().plusDays(1L).plusSeconds(1L));
     }
 
     private void testDateRangeWithCurrentFunctions(String function, String pattern, ZonedDateTime now) {
@@ -292,6 +307,38 @@ private void testDateRangeWithCurrentFunctions(String function, String pattern,
         assertEquals(operator.equals("=") || operator.equals("!=") || operator.equals(">="), rq.includeLower());
         assertEquals(pattern, rq.format());
     }
+
+    private void testDateRangeWithCurrentFunctions_AndRangeOptimization(String function, String pattern, ZonedDateTime lowerValue,
+            ZonedDateTime upperValue) {
+        String lowerOperator = randomFrom(new String[] {"<", "<="});
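+        // "date < function + interval" constrains the range from above, so lowerOperator
+        // determines includeUpper in the assertions below (and upperOperator, includeLower)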
+        String upperOperator = randomFrom(new String[] {">", ">="});
+        // use both date-only interval (1 DAY) and time-only interval (1 second) to cover CURRENT_TIMESTAMP and TODAY scenarios
+        String interval = "(INTERVAL 1 DAY + INTERVAL 1 SECOND)";
+
+        PhysicalPlan p = optimizeAndPlan("SELECT some.string FROM test WHERE date" + lowerOperator + function + " + " + interval +
+            " AND date " + upperOperator + function + " - " + interval);
+        assertEquals(EsQueryExec.class, p.getClass());
+        EsQueryExec eqe = (EsQueryExec) p;
+        assertEquals(1, eqe.output().size());
+        assertEquals("test.some.string", eqe.output().get(0).qualifiedName());
+        assertEquals(DataType.TEXT, eqe.output().get(0).dataType());
+
+        Query query = eqe.queryContainer().query();
+        // the range queries optimization should create a single "range" query with "from" and "to" populated with the values
+        // in the two branches of the AND condition
+        assertTrue(query instanceof RangeQuery);
+        RangeQuery rq = (RangeQuery) query;
+        assertEquals("date", rq.field());
+
+        assertEquals(DateFormatter.forPattern(pattern)
+            .format(upperValue.withNano(DateUtils.getNanoPrecision(null, upperValue.getNano()))), rq.upper());
+        assertEquals(DateFormatter.forPattern(pattern)
+            .format(lowerValue.withNano(DateUtils.getNanoPrecision(null, lowerValue.getNano()))), rq.lower());
+
+        assertEquals(lowerOperator.equals("<="), rq.includeUpper());
+        assertEquals(upperOperator.equals(">="), rq.includeLower());
+        assertEquals(pattern, rq.format());
+    }
 
     public void testTranslateDateAdd_WhereClause_Painless() {
         LogicalPlan p = plan("SELECT int FROM test WHERE DATE_ADD('quarter',int, date) > '2018-09-04'::date");