From 6e15229f6e118d07a4917e4fad9871c5ed0fe840 Mon Sep 17 00:00:00 2001
From: Daniel Mitterdorfer
Date: Fri, 2 Feb 2024 15:56:07 +0100
Subject: [PATCH 001/106] Make counted terms agg visible to profiling (#105049)
The counted-terms aggregation is defined in its own plugin. When other
plugins (such as the profiling plugin) want to use this aggregation,
class loader issues arise, such as the aggregation class not being
recognized. By moving just the aggregation code itself to the server
module while keeping everything else (including registration) in the
`mapper-counted-keyword` module, the counted-terms aggregation can also
be used from other plugins.
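For illustration, the plugin-side registration can stay roughly as
follows (a minimal sketch, assuming the builder exposes a `NAME`
constant, the now-public stream constructor, and a `PARSER` field;
this is not the verbatim plugin code):

    import java.util.List;

    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.plugins.SearchPlugin;
    import org.elasticsearch.search.aggregations.bucket.countedterms.CountedTermsAggregationBuilder;

    public class CountedKeywordMapperPlugin extends Plugin implements SearchPlugin {
        @Override
        public List<AggregationSpec> getAggregations() {
            // The builder class now lives in the server module, so other
            // plugins can reference it without loading this plugin's classes.
            return List.of(
                new AggregationSpec(
                    CountedTermsAggregationBuilder.NAME,   // assumed name constant
                    CountedTermsAggregationBuilder::new,   // stream constructor (made public here)
                    CountedTermsAggregationBuilder.PARSER  // assumed parser field
                )
            );
        }
    }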
---
server/src/main/java/module-info.java | 1 +
.../CountedTermsAggregationBuilder.java | 11 ++---
.../countedterms}/CountedTermsAggregator.java | 7 +--
.../CountedTermsAggregatorFactory.java | 7 +--
.../CountedTermsAggregatorSupplier.java | 7 +--
.../CountedKeywordMapperPlugin.java | 1 +
.../CountedTermsAggregationBuilderTests.java | 1 +
.../CountedTermsAggregatorTests.java | 1 +
.../TransportGetStackTracesAction.java | 2 +-
.../rest-api-spec/test/profiling/10_basic.yml | 44 ++++++++++++++++++-
10 files changed, 66 insertions(+), 16 deletions(-)
rename {x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword => server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms}/CountedTermsAggregationBuilder.java (93%)
rename {x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword => server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms}/CountedTermsAggregator.java (96%)
rename {x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword => server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms}/CountedTermsAggregatorFactory.java (95%)
rename {x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword => server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms}/CountedTermsAggregatorSupplier.java (81%)
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index eddc96764273c..4bc5d95f06896 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -325,6 +325,7 @@
exports org.elasticsearch.search.aggregations;
exports org.elasticsearch.search.aggregations.bucket;
exports org.elasticsearch.search.aggregations.bucket.composite;
+ exports org.elasticsearch.search.aggregations.bucket.countedterms;
exports org.elasticsearch.search.aggregations.bucket.filter;
exports org.elasticsearch.search.aggregations.bucket.geogrid;
exports org.elasticsearch.search.aggregations.bucket.global;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
similarity index 93%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
index 31be7f149831d..4f71c964ebaf9 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregationBuilder.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
@@ -56,7 +57,7 @@ public CountedTermsAggregationBuilder(String name) {
super(name);
}
- protected CountedTermsAggregationBuilder(
+ public CountedTermsAggregationBuilder(
ValuesSourceAggregationBuilder clone,
AggregatorFactories.Builder factoriesBuilder,
Map<String, Object> metadata
@@ -64,7 +65,7 @@ protected CountedTermsAggregationBuilder(
super(clone, factoriesBuilder, metadata);
}
- protected CountedTermsAggregationBuilder(StreamInput in) throws IOException {
+ public CountedTermsAggregationBuilder(StreamInput in) throws IOException {
super(in);
bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in);
}
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
similarity index 96%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
index 5e1b1e3624f00..588c53a2d1463 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
similarity index 95%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
index 3b8be76f14da8..430e28e96d5ee 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorFactory.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.rest.RestStatus;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
similarity index 81%
rename from x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java
rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
index 2817863f6b42c..979c99018e969 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorSupplier.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregatorSupplier.java
@@ -1,11 +1,12 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
*/
-package org.elasticsearch.xpack.countedkeyword;
+package org.elasticsearch.search.aggregations.bucket.countedterms;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java
index 62fb10be05f9d..43610ecede072 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java
+++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java
@@ -11,6 +11,7 @@
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
+import org.elasticsearch.search.aggregations.bucket.countedterms.CountedTermsAggregationBuilder;
import java.util.ArrayList;
import java.util.Collections;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java
index ba266e82fecc8..00740d8a0bd20 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java
+++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilderTests.java
@@ -9,6 +9,7 @@
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
+import org.elasticsearch.search.aggregations.bucket.countedterms.CountedTermsAggregationBuilder;
import java.util.Collection;
import java.util.Collections;
diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java
index 02d629c7604ac..ef11c7dd3e9d9 100644
--- a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java
+++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregatorTests.java
@@ -18,6 +18,7 @@
import org.elasticsearch.index.mapper.TestDocumentParserContext;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.bucket.countedterms.CountedTermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
import org.elasticsearch.xcontent.XContentParser;
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
index 7fac24a094aae..567c36e6b4404 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java
@@ -31,6 +31,7 @@
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation;
+import org.elasticsearch.search.aggregations.bucket.countedterms.CountedTermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplerAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
@@ -48,7 +49,6 @@
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.ObjectPath;
-import org.elasticsearch.xpack.countedkeyword.CountedTermsAggregationBuilder;
import java.time.Duration;
import java.time.Instant;
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
index 83522b05bf25d..684d554f08e58 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
@@ -15,6 +15,18 @@ setup:
wait_for_resources_created: true
timeout: "1m"
+ - do:
+ indices.create:
+ index: test-events
+ body:
+ mappings:
+ properties:
+ "@timestamp":
+ type: date
+ format: epoch_second
+ events:
+ type: counted_keyword
+
- do:
bulk:
refresh: wait_for
@@ -105,6 +117,8 @@ setup:
- {"@timestamp": "1698624000", "Executable": {"build": {"id": "c5f89ea1c68710d2a493bb604c343a92c4f8ddeb"}, "file": {"name": "vmlinux"}}, "Symbolization": {"next_time": "4852491791"}, "ecs": {"version": "1.12.0"}}
- {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}}
- {"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" }
+ - {"index": {"_index": "test-events"}}
+ - {"@timestamp": "1700504427", "events": ["S07KmaoGhvNte78xwwRbZQ"]}
---
teardown:
- do:
@@ -151,12 +165,40 @@ teardown:
- match: { stack_traces.S07KmaoGhvNte78xwwRbZQ.count: 1}
---
-"Test flamegraph":
+"Test flamegraph from profiling-events":
+ - do:
+ profiling.flamegraph:
+ body: >
+ {
+ "sample_size": 20000,
+ "requested_duration": 86400,
+ "query": {
+ "bool": {
+ "filter": [
+ {
+ "range": {
+ "@timestamp": {
+ "gte": "2023-11-20",
+ "lt": "2023-11-21",
+ "format": "yyyy-MM-dd"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ - match: { Size: 47}
+
+---
+"Test flamegraph from test-events":
- do:
profiling.flamegraph:
body: >
{
"sample_size": 20000,
+ "indices": "test-events",
+ "stacktrace_ids_field": "events",
"requested_duration": 86400,
"query": {
"bool": {
From da7bed3584bac69113778654e6ee0f82bdb4d993 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Przemys=C5=82aw=20Witek?=
Date: Fri, 2 Feb 2024 16:16:42 +0100
Subject: [PATCH 002/106] [ML] Fix handling of `ml.config_version` node
attribute (#105066)
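Nodes now always publish the `ml.config_version` node attribute, even
when ML is disabled on them, and the min/max calculation skips nodes
that do not have the attribute instead of failing cluster state
processing. A minimal sketch of the skipping logic, mirroring the
catch-clause change below (illustrative, not the verbatim code):

    MlConfigVersion min = null;
    MlConfigVersion max = null;
    for (DiscoveryNode node : nodes) {
        try {
            MlConfigVersion v = MlConfigVersion.getMlConfigVersionForNode(node);
            if (min == null || v.before(min)) {
                min = v;
            }
            if (max == null || v.after(max)) {
                max = v;
            }
        } catch (IllegalStateException e) {
            // a node after 8.10.0 with the ML plugin disabled - ignore it
        }
    }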
---
docs/changelog/105066.yaml | 5 ++
.../xpack/core/ml/MlConfigVersion.java | 2 +-
.../xpack/core/ml/MlConfigVersionTests.java | 46 +++++++++++++++++++
.../ml/integration/MlPluginDisabledIT.java | 23 ++++++++--
.../xpack/ml/MachineLearning.java | 19 +++++---
5 files changed, 84 insertions(+), 11 deletions(-)
create mode 100644 docs/changelog/105066.yaml
diff --git a/docs/changelog/105066.yaml b/docs/changelog/105066.yaml
new file mode 100644
index 0000000000000..95757a9edaf81
--- /dev/null
+++ b/docs/changelog/105066.yaml
@@ -0,0 +1,5 @@
+pr: 105066
+summary: Fix handling of `ml.config_version` node attribute for nodes with machine learning disabled
+area: Machine Learning
+type: bug
+issues: []
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java
index ffaa8489929ff..1b365bd96d834 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java
@@ -289,7 +289,7 @@ public static Tuple<MlConfigVersion, MlConfigVersion> getMinMaxMlConfigVersion(DiscoveryNodes nodes) {
if (mlConfigVersion.after(maxMlConfigVersion)) {
maxMlConfigVersion = mlConfigVersion;
}
- } catch (IllegalArgumentException e) {
+ } catch (IllegalStateException e) {
// This means we encountered a node that is after 8.10.0 but has the ML plugin disabled - ignore it
}
}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlConfigVersionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlConfigVersionTests.java
index f97d9e1f21d07..34428c303a076 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlConfigVersionTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlConfigVersionTests.java
@@ -14,6 +14,7 @@
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.node.VersionInformation;
import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.ml.utils.MlConfigVersionUtils;
import org.hamcrest.Matchers;
@@ -149,6 +150,51 @@ public void testGetMinMaxMlConfigVersion() {
assertEquals(MlConfigVersion.V_10, MlConfigVersion.getMaxMlConfigVersion(nodes));
}
+ public void testGetMinMaxMlConfigVersionWhenMlConfigVersionAttrIsMissing() {
+ Map<String, String> nodeAttr1 = Map.of(MlConfigVersion.ML_CONFIG_VERSION_NODE_ATTR, MlConfigVersion.V_7_1_0.toString());
+ Map<String, String> nodeAttr2 = Map.of(MlConfigVersion.ML_CONFIG_VERSION_NODE_ATTR, MlConfigVersion.V_8_2_0.toString());
+ Map<String, String> nodeAttr3 = Map.of();
+ DiscoveryNodes nodes = DiscoveryNodes.builder()
+ .add(
+ DiscoveryNodeUtils.builder("_node_id1")
+ .name("_node_name1")
+ .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9300))
+ .attributes(nodeAttr1)
+ .roles(ROLES_WITH_ML)
+ .version(VersionInformation.inferVersions(Version.fromString("7.2.0")))
+ .build()
+ )
+ .add(
+ DiscoveryNodeUtils.builder("_node_id2")
+ .name("_node_name2")
+ .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9301))
+ .attributes(nodeAttr2)
+ .roles(ROLES_WITH_ML)
+ .version(VersionInformation.inferVersions(Version.fromString("7.1.0")))
+ .build()
+ )
+ .add(
+ DiscoveryNodeUtils.builder("_node_id3")
+ .name("_node_name3")
+ .address(new TransportAddress(InetAddress.getLoopbackAddress(), 9302))
+ .attributes(nodeAttr3)
+ .roles(ROLES_WITH_ML)
+ .version(
+ new VersionInformation(
+ Version.V_8_11_0,
+ IndexVersion.getMinimumCompatibleIndexVersion(Version.V_8_11_0.id),
+ IndexVersion.fromId(Version.V_8_11_0.id)
+ )
+ )
+ .build()
+ )
+ .build();
+
+ assertEquals(MlConfigVersion.V_7_1_0, MlConfigVersion.getMinMlConfigVersion(nodes));
+ // _node_name3 is ignored
+ assertEquals(MlConfigVersion.V_8_2_0, MlConfigVersion.getMaxMlConfigVersion(nodes));
+ }
+
public void testGetMlConfigVersionForNode() {
DiscoveryNode node = DiscoveryNodeUtils.builder("_node_id4")
.name("_node_name4")
diff --git a/x-pack/plugin/ml/qa/disabled/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/plugin/ml/qa/disabled/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java
index 25ea9c5d2b27e..a518e0d496868 100644
--- a/x-pack/plugin/ml/qa/disabled/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java
+++ b/x-pack/plugin/ml/qa/disabled/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java
@@ -10,14 +10,19 @@
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
+import java.util.Map;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.aMapWithSize;
import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
public class MlPluginDisabledIT extends ESRestTestCase {
@@ -71,7 +76,19 @@ public void testActionsFail() throws Exception {
public void testMlFeatureReset() throws IOException {
Request request = new Request("POST", "/_features/_reset");
- Response response = client().performRequest(request);
- assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+ assertOK(client().performRequest(request));
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testAllNodesHaveMlConfigVersionAttribute() throws IOException {
+ Request request = new Request("GET", "/_nodes");
+ Response response = assertOK(client().performRequest(request));
+ var nodesMap = (Map<String, Object>) entityAsMap(response).get("nodes");
+ assertThat(nodesMap, is(aMapWithSize(greaterThanOrEqualTo(1))));
+ for (var nodeObj : nodesMap.values()) {
+ var nodeMap = (Map<String, Object>) nodeObj;
+ // We do not expect any specific version. The only important assertion is that the attribute exists.
+ assertThat(XContentMapValues.extractValue(nodeMap, "attributes", "ml.config_version"), is(notNullValue()));
+ }
}
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index 7c0004a7532e8..6916a04084285 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -829,13 +829,19 @@ public Settings additionalSettings() {
String allocatedProcessorsAttrName = "node.attr." + ALLOCATED_PROCESSORS_NODE_ATTR;
String mlConfigVersionAttrName = "node.attr." + ML_CONFIG_VERSION_NODE_ATTR;
- if (enabled == false) {
- disallowMlNodeAttributes(maxOpenJobsPerNodeNodeAttrName, machineMemoryAttrName, jvmSizeAttrName, mlConfigVersionAttrName);
- return Settings.EMPTY;
- }
-
Settings.Builder additionalSettings = Settings.builder();
- if (DiscoveryNode.hasRole(settings, DiscoveryNodeRole.ML_ROLE)) {
+
+ // The ML config version is needed for two related reasons even if ML is currently disabled on the node:
+ // 1. If ML is in use then decisions about minimum node versions need to include this node, and not
+ // having it available can cause exceptions during cluster state processing
+ // 2. It could be argued that reason 1 could be fixed by completely ignoring the node, however,
+ // then there would be a risk that ML is later enabled on an old node that was ignored, and
+ // some new ML feature that's been used is then incompatible with it
+ // The only safe approach is to consider which ML code _all_ nodes in the cluster are running, regardless
+ // of whether they currently have ML enabled.
+ addMlNodeAttribute(additionalSettings, mlConfigVersionAttrName, MlConfigVersion.CURRENT.toString());
+
+ if (enabled && DiscoveryNode.hasRole(settings, DiscoveryNodeRole.ML_ROLE)) {
addMlNodeAttribute(
additionalSettings,
machineMemoryAttrName,
@@ -859,7 +865,6 @@ public Settings additionalSettings() {
allocatedProcessorsAttrName
);
}
- addMlNodeAttribute(additionalSettings, mlConfigVersionAttrName, MlConfigVersion.CURRENT.toString());
return additionalSettings.build();
}
From 8b7c777b5407c6efdd91c3a1df4d258ed4b28903 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?=
Date: Fri, 2 Feb 2024 17:37:14 +0100
Subject: [PATCH 003/106] Validate settings before reloading JWT shared secret
(#105070)
This PR adds the missing validation before reloading JWT shared secret
settings. The shared secret setting must always be configured when the
client authentication type is `shared_secret`, and must be omitted when it is `none`.
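For illustration, the reload-time check behaves roughly like this (a
sketch only; the error messages are the ones asserted in
JwtRealmSingleNodeTests, while the helper name and signature are
assumptions for the example):

    static void validateClientAuthenticationSettings(
        String typeKey,          // e.g. xpack.security.authc.realms.jwt.jwt2.client_authentication.type
        JwtRealmSettings.ClientAuthenticationType type,
        String sharedSecretKey,  // the corresponding ...client_authentication.shared_secret key
        SecureString sharedSecret
    ) {
        if (type == JwtRealmSettings.ClientAuthenticationType.SHARED_SECRET && sharedSecret == null) {
            throw new SettingsException(
                "Missing setting for [" + sharedSecretKey + "]. It is required when setting ["
                    + typeKey + "] is [" + type.value() + "]"
            );
        }
        if (type == JwtRealmSettings.ClientAuthenticationType.NONE && sharedSecret != null) {
            throw new SettingsException(
                "Setting [" + sharedSecretKey + "] is not supported, because setting ["
                    + typeKey + "] is [" + type.value() + "]"
            );
        }
    }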
---
docs/changelog/105070.yaml | 5 +
.../xpack/security/authc/jwt/JwtRestIT.java | 13 +-
.../authc/jwt/JwtRealmSingleNodeTests.java | 119 +++++++++++++++++-
.../xpack/security/authc/jwt/JwtRealm.java | 17 ++-
4 files changed, 143 insertions(+), 11 deletions(-)
create mode 100644 docs/changelog/105070.yaml
diff --git a/docs/changelog/105070.yaml b/docs/changelog/105070.yaml
new file mode 100644
index 0000000000000..ff4c115e21eea
--- /dev/null
+++ b/docs/changelog/105070.yaml
@@ -0,0 +1,5 @@
+pr: 105070
+summary: Validate settings before reloading JWT shared secret
+area: Authentication
+type: bug
+issues: []
diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java
index 52d87c2e32c87..aef0ec95372cf 100644
--- a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java
+++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java
@@ -542,15 +542,12 @@ public void testReloadClientSecret() throws Exception {
// secret updated, so authentication succeeds
getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(newValidSharedSecret)).authenticate();
- // removing setting also works and leads to authentication failure
+ // removing the setting should not work, since it can
+ // lead to an inconsistent realm configuration
+ // and eventual authentication failures
writeSettingToKeystoreThenReload("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", null);
- assertThat(
- expectThrows(
- ResponseException.class,
- () -> getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(newValidSharedSecret)).authenticate()
- ).getResponse(),
- hasStatusCode(RestStatus.UNAUTHORIZED)
- );
+ getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(newValidSharedSecret)).authenticate();
+
} finally {
// Restore setting for other tests
writeSettingToKeystoreThenReload(
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
index c9b43afd4322d..ac033ba75798a 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java
@@ -16,6 +16,7 @@
import com.nimbusds.jwt.SignedJWT;
import org.apache.http.HttpEntity;
+import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
@@ -61,10 +62,12 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.stream.Collectors;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL;
+import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTHENTICATION_TYPE;
import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
@@ -124,7 +127,20 @@ protected Settings nodeSettings() {
.put("xpack.security.authc.realms.jwt.jwt2.claims.groups", "groups")
.put("xpack.security.authc.realms.jwt.jwt2.client_authentication.type", "shared_secret")
.put("xpack.security.authc.realms.jwt.jwt2.client_authentication.rotation_grace_period", "0s")
- .putList("xpack.security.authc.realms.jwt.jwt2.allowed_signature_algorithms", "HS256", "HS384");
+ .putList("xpack.security.authc.realms.jwt.jwt2.allowed_signature_algorithms", "HS256", "HS384")
+ // 4th JWT realm
+ .put("xpack.security.authc.realms.jwt.jwt3.order", 40)
+ .put("xpack.security.authc.realms.jwt.jwt3.token_type", "id_token")
+ .put("xpack.security.authc.realms.jwt.jwt3.allowed_issuer", "my-issuer-04")
+ .put("xpack.security.authc.realms.jwt.jwt3.allowed_subjects", "user-04")
+ .put("xpack.security.authc.realms.jwt.jwt3.allowed_audiences", "es-04")
+ .put("xpack.security.authc.realms.jwt.jwt3.claims.principal", "sub")
+ .put("xpack.security.authc.realms.jwt.jwt3.claims.groups", "groups")
+ .put("xpack.security.authc.realms.jwt.jwt3.client_authentication.type", "NONE")
+ .put(
+ "xpack.security.authc.realms.jwt.jwt3.pkc_jwkset_path",
+ getDataPath("/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json")
+ );
SecuritySettingsSource.addSecureSettings(builder, secureSettings -> {
secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.hmac_key", jwtHmacKey);
@@ -491,6 +507,103 @@ public void testClientSecretRotation() throws Exception {
}
}
+ public void testValidationDuringReloadingClientSecrets() {
+ final Map<String, JwtRealm> realmsByName = getJwtRealms().stream().collect(Collectors.toMap(Realm::name, r -> r));
+ final Set<JwtRealm> realmsWithSharedSecret = Set.of(realmsByName.get("jwt0"), realmsByName.get("jwt1"), realmsByName.get("jwt2"));
+ final JwtRealm realmWithoutSharedSecret = realmsByName.get("jwt3");
+
+ // Sanity check all client_authentication.type settings.
+ for (JwtRealm realm : realmsWithSharedSecret) {
+ assertThat(getClientAuthenticationType(realm), equalTo(JwtRealmSettings.ClientAuthenticationType.SHARED_SECRET));
+ }
+ assertThat(getClientAuthenticationType(realmWithoutSharedSecret), equalTo(JwtRealmSettings.ClientAuthenticationType.NONE));
+
+ // Randomly choose one JWT realm that requires a shared secret and omit its secret.
+ final MockSecureSettings newSecureSettings = new MockSecureSettings();
+ final JwtRealm chosenRealmToRemoveSharedSecret = randomFrom(realmsWithSharedSecret);
+ for (JwtRealm realm : realmsWithSharedSecret) {
+ if (realm != chosenRealmToRemoveSharedSecret) {
+ newSecureSettings.setString(
+ "xpack.security.authc.realms.jwt." + realm.name() + ".client_authentication.shared_secret",
+ realm.name() + "_shared_secret"
+ );
+ }
+ }
+
+ // Reload settings and check if validation prevented updating for randomly chosen realm.
+ final PluginsService plugins = getInstanceFromNode(PluginsService.class);
+ final LocalStateSecurity localStateSecurity = plugins.filterPlugins(LocalStateSecurity.class).findFirst().get();
+ final Security securityPlugin = localStateSecurity.plugins()
+ .stream()
+ .filter(p -> p instanceof Security)
+ .map(Security.class::cast)
+ .findFirst()
+ .orElseThrow(() -> new IllegalStateException("Security plugin not found!"));
+
+ Settings.Builder newSettingsBuilder = Settings.builder().setSecureSettings(newSecureSettings);
+ {
+ var e = expectThrows(ElasticsearchException.class, () -> securityPlugin.reload(newSettingsBuilder.build()));
+ assertThat(e.getMessage(), containsString("secure settings reload failed for one or more security component"));
+
+ var suppressedExceptions = e.getSuppressed();
+ assertThat(suppressedExceptions.length, equalTo(1));
+ assertThat(suppressedExceptions[0].getMessage(), containsString("secure settings reload failed for one or more realms"));
+
+ var realmSuppressedExceptions = suppressedExceptions[0].getSuppressed();
+ assertThat(realmSuppressedExceptions.length, equalTo(1));
+ assertThat(
+ realmSuppressedExceptions[0].getMessage(),
+ containsString(
+ "Missing setting for [xpack.security.authc.realms.jwt."
+ + chosenRealmToRemoveSharedSecret.name()
+ + ".client_authentication.shared_secret]. It is required when setting [xpack.security.authc.realms.jwt."
+ + chosenRealmToRemoveSharedSecret.name()
+ + ".client_authentication.type] is ["
+ + JwtRealmSettings.ClientAuthenticationType.SHARED_SECRET.value()
+ + "]"
+ )
+ );
+ }
+
+ // Add the missing required shared secret setting in order
+ // to avoid raising an exception for the realm which has
+ // client_authentication.type set to shared_secret.
+ newSecureSettings.setString(
+ "xpack.security.authc.realms.jwt." + chosenRealmToRemoveSharedSecret.name() + ".client_authentication.shared_secret",
+ chosenRealmToRemoveSharedSecret.name() + "_shared_secret"
+ );
+ // Add a shared secret for the realm which does not require it,
+ // because it has client_authentication.type set to NONE.
+ newSecureSettings.setString(
+ "xpack.security.authc.realms.jwt." + realmWithoutSharedSecret.name() + ".client_authentication.shared_secret",
+ realmWithoutSharedSecret.name() + "_shared_secret"
+ );
+
+ {
+ var e = expectThrows(ElasticsearchException.class, () -> securityPlugin.reload(newSettingsBuilder.build()));
+ assertThat(e.getMessage(), containsString("secure settings reload failed for one or more security component"));
+
+ var suppressedExceptions = e.getSuppressed();
+ assertThat(suppressedExceptions.length, equalTo(1));
+ assertThat(suppressedExceptions[0].getMessage(), containsString("secure settings reload failed for one or more realms"));
+
+ var realmSuppressedExceptions = suppressedExceptions[0].getSuppressed();
+ assertThat(realmSuppressedExceptions.length, equalTo(1));
+ assertThat(
+ realmSuppressedExceptions[0].getMessage(),
+ containsString(
+ "Setting [xpack.security.authc.realms.jwt."
+ + realmWithoutSharedSecret.name()
+ + ".client_authentication.shared_secret] is not supported, because setting [xpack.security.authc.realms.jwt."
+ + realmWithoutSharedSecret.name()
+ + ".client_authentication.type] is ["
+ + JwtRealmSettings.ClientAuthenticationType.NONE.value()
+ + "]"
+ )
+ );
+ }
+ }
+
private SignedJWT getSignedJWT(JWTClaimsSet claimsSet, byte[] hmacKeyBytes) throws Exception {
JWSHeader jwtHeader = new JWSHeader.Builder(JWSAlgorithm.HS256).build();
OctetSequenceKey.Builder jwt0signer = new OctetSequenceKey.Builder(hmacKeyBytes);
@@ -517,6 +630,10 @@ private TimeValue getGracePeriod(JwtRealm realm) {
return realm.getConfig().getConcreteSetting(CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD).get(realm.getConfig().settings());
}
+ private JwtRealmSettings.ClientAuthenticationType getClientAuthenticationType(JwtRealm realm) {
+ return realm.getConfig().getConcreteSetting(CLIENT_AUTHENTICATION_TYPE).get(realm.getConfig().settings());
+ }
+
private void assertJwtToken(JwtAuthenticationToken token, String tokenPrincipal, String sharedSecret, SignedJWT signedJWT)
throws ParseException {
assertThat(token.principal(), equalTo(tokenPrincipal));
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java
index bef342d330f34..a541eef2f07f6 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java
@@ -404,8 +404,21 @@ public void usageStats(final ActionListener<Map<String, Object>> listener) {
*/
public IndexRequestBuilder setSource(Object... source) {
- request.source(source);
+ if (source.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of objects passed must be even but was [" + source.length + "]");
+ }
+ this.sourceArray = source;
return this;
}
@@ -145,7 +187,11 @@ public IndexRequestBuilder setSource(Object... source) {
*
*/
public IndexRequestBuilder setSource(XContentType xContentType, Object... source) {
- request.source(xContentType, source);
+ if (source.length % 2 != 0) {
+ throw new IllegalArgumentException("The number of objects passed must be even but was [" + source.length + "]");
+ }
+ this.sourceArray = source;
+ this.sourceContentType = xContentType;
return this;
}
@@ -153,7 +199,7 @@ public IndexRequestBuilder setSource(XContentType xContentType, Object... source
* Sets the type of operation to perform.
*/
public IndexRequestBuilder setOpType(DocWriteRequest.OpType opType) {
- request.opType(opType);
+ this.opType = opType;
return this;
}
@@ -161,7 +207,7 @@ public IndexRequestBuilder setOpType(DocWriteRequest.OpType opType) {
* Set to {@code true} to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}.
*/
public IndexRequestBuilder setCreate(boolean create) {
- request.create(create);
+ this.create = create;
return this;
}
@@ -170,7 +216,7 @@ public IndexRequestBuilder setCreate(boolean create) {
* version exists and no changes happened on the doc since then.
*/
public IndexRequestBuilder setVersion(long version) {
- request.version(version);
+ this.version = version;
return this;
}
@@ -178,7 +224,7 @@ public IndexRequestBuilder setVersion(long version) {
* Sets the versioning type. Defaults to {@link VersionType#INTERNAL}.
*/
public IndexRequestBuilder setVersionType(VersionType versionType) {
- request.versionType(versionType);
+ this.versionType = versionType;
return this;
}
@@ -190,7 +236,7 @@ public IndexRequestBuilder setVersionType(VersionType versionType) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public IndexRequestBuilder setIfSeqNo(long seqNo) {
- request.setIfSeqNo(seqNo);
+ this.ifSeqNo = seqNo;
return this;
}
@@ -202,7 +248,7 @@ public IndexRequestBuilder setIfSeqNo(long seqNo) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public IndexRequestBuilder setIfPrimaryTerm(long term) {
- request.setIfPrimaryTerm(term);
+ this.ifPrimaryTerm = term;
return this;
}
@@ -210,7 +256,7 @@ public IndexRequestBuilder setIfPrimaryTerm(long term) {
* Sets the ingest pipeline to be executed before indexing the document
*/
public IndexRequestBuilder setPipeline(String pipeline) {
- request.setPipeline(pipeline);
+ this.pipeline = pipeline;
return this;
}
@@ -218,7 +264,7 @@ public IndexRequestBuilder setPipeline(String pipeline) {
* Sets the require_alias flag
*/
public IndexRequestBuilder setRequireAlias(boolean requireAlias) {
- request.setRequireAlias(requireAlias);
+ this.requireAlias = requireAlias;
return this;
}
@@ -226,7 +272,117 @@ public IndexRequestBuilder setRequireAlias(boolean requireAlias) {
* Sets the require_data_stream flag
*/
public IndexRequestBuilder setRequireDataStream(boolean requireDataStream) {
- request.setRequireDataStream(requireDataStream);
+ this.requireDataStream = requireDataStream;
+ return this;
+ }
+
+ public IndexRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
+ this.refreshPolicy = refreshPolicy;
return this;
}
+
+ public IndexRequestBuilder setRefreshPolicy(String refreshPolicy) {
+ this.refreshPolicyString = refreshPolicy;
+ return this;
+ }
+
+ @Override
+ public IndexRequest request() {
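+ // Build the request lazily: apply the shared base fields first, then
+ // only the values that were explicitly set on this builder.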
+ validate();
+ IndexRequest request = new IndexRequest();
+ super.apply(request);
+ request.id(id);
+ if (sourceMap != null && sourceContentType != null) {
+ request.source(sourceMap, sourceContentType);
+ } else if (sourceMap != null) {
+ request.source(sourceMap);
+ }
+ if (sourceArray != null && sourceContentType != null) {
+ request.source(sourceContentType, sourceArray);
+ } else if (sourceArray != null) {
+ request.source(sourceArray);
+ }
+ if (sourceXContentBuilder != null) {
+ request.source(sourceXContentBuilder);
+ }
+ if (sourceString != null && sourceContentType != null) {
+ request.source(sourceString, sourceContentType);
+ }
+ if (sourceBytesReference != null && sourceContentType != null) {
+ request.source(sourceBytesReference, sourceContentType);
+ }
+ if (sourceBytes != null && sourceContentType != null) {
+ if (sourceOffset != null && sourceLength != null) {
+ request.source(sourceBytes, sourceOffset, sourceLength, sourceContentType);
+ } else {
+ request.source(sourceBytes, sourceContentType);
+ }
+ }
+ if (pipeline != null) {
+ request.setPipeline(pipeline);
+ }
+ if (routing != null) {
+ request.routing(routing);
+ }
+ if (refreshPolicy != null) {
+ request.setRefreshPolicy(refreshPolicy);
+ }
+ if (refreshPolicyString != null) {
+ request.setRefreshPolicy(refreshPolicyString);
+ }
+ if (ifSeqNo != null) {
+ request.setIfSeqNo(ifSeqNo);
+ }
+ if (ifPrimaryTerm != null) {
+ request.setIfPrimaryTerm(ifPrimaryTerm);
+ }
+ if (requireAlias != null) {
+ request.setRequireAlias(requireAlias);
+ }
+ if (requireDataStream != null) {
+ request.setRequireDataStream(requireDataStream);
+ }
+ if (opType != null) {
+ request.opType(opType);
+ }
+ if (create != null) {
+ request.create(create);
+ }
+ if (version != null) {
+ request.version(version);
+ }
+ if (versionType != null) {
+ request.versionType(versionType);
+ }
+ return request;
+ }
+
+ @Override
+ protected void validate() throws IllegalStateException {
+ super.validate();
+ int sourceFieldsSet = countSourceFieldsSet();
+ if (sourceFieldsSet > 1) {
+ throw new IllegalStateException("Only one setSource() method may be called, but " + sourceFieldsSet + " have been");
+ }
+ }
+
+ /*
+ * Returns the number of source fields that are non-null (ideally this will be 1).
+ */
+ private int countSourceFieldsSet() {
+ return countNonNullObjects(sourceMap, sourceArray, sourceXContentBuilder, sourceString, sourceBytesReference, sourceBytes);
+ }
+
+ private int countNonNullObjects(Object... objects) {
+ int sum = 0;
+ for (Object object : objects) {
+ if (object != null) {
+ sum++;
+ }
+ }
+ return sum;
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
index a4d5e07103df3..94935a670afb7 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.action.support.replication;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -18,18 +18,24 @@
public abstract class ReplicationRequestBuilder<
Request extends ReplicationRequest<Request>,
Response extends ActionResponse,
- RequestBuilder extends ReplicationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestBuilder<Request, Response> {
+ RequestBuilder extends ReplicationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestLazyBuilder<
+ Request,
+ Response> {
+ private String index;
+ private TimeValue timeout;
+ private String timeoutString;
+ private ActiveShardCount waitForActiveShards;
- protected ReplicationRequestBuilder(ElasticsearchClient client, ActionType<Response> action, Request request) {
- super(client, action, request);
+ protected ReplicationRequestBuilder(ElasticsearchClient client, ActionType<Response> action) {
+ super(client, action);
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(TimeValue timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
return (RequestBuilder) this;
}
@@ -37,24 +43,28 @@ public final RequestBuilder setTimeout(TimeValue timeout) {
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(String timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(String timeout) {
+ this.timeoutString = timeout;
return (RequestBuilder) this;
}
@SuppressWarnings("unchecked")
- public final RequestBuilder setIndex(String index) {
- request.index(index);
+ public RequestBuilder setIndex(String index) {
+ this.index = index;
return (RequestBuilder) this;
}
+ public String getIndex() {
+ return index;
+ }
+
/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
@SuppressWarnings("unchecked")
public RequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
- request.waitForActiveShards(waitForActiveShards);
+ this.waitForActiveShards = waitForActiveShards;
return (RequestBuilder) this;
}
@@ -66,4 +76,25 @@ public RequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShard
public RequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
+
+ protected void apply(Request request) {
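+ // Copy only explicitly-set values onto the request so unset fields
+ // keep the underlying request's defaults.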
+ if (index != null) {
+ request.index(index);
+ }
+ if (timeout != null) {
+ request.timeout(timeout);
+ }
+ if (timeoutString != null) {
+ request.timeout(timeoutString);
+ }
+ if (waitForActiveShards != null) {
+ request.waitForActiveShards(waitForActiveShards);
+ }
+ }
+
+ protected void validate() throws IllegalStateException {
+ if (timeout != null && timeoutString != null) {
+ throw new IllegalStateException("Must use only one setTimeout method");
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
index 931f072e1e45e..1678b08aba940 100644
--- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.action.support.single.instance;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.client.internal.ElasticsearchClient;
@@ -17,26 +17,33 @@
public abstract class InstanceShardOperationRequestBuilder<
Request extends InstanceShardOperationRequest<Request>,
Response extends ActionResponse,
- RequestBuilder extends InstanceShardOperationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestBuilder<
+ RequestBuilder extends InstanceShardOperationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestLazyBuilder<
Request,
Response> {
+ private String index;
+ private TimeValue timeout;
+ private String timeoutString;
- protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, ActionType<Response> action, Request request) {
- super(client, action, request);
+ protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, ActionType<Response> action) {
+ super(client, action);
}
@SuppressWarnings("unchecked")
- public final RequestBuilder setIndex(String index) {
- request.index(index);
+ public RequestBuilder setIndex(String index) {
+ this.index = index;
return (RequestBuilder) this;
}
+ protected String getIndex() {
+ return index;
+ }
+
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(TimeValue timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
return (RequestBuilder) this;
}
@@ -44,8 +51,26 @@ public final RequestBuilder setTimeout(TimeValue timeout) {
* A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}.
*/
@SuppressWarnings("unchecked")
- public final RequestBuilder setTimeout(String timeout) {
- request.timeout(timeout);
+ public RequestBuilder setTimeout(String timeout) {
+ this.timeoutString = timeout;
return (RequestBuilder) this;
}
+
+ protected void apply(Request request) {
+ if (index != null) {
+ request.index(index);
+ }
+ if (timeout != null) {
+ request.timeout(timeout);
+ }
+ if (timeoutString != null) {
+ request.timeout(timeoutString);
+ }
+ }
+
+ protected void validate() throws IllegalStateException {
+ if (timeoutString != null && timeout != null) {
+ throw new IllegalStateException("Must use only one setTimeout method");
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
index 88bed844558f2..cbf28d6718594 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java
@@ -10,6 +10,7 @@
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
@@ -26,19 +27,65 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder {
+ private String id;
+ private String routing;
+ private Script script;
+
+ private String fetchSourceInclude;
+ private String fetchSourceExclude;
+ private String[] fetchSourceIncludeArray;
+ private String[] fetchSourceExcludeArray;
+ private Boolean fetchSource;
+
+ private Integer retryOnConflict;
+ private Long version;
+ private VersionType versionType;
+ private Long ifSeqNo;
+ private Long ifPrimaryTerm;
+ private ActiveShardCount waitForActiveShards;
+
+ private IndexRequest doc;
+ private XContentBuilder docSourceXContentBuilder;
+ private Map<String, Object> docSourceMap;
+ private XContentType docSourceXContentType;
+ private String docSourceString;
+ private byte[] docSourceBytes;
+ private Integer docSourceOffset;
+ private Integer docSourceLength;
+ private Object[] docSourceArray;
+
+ private IndexRequest upsert;
+ private XContentBuilder upsertSourceXContentBuilder;
+ private Map<String, Object> upsertSourceMap;
+ private XContentType upsertSourceXContentType;
+ private String upsertSourceString;
+ private byte[] upsertSourceBytes;
+ private Integer upsertSourceOffset;
+ private Integer upsertSourceLength;
+ private Object[] upsertSourceArray;
+
+ private Boolean docAsUpsert;
+ private Boolean detectNoop;
+ private Boolean scriptedUpsert;
+ private Boolean requireAlias;
+ private WriteRequest.RefreshPolicy refreshPolicy;
+ private String refreshPolicyString;
+
public UpdateRequestBuilder(ElasticsearchClient client) {
- super(client, TransportUpdateAction.TYPE, new UpdateRequest());
+ this(client, null, null);
}
public UpdateRequestBuilder(ElasticsearchClient client, String index, String id) {
- super(client, TransportUpdateAction.TYPE, new UpdateRequest(index, id));
+ super(client, TransportUpdateAction.TYPE);
+ setIndex(index);
+ setId(id);
}
/**
* Sets the id of the indexed document.
*/
public UpdateRequestBuilder setId(String id) {
- request.id(id);
+ this.id = id;
return this;
}
@@ -47,7 +94,7 @@ public UpdateRequestBuilder setId(String id) {
* and not the id.
*/
public UpdateRequestBuilder setRouting(String routing) {
- request.routing(routing);
+ this.routing = routing;
return this;
}
@@ -60,7 +107,7 @@ public UpdateRequestBuilder setRouting(String routing) {
*
*/
public UpdateRequestBuilder setScript(Script script) {
- request.script(script);
+ this.script = script;
return this;
}
@@ -77,7 +124,8 @@ public UpdateRequestBuilder setScript(Script script) {
* the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
- request.fetchSource(include, exclude);
+ this.fetchSourceInclude = include;
+ this.fetchSourceExclude = exclude;
return this;
}
@@ -94,7 +142,8 @@ public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable S
* filter the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
- request.fetchSource(includes, excludes);
+ this.fetchSourceIncludeArray = includes;
+ this.fetchSourceExcludeArray = excludes;
return this;
}
@@ -102,7 +151,7 @@ public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullabl
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
- request.fetchSource(fetchSource);
+ this.fetchSource = fetchSource;
return this;
}
@@ -111,7 +160,7 @@ public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
* getting it and updating it. Defaults to 0.
*/
public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
- request.retryOnConflict(retryOnConflict);
+ this.retryOnConflict = retryOnConflict;
return this;
}
@@ -120,7 +169,7 @@ public UpdateRequestBuilder setRetryOnConflict(int retryOnConflict) {
* version exists and no changes happened on the doc since then.
*/
public UpdateRequestBuilder setVersion(long version) {
- request.version(version);
+ this.version = version;
return this;
}
@@ -128,7 +177,7 @@ public UpdateRequestBuilder setVersion(long version) {
* Sets the versioning type. Defaults to {@link org.elasticsearch.index.VersionType#INTERNAL}.
*/
public UpdateRequestBuilder setVersionType(VersionType versionType) {
- request.versionType(versionType);
+ this.versionType = versionType;
return this;
}
@@ -140,7 +189,7 @@ public UpdateRequestBuilder setVersionType(VersionType versionType) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public UpdateRequestBuilder setIfSeqNo(long seqNo) {
- request.setIfSeqNo(seqNo);
+ this.ifSeqNo = seqNo;
return this;
}
@@ -152,7 +201,7 @@ public UpdateRequestBuilder setIfSeqNo(long seqNo) {
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public UpdateRequestBuilder setIfPrimaryTerm(long term) {
- request.setIfPrimaryTerm(term);
+ this.ifPrimaryTerm = term;
return this;
}
@@ -161,7 +210,7 @@ public UpdateRequestBuilder setIfPrimaryTerm(long term) {
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public UpdateRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
- request.waitForActiveShards(waitForActiveShards);
+ this.waitForActiveShards = waitForActiveShards;
return this;
}
@@ -178,7 +227,7 @@ public UpdateRequestBuilder setWaitForActiveShards(final int waitForActiveShards
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
- request.doc(indexRequest);
+ this.doc = indexRequest;
return this;
}
@@ -186,7 +235,7 @@ public UpdateRequestBuilder setDoc(IndexRequest indexRequest) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(XContentBuilder source) {
- request.doc(source);
+ this.docSourceXContentBuilder = source;
return this;
}
@@ -194,7 +243,7 @@ public UpdateRequestBuilder setDoc(XContentBuilder source) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map<String, Object> source) {
- request.doc(source);
+ this.docSourceMap = source;
return this;
}
@@ -202,7 +251,8 @@ public UpdateRequestBuilder setDoc(Map source) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(Map<String, Object> source, XContentType contentType) {
- request.doc(source, contentType);
+ this.docSourceMap = source;
+ this.docSourceXContentType = contentType;
return this;
}
@@ -210,7 +260,8 @@ public UpdateRequestBuilder setDoc(Map source, XContentType cont
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(String source, XContentType xContentType) {
- request.doc(source, xContentType);
+ this.docSourceString = source;
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -218,7 +269,8 @@ public UpdateRequestBuilder setDoc(String source, XContentType xContentType) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source, XContentType xContentType) {
- request.doc(source, xContentType);
+ this.docSourceBytes = source;
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -226,7 +278,10 @@ public UpdateRequestBuilder setDoc(byte[] source, XContentType xContentType) {
* Sets the doc to use for updates when a script is not specified.
*/
public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, XContentType xContentType) {
- request.doc(source, offset, length, xContentType);
+ this.docSourceBytes = source;
+ this.docSourceOffset = offset;
+ this.docSourceLength = length;
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -235,7 +290,7 @@ public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, XConte
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(Object... source) {
- request.doc(source);
+ this.docSourceArray = source;
return this;
}
@@ -244,7 +299,8 @@ public UpdateRequestBuilder setDoc(Object... source) {
* is a field and value pairs.
*/
public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) {
- request.doc(xContentType, source);
+ this.docSourceArray = source;
+ this.docSourceXContentType = xContentType;
return this;
}
@@ -253,7 +309,7 @@ public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source)
* {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/
public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
- request.upsert(indexRequest);
+ this.upsert = indexRequest;
return this;
}
@@ -261,7 +317,7 @@ public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) {
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(XContentBuilder source) {
- request.upsert(source);
+ this.upsertSourceXContentBuilder = source;
return this;
}
@@ -269,7 +325,7 @@ public UpdateRequestBuilder setUpsert(XContentBuilder source) {
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map<String, Object> source) {
- request.upsert(source);
+ this.upsertSourceMap = source;
return this;
}
@@ -277,7 +333,8 @@ public UpdateRequestBuilder setUpsert(Map source) {
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(Map<String, Object> source, XContentType contentType) {
- request.upsert(source, contentType);
+ this.upsertSourceMap = source;
+ this.upsertSourceXContentType = contentType;
return this;
}
@@ -285,7 +342,8 @@ public UpdateRequestBuilder setUpsert(Map source, XContentType c
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(String source, XContentType xContentType) {
- request.upsert(source, xContentType);
+ this.upsertSourceString = source;
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -293,7 +351,8 @@ public UpdateRequestBuilder setUpsert(String source, XContentType xContentType)
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source, XContentType xContentType) {
- request.upsert(source, xContentType);
+ this.upsertSourceBytes = source;
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -301,7 +360,10 @@ public UpdateRequestBuilder setUpsert(byte[] source, XContentType xContentType)
* Sets the doc source of the update request to be used when the document does not exist.
*/
public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, XContentType xContentType) {
- request.upsert(source, offset, length, xContentType);
+ this.upsertSourceBytes = source;
+ this.upsertSourceOffset = offset;
+ this.upsertSourceLength = length;
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -310,7 +372,7 @@ public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, XCo
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(Object... source) {
- request.upsert(source);
+ this.upsertSourceArray = source;
return this;
}
@@ -319,7 +381,8 @@ public UpdateRequestBuilder setUpsert(Object... source) {
* includes field and value pairs.
*/
public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... source) {
- request.upsert(xContentType, source);
+ this.upsertSourceArray = source;
+ this.upsertSourceXContentType = xContentType;
return this;
}
@@ -327,7 +390,7 @@ public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... sourc
* Sets whether the specified doc parameter should be used as the upsert document.
*/
public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
- request.docAsUpsert(shouldUpsertDoc);
+ this.docAsUpsert = shouldUpsertDoc;
return this;
}
@@ -336,7 +399,7 @@ public UpdateRequestBuilder setDocAsUpsert(boolean shouldUpsertDoc) {
* Defaults to true.
*/
public UpdateRequestBuilder setDetectNoop(boolean detectNoop) {
- request.detectNoop(detectNoop);
+ this.detectNoop = detectNoop;
return this;
}
@@ -344,7 +407,7 @@ public UpdateRequestBuilder setDetectNoop(boolean detectNoop) {
* Sets whether the script should be run in the case of an insert
*/
public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) {
- request.scriptedUpsert(scriptedUpsert);
+ this.scriptedUpsert = scriptedUpsert;
return this;
}
@@ -352,7 +415,183 @@ public UpdateRequestBuilder setScriptedUpsert(boolean scriptedUpsert) {
* Sets the require_alias flag
*/
public UpdateRequestBuilder setRequireAlias(boolean requireAlias) {
- request.setRequireAlias(requireAlias);
- return this;
+ this.requireAlias = requireAlias;
+ return this;
+ }
+
+ @Override
+ public UpdateRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
+ this.refreshPolicy = refreshPolicy;
+ return this;
+ }
+
+ @Override
+ public UpdateRequestBuilder setRefreshPolicy(String refreshPolicy) {
+ this.refreshPolicyString = refreshPolicy;
+ return this;
+ }
+
+ @Override
+ public UpdateRequest request() {
+ validate();
+ UpdateRequest request = new UpdateRequest();
+ super.apply(request);
+ if (id != null) {
+ request.id(id);
+ }
+ if (routing != null) {
+ request.routing(routing);
+ }
+ if (script != null) {
+ request.script(script);
+ }
+ if (fetchSourceInclude != null || fetchSourceExclude != null) {
+ request.fetchSource(fetchSourceInclude, fetchSourceExclude);
+ }
+ if (fetchSourceIncludeArray != null || fetchSourceExcludeArray != null) {
+ request.fetchSource(fetchSourceIncludeArray, fetchSourceExcludeArray);
+ }
+ if (fetchSource != null) {
+ request.fetchSource(fetchSource);
+ }
+ if (retryOnConflict != null) {
+ request.retryOnConflict(retryOnConflict);
+ }
+ if (version != null) {
+ request.version(version);
+ }
+ if (versionType != null) {
+ request.versionType(versionType);
+ }
+ if (ifSeqNo != null) {
+ request.setIfSeqNo(ifSeqNo);
+ }
+ if (ifPrimaryTerm != null) {
+ request.setIfPrimaryTerm(ifPrimaryTerm);
+ }
+ if (waitForActiveShards != null) {
+ request.waitForActiveShards(waitForActiveShards);
+ }
+ if (doc != null) {
+ request.doc(doc);
+ }
+ if (docSourceXContentBuilder != null) {
+ request.doc(docSourceXContentBuilder);
+ }
+ if (docSourceMap != null) {
+ if (docSourceXContentType == null) {
+ request.doc(docSourceMap);
+ } else {
+ request.doc(docSourceMap, docSourceXContentType);
+ }
+ }
+ if (docSourceString != null && docSourceXContentType != null) {
+ request.doc(docSourceString, docSourceXContentType);
+ }
+ if (docSourceBytes != null && docSourceXContentType != null) {
+ if (docSourceOffset != null && docSourceLength != null) {
+ request.doc(docSourceBytes, docSourceOffset, docSourceLength, docSourceXContentType);
+ } else {
+ request.doc(docSourceBytes, docSourceXContentType);
+ }
+ }
+ if (docSourceArray != null) {
+ if (docSourceXContentType == null) {
+ request.doc(docSourceArray);
+ } else {
+ request.doc(docSourceXContentType, docSourceArray);
+ }
+ }
+ if (upsert != null) {
+ request.upsert(upsert);
+ }
+ if (upsertSourceXContentBuilder != null) {
+ request.upsert(upsertSourceXContentBuilder);
+ }
+ if (upsertSourceMap != null) {
+ if (upsertSourceXContentType == null) {
+ request.upsert(upsertSourceMap);
+ } else {
+ request.upsert(upsertSourceMap, upsertSourceXContentType);
+ }
+ }
+ if (upsertSourceString != null && upsertSourceXContentType != null) {
+ request.upsert(upsertSourceString, upsertSourceXContentType);
+ }
+ if (upsertSourceBytes != null && upsertSourceXContentType != null) {
+ if (upsertSourceOffset != null && upsertSourceLength != null) {
+ request.upsert(upsertSourceBytes, upsertSourceOffset, upsertSourceLength, upsertSourceXContentType);
+ } else {
+ request.upsert(upsertSourceBytes, upsertSourceXContentType);
+ }
+ }
+ if (upsertSourceArray != null) {
+ if (upsertSourceXContentType == null) {
+ request.upsert(upsertSourceArray);
+ } else {
+ request.upsert(upsertSourceXContentType, upsertSourceArray);
+ }
+ }
+ if (docAsUpsert != null) {
+ request.docAsUpsert(docAsUpsert);
+ }
+ if (detectNoop != null) {
+ request.detectNoop(detectNoop);
+ }
+ if (scriptedUpsert != null) {
+ request.scriptedUpsert(scriptedUpsert);
+ }
+ if (requireAlias != null) {
+ request.setRequireAlias(requireAlias);
+ }
+ if (refreshPolicy != null) {
+ request.setRefreshPolicy(refreshPolicy);
+ }
+ if (refreshPolicyString != null) {
+ request.setRefreshPolicy(refreshPolicyString);
+ }
+ return request;
+ }
+
+ @Override
+ protected void validate() throws IllegalStateException {
+ super.validate();
+ boolean fetchIncludeExcludeNotNull = fetchSourceInclude != null || fetchSourceExclude != null;
+ boolean fetchIncludeExcludeArrayNotNull = fetchSourceIncludeArray != null || fetchSourceExcludeArray != null;
+ boolean fetchSourceNotNull = fetchSource != null;
+ if ((fetchIncludeExcludeNotNull && fetchIncludeExcludeArrayNotNull)
+ || (fetchIncludeExcludeNotNull && fetchSourceNotNull)
+ || (fetchIncludeExcludeArrayNotNull && fetchSourceNotNull)) {
+ throw new IllegalStateException("Only one fetchSource() method may be called");
+ }
+ int docSourceFieldsSet = countDocSourceFieldsSet();
+ if (docSourceFieldsSet > 1) {
+ throw new IllegalStateException("Only one setDoc() method may be called, but " + docSourceFieldsSet + " have been");
+ }
+ int upsertSourceFieldsSet = countUpsertSourceFieldsSet();
+ if (upsertSourceFieldsSet > 1) {
+ throw new IllegalStateException("Only one setUpsert() method may be called, but " + upsertSourceFieldsSet + " have been");
+ }
+ }
+
+ private int countDocSourceFieldsSet() {
+ return countNonNullObjects(doc, docSourceXContentBuilder, docSourceMap, docSourceString, docSourceBytes, docSourceArray);
+ }
+
+ private int countUpsertSourceFieldsSet() {
+ return countNonNullObjects(
+ upsert,
+ upsertSourceXContentBuilder,
+ upsertSourceMap,
+ upsertSourceString,
+ upsertSourceBytes,
+ upsertSourceArray
+ );
+ }
+
+ private int countNonNullObjects(Object... objects) {
+ int sum = 0;
+ for (Object object : objects) {
+ if (object != null) {
+ sum++;
+ }
+ }
+ return sum;
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
index ffafc1be6a7ba..0aeb64d5b250f 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequestBuilder.java
@@ -8,7 +8,7 @@
package org.elasticsearch.index.reindex;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionRequestLazyBuilder;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -16,20 +16,44 @@
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE;
+import static org.elasticsearch.index.reindex.AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT;
public abstract class AbstractBulkByScrollRequestBuilder<
Request extends AbstractBulkByScrollRequest<Request>,
- Self extends AbstractBulkByScrollRequestBuilder<Request, Self>> extends ActionRequestBuilder<Request, BulkByScrollResponse> {
+ Self extends AbstractBulkByScrollRequestBuilder<Request, Self>> extends ActionRequestLazyBuilder<Request, BulkByScrollResponse> {
private final SearchRequestBuilder source;
+ private Integer maxDocs;
+ private Boolean abortOnVersionConflict;
+ private Boolean refresh;
+ private TimeValue timeout;
+ private ActiveShardCount waitForActiveShards;
+ private TimeValue retryBackoffInitialTime;
+ private Integer maxRetries;
+ private Float requestsPerSecond;
+ private Boolean shouldStoreResult;
+ private Integer slices;
protected AbstractBulkByScrollRequestBuilder(
ElasticsearchClient client,
ActionType<BulkByScrollResponse> action,
- SearchRequestBuilder source,
- Request request
+ SearchRequestBuilder source
) {
- super(client, action, request);
+ super(client, action);
this.source = source;
+ initSourceSearchRequest();
+ }
+
+ /*
+ * The following is normally done within the AbstractBulkByScrollRequest constructor. But that constructor is not invoked until
+ * request() is called on the completed builder, and running it that late would blow away any changes already made to the source request.
+ */
+ private void initSourceSearchRequest() {
+ source.request().scroll(DEFAULT_SCROLL_TIMEOUT);
+ source.request().source(new SearchSourceBuilder());
+ source.request().source().size(DEFAULT_SCROLL_SIZE);
}
protected abstract Self self();
@@ -73,7 +97,7 @@ public Self size(int size) {
* documents.
*/
public Self maxDocs(int maxDocs) {
- request.setMaxDocs(maxDocs);
+ this.maxDocs = maxDocs;
return self();
}
@@ -81,7 +105,7 @@ public Self maxDocs(int maxDocs) {
* Set whether or not version conflicts cause the action to abort.
*/
public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return self();
}
@@ -89,7 +113,7 @@ public Self abortOnVersionConflict(boolean abortOnVersionConflict) {
* Call refresh on the indexes we've written to after the request ends?
*/
public Self refresh(boolean refresh) {
- request.setRefresh(refresh);
+ this.refresh = refresh;
return self();
}
@@ -97,7 +121,7 @@ public Self refresh(boolean refresh) {
* Timeout to wait for the shards to be available for each bulk request.
*/
public Self timeout(TimeValue timeout) {
- request.setTimeout(timeout);
+ this.timeout = timeout;
return self();
}
@@ -106,7 +130,7 @@ public Self timeout(TimeValue timeout) {
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public Self waitForActiveShards(ActiveShardCount activeShardCount) {
- request.setWaitForActiveShards(activeShardCount);
+ this.waitForActiveShards = activeShardCount;
return self();
}
@@ -115,7 +139,7 @@ public Self waitForActiveShards(ActiveShardCount activeShardCount) {
* is about one minute per bulk request. Once the entire bulk request is successful the retry counter resets.
*/
public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
- request.setRetryBackoffInitialTime(retryBackoffInitialTime);
+ this.retryBackoffInitialTime = retryBackoffInitialTime;
return self();
}
@@ -123,7 +147,7 @@ public Self setRetryBackoffInitialTime(TimeValue retryBackoffInitialTime) {
* Total number of retries attempted for rejections. There is no way to ask for unlimited retries.
*/
public Self setMaxRetries(int maxRetries) {
- request.setMaxRetries(maxRetries);
+ this.maxRetries = maxRetries;
return self();
}
@@ -133,7 +157,7 @@ public Self setMaxRetries(int maxRetries) {
* make sure that it contains any time that we might wait.
*/
public Self setRequestsPerSecond(float requestsPerSecond) {
- request.setRequestsPerSecond(requestsPerSecond);
+ this.requestsPerSecond = requestsPerSecond;
return self();
}
@@ -141,7 +165,7 @@ public Self setRequestsPerSecond(float requestsPerSecond) {
* Should this task store its result after it has finished?
*/
public Self setShouldStoreResult(boolean shouldStoreResult) {
- request.setShouldStoreResult(shouldStoreResult);
+ this.shouldStoreResult = shouldStoreResult;
return self();
}
@@ -149,7 +173,40 @@ public Self setShouldStoreResult(boolean shouldStoreResult) {
* The number of slices this task should be divided into. Defaults to 1 meaning the task isn't sliced into subtasks.
*/
public Self setSlices(int slices) {
- request.setSlices(slices);
+ this.slices = slices;
return self();
}
+
+ protected void apply(Request request) {
+ if (maxDocs != null) {
+ request.setMaxDocs(maxDocs);
+ }
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ if (refresh != null) {
+ request.setRefresh(refresh);
+ }
+ if (timeout != null) {
+ request.setTimeout(timeout);
+ }
+ if (waitForActiveShards != null) {
+ request.setWaitForActiveShards(waitForActiveShards);
+ }
+ if (retryBackoffInitialTime != null) {
+ request.setRetryBackoffInitialTime(retryBackoffInitialTime);
+ }
+ if (maxRetries != null) {
+ request.setMaxRetries(maxRetries);
+ }
+ if (requestsPerSecond != null) {
+ request.setRequestsPerSecond(requestsPerSecond);
+ }
+ if (shouldStoreResult != null) {
+ request.setShouldStoreResult(shouldStoreResult);
+ }
+ if (slices != null) {
+ request.setSlices(slices);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
index 53e878b643517..30114b1472dd5 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkIndexByScrollRequestBuilder.java
@@ -16,21 +16,29 @@
public abstract class AbstractBulkIndexByScrollRequestBuilder<
Request extends AbstractBulkIndexByScrollRequest<Request>,
Self extends AbstractBulkIndexByScrollRequestBuilder<Request, Self>> extends AbstractBulkByScrollRequestBuilder<Request, Self> {
+ private Script script;
protected AbstractBulkIndexByScrollRequestBuilder(
ElasticsearchClient client,
ActionType<BulkByScrollResponse> action,
- SearchRequestBuilder search,
- Request request
+ SearchRequestBuilder search
) {
- super(client, action, search, request);
+ super(client, action, search);
}
/**
* Script to modify the documents before they are processed.
*/
public Self script(Script script) {
- request.setScript(script);
+ this.script = script;
return self();
}
+
+ @Override
+ public void apply(Request request) {
+ super.apply(request);
+ if (script != null) {
+ request.setScript(script);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
index 85424c2eef7d2..6243859ec0e33 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java
@@ -60,7 +60,7 @@ public DeleteByQueryRequest(StreamInput in) throws IOException {
super(in);
}
- private DeleteByQueryRequest(SearchRequest search, boolean setDefaults) {
+ DeleteByQueryRequest(SearchRequest search, boolean setDefaults) {
super(search, setDefaults);
// Delete-By-Query does not require the source
if (setDefaults) {
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
index 49d3c660a4b68..3452c6659a392 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequestBuilder.java
@@ -8,17 +8,21 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
public class DeleteByQueryRequestBuilder extends AbstractBulkByScrollRequestBuilder<DeleteByQueryRequest, DeleteByQueryRequestBuilder> {
+ private Boolean abortOnVersionConflict;
+
public DeleteByQueryRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client));
}
private DeleteByQueryRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search) {
- super(client, DeleteByQueryAction.INSTANCE, search, new DeleteByQueryRequest(search.request()));
+ super(client, DeleteByQueryAction.INSTANCE, search);
+ source().setFetchSource(false);
}
@Override
@@ -28,7 +32,33 @@ protected DeleteByQueryRequestBuilder self() {
@Override
public DeleteByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return this;
}
+
+ @Override
+ public DeleteByQueryRequest request() {
+ SearchRequest search = source().request();
+ try {
+ DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(search, false);
+ try {
+ apply(deleteByQueryRequest);
+ return deleteByQueryRequest;
+ } catch (Exception e) {
+ deleteByQueryRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ search.decRef();
+ throw e;
+ }
+ }
+
+ @Override
+ public void apply(DeleteByQueryRequest request) {
+ super.apply(request);
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
index a1f741d7d51d6..683ec75c57d76 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java
@@ -68,7 +68,7 @@ public ReindexRequest() {
this(search, destination, true);
}
- private ReindexRequest(SearchRequest search, IndexRequest destination, boolean setDefaults) {
+ ReindexRequest(SearchRequest search, IndexRequest destination, boolean setDefaults) {
super(search, setDefaults);
this.destination = destination;
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
index 88a851bee15e0..156b39d608654 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java
@@ -8,20 +8,23 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
public class ReindexRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexRequestBuilder> {
- private final IndexRequestBuilder destination;
+ private final IndexRequestBuilder destinationBuilder;
+ private RemoteInfo remoteInfo;
public ReindexRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client), new IndexRequestBuilder(client));
}
private ReindexRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search, IndexRequestBuilder destination) {
- super(client, ReindexAction.INSTANCE, search, new ReindexRequest(search.request(), destination.request()));
- this.destination = destination;
+ super(client, ReindexAction.INSTANCE, search);
+ this.destinationBuilder = destination;
}
@Override
@@ -30,14 +33,14 @@ protected ReindexRequestBuilder self() {
}
public IndexRequestBuilder destination() {
- return destination;
+ return destinationBuilder;
}
/**
* Set the destination index.
*/
public ReindexRequestBuilder destination(String index) {
- destination.setIndex(index);
+ destinationBuilder.setIndex(index);
return this;
}
@@ -45,7 +48,34 @@ public ReindexRequestBuilder destination(String index) {
* Setup reindexing from a remote cluster.
*/
public ReindexRequestBuilder setRemoteInfo(RemoteInfo remoteInfo) {
- request().setRemoteInfo(remoteInfo);
+ this.remoteInfo = remoteInfo;
return this;
}
+
+ @Override
+ public ReindexRequest request() {
+ SearchRequest source = source().request();
+ try {
+ IndexRequest destination = destinationBuilder.request();
+ try {
+ ReindexRequest reindexRequest = new ReindexRequest(source, destination, false);
+ try {
+ super.apply(reindexRequest);
+ if (remoteInfo != null) {
+ reindexRequest.setRemoteInfo(remoteInfo);
+ }
+ return reindexRequest;
+ } catch (Exception e) {
+ reindexRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ destination.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ source.decRef();
+ throw e;
+ }
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
index d30b54fdafd42..44b959074ed76 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java
@@ -52,7 +52,7 @@ public UpdateByQueryRequest(StreamInput in) throws IOException {
pipeline = in.readOptionalString();
}
- private UpdateByQueryRequest(SearchRequest search, boolean setDefaults) {
+ UpdateByQueryRequest(SearchRequest search, boolean setDefaults) {
super(search, setDefaults);
}
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
index b63ebdf1def86..270014d6ab3f2 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequestBuilder.java
@@ -8,6 +8,7 @@
package org.elasticsearch.index.reindex;
+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
@@ -15,12 +16,15 @@ public class UpdateByQueryRequestBuilder extends AbstractBulkIndexByScrollReques
UpdateByQueryRequest,
UpdateByQueryRequestBuilder> {
+ private Boolean abortOnVersionConflict;
+ private String pipeline;
+
public UpdateByQueryRequestBuilder(ElasticsearchClient client) {
this(client, new SearchRequestBuilder(client));
}
private UpdateByQueryRequestBuilder(ElasticsearchClient client, SearchRequestBuilder search) {
- super(client, UpdateByQueryAction.INSTANCE, search, new UpdateByQueryRequest(search.request()));
+ super(client, UpdateByQueryAction.INSTANCE, search);
}
@Override
@@ -30,12 +34,41 @@ protected UpdateByQueryRequestBuilder self() {
@Override
public UpdateByQueryRequestBuilder abortOnVersionConflict(boolean abortOnVersionConflict) {
- request.setAbortOnVersionConflict(abortOnVersionConflict);
+ this.abortOnVersionConflict = abortOnVersionConflict;
return this;
}
public UpdateByQueryRequestBuilder setPipeline(String pipeline) {
- request.setPipeline(pipeline);
+ this.pipeline = pipeline;
return this;
}
+
+ @Override
+ public UpdateByQueryRequest request() {
+ SearchRequest search = source().request();
+ try {
+ UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(search, false);
+ try {
+ apply(updateByQueryRequest);
+ return updateByQueryRequest;
+ } catch (Exception e) {
+ updateByQueryRequest.decRef();
+ throw e;
+ }
+ } catch (Exception e) {
+ search.decRef();
+ throw e;
+ }
+ }
+
+ @Override
+ public void apply(UpdateByQueryRequest request) {
+ super.apply(request);
+ if (abortOnVersionConflict != null) {
+ request.setAbortOnVersionConflict(abortOnVersionConflict);
+ }
+ if (pipeline != null) {
+ request.setPipeline(pipeline);
+ }
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/action/delete/DeleteRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/delete/DeleteRequestBuilderTests.java
new file mode 100644
index 0000000000000..0a59dac833ca9
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/delete/DeleteRequestBuilderTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.delete;
+
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+
+public class DeleteRequestBuilderTests extends ESTestCase {
+
+ public void testValidation() {
+ DeleteRequestBuilder deleteRequestBuilder = new DeleteRequestBuilder(null, randomAlphaOfLength(10));
+ deleteRequestBuilder.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()).toString());
+ deleteRequestBuilder.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()));
+ expectThrows(IllegalStateException.class, deleteRequestBuilder::request);
+
+ deleteRequestBuilder = new DeleteRequestBuilder(null, randomAlphaOfLength(10));
+ deleteRequestBuilder.setTimeout(randomTimeValue());
+ deleteRequestBuilder.setTimeout(TimeValue.timeValueSeconds(randomIntBetween(1, 30)));
+ expectThrows(IllegalStateException.class, deleteRequestBuilder::request);
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
index 9af522524abc9..778bd6a1d138e 100644
--- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java
@@ -9,6 +9,7 @@
package org.elasticsearch.action.index;
import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.threadpool.TestThreadPool;
@@ -82,4 +83,20 @@ public void testSetSource() throws Exception {
indexRequestBuilder.setSource(doc);
assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
}
+
+ public void testValidation() {
+ IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(this.testClient);
+ Map<String, Object> source = new HashMap<>();
+ source.put("SomeKey", "SomeValue");
+ indexRequestBuilder.setSource(source);
+ assertNotNull(indexRequestBuilder.request());
+ indexRequestBuilder.setSource("SomeKey", "SomeValue");
+ expectThrows(IllegalStateException.class, indexRequestBuilder::request);
+
+ indexRequestBuilder = new IndexRequestBuilder(this.testClient);
+ indexRequestBuilder.setTimeout(randomTimeValue());
+ assertNotNull(indexRequestBuilder.request());
+ indexRequestBuilder.setTimeout(TimeValue.timeValueSeconds(randomIntBetween(1, 30)));
+ expectThrows(IllegalStateException.class, indexRequestBuilder::request);
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java
new file mode 100644
index 0000000000000..8091daed2b5b0
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestBuilderTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Map;
+
+public class UpdateRequestBuilderTests extends ESTestCase {
+
+ public void testValidation() {
+ UpdateRequestBuilder updateRequestBuilder = new UpdateRequestBuilder(null);
+ updateRequestBuilder.setFetchSource(randomAlphaOfLength(10), randomAlphaOfLength(10));
+ updateRequestBuilder.setFetchSource(true);
+ expectThrows(IllegalStateException.class, updateRequestBuilder::request);
+
+ updateRequestBuilder = new UpdateRequestBuilder(null);
+ updateRequestBuilder.setTimeout(randomTimeValue());
+ updateRequestBuilder.setTimeout(TimeValue.timeValueSeconds(randomIntBetween(1, 30)));
+ expectThrows(IllegalStateException.class, updateRequestBuilder::request);
+
+ updateRequestBuilder = new UpdateRequestBuilder(null);
+ updateRequestBuilder.setDoc("key", "value");
+ updateRequestBuilder.setDoc(Map.of("key", "value"));
+ expectThrows(IllegalStateException.class, updateRequestBuilder::request);
+
+ updateRequestBuilder = new UpdateRequestBuilder(null);
+ updateRequestBuilder.setUpsert("key", "value");
+ updateRequestBuilder.setUpsert(Map.of("key", "value"));
+ expectThrows(IllegalStateException.class, updateRequestBuilder::request);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 65b28ad874431..82e02ec1db70f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1649,7 +1649,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma
Set<String> indices = new HashSet<>();
builders = new ArrayList<>(builders);
for (IndexRequestBuilder builder : builders) {
- indices.add(builder.request().index());
+ indices.add(builder.getIndex());
}
Set<List<String>> bogusIds = new HashSet<>(); // (index, type, id)
if (random.nextBoolean() && builders.isEmpty() == false && dummyDocuments) {
From 40a61abb952e0cb2f3c49124a2af7ad1029b40e1 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Sat, 3 Feb 2024 18:34:03 -0800
Subject: [PATCH 007/106] Awaits fix #105104
---
.../aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java | 2 ++
1 file changed, 2 insertions(+)
diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
index b1db2f8a7d3a1..278f905e3900b 100644
--- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
+++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
@@ -8,6 +8,7 @@
package org.elasticsearch.aggregations.bucket;
+import org.apache.lucene.tests.util.LuceneTestCase;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@@ -43,6 +44,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105104")
public class TimeSeriesTsidHashCardinalityIT extends ESSingleNodeTestCase {
private static final String START_TIME = "2021-01-01T00:00:00Z";
private static final String END_TIME = "2021-12-31T23:59:59Z";
From e1488a0fc7ddc2ba01ca72f92409c8a6dd92a10c Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Sat, 3 Feb 2024 20:48:11 -0800
Subject: [PATCH 008/106] Fix compilation of example rest handler (#105101)
---
.../example/resthandler/ExampleRestHandlerPlugin.java | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java
index e142ba80147e0..a820973c19ca3 100644
--- a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java
+++ b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java
@@ -15,12 +15,14 @@
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import java.util.List;
+import java.util.function.Predicate;
import java.util.function.Supplier;
import static java.util.Collections.singletonList;
@@ -35,8 +37,8 @@ public List getRestHandlers(final Settings settings,
final IndexScopedSettings indexScopedSettings,
final SettingsFilter settingsFilter,
final IndexNameExpressionResolver indexNameExpressionResolver,
- final Supplier<DiscoveryNodes> nodesInCluster) {
-
+ final Supplier<DiscoveryNodes> nodesInCluster,
+ final Predicate<NodeFeature> clusterSupportsFeature) {
return singletonList(new ExampleCatAction());
}
}
From 552d2f563b31df1d42e2d927efc8e9b17310bc21 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Sun, 4 Feb 2024 19:21:50 +1100
Subject: [PATCH 009/106] Expose OperationPurpose via CustomQueryParameter to
s3 logs (#105044)
This PR adds the OperationPurpose as a custom query parameter for each
S3 request so that it is available in S3 access logs.
Resolves: ES-7750
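
For illustration, a minimal sketch of the tagging pattern this change applies
to every outgoing request (the request instance and variable names here are
hypothetical; the calls mirror the configureRequestForMetrics helper added to
S3BlobStore below):

    // Sketch only: tag one S3 request with its metrics collector and purpose.
    final GetObjectRequest getObjectRequest = new GetObjectRequest(bucket, blobKey);
    getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose));
    // "x-purpose" is appended to the signed request URL, which is what makes
    // the purpose visible in S3 server access logs.
    getObjectRequest.putCustomQueryParameter("x-purpose", purpose.getKey());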
---
docs/changelog/105044.yaml | 5 +
.../s3/S3BlobStoreRepositoryTests.java | 65 +++++++++++--
.../repositories/s3/S3BlobContainer.java | 38 ++++----
.../repositories/s3/S3BlobStore.java | 21 ++++-
.../s3/S3RetryingInputStream.java | 3 +-
.../s3/S3BlobContainerRetriesTests.java | 33 ++++---
.../main/java/fixture/s3/S3HttpHandler.java | 91 +++++++++++++++----
7 files changed, 199 insertions(+), 57 deletions(-)
create mode 100644 docs/changelog/105044.yaml
diff --git a/docs/changelog/105044.yaml b/docs/changelog/105044.yaml
new file mode 100644
index 0000000000000..5a9a11f928f98
--- /dev/null
+++ b/docs/changelog/105044.yaml
@@ -0,0 +1,5 @@
+pr: 105044
+summary: Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
index 9ad2c57b7f585..1d9a5fee31d39 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java
@@ -31,6 +31,8 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.Maps;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.IndexVersion;
@@ -72,6 +74,7 @@
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@@ -246,7 +249,7 @@ public void testMetrics() throws Exception {
assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs);
assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get());
- final Map<String, Long> aggregatedMetrics = new HashMap<>();
+ final Map<S3BlobStore.StatsKey, Long> aggregatedMetrics = new HashMap<>();
// Compare collected stats and metrics for each node and they should be the same
for (var nodeName : internalCluster().getNodeNames()) {
final BlobStoreRepository blobStoreRepository;
@@ -293,13 +296,12 @@ public void testMetrics() throws Exception {
metric.getLong(),
equalTo(statsCollectors.get(statsKey).counter.sum())
);
-
- aggregatedMetrics.compute(operation.getKey(), (k, v) -> v == null ? metric.getLong() : v + metric.getLong());
+ aggregatedMetrics.compute(statsKey, (k, v) -> v == null ? metric.getLong() : v + metric.getLong());
});
}
// Metrics number should be consistent with server side request count as well.
- assertThat(aggregatedMetrics, equalTo(getMockRequestCounts()));
+ assertThat(aggregatedMetrics, equalTo(getServerMetrics()));
}
public void testRequestStatsWithOperationPurposes() throws IOException {
@@ -423,6 +425,18 @@ public void testEnforcedCooldownPeriod() throws IOException {
assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos()));
}
+ private Map<S3BlobStore.StatsKey, Long> getServerMetrics() {
+ for (HttpHandler h : handlers.values()) {
+ while (h instanceof DelegatingHttpHandler) {
+ if (h instanceof S3StatsCollectorHttpHandler s3StatsCollectorHttpHandler) {
+ return Maps.transformValues(s3StatsCollectorHttpHandler.getMetricsCount(), AtomicLong::get);
+ }
+ h = ((DelegatingHttpHandler) h).getDelegate();
+ }
+ }
+ return Collections.emptyMap();
+ }
+
/**
* S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload.
*/
@@ -525,13 +539,21 @@ protected String requestUniqueId(final HttpExchange exchange) {
@SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint")
protected class S3StatsCollectorHttpHandler extends HttpStatsCollectorHandler {
+ private final Map<S3BlobStore.StatsKey, AtomicLong> metricsCount = ConcurrentCollections.newConcurrentMap();
+
S3StatsCollectorHttpHandler(final HttpHandler delegate) {
super(delegate);
}
@Override
public void handle(HttpExchange exchange) throws IOException {
- final String request = exchange.getRequestMethod() + " " + exchange.getRequestURI();
+ final S3HttpHandler.RequestComponents requestComponents = S3HttpHandler.parseRequestComponents(
+ S3HttpHandler.getRawRequestString(exchange)
+ );
+ if (false == requestComponents.request().startsWith("HEAD ")) {
+ assertThat(requestComponents.customQueryParameters(), hasKey(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE));
+ }
+ final String request = requestComponents.request();
if (shouldFailCompleteMultipartUploadRequest.get() && Regex.simpleMatch("POST /*/*?uploadId=*", request)) {
try (exchange) {
drainInputStream(exchange.getRequestBody());
@@ -546,22 +568,53 @@ public void handle(HttpExchange exchange) throws IOException {
}
@Override
- public void maybeTrack(final String request, Headers requestHeaders) {
+ public void maybeTrack(final String rawRequest, Headers requestHeaders) {
+ final S3HttpHandler.RequestComponents requestComponents = S3HttpHandler.parseRequestComponents(rawRequest);
+ final String request = requestComponents.request();
+ final OperationPurpose purpose;
+ // TODO: Remove the condition once ES-7810 is resolved
+ if (false == request.startsWith("HEAD ")) {
+ purpose = OperationPurpose.parse(
+ requestComponents.customQueryParameters().get(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE).get(0)
+ );
+ } else {
+ purpose = null;
+ }
if (Regex.simpleMatch("GET /*/?prefix=*", request)) {
trackRequest("ListObjects");
+ metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong())
+ .incrementAndGet();
} else if (Regex.simpleMatch("GET /*/*", request)) {
trackRequest("GetObject");
+ metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong())
+ .incrementAndGet();
} else if (isMultiPartUpload(request)) {
trackRequest("PutMultipartObject");
+ metricsCount.computeIfAbsent(
+ new S3BlobStore.StatsKey(S3BlobStore.Operation.PUT_MULTIPART_OBJECT, purpose),
+ k -> new AtomicLong()
+ ).incrementAndGet();
} else if (Regex.simpleMatch("PUT /*/*", request)) {
trackRequest("PutObject");
+ metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.PUT_OBJECT, purpose), k -> new AtomicLong())
+ .incrementAndGet();
} else if (Regex.simpleMatch("POST /*/?delete", request)) {
trackRequest("DeleteObjects");
+ metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.DELETE_OBJECTS, purpose), k -> new AtomicLong())
+ .incrementAndGet();
} else if (Regex.simpleMatch("DELETE /*/*?uploadId=*", request)) {
trackRequest("AbortMultipartObject");
+ metricsCount.computeIfAbsent(
+ new S3BlobStore.StatsKey(S3BlobStore.Operation.ABORT_MULTIPART_OBJECT, purpose),
+ k -> new AtomicLong()
+ ).incrementAndGet();
}
}
+ Map<S3BlobStore.StatsKey, AtomicLong> getMetricsCount() {
+ return metricsCount;
+ }
+
private boolean isMultiPartUpload(String request) {
return Regex.simpleMatch("POST /*/*?uploads", request)
|| Regex.simpleMatch("POST /*/*?*uploadId=*", request)
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
index dadd15ed640c0..b70fd8e87eeef 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java
@@ -90,6 +90,7 @@ class S3BlobContainer extends AbstractBlobContainer {
@Override
public boolean blobExists(OperationPurpose purpose, String blobName) {
+ // TODO: Exists request needs to be included for metrics as well, see ES-7810
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName)));
} catch (final Exception e) {
@@ -207,7 +208,7 @@ protected void onCompletion() throws IOException {
uploadId.get(),
parts
);
- complRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest));
}
}
@@ -240,7 +241,7 @@ private UploadPartRequest createPartUploadRequest(
uploadRequest.setUploadId(uploadId);
uploadRequest.setPartNumber(number);
uploadRequest.setInputStream(stream);
- uploadRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(uploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
uploadRequest.setPartSize(size);
uploadRequest.setLastPart(lastPart);
return uploadRequest;
@@ -248,7 +249,7 @@ private UploadPartRequest createPartUploadRequest(
private void abortMultiPartUpload(OperationPurpose purpose, String uploadId, String blobName) {
final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(blobStore.bucket(), blobName, uploadId);
- abortRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.ABORT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(abortRequest, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose);
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest));
}
@@ -258,7 +259,7 @@ private InitiateMultipartUploadRequest initiateMultiPartUpload(OperationPurpose
final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(blobStore.bucket(), blobName);
initRequest.setStorageClass(blobStore.getStorageClass());
initRequest.setCannedACL(blobStore.getCannedACL());
- initRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(initRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
if (blobStore.serverSideEncryption()) {
final ObjectMetadata md = new ObjectMetadata();
md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
@@ -289,13 +290,13 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException {
final ObjectListing list;
if (prevListing != null) {
final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing);
- listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose));
+ S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest));
} else {
final ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
listObjectsRequest.setBucketName(blobStore.bucket());
listObjectsRequest.setPrefix(keyPath);
- listObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose));
+ S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
}
final Iterator<String> blobNameIterator = Iterators.map(list.getObjectSummaries().iterator(), summary -> {
@@ -378,7 +379,7 @@ private List executeListing(
ObjectListing list;
if (prevListing != null) {
final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing);
- listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose));
+ S3BlobStore.configureRequestForMetrics(listNextBatchOfObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest));
} else {
list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
@@ -394,10 +395,11 @@ private List executeListing(
}
private ListObjectsRequest listObjectsRequest(OperationPurpose purpose, String pathPrefix) {
- return new ListObjectsRequest().withBucketName(blobStore.bucket())
+ final ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(blobStore.bucket())
.withPrefix(pathPrefix)
- .withDelimiter("/")
- .withRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose));
+ .withDelimiter("/");
+ S3BlobStore.configureRequestForMetrics(listObjectsRequest, blobStore, Operation.LIST_OBJECTS, purpose);
+ return listObjectsRequest;
}
// exposed for tests
@@ -432,7 +434,7 @@ void executeSingleUpload(
final PutObjectRequest putRequest = new PutObjectRequest(s3BlobStore.bucket(), blobName, input, md);
putRequest.setStorageClass(s3BlobStore.getStorageClass());
putRequest.setCannedAcl(s3BlobStore.getCannedACL());
- putRequest.setRequestMetricCollector(s3BlobStore.getMetricCollector(Operation.PUT_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(putRequest, blobStore, Operation.PUT_OBJECT, purpose);
try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) {
SocketAccess.doPrivilegedVoid(() -> { clientReference.client().putObject(putRequest); });
@@ -510,7 +512,7 @@ void executeMultipartUpload(
uploadId.get(),
parts
);
- complRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(complRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest));
success = true;
@@ -708,7 +710,7 @@ private void logUploads(String description, List uploads) {
private List<MultipartUpload> listMultipartUploads() {
final var listRequest = new ListMultipartUploadsRequest(bucket);
listRequest.setPrefix(blobKey);
- listRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose));
+ S3BlobStore.configureRequestForMetrics(listRequest, blobStore, Operation.LIST_OBJECTS, purpose);
try {
return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).getMultipartUploads();
} catch (AmazonS3Exception e) {
@@ -721,7 +723,7 @@ private List listMultipartUploads() {
private String initiateMultipartUpload() {
final var initiateRequest = new InitiateMultipartUploadRequest(bucket, blobKey);
- initiateRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(initiateRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
return SocketAccess.doPrivileged(() -> client.initiateMultipartUpload(initiateRequest)).getUploadId();
}
@@ -734,7 +736,7 @@ private PartETag uploadPart(BytesReference updated, String uploadId) throws IOEx
uploadPartRequest.setLastPart(true);
uploadPartRequest.setInputStream(updated.streamInput());
uploadPartRequest.setPartSize(updated.length());
- uploadPartRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(uploadPartRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
return SocketAccess.doPrivileged(() -> client.uploadPart(uploadPartRequest)).getPartETag();
}
@@ -828,7 +830,7 @@ private void safeAbortMultipartUpload(String uploadId) {
private void abortMultipartUploadIfExists(String uploadId) {
try {
final var request = new AbortMultipartUploadRequest(bucket, blobKey, uploadId);
- request.setRequestMetricCollector(blobStore.getMetricCollector(Operation.ABORT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(request, blobStore, Operation.ABORT_MULTIPART_OBJECT, purpose);
SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(request));
} catch (AmazonS3Exception e) {
if (e.getStatusCode() != 404) {
@@ -840,7 +842,7 @@ private void abortMultipartUploadIfExists(String uploadId) {
private void completeMultipartUpload(String uploadId, PartETag partETag) {
final var completeMultipartUploadRequest = new CompleteMultipartUploadRequest(bucket, blobKey, uploadId, List.of(partETag));
- completeMultipartUploadRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(completeMultipartUploadRequest, blobStore, Operation.PUT_MULTIPART_OBJECT, purpose);
SocketAccess.doPrivilegedVoid(() -> client.completeMultipartUpload(completeMultipartUploadRequest));
}
}
@@ -875,7 +877,7 @@ public void compareAndExchangeRegister(
public void getRegister(OperationPurpose purpose, String key, ActionListener<OptionalBytesReference> listener) {
ActionListener.completeWith(listener, () -> {
final var getObjectRequest = new GetObjectRequest(blobStore.bucket(), buildKey(key));
- getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose));
+ S3BlobStore.configureRequestForMetrics(getObjectRequest, blobStore, Operation.GET_OBJECT, purpose);
try (
var clientReference = blobStore.clientReference();
var s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest));
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
index 6b58026aba4f4..68def0598ef60 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
@@ -9,6 +9,7 @@
package org.elasticsearch.repositories.s3;
import com.amazonaws.AmazonClientException;
+import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.Request;
import com.amazonaws.Response;
import com.amazonaws.metrics.RequestMetricCollector;
@@ -55,6 +56,8 @@
class S3BlobStore implements BlobStore {
+ public static final String CUSTOM_QUERY_PARAMETER_PURPOSE = "x-purpose";
+
/**
* Maximum number of deletes in a {@link DeleteObjectsRequest}.
* @see S3 Documentation.
@@ -341,9 +344,11 @@ private void deletePartition(
}
private static DeleteObjectsRequest bulkDelete(OperationPurpose purpose, S3BlobStore blobStore, List blobs) {
- return new DeleteObjectsRequest(blobStore.bucket()).withKeys(blobs.toArray(Strings.EMPTY_ARRAY))
- .withQuiet(true)
- .withRequestMetricCollector(blobStore.getMetricCollector(Operation.DELETE_OBJECTS, purpose));
+ final DeleteObjectsRequest deleteObjectsRequest = new DeleteObjectsRequest(blobStore.bucket()).withKeys(
+ blobs.toArray(Strings.EMPTY_ARRAY)
+ ).withQuiet(true);
+ configureRequestForMetrics(deleteObjectsRequest, blobStore, Operation.DELETE_OBJECTS, purpose);
+ return deleteObjectsRequest;
}
@Override
@@ -456,4 +461,14 @@ IgnoreNoResponseMetricsCollector buildMetricCollector(Operation operation, OperationPurpose purpose) {
return new IgnoreNoResponseMetricsCollector(operation, purpose);
}
}
+
+ static void configureRequestForMetrics(
+ AmazonWebServiceRequest request,
+ S3BlobStore blobStore,
+ Operation operation,
+ OperationPurpose purpose
+ ) {
+ request.setRequestMetricCollector(blobStore.getMetricCollector(operation, purpose));
+ request.putCustomQueryParameter(CUSTOM_QUERY_PARAMETER_PURPOSE, purpose.getKey());
+ }
}
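
For illustration only (not part of the patch), a minimal sketch of the tagging mechanism the new helper relies on; the bucket, key, and purpose value below are made up, and the real code derives the value from OperationPurpose#getKey():

import com.amazonaws.services.s3.model.GetObjectRequest;

public class PurposeTaggingSketch {
    public static void main(String[] args) {
        // Hypothetical bucket and key; "Indexing" stands in for OperationPurpose#getKey().
        GetObjectRequest request = new GetObjectRequest("my-bucket", "my-blob");
        // Same mechanism configureRequestForMetrics uses: S3 ignores unrecognised
        // x- query parameters but records them in server access logs.
        request.putCustomQueryParameter("x-purpose", "Indexing");
        System.out.println(request.getCustomQueryParameters()); // {x-purpose=[Indexing]}
    }
}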
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
index 0ebf6c54b49aa..c457b9d51e8b9 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
@@ -29,6 +29,7 @@
import java.util.List;
import static org.elasticsearch.core.Strings.format;
+import static org.elasticsearch.repositories.s3.S3BlobStore.configureRequestForMetrics;
/**
* Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where
@@ -86,7 +87,7 @@ private void openStreamWithRetry() throws IOException {
while (true) {
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey);
- getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose));
+ configureRequestForMetrics(getObjectRequest, blobStore, Operation.GET_OBJECT, purpose);
if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) {
assert start + currentOffset <= end
: "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end;
diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
index 04a836997e0f7..0ddd29171b3bd 100644
--- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
+++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
@@ -7,6 +7,8 @@
*/
package org.elasticsearch.repositories.s3;
+import fixture.s3.S3HttpHandler;
+
import com.amazonaws.DnsResolver;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
@@ -230,7 +232,10 @@ public void testWriteBlobWithRetries() throws Exception {
final byte[] bytes = randomBlobContent();
httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_blob_max_retries"), exchange -> {
- if ("PUT".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery() == null) {
+ final S3HttpHandler.RequestComponents requestComponents = S3HttpHandler.parseRequestComponents(
+ S3HttpHandler.getRawRequestString(exchange)
+ );
+ if ("PUT".equals(requestComponents.method()) && requestComponents.query().isEmpty()) {
if (countDown.countDown()) {
final BytesReference body = Streams.readFully(exchange.getRequestBody());
if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) {
@@ -319,9 +324,12 @@ public void testWriteLargeBlob() throws Exception {
final CountDown countDownComplete = new CountDown(nbErrors);
httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_large_blob"), exchange -> {
+ final S3HttpHandler.RequestComponents requestComponents = S3HttpHandler.parseRequestComponents(
+ S3HttpHandler.getRawRequestString(exchange)
+ );
final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length"));
- if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploads")) {
+ if ("POST".equals(requestComponents.method()) && requestComponents.query().equals("uploads")) {
// initiate multipart upload request
if (countDownInitiate.countDown()) {
byte[] response = ("""
@@ -337,9 +345,9 @@ public void testWriteLargeBlob() throws Exception {
exchange.close();
return;
}
- } else if ("PUT".equals(exchange.getRequestMethod())
- && exchange.getRequestURI().getQuery().contains("uploadId=TEST")
- && exchange.getRequestURI().getQuery().contains("partNumber=")) {
+ } else if ("PUT".equals(requestComponents.method())
+ && requestComponents.query().contains("uploadId=TEST")
+ && requestComponents.query().contains("partNumber=")) {
// upload part request
MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
BytesReference bytes = Streams.readFully(md5);
@@ -353,7 +361,7 @@ public void testWriteLargeBlob() throws Exception {
return;
}
- } else if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) {
+ } else if ("POST".equals(requestComponents.method()) && requestComponents.query().equals("uploadId=TEST")) {
// complete multipart upload request
if (countDownComplete.countDown()) {
Streams.readFully(exchange.getRequestBody());
@@ -418,9 +426,12 @@ public void testWriteLargeBlobStreaming() throws Exception {
final CountDown countDownComplete = new CountDown(nbErrors);
httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_large_blob_streaming"), exchange -> {
+ final S3HttpHandler.RequestComponents requestComponents = S3HttpHandler.parseRequestComponents(
+ S3HttpHandler.getRawRequestString(exchange)
+ );
final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length"));
- if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploads")) {
+ if ("POST".equals(requestComponents.method()) && requestComponents.query().equals("uploads")) {
// initiate multipart upload request
if (countDownInitiate.countDown()) {
byte[] response = ("""
@@ -436,9 +447,9 @@ public void testWriteLargeBlobStreaming() throws Exception {
exchange.close();
return;
}
- } else if ("PUT".equals(exchange.getRequestMethod())
- && exchange.getRequestURI().getQuery().contains("uploadId=TEST")
- && exchange.getRequestURI().getQuery().contains("partNumber=")) {
+ } else if ("PUT".equals(requestComponents.method())
+ && requestComponents.query().contains("uploadId=TEST")
+ && requestComponents.query().contains("partNumber=")) {
// upload part request
MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
BytesReference bytes = Streams.readFully(md5);
@@ -451,7 +462,7 @@ public void testWriteLargeBlobStreaming() throws Exception {
return;
}
- } else if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) {
+ } else if ("POST".equals(requestComponents.method()) && requestComponents.query().equals("uploadId=TEST")) {
// complete multipart upload request
if (countDownComplete.countDown()) {
Streams.readFully(exchange.getRequestBody());
diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java
index 336b888dd7d3c..2698d96ab7ab9 100644
--- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java
+++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java
@@ -12,6 +12,7 @@
import com.sun.net.httpserver.HttpHandler;
import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.CompositeBytesReference;
@@ -31,6 +32,7 @@
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -74,14 +76,21 @@ public S3HttpHandler(final String bucket, @Nullable final String basePath) {
@Override
public void handle(final HttpExchange exchange) throws IOException {
- final String request = exchange.getRequestMethod() + " " + exchange.getRequestURI().toString();
+ // Remove custom query parameters before processing the request. This simulates how S3 ignores them.
+ // https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom
+ final RequestComponents requestComponents = parseRequestComponents(
+ exchange.getRequestMethod() + " " + exchange.getRequestURI().toString()
+ );
+ final String request = requestComponents.request();
+ onCustomQueryParameters(requestComponents.customQueryParameters);
+
if (request.startsWith("GET") || request.startsWith("HEAD") || request.startsWith("DELETE")) {
int read = exchange.getRequestBody().read();
assert read == -1 : "Request body should have been empty but saw [" + read + "]";
}
try {
if (Regex.simpleMatch("HEAD /" + path + "/*", request)) {
- final BytesReference blob = blobs.get(exchange.getRequestURI().getPath());
+ final BytesReference blob = blobs.get(requestComponents.path);
if (blob == null) {
exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
} else {
@@ -89,8 +98,7 @@ public void handle(final HttpExchange exchange) throws IOException {
}
} else if (Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request)) {
final Map<String, String> params = new HashMap<>();
- RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
-
+ RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params);
final var prefix = params.get("prefix");
final var uploadsList = new StringBuilder();
@@ -120,10 +128,7 @@ public void handle(final HttpExchange exchange) throws IOException {
exchange.getResponseBody().write(response);
} else if (Regex.simpleMatch("POST /" + path + "/*?uploads", request)) {
- final var upload = new MultipartUpload(
- UUIDs.randomBase64UUID(),
- exchange.getRequestURI().getPath().substring(bucket.length() + 2)
- );
+ final var upload = new MultipartUpload(UUIDs.randomBase64UUID(), requestComponents.path.substring(bucket.length() + 2));
uploads.put(upload.getUploadId(), upload);
final var uploadResult = new StringBuilder();
@@ -141,7 +146,7 @@ public void handle(final HttpExchange exchange) throws IOException {
} else if (Regex.simpleMatch("PUT /" + path + "/*?uploadId=*&partNumber=*", request)) {
final Map<String, String> params = new HashMap<>();
- RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params);
final var upload = uploads.get(params.get("uploadId"));
if (upload == null) {
@@ -154,15 +159,14 @@ public void handle(final HttpExchange exchange) throws IOException {
}
} else if (Regex.simpleMatch("POST /" + path + "/*?uploadId=*", request)) {
-
final Map<String, String> params = new HashMap<>();
- RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params);
final var upload = uploads.remove(params.get("uploadId"));
if (upload == null) {
exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
} else {
final var blobContents = upload.complete(extractPartEtags(Streams.readFully(exchange.getRequestBody())));
- blobs.put(exchange.getRequestURI().getPath(), blobContents);
+ blobs.put(requestComponents.path, blobContents);
+ byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<CompleteMultipartUploadResult>\n"
@@ -170,7 +174,7 @@ public void handle(final HttpExchange exchange) throws IOException {
+ bucket
+ "</Bucket>\n"
+ "<Key>"
- + exchange.getRequestURI().getPath()
+ + requestComponents.path
+ "</Key>\n"
+ "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
exchange.getResponseHeaders().add("Content-Type", "application/xml");
@@ -179,19 +183,19 @@ public void handle(final HttpExchange exchange) throws IOException {
}
} else if (Regex.simpleMatch("DELETE /" + path + "/*?uploadId=*", request)) {
final Map<String, String> params = new HashMap<>();
- RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params);
final var upload = uploads.remove(params.get("uploadId"));
exchange.sendResponseHeaders((upload == null ? RestStatus.NOT_FOUND : RestStatus.NO_CONTENT).getStatus(), -1);
} else if (Regex.simpleMatch("PUT /" + path + "/*", request)) {
final Tuple<String, BytesReference> blob = parseRequestBody(exchange);
- blobs.put(exchange.getRequestURI().toString(), blob.v2());
+ blobs.put(requestComponents.uri(), blob.v2());
exchange.getResponseHeaders().add("ETag", blob.v1());
exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
} else if (Regex.simpleMatch("GET /" + bucket + "/?prefix=*", request)) {
final Map<String, String> params = new HashMap<>();
- RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+ RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params);
if (params.get("list-type") != null) {
throw new AssertionError("Test must be adapted for GET Bucket (List Objects) Version 2");
}
@@ -240,7 +244,7 @@ public void handle(final HttpExchange exchange) throws IOException {
exchange.getResponseBody().write(response);
} else if (Regex.simpleMatch("GET /" + path + "/*", request)) {
- final BytesReference blob = blobs.get(exchange.getRequestURI().toString());
+ final BytesReference blob = blobs.get(requestComponents.uri());
if (blob != null) {
final String range = exchange.getRequestHeaders().getFirst("Range");
if (range == null) {
@@ -271,7 +275,7 @@ public void handle(final HttpExchange exchange) throws IOException {
int deletions = 0;
for (Iterator<Map.Entry<String, BytesReference>> iterator = blobs.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, BytesReference> blob = iterator.next();
- if (blob.getKey().startsWith(exchange.getRequestURI().toString())) {
+ if (blob.getKey().startsWith(requestComponents.uri())) {
iterator.remove();
deletions++;
}
@@ -311,6 +315,42 @@ public Map<String, BytesReference> blobs() {
return blobs;
}
+ protected void onCustomQueryParameters(final Map<String, List<String>> params) {}
+
+ public static RequestComponents parseRequestComponents(final String request) {
+ final int spacePos = request.indexOf(' ');
+ final String method = request.substring(0, spacePos);
+ final String uriString = request.substring(spacePos + 1);
+ final int questionMarkPos = uriString.indexOf('?');
+ // AWS S3 allows the same custom query parameter to be specified multiple times
+ final Map<String, List<String>> customQueryParameters = new HashMap<>();
+ if (questionMarkPos == -1) {
+ return new RequestComponents(method, uriString, "", customQueryParameters);
+ } else {
+ final String queryString = uriString.substring(questionMarkPos + 1);
+ final ArrayList<String> queryParameters = new ArrayList<>();
+ Arrays.stream(Strings.tokenizeToStringArray(queryString, "&")).forEach(param -> {
+ if (param.startsWith("x-")) {
+ final int equalPos = param.indexOf("=");
+ customQueryParameters.computeIfAbsent(param.substring(0, equalPos), k -> new ArrayList<>())
+ .add(param.substring(equalPos + 1));
+ } else {
+ queryParameters.add(param);
+ }
+ });
+ return new RequestComponents(
+ method,
+ uriString.substring(0, questionMarkPos),
+ Strings.collectionToDelimitedString(queryParameters, "&"),
+ customQueryParameters
+ );
+ }
+ }
+
+ public static String getRawRequestString(final HttpExchange exchange) {
+ return exchange.getRequestMethod() + " " + exchange.getRequestURI();
+ }
+
private static final Pattern chunkSignaturePattern = Pattern.compile("^([0-9a-z]+);chunk-signature=([^\\r\\n]*)$");
private static Tuple<String, BytesReference> parseRequestBody(final HttpExchange exchange) throws IOException {
@@ -475,4 +515,19 @@ public static void sendError(final HttpExchange exchange, final RestStatus statu
MultipartUpload getUpload(String uploadId) {
return uploads.get(uploadId);
}
+
+ public record RequestComponents(String method, String path, String query, Map<String, List<String>> customQueryParameters) {
+
+ public String request() {
+ return method + " " + uri();
+ }
+
+ public String uri() {
+ if (query.isEmpty()) {
+ return path;
+ } else {
+ return path + "?" + query;
+ }
+ }
+ }
}
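
For illustration only (not part of the patch), a minimal sketch of the new fixture API applied to a made-up request line; standard parameters remain in query() while custom x- parameters are collected separately:

import fixture.s3.S3HttpHandler;

public class RequestComponentsDemo {
    public static void main(String[] args) {
        // Split a raw request line the way the fixture now does before routing.
        S3HttpHandler.RequestComponents components = S3HttpHandler.parseRequestComponents(
            "PUT /bucket/blob?uploadId=TEST&partNumber=1&x-purpose=Indexing"
        );
        System.out.println(components.request());               // PUT /bucket/blob?uploadId=TEST&partNumber=1
        System.out.println(components.customQueryParameters()); // {x-purpose=[Indexing]}
    }
}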
From e85bb5afc30b3eb16253591e59b2d724496d67d3 Mon Sep 17 00:00:00 2001
From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com>
Date: Mon, 5 Feb 2024 09:31:13 +0200
Subject: [PATCH 010/106] Nest pass-through objects within objects (#105062)
* Fix test failure
https://gradle-enterprise.elastic.co/s/icg66i6mwnjoi
* Nest pass-through objects within objects
* Update docs/changelog/105062.yaml
* improve test
---
docs/changelog/105062.yaml | 5 +
.../test/data_stream/150_tsdb.yml | 136 ++++++++++++---
.../index/mapper/ObjectMapper.java | 23 +--
.../index/mapper/PassThroughObjectMapper.java | 9 +-
.../index/mapper/RootObjectMapper.java | 18 +-
.../index/mapper/DynamicTemplatesTests.java | 2 +-
.../index/mapper/ObjectMapperTests.java | 4 +-
.../index/mapper/RootObjectMapperTests.java | 165 +++++++++++++++---
8 files changed, 284 insertions(+), 78 deletions(-)
create mode 100644 docs/changelog/105062.yaml
diff --git a/docs/changelog/105062.yaml b/docs/changelog/105062.yaml
new file mode 100644
index 0000000000000..928786f62381a
--- /dev/null
+++ b/docs/changelog/105062.yaml
@@ -0,0 +1,5 @@
+pr: 105062
+summary: Nest pass-through objects within objects
+area: TSDB
+type: enhancement
+issues: []
diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
index 794b27612fdd8..81ede2b045e61 100644
--- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
+++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
@@ -381,12 +381,12 @@ dynamic templates - conflicting aliases:
- match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 }
---
-dynamic templates - subobject in passthrough object error:
+dynamic templates with nesting:
- skip:
version: " - 8.12.99"
+ features: "default_shards"
reason: "Support for dynamic fields was added in 8.13"
- do:
- catch: /Tried to add subobject \[subcategory\] to object \[attributes\] which does not support subobjects/
indices.put_index_template:
name: my-dynamic-template
body:
@@ -395,21 +395,116 @@ dynamic templates - subobject in passthrough object error:
template:
settings:
index:
+ number_of_shards: 1
mode: time_series
+ time_series:
+ start_time: 2023-08-31T13:03:08.138Z
mappings:
properties:
attributes:
type: passthrough
+ dynamic: true
+ time_series_dimension: true
+ resource:
+ type: object
properties:
- subcategory:
- type: object
- properties:
- dim:
- type: keyword
+ attributes:
+ type: passthrough
+ dynamic: true
+ time_series_dimension: true
+ dynamic_templates:
+ - counter_metric:
+ mapping:
+ type: integer
+ time_series_metric: counter
- do:
- catch: /Mapping definition for \[attributes\] has unsupported parameters:\ \[subobjects \:\ true\]/
+ bulk:
+ index: k9s
+ refresh: true
+ body:
+ - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }'
+ - '{ "@timestamp": "2023-09-01T13:03:08.138Z","data": "10", "resource.attributes.dim": "A", "resource.attributes.another.dim": "C", "attributes.more.dim": "E" }'
+ - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }'
+ - '{ "@timestamp": "2023-09-01T13:03:09.138Z","data": "20", "resource.attributes.dim": "A", "resource.attributes.another.dim": "C", "attributes.more.dim": "E" }'
+ - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }'
+ - '{ "@timestamp": "2023-09-01T13:03:10.138Z","data": "30", "resource.attributes.dim": "B", "resource.attributes.another.dim": "D", "attributes.more.dim": "F" }'
+ - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }'
+ - '{ "@timestamp": "2023-09-01T13:03:10.238Z","data": "40", "resource.attributes.dim": "B", "resource.attributes.another.dim": "D", "attributes.more.dim": "F" }'
+
+ - do:
+ search:
+ index: k9s
+ body:
+ size: 0
+
+ - match: { hits.total.value: 4 }
+
+ - do:
+ search:
+ index: k9s
+ body:
+ size: 0
+ aggs:
+ filterA:
+ filter:
+ term:
+ dim: A
+ aggs:
+ tsids:
+ terms:
+ field: _tsid
+
+ - length: { aggregations.filterA.tsids.buckets: 1 }
+ - match: { aggregations.filterA.tsids.buckets.0.key: "LEjiJ4ATCXWlzeFvhGQ9lYlnP-nRIGKYihfZ18WoJ94t9a8OpbsCdwZALomb" }
+ - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 }
+
+ - do:
+ search:
+ index: k9s
+ body:
+ size: 0
+ aggs:
+ filterA:
+ filter:
+ term:
+ another.dim: C
+ aggs:
+ tsids:
+ terms:
+ field: _tsid
+
+ - length: { aggregations.filterA.tsids.buckets: 1 }
+ - match: { aggregations.filterA.tsids.buckets.0.key: "LEjiJ4ATCXWlzeFvhGQ9lYlnP-nRIGKYihfZ18WoJ94t9a8OpbsCdwZALomb" }
+ - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 }
+
+ - do:
+ search:
+ index: k9s
+ body:
+ size: 0
+ aggs:
+ filterA:
+ filter:
+ term:
+ more.dim: E
+ aggs:
+ tsids:
+ terms:
+ field: _tsid
+
+ - length: { aggregations.filterA.tsids.buckets: 1 }
+ - match: { aggregations.filterA.tsids.buckets.0.key: "LEjiJ4ATCXWlzeFvhGQ9lYlnP-nRIGKYihfZ18WoJ94t9a8OpbsCdwZALomb" }
+ - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 }
+
+---
+dynamic templates - subobject in passthrough object error:
+ - skip:
+ version: " - 8.12.99"
+ reason: "Support for dynamic fields was added in 8.13"
+ - do:
+ catch: /Tried to add subobject \[subcategory\] to object \[attributes\] which does not support subobjects/
indices.put_index_template:
name: my-dynamic-template
body:
@@ -418,24 +513,21 @@ dynamic templates - subobject in passthrough object error:
template:
settings:
index:
- number_of_shards: 1
mode: time_series
- time_series:
- start_time: 2023-08-31T13:03:08.138Z
mappings:
properties:
attributes:
type: passthrough
- subobjects: true
+ properties:
+ subcategory:
+ type: object
+ properties:
+ dim:
+ type: keyword
----
-dynamic templates - passthrough not under root error:
- - skip:
- version: " - 8.12.99"
- reason: "Support for dynamic fields was added in 8.13"
- do:
- catch: /Tried to add passthrough subobject \[attributes\] to object \[resource\], passthrough is not supported as a subobject/
+ catch: /Mapping definition for \[attributes\] has unsupported parameters:\ \[subobjects \:\ true\]/
indices.put_index_template:
name: my-dynamic-template
body:
@@ -444,11 +536,13 @@ dynamic templates - passthrough not under root error:
template:
settings:
index:
+ number_of_shards: 1
mode: time_series
+ time_series:
+ start_time: 2023-08-31T13:03:08.138Z
mappings:
properties:
- "resource.attributes":
+ attributes:
type: passthrough
- dynamic: true
- time_series_dimension: true
+ subobjects: true
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
index 1ed9713d73e75..9d7353859ed25 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
@@ -295,7 +295,10 @@ protected static void parseProperties(
}
}
- if (objBuilder.subobjects.value() == false && type.equals(ObjectMapper.CONTENT_TYPE)) {
+ if (objBuilder.subobjects.value() == false
+ && (type.equals(ObjectMapper.CONTENT_TYPE)
+ || type.equals(NestedObjectMapper.CONTENT_TYPE)
+ || type.equals(PassThroughObjectMapper.CONTENT_TYPE))) {
throw new MapperParsingException(
"Tried to add subobject ["
+ fieldName
@@ -304,24 +307,6 @@ protected static void parseProperties(
+ "] which does not support subobjects"
);
}
- if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) {
- throw new MapperParsingException(
- "Tried to add nested object ["
- + fieldName
- + "] to object ["
- + objBuilder.name()
- + "] which does not support subobjects"
- );
- }
- if (type.equals(PassThroughObjectMapper.CONTENT_TYPE) && objBuilder instanceof RootObjectMapper.Builder == false) {
- throw new MapperParsingException(
- "Tried to add passthrough subobject ["
- + fieldName
- + "] to object ["
- + objBuilder.name()
- + "], passthrough is not supported as a subobject"
- );
- }
Mapper.TypeParser typeParser = parserContext.typeParser(type);
if (typeParser == null) {
throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]");
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
index 7688b217ab7fc..b49c9328fcc79 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
@@ -57,6 +57,7 @@ public PassThroughObjectMapper.Builder setContainsDimensions() {
public PassThroughObjectMapper build(MapperBuilderContext context) {
return new PassThroughObjectMapper(
name,
+ context.buildFullName(name),
enabled,
dynamic,
buildMappers(context.createChildContext(name)),
@@ -70,19 +71,20 @@ public PassThroughObjectMapper build(MapperBuilderContext context) {
PassThroughObjectMapper(
String name,
+ String fullPath,
Explicit<Boolean> enabled,
Dynamic dynamic,
Map<String, Mapper> mappers,
Explicit<Boolean> timeSeriesDimensionSubFields
) {
// Subobjects are not currently supported.
- super(name, name, enabled, Explicit.IMPLICIT_FALSE, dynamic, mappers);
+ super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, dynamic, mappers);
this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields;
}
@Override
PassThroughObjectMapper withoutMappers() {
- return new PassThroughObjectMapper(simpleName(), enabled, dynamic, Map.of(), timeSeriesDimensionSubFields);
+ return new PassThroughObjectMapper(simpleName(), fullPath(), enabled, dynamic, Map.of(), timeSeriesDimensionSubFields);
}
public boolean containsDimensions() {
@@ -91,7 +93,7 @@ public boolean containsDimensions() {
@Override
public PassThroughObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) {
- PassThroughObjectMapper.Builder builder = new PassThroughObjectMapper.Builder(name());
+ PassThroughObjectMapper.Builder builder = new PassThroughObjectMapper.Builder(simpleName());
builder.enabled = enabled;
builder.dynamic = dynamic;
builder.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields;
@@ -108,6 +110,7 @@ public PassThroughObjectMapper merge(ObjectMapper mergeWith, MergeReason reason,
return new PassThroughObjectMapper(
simpleName(),
+ fullPath(),
mergeResult.enabled(),
mergeResult.dynamic(),
mergeResult.mappers(),
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index 86bdb2aa2bba7..7994c018f40f2 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -45,6 +45,7 @@
public class RootObjectMapper extends ObjectMapper {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RootObjectMapper.class);
+ private static final int MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS = 20;
/**
* Parameter used when serializing {@link RootObjectMapper} and request that the runtime section is skipped.
@@ -111,7 +112,11 @@ public RootObjectMapper.Builder addRuntimeFields(Map<String, RuntimeField> runtimeFields) {
@Override
public RootObjectMapper build(MapperBuilderContext context) {
Map<String, Mapper> mappers = buildMappers(context);
- mappers.putAll(getAliasMappers(mappers, context));
+
+ Map<String, Mapper> aliasMappers = new HashMap<>();
+ getAliasMappers(mappers, aliasMappers, context, 0);
+ mappers.putAll(aliasMappers);
+
return new RootObjectMapper(
name,
enabled,
@@ -126,8 +131,11 @@ public RootObjectMapper build(MapperBuilderContext context) {
);
}
- Map<String, Mapper> getAliasMappers(Map<String, Mapper> mappers, MapperBuilderContext context) {
- Map<String, Mapper> aliasMappers = new HashMap<>();
+ void getAliasMappers(Map<String, Mapper> mappers, Map<String, Mapper> aliasMappers, MapperBuilderContext context, int level) {
+ if (level >= MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS) {
+ logger.warn("Exceeded maximum nesting level for searching for pass-through object fields within object fields.");
+ return;
+ }
for (Mapper mapper : mappers.values()) {
// Create aliases for all fields in child passthrough mappers and place them under the root object.
if (mapper instanceof PassThroughObjectMapper passthroughMapper) {
@@ -154,9 +162,11 @@ Map getAliasMappers(Map mappers, MapperBuilderCo
}
}
}
+ } else if (mapper instanceof ObjectMapper objectMapper) {
+ // Call recursively to check child fields. The level guards against long recursive call sequences.
+ getAliasMappers(objectMapper.mappers, aliasMappers, context, level + 1);
}
}
- return aliasMappers;
}
}
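
For illustration only (not part of the patch), the same depth-guarded traversal in isolation, with plain maps standing in for the mapper classes:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class BoundedTraversalSketch {
    static final int MAX_LEVEL = 20; // mirrors MAX_NESTING_LEVEL_FOR_PASS_THROUGH_OBJECTS

    // Collect leaf values reachable through nested maps, giving up beyond MAX_LEVEL
    // so a pathologically deep definition cannot recurse without bound.
    static void collect(Map<String, Object> node, List<Object> out, int level) {
        if (level >= MAX_LEVEL) {
            return;
        }
        for (Object child : node.values()) {
            if (child instanceof Map<?, ?> inner) {
                @SuppressWarnings("unchecked")
                Map<String, Object> innerMap = (Map<String, Object>) inner;
                collect(innerMap, out, level + 1);
            } else {
                out.add(child);
            }
        }
    }

    public static void main(String[] args) {
        List<Object> out = new ArrayList<>();
        collect(Map.of("outer", Map.of("attributes", Map.of("dim", "host"))), out, 0);
        System.out.println(out); // [host]
    }
}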
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
index 54db5832c2726..10bd6c667c26e 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
@@ -1391,7 +1391,7 @@ public void testSubobjectsFalseWithInnerNestedFromDynamicTemplate() {
);
assertThat(exception.getRootCause(), instanceOf(MapperParsingException.class));
assertEquals(
- "Tried to add nested object [time] to object [__dynamic__test] which does not support subobjects",
+ "Tried to add subobject [time] to object [__dynamic__test] which does not support subobjects",
exception.getRootCause().getMessage()
);
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
index 6e958ddbea904..cbb0929b813fc 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java
@@ -407,7 +407,7 @@ public void testSubobjectsFalseWithInnerNested() {
b.endObject();
})));
assertEquals(
- "Failed to parse mapping: Tried to add nested object [time] to object [service] which does not support subobjects",
+ "Failed to parse mapping: Tried to add subobject [time] to object [service] which does not support subobjects",
exception.getMessage()
);
}
@@ -457,7 +457,7 @@ public void testSubobjectsFalseRootWithInnerNested() {
b.endObject();
})));
assertEquals(
- "Failed to parse mapping: Tried to add nested object [metrics.service] to object [_doc] which does not support subobjects",
+ "Failed to parse mapping: Tried to add subobject [metrics.service] to object [_doc] which does not support subobjects",
exception.getMessage()
);
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
index b77019806fc4f..662a809e6d065 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java
@@ -360,50 +360,159 @@ public void testPassThroughObjectWithAliases() throws IOException {
assertThat(mapperService.mappingLookup().getMapper("labels.dim"), instanceOf(KeywordFieldMapper.class));
}
+ public void testPassThroughObjectNested() throws IOException {
+ MapperService mapperService = createMapperService(mapping(b -> {
+ b.startObject("resource").field("type", "object");
+ {
+ b.startObject("properties");
+ {
+ b.startObject("attributes").field("type", "passthrough");
+ {
+ b.startObject("properties");
+ b.startObject("dim").field("type", "keyword").endObject();
+ b.endObject();
+ }
+ b.endObject();
+ }
+ b.endObject();
+ }
+ b.endObject();
+ b.startObject("attributes").field("type", "passthrough");
+ {
+ b.startObject("properties");
+ b.startObject("another.dim").field("type", "keyword").endObject();
+ b.endObject();
+ }
+ b.endObject();
+ }));
+ assertThat(mapperService.mappingLookup().getMapper("dim"), instanceOf(FieldAliasMapper.class));
+ assertThat(mapperService.mappingLookup().getMapper("resource.attributes.dim"), instanceOf(KeywordFieldMapper.class));
+ assertThat(mapperService.mappingLookup().getMapper("another.dim"), instanceOf(FieldAliasMapper.class));
+ assertThat(mapperService.mappingLookup().getMapper("attributes.another.dim"), instanceOf(KeywordFieldMapper.class));
+ }
+
public void testAliasMappersCreatesAlias() throws Exception {
var context = MapperBuilderContext.root(false, false);
-
- Map<String, Mapper> fields = new HashMap<>();
- fields.put("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context));
-
- Map<String, Mapper> mappers = new HashMap<>();
- mappers.put(
- "labels",
- new PassThroughObjectMapper("labels", Explicit.EXPLICIT_TRUE, ObjectMapper.Dynamic.FALSE, fields, Explicit.EXPLICIT_FALSE)
+ Map<String, Mapper> aliases = new HashMap<>();
+ new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(
+ Map.of(
+ "labels",
+ new PassThroughObjectMapper(
+ "labels",
+ "labels",
+ Explicit.EXPLICIT_TRUE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context)),
+ Explicit.EXPLICIT_FALSE
+ )
+ ),
+ aliases,
+ context,
+ 0
);
-
- Map<String, Mapper> aliases = new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(mappers, context);
assertEquals(1, aliases.size());
assertThat(aliases.get("host"), instanceOf(FieldAliasMapper.class));
}
- public void testAliasMappersCreatesNoAliasForRegularObject() throws Exception {
+ public void testAliasMappersCreatesAliasNested() throws Exception {
var context = MapperBuilderContext.root(false, false);
+ Map<String, Mapper> aliases = new HashMap<>();
+ new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(
+ Map.of(
+ "outer",
+ new ObjectMapper(
+ "outer",
+ "outer",
+ Explicit.EXPLICIT_TRUE,
+ Explicit.EXPLICIT_TRUE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of(
+ "inner",
+ new PassThroughObjectMapper(
+ "inner",
+ "outer.inner",
+ Explicit.EXPLICIT_TRUE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context)),
+ Explicit.EXPLICIT_FALSE
+ )
+ )
+ )
+ ),
+ aliases,
+ context,
+ 0
+ );
+ assertEquals(1, aliases.size());
+ assertThat(aliases.get("host"), instanceOf(FieldAliasMapper.class));
+ }
- Map<String, Mapper> fields = new HashMap<>();
- fields.put("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context));
+ public void testAliasMappersExitsInDeepNesting() throws Exception {
+ var context = MapperBuilderContext.root(false, false);
+ Map<String, Mapper> aliases = new HashMap<>();
+ new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(
+ Map.of(
+ "labels",
+ new PassThroughObjectMapper(
+ "labels",
+ "labels",
+ Explicit.EXPLICIT_TRUE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context)),
+ Explicit.EXPLICIT_FALSE
+ )
+ ),
+ aliases,
+ context,
+ 1_000_000
+ );
+ assertTrue(aliases.isEmpty());
+ }
- Map<String, Mapper> mappers = new HashMap<>();
- mappers.put(
- "labels",
- new ObjectMapper("labels", "labels", Explicit.EXPLICIT_TRUE, Explicit.EXPLICIT_FALSE, ObjectMapper.Dynamic.FALSE, fields)
+ public void testAliasMappersCreatesNoAliasForRegularObject() throws Exception {
+ var context = MapperBuilderContext.root(false, false);
+ Map<String, Mapper> aliases = new HashMap<>();
+ new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(
+ Map.of(
+ "labels",
+ new ObjectMapper(
+ "labels",
+ "labels",
+ Explicit.EXPLICIT_TRUE,
+ Explicit.EXPLICIT_FALSE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context))
+ )
+ ),
+ aliases,
+ context,
+ 0
);
- assertTrue(new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(mappers, context).isEmpty());
+ assertTrue(aliases.isEmpty());
}
public void testAliasMappersConflictingField() throws Exception {
var context = MapperBuilderContext.root(false, false);
-
- Map<String, Mapper> fields = new HashMap<>();
- fields.put("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context));
-
- Map<String, Mapper> mappers = new HashMap<>();
- mappers.put(
- "labels",
- new PassThroughObjectMapper("labels", Explicit.EXPLICIT_TRUE, ObjectMapper.Dynamic.FALSE, fields, Explicit.EXPLICIT_FALSE)
+ Map<String, Mapper> aliases = new HashMap<>();
+ new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(
+ Map.of(
+ "labels",
+ new PassThroughObjectMapper(
+ "labels",
+ "labels",
+ Explicit.EXPLICIT_TRUE,
+ ObjectMapper.Dynamic.FALSE,
+ Map.of("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context)),
+ Explicit.EXPLICIT_FALSE
+ ),
+ "host",
+ new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context)
+ ),
+ aliases,
+ context,
+ 0
);
- mappers.put("host", new KeywordFieldMapper.Builder("host", IndexVersion.current()).build(context));
- assertTrue(new RootObjectMapper.Builder("root", Explicit.EXPLICIT_FALSE).getAliasMappers(mappers, context).isEmpty());
+ assertTrue(aliases.isEmpty());
}
public void testEmptyType() throws Exception {
From b328982e37358122d765360119e1399e56bdcd45 Mon Sep 17 00:00:00 2001
From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com>
Date: Mon, 5 Feb 2024 10:08:12 +0100
Subject: [PATCH 011/106] Refactor DataExtractor summary (#105011)
* Empty getDataSummary methods
* Move chunking logic from DataSummary to Chunker.
* Move DataSummary to ScrollDataExtractor.
* Move DataSummary to AbstractAggregationDataExtractor.
* Remove unused code
* Make ChunkedDataExtractorContext a record
* Remove more unused code
* Add tests for getSummary()
* Implement CompositeAggregationDataExtractor::getSummary().
* More unused code
* Move shared code to DataExtractorQueryContext.
* Move shared code to DataExtractorUtils.
* Lint fixes
* Move checkForSkippedClusters to DataExtractorUtils
* Replace monkey patching by ArgumentCaptor.
* Add checkForSkippedClusters to AbstractAggregationDataExtractor::executeSearchRequest
* Fix DataSummary for rollups
* Add documentation
---
.../ml/datafeed/extractor/DataExtractor.java | 28 +-
.../extractor/DataExtractorFactory.java | 3 +-
.../extractor/DataExtractorQueryContext.java | 47 ++
.../extractor/DataExtractorUtils.java | 71 +++-
.../AbstractAggregationDataExtractor.java | 68 ++-
.../aggregation/AggregationDataExtractor.java | 6 +-
.../AggregationDataExtractorContext.java | 31 +-
.../CompositeAggregationDataExtractor.java | 52 ++-
...positeAggregationDataExtractorContext.java | 28 +-
.../aggregation/RollupDataExtractor.java | 6 +-
.../chunked/ChunkedDataExtractor.java | 299 ++-----------
.../chunked/ChunkedDataExtractorContext.java | 66 +--
.../chunked/ChunkedDataExtractorFactory.java | 30 +-
.../extractor/scroll/ScrollDataExtractor.java | 50 ++-
.../scroll/ScrollDataExtractorContext.java | 28 +-
.../AggregationDataExtractorFactoryTests.java | 8 +-
.../AggregationDataExtractorTests.java | 173 +++++---
...ompositeAggregationDataExtractorTests.java | 174 +++++---
.../ChunkedDataExtractorFactoryTests.java | 19 +-
.../chunked/ChunkedDataExtractorTests.java | 401 ++++++------------
.../scroll/ScrollDataExtractorTests.java | 47 ++
21 files changed, 767 insertions(+), 868 deletions(-)
create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorQueryContext.java
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractor.java
index 991916333f4cf..0ea1914d3e14b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractor.java
@@ -6,8 +6,6 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor;
-import org.elasticsearch.ResourceNotFoundException;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval;
import java.io.IOException;
@@ -18,6 +16,14 @@ public interface DataExtractor {
record Result(SearchInterval searchInterval, Optional<InputStream> data) {}
+ record DataSummary(Long earliestTime, Long latestTime, long totalHits) {
+ public boolean hasData() {
+ return earliestTime != null;
+ }
+ }
+
+ DataSummary getSummary();
+
/**
* @return {@code true} if the search has not finished yet, or {@code false} otherwise
*/
@@ -50,22 +56,4 @@ record Result(SearchInterval searchInterval, Optional<InputStream> data) {}
* @return the end time to which this extractor will search
*/
long getEndTime();
-
- /**
- * Check whether the search skipped CCS clusters.
- * @throws ResourceNotFoundException if any CCS clusters were skipped, as this could
- * cause anomalies to be spuriously detected.
- * @param searchResponse The search response to check for skipped CCS clusters.
- */
- default void checkForSkippedClusters(SearchResponse searchResponse) {
- SearchResponse.Clusters clusterResponse = searchResponse.getClusters();
- if (clusterResponse != null && clusterResponse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) > 0) {
- throw new ResourceNotFoundException(
- "[{}] remote clusters out of [{}] were skipped when performing datafeed search",
- clusterResponse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED),
- clusterResponse.getTotal()
- );
- }
- }
-
}
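
For illustration only (not part of the patch), a minimal sketch of the hasData() convention using a local copy of the record above:

public class DataSummaryDemo {
    // Local copy of the record added to DataExtractor above.
    record DataSummary(Long earliestTime, Long latestTime, long totalHits) {
        boolean hasData() {
            return earliestTime != null;
        }
    }

    public static void main(String[] args) {
        DataSummary empty = new DataSummary(null, null, 0L);
        DataSummary populated = new DataSummary(1_706_000_000_000L, 1_706_003_600_000L, 42L);
        System.out.println(empty.hasData());     // false: the search matched no documents
        System.out.println(populated.hasData()); // true
    }
}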
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java
index 3175891aa4d6e..be2c8dd871a9b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java
@@ -28,6 +28,7 @@
import org.elasticsearch.xpack.ml.datafeed.extractor.scroll.ScrollDataExtractorFactory;
public interface DataExtractorFactory {
+
DataExtractor newExtractor(long start, long end);
/**
@@ -61,7 +62,7 @@ static void create(
ActionListener<DataExtractorFactory> factoryHandler = ActionListener.wrap(
factory -> listener.onResponse(
datafeed.getChunkingConfig().isEnabled()
- ? new ChunkedDataExtractorFactory(client, datafeed, extraFilters, job, xContentRegistry, factory, timingStatsReporter)
+ ? new ChunkedDataExtractorFactory(datafeed, job, xContentRegistry, factory)
: factory
),
listener::onFailure
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorQueryContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorQueryContext.java
new file mode 100644
index 0000000000000..8ba901f82d351
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorQueryContext.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.datafeed.extractor;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.index.query.QueryBuilder;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public class DataExtractorQueryContext {
+
+ public final String[] indices;
+ public final QueryBuilder query;
+ public final String timeField;
+ public final long start;
+ public final long end;
+ public final Map<String, String> headers;
+ public final IndicesOptions indicesOptions;
+ public final Map<String, Object> runtimeMappings;
+
+ public DataExtractorQueryContext(
+ List<String> indices,
+ QueryBuilder query,
+ String timeField,
+ long start,
+ long end,
+ Map<String, String> headers,
+ IndicesOptions indicesOptions,
+ Map<String, Object> runtimeMappings
+ ) {
+ this.indices = indices.toArray(new String[0]);
+ this.query = Objects.requireNonNull(query);
+ this.timeField = timeField;
+ this.start = start;
+ this.end = end;
+ this.headers = headers;
+ this.indicesOptions = Objects.requireNonNull(indicesOptions);
+ this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
+ }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorUtils.java
index 0f6ae6f90fb52..f0e03a1e94973 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorUtils.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorUtils.java
@@ -7,9 +7,18 @@
package org.elasticsearch.xpack.ml.datafeed.extractor;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.internal.Client;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Min;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
/**
* Utility methods for various DataExtractor implementations.
@@ -17,12 +26,70 @@
public final class DataExtractorUtils {
private static final String EPOCH_MILLIS = "epoch_millis";
+ private static final String EARLIEST_TIME = "earliest_time";
+ private static final String LATEST_TIME = "latest_time";
+
+ private DataExtractorUtils() {}
/**
* Combines a user query with a time range query.
*/
- public static QueryBuilder wrapInTimeRangeQuery(QueryBuilder userQuery, String timeField, long start, long end) {
+ public static QueryBuilder wrapInTimeRangeQuery(QueryBuilder query, String timeField, long start, long end) {
QueryBuilder timeQuery = new RangeQueryBuilder(timeField).gte(start).lt(end).format(EPOCH_MILLIS);
- return new BoolQueryBuilder().filter(userQuery).filter(timeQuery);
+ return new BoolQueryBuilder().filter(query).filter(timeQuery);
+ }
+
+ public static SearchRequestBuilder getSearchRequestBuilderForSummary(Client client, DataExtractorQueryContext context) {
+ return new SearchRequestBuilder(client).setIndices(context.indices)
+ .setIndicesOptions(context.indicesOptions)
+ .setSource(getSearchSourceBuilderForSummary(context))
+ .setAllowPartialSearchResults(false)
+ .setTrackTotalHits(true);
+ }
+
+ public static SearchSourceBuilder getSearchSourceBuilderForSummary(DataExtractorQueryContext context) {
+ return new SearchSourceBuilder().size(0)
+ .query(DataExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, context.start, context.end))
+ .runtimeMappings(context.runtimeMappings)
+ .aggregation(AggregationBuilders.min(EARLIEST_TIME).field(context.timeField))
+ .aggregation(AggregationBuilders.max(LATEST_TIME).field(context.timeField));
+ }
+
+ public static DataExtractor.DataSummary getDataSummary(SearchResponse searchResponse) {
+ InternalAggregations aggregations = searchResponse.getAggregations();
+ if (aggregations == null) {
+ return new DataExtractor.DataSummary(null, null, 0L);
+ } else {
+ Long earliestTime = toLongIfFinite((aggregations.<Min>get(EARLIEST_TIME)).value());
+ Long latestTime = toLongIfFinite((aggregations.<Max>get(LATEST_TIME)).value());
+ long totalHits = searchResponse.getHits().getTotalHits().value;
+ return new DataExtractor.DataSummary(earliestTime, latestTime, totalHits);
+ }
+ }
+
+ /**
+ * The min and max aggregations return infinity when there is no data. To ensure consistency
+ * between the different types of data summary we represent no data by earliest and latest times
+ * being null. Hence, this method converts infinite values to null.
+ */
+ private static Long toLongIfFinite(double x) {
+ return Double.isFinite(x) ? (long) x : null;
+ }
+
+ /**
+ * Check whether the search skipped CCS clusters.
+ * @throws ResourceNotFoundException if any CCS clusters were skipped, as this could
+ * cause anomalies to be spuriously detected.
+ * @param searchResponse The search response to check for skipped CCS clusters.
+ */
+ public static void checkForSkippedClusters(SearchResponse searchResponse) {
+ SearchResponse.Clusters clusterResponse = searchResponse.getClusters();
+ if (clusterResponse != null && clusterResponse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED) > 0) {
+ throw new ResourceNotFoundException(
+ "[{}] remote clusters out of [{}] were skipped when performing datafeed search",
+ clusterResponse.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED),
+ clusterResponse.getTotal()
+ );
+ }
}
}
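
For illustration only (not part of the patch), the infinity-to-null conversion in isolation; the helper body is copied from the diff and the sample values are made up:

public class ToLongIfFiniteDemo {
    // Mirrors the private helper above: min/max aggregations report ±Infinity
    // when no documents match, which the data summary represents as null.
    static Long toLongIfFinite(double x) {
        return Double.isFinite(x) ? (long) x : null;
    }

    public static void main(String[] args) {
        System.out.println(toLongIfFinite(1706000000000.0));          // 1706000000000
        System.out.println(toLongIfFinite(Double.NEGATIVE_INFINITY)); // null, i.e. no data
    }
}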
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java
index 4cd5379d8fe3b..26c43e1d098c1 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java
@@ -34,10 +34,8 @@
/**
* Abstract class for aggregated data extractors, e.g. {@link RollupDataExtractor}
- *
- * @param <T> The request builder type for getting data from ElasticSearch
- */
-abstract class AbstractAggregationDataExtractor<T extends ActionRequestBuilder<SearchRequest, SearchResponse>> implements DataExtractor {
+abstract class AbstractAggregationDataExtractor implements DataExtractor {
private static final Logger LOGGER = LogManager.getLogger(AbstractAggregationDataExtractor.class);
@@ -86,7 +84,7 @@ public void destroy() {
@Override
public long getEndTime() {
- return context.end;
+ return context.queryContext.end;
}
@Override
@@ -95,7 +93,7 @@ public Result next() throws IOException {
throw new NoSuchElementException();
}
- SearchInterval searchInterval = new SearchInterval(context.start, context.end);
+ SearchInterval searchInterval = new SearchInterval(context.queryContext.start, context.queryContext.end);
if (aggregationToJsonProcessor == null) {
InternalAggregations aggs = search();
if (aggs == null) {
@@ -121,11 +119,10 @@ public Result next() throws IOException {
private InternalAggregations search() {
LOGGER.debug("[{}] Executing aggregated search", context.jobId);
- T searchRequest = buildSearchRequest(buildBaseSearchSource());
+ ActionRequestBuilder<SearchRequest, SearchResponse> searchRequest = buildSearchRequest(buildBaseSearchSource());
assert searchRequest.request().allowPartialSearchResults() == false;
SearchResponse searchResponse = executeSearchRequest(searchRequest);
try {
- checkForSkippedClusters(searchResponse);
LOGGER.debug("[{}] Search response was obtained", context.jobId);
timingStatsReporter.reportSearchDuration(searchResponse.getTook());
return validateAggs(searchResponse.getAggregations());
@@ -136,37 +133,62 @@ private InternalAggregations search() {
private void initAggregationProcessor(InternalAggregations aggs) throws IOException {
aggregationToJsonProcessor = new AggregationToJsonProcessor(
- context.timeField,
+ context.queryContext.timeField,
context.fields,
context.includeDocCount,
- context.start,
+ context.queryContext.start,
null
);
aggregationToJsonProcessor.process(aggs);
}
- protected SearchResponse executeSearchRequest(T searchRequestBuilder) {
- return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get);
+ private SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
+ SearchResponse searchResponse = ClientHelper.executeWithHeaders(
+ context.queryContext.headers,
+ ClientHelper.ML_ORIGIN,
+ client,
+ searchRequestBuilder::get
+ );
+ boolean success = false;
+ try {
+ DataExtractorUtils.checkForSkippedClusters(searchResponse);
+ success = true;
+ } finally {
+ if (success == false) {
+ searchResponse.decRef();
+ }
+ }
+ return searchResponse;
}
private SearchSourceBuilder buildBaseSearchSource() {
// For derivative aggregations the first bucket will always be null
// so query one extra histogram bucket back and hope there is data
// in that bucket
- long histogramSearchStartTime = Math.max(0, context.start - DatafeedConfigUtils.getHistogramIntervalMillis(context.aggs));
+ long histogramSearchStartTime = Math.max(
+ 0,
+ context.queryContext.start - DatafeedConfigUtils.getHistogramIntervalMillis(context.aggs)
+ );
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0)
- .query(DataExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, histogramSearchStartTime, context.end));
+ .query(
+ DataExtractorUtils.wrapInTimeRangeQuery(
+ context.queryContext.query,
+ context.queryContext.timeField,
+ histogramSearchStartTime,
+ context.queryContext.end
+ )
+ );
- if (context.runtimeMappings.isEmpty() == false) {
- searchSourceBuilder.runtimeMappings(context.runtimeMappings);
+ if (context.queryContext.runtimeMappings.isEmpty() == false) {
+ searchSourceBuilder.runtimeMappings(context.queryContext.runtimeMappings);
}
context.aggs.getAggregatorFactories().forEach(searchSourceBuilder::aggregation);
context.aggs.getPipelineAggregatorFactories().forEach(searchSourceBuilder::aggregation);
return searchSourceBuilder;
}
- protected abstract T buildSearchRequest(SearchSourceBuilder searchRequestBuilder);
+ protected abstract ActionRequestBuilder<SearchRequest, SearchResponse> buildSearchRequest(SearchSourceBuilder searchRequestBuilder);
private static InternalAggregations validateAggs(@Nullable InternalAggregations aggs) {
if (aggs == null) {
@@ -189,4 +211,18 @@ public AggregationDataExtractorContext getContext() {
return context;
}
+ @Override
+ public DataSummary getSummary() {
+ ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder = buildSearchRequest(
+ DataExtractorUtils.getSearchSourceBuilderForSummary(context.queryContext)
+ );
+ SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder);
+ try {
+ LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId);
+ timingStatsReporter.reportSearchDuration(searchResponse.getTook());
+ return DataExtractorUtils.getDataSummary(searchResponse);
+ } finally {
+ searchResponse.decRef();
+ }
+ }
}
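
For illustration only (not part of the patch), the release-on-failure idiom from executeSearchRequest in isolation, with a trivial stand-in for the ref-counted response:

import java.util.concurrent.atomic.AtomicInteger;

public class ReleaseOnFailureSketch {
    // Trivial stand-in for Elasticsearch's ref-counted SearchResponse.
    record Response(AtomicInteger refCount) {
        void decRef() { refCount.decrementAndGet(); }
    }

    // Same shape as executeSearchRequest above: if validation throws, release the
    // response here; otherwise ownership (and the duty to decRef) moves to the caller.
    static Response validated(Response response, boolean valid) {
        boolean success = false;
        try {
            if (valid == false) {
                throw new IllegalStateException("skipped clusters");
            }
            success = true;
        } finally {
            if (success == false) {
                response.decRef();
            }
        }
        return response;
    }

    public static void main(String[] args) {
        Response ok = validated(new Response(new AtomicInteger(1)), true);
        System.out.println(ok.refCount().get()); // 1: still owned by the caller
    }
}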
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java
index 34ea3a1fad04e..0a41c4387634e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java
@@ -17,7 +17,7 @@
* stored and they are then processed in batches. Cancellation is supported between batches.
* Note that this class is NOT thread-safe.
*/
-class AggregationDataExtractor extends AbstractAggregationDataExtractor<SearchRequestBuilder> {
+class AggregationDataExtractor extends AbstractAggregationDataExtractor {
AggregationDataExtractor(
Client client,
@@ -30,8 +30,8 @@ class AggregationDataExtractor extends AbstractAggregationDataExtractor<SearchRequestBuilder>
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorContext.java
 final Set<String> fields;
- final String[] indices;
- final QueryBuilder query;
final AggregatorFactories.Builder aggs;
- final long start;
- final long end;
final boolean includeDocCount;
- final Map<String, String> headers;
- final IndicesOptions indicesOptions;
- final Map<String, Object> runtimeMappings;
+ final DataExtractorQueryContext queryContext;
AggregationDataExtractorContext(
String jobId,
@@ -44,17 +37,19 @@ class AggregationDataExtractorContext {
IndicesOptions indicesOptions,
Map<String, Object> runtimeMappings
) {
- this.jobId = Objects.requireNonNull(jobId);
- this.timeField = Objects.requireNonNull(timeField);
+ this.jobId = jobId;
this.fields = Objects.requireNonNull(fields);
- this.indices = indices.toArray(new String[0]);
- this.query = Objects.requireNonNull(query);
this.aggs = Objects.requireNonNull(aggs);
- this.start = start;
- this.end = end;
this.includeDocCount = includeDocCount;
- this.headers = headers;
- this.indicesOptions = Objects.requireNonNull(indicesOptions);
- this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
+ this.queryContext = new DataExtractorQueryContext(
+ indices,
+ query,
+ Objects.requireNonNull(timeField),
+ start,
+ end,
+ headers,
+ indicesOptions,
+ runtimeMappings
+ );
}
}
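All three extractor contexts now funnel the same eight query-scoped values into one object. A sketch of DataExtractorQueryContext as implied by the constructor calls in this patch; the real class in the ml plugin may add accessors or validation:

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.index.query.QueryBuilder;

import java.util.List;
import java.util.Map;
import java.util.Objects;

public class DataExtractorQueryContext {
    public final String[] indices;
    public final QueryBuilder query;
    public final String timeField;
    public final long start;
    public final long end;
    public final Map<String, String> headers;
    public final IndicesOptions indicesOptions;
    public final Map<String, Object> runtimeMappings;

    public DataExtractorQueryContext(
        List<String> indices,
        QueryBuilder query,
        String timeField,
        long start,
        long end,
        Map<String, String> headers,
        IndicesOptions indicesOptions,
        Map<String, Object> runtimeMappings
    ) {
        // Null checks mirror the ones removed from the per-extractor contexts above.
        this.indices = indices.toArray(new String[0]);
        this.query = Objects.requireNonNull(query);
        this.timeField = timeField;
        this.start = start;
        this.end = end;
        this.headers = headers;
        this.indicesOptions = Objects.requireNonNull(indicesOptions);
        this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
    }
}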
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java
index 0dfdd9897737e..e4712d051ef1e 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java
@@ -48,6 +48,9 @@ class CompositeAggregationDataExtractor implements DataExtractor {
private static final Logger LOGGER = LogManager.getLogger(CompositeAggregationDataExtractor.class);
+ private static final String EARLIEST_TIME = "earliest_time";
+ private static final String LATEST_TIME = "latest_time";
+
private volatile Map<String, Object> afterKey = null;
private final CompositeAggregationBuilder compositeAggregationBuilder;
private final Client client;
@@ -98,7 +101,7 @@ public void destroy() {
@Override
public long getEndTime() {
- return context.end;
+ return context.queryContext.end;
}
@Override
@@ -107,7 +110,7 @@ public Result next() throws IOException {
throw new NoSuchElementException();
}
- SearchInterval searchInterval = new SearchInterval(context.start, context.end);
+ SearchInterval searchInterval = new SearchInterval(context.queryContext.start, context.queryContext.end);
InternalAggregations aggs = search();
if (aggs == null) {
LOGGER.trace(() -> "[" + context.jobId + "] extraction finished");
@@ -125,13 +128,25 @@ private InternalAggregations search() {
// Also, it doesn't make sense to have a derivative when grouping by time AND by some other criteria.
LOGGER.trace(
- () -> format("[%s] Executing composite aggregated search from [%s] to [%s]", context.jobId, context.start, context.end)
+ () -> format(
+ "[%s] Executing composite aggregated search from [%s] to [%s]",
+ context.jobId,
+ context.queryContext.start,
+ context.queryContext.end
+ )
);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0)
- .query(DataExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, context.start, context.end));
+ .query(
+ DataExtractorUtils.wrapInTimeRangeQuery(
+ context.queryContext.query,
+ context.queryContext.timeField,
+ context.queryContext.start,
+ context.queryContext.end
+ )
+ );
- if (context.runtimeMappings.isEmpty() == false) {
- searchSourceBuilder.runtimeMappings(context.runtimeMappings);
+ if (context.queryContext.runtimeMappings.isEmpty() == false) {
+ searchSourceBuilder.runtimeMappings(context.queryContext.runtimeMappings);
}
if (afterKey != null) {
compositeAggregationBuilder.aggregateAfter(afterKey);
@@ -156,16 +171,16 @@ private InternalAggregations search() {
}
}
- protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
+ private SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
SearchResponse searchResponse = ClientHelper.executeWithHeaders(
- context.headers,
+ context.queryContext.headers,
ClientHelper.ML_ORIGIN,
client,
searchRequestBuilder::get
);
boolean success = false;
try {
- checkForSkippedClusters(searchResponse);
+ DataExtractorUtils.checkForSkippedClusters(searchResponse);
success = true;
} finally {
if (success == false) {
@@ -177,10 +192,10 @@ protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder)
+ @Override
+ public DataSummary getSummary() {
+ ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder = DataExtractorUtils.getSearchRequestBuilderForSummary(
+ client,
+ context.queryContext
+ );
+ SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder);
+ try {
+ LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId);
+ timingStatsReporter.reportSearchDuration(searchResponse.getTook());
+ return DataExtractorUtils.getDataSummary(searchResponse);
+ } finally {
+ searchResponse.decRef();
+ }
+ }
}
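CompositeAggregationDataExtractor keeps its paging state in afterKey and feeds it back through aggregateAfter on the next search. A condensed sketch of that loop shape; runSearch is a hypothetical stand-in for executing one page of the composite search:

import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;

import java.util.Map;

abstract class CompositePagingSketch {

    // Stand-in for executing one page of the composite search.
    abstract CompositeAggregation runSearch(CompositeAggregationBuilder builder);

    void consumeAllPages(CompositeAggregationBuilder builder) {
        Map<String, Object> afterKey = null;
        while (true) {
            if (afterKey != null) {
                builder.aggregateAfter(afterKey); // resume where the previous page ended
            }
            CompositeAggregation page = runSearch(builder);
            if (page == null || page.getBuckets().isEmpty()) {
                return; // extraction finished
            }
            page.getBuckets().forEach(bucket -> { /* stream bucket to the consumer */ });
            afterKey = page.afterKey(); // null once the final page is consumed
            if (afterKey == null) {
                return;
            }
        }
    }
}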
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorContext.java
index 5fd5b58c5556d..75531e68d9738 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorContext.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorContext.java
@@ -9,6 +9,7 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
+import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorQueryContext;
import java.util.List;
import java.util.Map;
@@ -18,18 +19,11 @@
class CompositeAggregationDataExtractorContext {
final String jobId;
- final String timeField;
final Set<String> fields;
- final String[] indices;
- final QueryBuilder query;
final CompositeAggregationBuilder compositeAggregationBuilder;
- final long start;
- final long end;
final boolean includeDocCount;
- final Map<String, String> headers;
- final IndicesOptions indicesOptions;
- final Map<String, Object> runtimeMappings;
final String compositeAggDateHistogramGroupSourceName;
+ final DataExtractorQueryContext queryContext;
CompositeAggregationDataExtractorContext(
String jobId,
@@ -47,17 +41,19 @@ class CompositeAggregationDataExtractorContext {
Map<String, Object> runtimeMappings
) {
this.jobId = Objects.requireNonNull(jobId);
- this.timeField = Objects.requireNonNull(timeField);
this.fields = Objects.requireNonNull(fields);
- this.indices = indices.toArray(new String[0]);
- this.query = Objects.requireNonNull(query);
this.compositeAggregationBuilder = Objects.requireNonNull(compositeAggregationBuilder);
this.compositeAggDateHistogramGroupSourceName = Objects.requireNonNull(compositeAggDateHistogramGroupSourceName);
- this.start = start;
- this.end = end;
this.includeDocCount = includeDocCount;
- this.headers = headers;
- this.indicesOptions = Objects.requireNonNull(indicesOptions);
- this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
+ this.queryContext = new DataExtractorQueryContext(
+ indices,
+ query,
+ Objects.requireNonNull(timeField),
+ start,
+ end,
+ headers,
+ indicesOptions,
+ runtimeMappings
+ );
}
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java
index 1503e93e5c11b..89a137807959d 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java
@@ -18,7 +18,7 @@
* stored and they are then processed in batches. Cancellation is supported between batches.
* Note that this class is NOT thread-safe.
*/
-class RollupDataExtractor extends AbstractAggregationDataExtractor<RollupSearchAction.RequestBuilder> {
+class RollupDataExtractor extends AbstractAggregationDataExtractor {
RollupDataExtractor(
Client client,
@@ -30,8 +30,8 @@ class RollupDataExtractor extends AbstractAggregationDataExtractor<RollupSearchAction.RequestBuilder>
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java
 * <p> The chunk span can be either specified or not. When not specified,
- * a heuristic is employed (see {@link DataSummary#estimateChunk()}) to automatically determine the chunk span.
- * The search is set up (see {@link #setUpChunkedSearch()} by querying a data summary for the given time range
+ * a heuristic is employed (see {@link #setUpChunkedSearch()}) to automatically determine the chunk span.
+ * The search is set up by querying a data summary for the given time range
* that includes the number of total hits and the earliest/latest times. Those are then used to determine the chunk span,
* when necessary, and to jump the search forward to the time where the earliest data can be found.
* If a search for a chunk returns empty, the set up is performed again for the remaining time.
@@ -50,49 +35,30 @@
*/
public class ChunkedDataExtractor implements DataExtractor {
- interface DataSummary {
- long estimateChunk();
-
- boolean hasData();
-
- long earliestTime();
-
- long getDataTimeSpread();
- }
-
private static final Logger LOGGER = LogManager.getLogger(ChunkedDataExtractor.class);
- private static final String EARLIEST_TIME = "earliest_time";
- private static final String LATEST_TIME = "latest_time";
-
/** Let us set a minimum chunk span of 1 minute */
private static final long MIN_CHUNK_SPAN = 60000L;
- private final Client client;
private final DataExtractorFactory dataExtractorFactory;
private final ChunkedDataExtractorContext context;
- private final DataSummaryFactory dataSummaryFactory;
- private final DatafeedTimingStatsReporter timingStatsReporter;
private long currentStart;
private long currentEnd;
private long chunkSpan;
private boolean isCancelled;
private DataExtractor currentExtractor;
- public ChunkedDataExtractor(
- Client client,
- DataExtractorFactory dataExtractorFactory,
- ChunkedDataExtractorContext context,
- DatafeedTimingStatsReporter timingStatsReporter
- ) {
- this.client = Objects.requireNonNull(client);
+ public ChunkedDataExtractor(DataExtractorFactory dataExtractorFactory, ChunkedDataExtractorContext context) {
this.dataExtractorFactory = Objects.requireNonNull(dataExtractorFactory);
this.context = Objects.requireNonNull(context);
- this.timingStatsReporter = Objects.requireNonNull(timingStatsReporter);
- this.currentStart = context.start;
- this.currentEnd = context.start;
+ this.currentStart = context.start();
+ this.currentEnd = context.start();
this.isCancelled = false;
- this.dataSummaryFactory = new DataSummaryFactory();
+ }
+
+ @Override
+ public DataSummary getSummary() {
+ return null;
}
@Override
@@ -101,7 +67,7 @@ public boolean hasNext() {
if (isCancelled()) {
return currentHasNext;
}
- return currentHasNext || currentEnd < context.end;
+ return currentHasNext || currentEnd < context.end();
}
@Override
@@ -119,47 +85,42 @@ public Result next() throws IOException {
}
private void setUpChunkedSearch() {
- DataSummary dataSummary = dataSummaryFactory.buildDataSummary();
+ DataSummary dataSummary = dataExtractorFactory.newExtractor(currentStart, context.end()).getSummary();
if (dataSummary.hasData()) {
- currentStart = context.timeAligner.alignToFloor(dataSummary.earliestTime());
+ currentStart = context.timeAligner().alignToFloor(dataSummary.earliestTime());
currentEnd = currentStart;
- chunkSpan = context.chunkSpan == null ? dataSummary.estimateChunk() : context.chunkSpan.getMillis();
- chunkSpan = context.timeAligner.alignToCeil(chunkSpan);
- LOGGER.debug(
- "[{}] Chunked search configured: kind = {}, dataTimeSpread = {} ms, chunk span = {} ms",
- context.jobId,
- dataSummary.getClass().getSimpleName(),
- dataSummary.getDataTimeSpread(),
- chunkSpan
- );
- } else {
- // search is over
- currentEnd = context.end;
- LOGGER.debug("[{}] Chunked search configured: no data found", context.jobId);
- }
- }
- protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
- SearchResponse searchResponse = ClientHelper.executeWithHeaders(
- context.headers,
- ClientHelper.ML_ORIGIN,
- client,
- searchRequestBuilder::get
- );
- boolean success = false;
- try {
- checkForSkippedClusters(searchResponse);
- success = true;
- } finally {
- if (success == false) {
- searchResponse.decRef();
+ if (context.chunkSpan() != null) {
+ chunkSpan = context.chunkSpan().getMillis();
+ } else if (context.hasAggregations()) {
+ // This heuristic is a direct copy of the manual chunking config auto-creation done in {@link DatafeedConfig}
+ chunkSpan = DatafeedConfig.DEFAULT_AGGREGATION_CHUNKING_BUCKETS * context.histogramInterval();
+ } else {
+ long timeSpread = dataSummary.latestTime() - dataSummary.earliestTime();
+ if (timeSpread <= 0) {
+ chunkSpan = context.end() - currentEnd;
+ } else {
+ // The heuristic here is that we want a time interval where we expect roughly scrollSize documents
+ // (assuming data are uniformly spread over time).
+ // We have totalHits documents over dataTimeSpread (latestTime - earliestTime), we want scrollSize documents over chunk.
+ // Thus, the interval would be (scrollSize * dataTimeSpread) / totalHits.
+ // However, assuming this as the chunk span may often lead to half-filled pages or empty searches.
+ // It is beneficial to take a multiple of that. Based on benchmarking, we set this to 10x.
+ chunkSpan = Math.max(MIN_CHUNK_SPAN, 10 * (context.scrollSize() * timeSpread) / dataSummary.totalHits());
+ }
}
+
+ chunkSpan = context.timeAligner().alignToCeil(chunkSpan);
+ LOGGER.debug("[{}] Chunked search configured: chunk span = {} ms", context.jobId(), chunkSpan);
+ } else {
+ // search is over
+ currentEnd = context.end();
+ LOGGER.debug("[{}] Chunked search configured: no data found", context.jobId());
}
- return searchResponse;
}
private Result getNextStream() throws IOException {
- SearchInterval lastSearchInterval = new SearchInterval(context.start, context.end);
+ SearchInterval lastSearchInterval = new SearchInterval(context.start(), context.end());
while (hasNext()) {
boolean isNewSearch = false;
@@ -202,9 +163,9 @@ private Result getNextStream() throws IOException {
private void advanceTime() {
currentStart = currentEnd;
- currentEnd = Math.min(currentStart + chunkSpan, context.end);
+ currentEnd = Math.min(currentStart + chunkSpan, context.end());
currentExtractor = dataExtractorFactory.newExtractor(currentStart, currentEnd);
- LOGGER.trace("[{}] advances time to [{}, {})", context.jobId, currentStart, currentEnd);
+ LOGGER.debug("[{}] advances time to [{}, {})", context.jobId(), currentStart, currentEnd);
}
@Override
@@ -230,186 +191,10 @@ public void destroy() {
@Override
public long getEndTime() {
- return context.end;
+ return context.end();
}
ChunkedDataExtractorContext getContext() {
return context;
}
-
- private class DataSummaryFactory {
-
- /**
- * If there are aggregations, an AggregatedDataSummary object is created. It returns a ScrollingDataSummary otherwise.
- *
- * By default a DatafeedConfig with aggregations, should already have a manual ChunkingConfig created.
- * However, the end user could have specifically set the ChunkingConfig to AUTO, which would not really work for aggregations.
- * So, if we need to gather an appropriate chunked time for aggregations, we can utilize the AggregatedDataSummary
- *
- * @return DataSummary object
- */
- private DataSummary buildDataSummary() {
- return context.hasAggregations ? newAggregatedDataSummary() : newScrolledDataSummary();
- }
-
- private DataSummary newScrolledDataSummary() {
- SearchRequestBuilder searchRequestBuilder = rangeSearchRequest();
-
- SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder);
- try {
- LOGGER.debug("[{}] Scrolling Data summary response was obtained", context.jobId);
- timingStatsReporter.reportSearchDuration(searchResponse.getTook());
-
- long earliestTime = 0;
- long latestTime = 0;
- long totalHits = searchResponse.getHits().getTotalHits().value;
- if (totalHits > 0) {
- InternalAggregations aggregations = searchResponse.getAggregations();
- Min min = aggregations.get(EARLIEST_TIME);
- earliestTime = (long) min.value();
- Max max = aggregations.get(LATEST_TIME);
- latestTime = (long) max.value();
- }
- return new ScrolledDataSummary(earliestTime, latestTime, totalHits);
- } finally {
- searchResponse.decRef();
- }
- }
-
- private DataSummary newAggregatedDataSummary() {
- // TODO: once RollupSearchAction is changed from indices:admin* to indices:data/read/* this branch is not needed
- ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder =
- dataExtractorFactory instanceof RollupDataExtractorFactory ? rollupRangeSearchRequest() : rangeSearchRequest();
- SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder);
- try {
- LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId);
- timingStatsReporter.reportSearchDuration(searchResponse.getTook());
-
- InternalAggregations aggregations = searchResponse.getAggregations();
- // This can happen if all the indices the datafeed is searching are deleted after it started.
- // Note that unlike the scrolled data summary method above we cannot check for this situation
- // by checking for zero hits, because aggregations that work on rollups return zero hits even
- // when they retrieve data.
- if (aggregations == null) {
- return AggregatedDataSummary.noDataSummary(context.histogramInterval);
- }
- Min min = aggregations.get(EARLIEST_TIME);
- Max max = aggregations.get(LATEST_TIME);
- return new AggregatedDataSummary(min.value(), max.value(), context.histogramInterval);
- } finally {
- searchResponse.decRef();
- }
- }
-
- private SearchSourceBuilder rangeSearchBuilder() {
- return new SearchSourceBuilder().size(0)
- .query(DataExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, currentStart, context.end))
- .runtimeMappings(context.runtimeMappings)
- .aggregation(AggregationBuilders.min(EARLIEST_TIME).field(context.timeField))
- .aggregation(AggregationBuilders.max(LATEST_TIME).field(context.timeField));
- }
-
- private SearchRequestBuilder rangeSearchRequest() {
- return new SearchRequestBuilder(client).setIndices(context.indices)
- .setIndicesOptions(context.indicesOptions)
- .setSource(rangeSearchBuilder())
- .setAllowPartialSearchResults(false)
- .setTrackTotalHits(true);
- }
-
- private RollupSearchAction.RequestBuilder rollupRangeSearchRequest() {
- SearchRequest searchRequest = new SearchRequest().indices(context.indices)
- .indicesOptions(context.indicesOptions)
- .allowPartialSearchResults(false)
- .source(rangeSearchBuilder());
- return new RollupSearchAction.RequestBuilder(client, searchRequest);
- }
- }
-
- private class ScrolledDataSummary implements DataSummary {
-
- private final long earliestTime;
- private final long latestTime;
- private final long totalHits;
-
- private ScrolledDataSummary(long earliestTime, long latestTime, long totalHits) {
- this.earliestTime = earliestTime;
- this.latestTime = latestTime;
- this.totalHits = totalHits;
- }
-
- @Override
- public long earliestTime() {
- return earliestTime;
- }
-
- @Override
- public long getDataTimeSpread() {
- return latestTime - earliestTime;
- }
-
- /**
- * The heuristic here is that we want a time interval where we expect roughly scrollSize documents
- * (assuming data are uniformly spread over time).
- * We have totalHits documents over dataTimeSpread (latestTime - earliestTime), we want scrollSize documents over chunk.
- * Thus, the interval would be (scrollSize * dataTimeSpread) / totalHits.
- * However, assuming this as the chunk span may often lead to half-filled pages or empty searches.
- * It is beneficial to take a multiple of that. Based on benchmarking, we set this to 10x.
- */
- @Override
- public long estimateChunk() {
- long dataTimeSpread = getDataTimeSpread();
- if (totalHits <= 0 || dataTimeSpread <= 0) {
- return context.end - currentEnd;
- }
- long estimatedChunk = 10 * (context.scrollSize * getDataTimeSpread()) / totalHits;
- return Math.max(estimatedChunk, MIN_CHUNK_SPAN);
- }
-
- @Override
- public boolean hasData() {
- return totalHits > 0;
- }
- }
-
- static class AggregatedDataSummary implements DataSummary {
-
- private final double earliestTime;
- private final double latestTime;
- private final long histogramIntervalMillis;
-
- static AggregatedDataSummary noDataSummary(long histogramInterval) {
- // hasData() uses infinity to mean no data
- return new AggregatedDataSummary(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, histogramInterval);
- }
-
- AggregatedDataSummary(double earliestTime, double latestTime, long histogramInterval) {
- this.earliestTime = earliestTime;
- this.latestTime = latestTime;
- this.histogramIntervalMillis = histogramInterval;
- }
-
- /**
- * This heuristic is a direct copy of the manual chunking config auto-creation done in {@link DatafeedConfig}
- */
- @Override
- public long estimateChunk() {
- return DatafeedConfig.DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis;
- }
-
- @Override
- public boolean hasData() {
- return (Double.isInfinite(earliestTime) || Double.isInfinite(latestTime)) == false;
- }
-
- @Override
- public long earliestTime() {
- return (long) earliestTime;
- }
-
- @Override
- public long getDataTimeSpread() {
- return (long) latestTime - (long) earliestTime;
- }
- }
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java
index 2989ddb40d370..465c97c38372b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java
@@ -6,67 +6,21 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.chunked;
-import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.index.query.QueryBuilder;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-class ChunkedDataExtractorContext {
+record ChunkedDataExtractorContext(
+ String jobId,
+ int scrollSize,
+ long start,
+ long end,
+ TimeValue chunkSpan,
+ TimeAligner timeAligner,
+ boolean hasAggregations,
+ Long histogramInterval
+) {
interface TimeAligner {
long alignToFloor(long value);
long alignToCeil(long value);
}
-
- final String jobId;
- final String timeField;
- final String[] indices;
- final QueryBuilder query;
- final int scrollSize;
- final long start;
- final long end;
- final TimeValue chunkSpan;
- final TimeAligner timeAligner;
- final Map<String, String> headers;
- final boolean hasAggregations;
- final Long histogramInterval;
- final IndicesOptions indicesOptions;
- final Map<String, Object> runtimeMappings;
-
- ChunkedDataExtractorContext(
- String jobId,
- String timeField,
- List<String> indices,
- QueryBuilder query,
- int scrollSize,
- long start,
- long end,
- @Nullable TimeValue chunkSpan,
- TimeAligner timeAligner,
- Map<String, String> headers,
- boolean hasAggregations,
- @Nullable Long histogramInterval,
- IndicesOptions indicesOptions,
- Map<String, Object> runtimeMappings
- ) {
- this.jobId = Objects.requireNonNull(jobId);
- this.timeField = Objects.requireNonNull(timeField);
- this.indices = indices.toArray(new String[indices.size()]);
- this.query = Objects.requireNonNull(query);
- this.scrollSize = scrollSize;
- this.start = start;
- this.end = end;
- this.chunkSpan = chunkSpan;
- this.timeAligner = Objects.requireNonNull(timeAligner);
- this.headers = headers;
- this.hasAggregations = hasAggregations;
- this.histogramInterval = histogramInterval;
- this.indicesOptions = Objects.requireNonNull(indicesOptions);
- this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
- }
}
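Turning the context into a record is why every call site above switched from field access (context.start) to accessor calls (context.start()). A small usage sketch under this patch's component order, assuming it sits in the same package as the package-private record; the identity aligner is purely illustrative:

import org.elasticsearch.core.TimeValue;

class ChunkedContextDemo {
    static ChunkedDataExtractorContext.TimeAligner identityAligner() {
        return new ChunkedDataExtractorContext.TimeAligner() {
            @Override public long alignToFloor(long value) { return value; }
            @Override public long alignToCeil(long value) { return value; }
        };
    }

    public static void main(String[] args) {
        // Components follow the record header: jobId, scrollSize, start, end,
        // chunkSpan, timeAligner, hasAggregations, histogramInterval.
        ChunkedDataExtractorContext context = new ChunkedDataExtractorContext(
            "my-job", 1_000, 0L, 3_600_000L, TimeValue.timeValueMinutes(5), identityAligner(), false, null
        );
        System.out.println(context.start() + " -> " + context.end()); // 0 -> 3600000
    }
}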
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java
index b4141ec632d3b..09414ba58aacb 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java
@@ -6,14 +6,10 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.chunked;
-import org.elasticsearch.client.internal.Client;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.utils.Intervals;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;
@@ -21,57 +17,37 @@
public class ChunkedDataExtractorFactory implements DataExtractorFactory {
- private final Client client;
private final DatafeedConfig datafeedConfig;
-
- private final QueryBuilder extraFilters;
private final Job job;
private final DataExtractorFactory dataExtractorFactory;
private final NamedXContentRegistry xContentRegistry;
- private final DatafeedTimingStatsReporter timingStatsReporter;
public ChunkedDataExtractorFactory(
- Client client,
DatafeedConfig datafeedConfig,
- QueryBuilder extraFilters,
Job job,
NamedXContentRegistry xContentRegistry,
- DataExtractorFactory dataExtractorFactory,
- DatafeedTimingStatsReporter timingStatsReporter
+ DataExtractorFactory dataExtractorFactory
) {
- this.client = Objects.requireNonNull(client);
this.datafeedConfig = Objects.requireNonNull(datafeedConfig);
- this.extraFilters = extraFilters;
this.job = Objects.requireNonNull(job);
this.dataExtractorFactory = Objects.requireNonNull(dataExtractorFactory);
this.xContentRegistry = xContentRegistry;
- this.timingStatsReporter = Objects.requireNonNull(timingStatsReporter);
}
@Override
public DataExtractor newExtractor(long start, long end) {
- QueryBuilder queryBuilder = datafeedConfig.getParsedQuery(xContentRegistry);
- if (extraFilters != null) {
- queryBuilder = QueryBuilders.boolQuery().filter(queryBuilder).filter(extraFilters);
- }
ChunkedDataExtractorContext.TimeAligner timeAligner = newTimeAligner();
ChunkedDataExtractorContext dataExtractorContext = new ChunkedDataExtractorContext(
job.getId(),
- job.getDataDescription().getTimeField(),
- datafeedConfig.getIndices(),
- queryBuilder,
datafeedConfig.getScrollSize(),
timeAligner.alignToCeil(start),
timeAligner.alignToFloor(end),
datafeedConfig.getChunkingConfig().getTimeSpan(),
timeAligner,
- datafeedConfig.getHeaders(),
datafeedConfig.hasAggregations(),
- datafeedConfig.hasAggregations() ? datafeedConfig.getHistogramIntervalMillis(xContentRegistry) : null,
- datafeedConfig.getIndicesOptions(),
- datafeedConfig.getRuntimeMappings()
+ datafeedConfig.hasAggregations() ? datafeedConfig.getHistogramIntervalMillis(xContentRegistry) : null
);
- return new ChunkedDataExtractor(client, dataExtractorFactory, dataExtractorContext, timingStatsReporter);
+ return new ChunkedDataExtractor(dataExtractorFactory, dataExtractorContext);
}
private ChunkedDataExtractorContext.TimeAligner newTimeAligner() {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
index 0caa59fae914b..5da89da6b3450 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java
@@ -91,7 +91,7 @@ public void destroy() {
@Override
public long getEndTime() {
- return context.end;
+ return context.queryContext.end;
}
@Override
@@ -103,12 +103,12 @@ public Result next() throws IOException {
if (stream.isPresent() == false) {
hasNext = false;
}
- return new Result(new SearchInterval(context.start, context.end), stream);
+ return new Result(new SearchInterval(context.queryContext.start, context.queryContext.end), stream);
}
private Optional<InputStream> tryNextStream() throws IOException {
try {
- return scrollId == null ? Optional.ofNullable(initScroll(context.start)) : Optional.ofNullable(continueScroll());
+ return scrollId == null ? Optional.ofNullable(initScroll(context.queryContext.start)) : Optional.ofNullable(continueScroll());
} catch (Exception e) {
scrollId = null;
if (searchHasShardFailure) {
@@ -116,7 +116,7 @@ private Optional<InputStream> tryNextStream() throws IOException {
}
logger.debug("[{}] Resetting scroll search after shard failure", context.jobId);
markScrollAsErrored();
- return Optional.ofNullable(initScroll(lastTimestamp == null ? context.start : lastTimestamp));
+ return Optional.ofNullable(initScroll(lastTimestamp == null ? context.queryContext.start : lastTimestamp));
}
}
@@ -135,14 +135,14 @@ protected InputStream initScroll(long startTimestamp) throws IOException {
protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
SearchResponse searchResponse = ClientHelper.executeWithHeaders(
- context.headers,
+ context.queryContext.headers,
ClientHelper.ML_ORIGIN,
client,
searchRequestBuilder::get
);
boolean success = false;
try {
- checkForSkippedClusters(searchResponse);
+ DataExtractorUtils.checkForSkippedClusters(searchResponse);
success = true;
} catch (ResourceNotFoundException e) {
clearScrollLoggingExceptions(searchResponse.getScrollId());
@@ -158,12 +158,19 @@ protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
private SearchRequestBuilder buildSearchRequest(long start) {
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(context.scrollSize)
.sort(context.extractedFields.timeField(), SortOrder.ASC)
- .query(DataExtractorUtils.wrapInTimeRangeQuery(context.query, context.extractedFields.timeField(), start, context.end))
- .runtimeMappings(context.runtimeMappings);
+ .query(
+ DataExtractorUtils.wrapInTimeRangeQuery(
+ context.queryContext.query,
+ context.extractedFields.timeField(),
+ start,
+ context.queryContext.end
+ )
+ )
+ .runtimeMappings(context.queryContext.runtimeMappings);
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client).setScroll(SCROLL_TIMEOUT)
- .setIndices(context.indices)
- .setIndicesOptions(context.indicesOptions)
+ .setIndices(context.queryContext.indices)
+ .setIndicesOptions(context.queryContext.indicesOptions)
.setAllowPartialSearchResults(false)
.setSource(searchSourceBuilder);
@@ -228,7 +235,9 @@ private InputStream continueScroll() throws IOException {
}
logger.debug("[{}] search failed due to SearchPhaseExecutionException. Will attempt again with new scroll", context.jobId);
markScrollAsErrored();
- searchResponse = executeSearchRequest(buildSearchRequest(lastTimestamp == null ? context.start : lastTimestamp));
+ searchResponse = executeSearchRequest(
+ buildSearchRequest(lastTimestamp == null ? context.queryContext.start : lastTimestamp)
+ );
}
logger.debug("[{}] Search response was obtained", context.jobId);
timingStatsReporter.reportSearchDuration(searchResponse.getTook());
@@ -254,14 +263,14 @@ void markScrollAsErrored() {
@SuppressWarnings("HiddenField")
protected SearchResponse executeSearchScrollRequest(String scrollId) {
SearchResponse searchResponse = ClientHelper.executeWithHeaders(
- context.headers,
+ context.queryContext.headers,
ClientHelper.ML_ORIGIN,
client,
() -> new SearchScrollRequestBuilder(client).setScroll(SCROLL_TIMEOUT).setScrollId(scrollId).get()
);
boolean success = false;
try {
- checkForSkippedClusters(searchResponse);
+ DataExtractorUtils.checkForSkippedClusters(searchResponse);
success = true;
} catch (ResourceNotFoundException e) {
clearScrollLoggingExceptions(searchResponse.getScrollId());
@@ -294,11 +303,24 @@ private void innerClearScroll(String scrollId) {
ClearScrollRequest request = new ClearScrollRequest();
request.addScrollId(scrollId);
ClientHelper.executeWithHeaders(
- context.headers,
+ context.queryContext.headers,
ClientHelper.ML_ORIGIN,
client,
() -> client.execute(TransportClearScrollAction.TYPE, request).actionGet()
);
}
}
+
+ @Override
+ public DataSummary getSummary() {
+ SearchRequestBuilder searchRequestBuilder = DataExtractorUtils.getSearchRequestBuilderForSummary(client, context.queryContext);
+ SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder);
+ try {
+ logger.debug("[{}] Scrolling Data summary response was obtained", context.jobId);
+ timingStatsReporter.reportSearchDuration(searchResponse.getTook());
+ return DataExtractorUtils.getDataSummary(searchResponse);
+ } finally {
+ searchResponse.decRef();
+ }
+ }
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java
index 58c0c5b485742..776c7c252ffdf 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java
@@ -9,6 +9,7 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorQueryContext;
import java.util.List;
import java.util.Map;
@@ -18,15 +19,9 @@ class ScrollDataExtractorContext {
final String jobId;
final TimeBasedExtractedFields extractedFields;
- final String[] indices;
- final QueryBuilder query;
final List<SearchSourceBuilder.ScriptField> scriptFields;
final int scrollSize;
- final long start;
- final long end;
- final Map<String, String> headers;
- final IndicesOptions indicesOptions;
- final Map<String, Object> runtimeMappings;
+ final DataExtractorQueryContext queryContext;
ScrollDataExtractorContext(
String jobId,
@@ -41,16 +36,19 @@ class ScrollDataExtractorContext {
IndicesOptions indicesOptions,
Map<String, Object> runtimeMappings
) {
- this.jobId = Objects.requireNonNull(jobId);
+ this.jobId = jobId;
this.extractedFields = Objects.requireNonNull(extractedFields);
- this.indices = indices.toArray(new String[indices.size()]);
- this.query = Objects.requireNonNull(query);
this.scriptFields = Objects.requireNonNull(scriptFields);
this.scrollSize = scrollSize;
- this.start = start;
- this.end = end;
- this.headers = headers;
- this.indicesOptions = Objects.requireNonNull(indicesOptions);
- this.runtimeMappings = Objects.requireNonNull(runtimeMappings);
+ this.queryContext = new DataExtractorQueryContext(
+ indices,
+ query,
+ extractedFields.timeField(),
+ start,
+ end,
+ headers,
+ indicesOptions,
+ runtimeMappings
+ );
}
}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java
index 9a76eb5f2b936..2a086457ba755 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java
@@ -50,8 +50,8 @@ public void testNewExtractor_GivenAlignedTimes() {
AggregationDataExtractor dataExtractor = (AggregationDataExtractor) factory.newExtractor(2000, 5000);
- assertThat(dataExtractor.getContext().start, equalTo(2000L));
- assertThat(dataExtractor.getContext().end, equalTo(5000L));
+ assertThat(dataExtractor.getContext().queryContext.start, equalTo(2000L));
+ assertThat(dataExtractor.getContext().queryContext.end, equalTo(5000L));
}
public void testNewExtractor_GivenNonAlignedTimes() {
@@ -59,8 +59,8 @@ public void testNewExtractor_GivenNonAlignedTimes() {
AggregationDataExtractor dataExtractor = (AggregationDataExtractor) factory.newExtractor(3980, 9200);
- assertThat(dataExtractor.getContext().start, equalTo(4000L));
- assertThat(dataExtractor.getContext().end, equalTo(9000L));
+ assertThat(dataExtractor.getContext().queryContext.start, equalTo(4000L));
+ assertThat(dataExtractor.getContext().queryContext.end, equalTo(9000L));
}
private AggregationDataExtractorFactory createFactory(long histogramInterval) {
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java
index 5b2cd8f78d02e..02e33695ff7e2 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java
@@ -6,28 +6,35 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation;
+import org.apache.lucene.search.TotalHits;
+import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Min;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval;
import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter.DatafeedTimingStatsPersister;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.AggregationTestUtils.Term;
import org.junit.Before;
+import org.mockito.ArgumentCaptor;
import java.io.BufferedReader;
import java.io.IOException;
@@ -50,14 +57,17 @@
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.stringContainsInOrder;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class AggregationDataExtractorTests extends ESTestCase {
- private Client testClient;
- private List<SearchRequestBuilder> capturedSearchRequests;
+ private Client client;
private String jobId;
private String timeField;
private Set<String> fields;
@@ -67,37 +77,12 @@ public class AggregationDataExtractorTests extends ESTestCase {
private DatafeedTimingStatsReporter timingStatsReporter;
private Map<String, Object> runtimeMappings;
- private class TestDataExtractor extends AggregationDataExtractor {
-
- private SearchResponse nextResponse;
- private SearchPhaseExecutionException ex;
-
- TestDataExtractor(long start, long end) {
- super(testClient, createContext(start, end), timingStatsReporter);
- }
-
- @Override
- protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
- capturedSearchRequests.add(searchRequestBuilder);
- if (ex != null) {
- throw ex;
- }
- return nextResponse;
- }
-
- void setNextResponse(SearchResponse searchResponse) {
- nextResponse = searchResponse;
- }
-
- void setNextResponseToError(SearchPhaseExecutionException ex) {
- this.ex = ex;
- }
- }
-
@Before
public void setUpTests() {
- testClient = mock(Client.class);
- capturedSearchRequests = new ArrayList<>();
+ client = mock(Client.class);
+ when(client.threadPool()).thenReturn(mock(ThreadPool.class));
+ when(client.threadPool().getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
+
jobId = "test-job";
timeField = "time";
fields = new HashSet<>();
@@ -115,7 +100,7 @@ public void setUpTests() {
)
);
runtimeMappings = Collections.emptyMap();
- timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class));
+ timingStatsReporter = mock(DatafeedTimingStatsReporter.class);
}
public void testExtraction() throws IOException {
@@ -139,10 +124,11 @@ public void testExtraction() throws IOException {
)
);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 4000L);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 4000L), timingStatsReporter);
- SearchResponse response = createSearchResponse("time", histogramBuckets);
- extractor.setNextResponse(response);
+ ArgumentCaptor<SearchRequest> searchRequestCaptor = ArgumentCaptor.forClass(SearchRequest.class);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse("time", histogramBuckets));
+ when(client.execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
DataExtractor.Result result = extractor.next();
@@ -156,9 +142,8 @@ public void testExtraction() throws IOException {
{"time":3999,"airline":"b","responsetime":32.0,"doc_count":3}""";
assertThat(asString(stream.get()), equalTo(expectedStream));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
- String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
+ String searchRequest = searchRequestCaptor.getValue().toString().replaceAll("\\s", "");
assertThat(searchRequest, containsString("\"size\":0"));
assertThat(
searchRequest,
@@ -175,45 +160,47 @@ public void testExtraction() throws IOException {
}
public void testExtractionGivenResponseHasNullAggs() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2000L), timingStatsReporter);
- SearchResponse response = createSearchResponse(null);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse(null));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenResponseHasEmptyAggs() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2000L), timingStatsReporter);
+
InternalAggregations emptyAggs = AggregationTestUtils.createAggs(Collections.emptyList());
- SearchResponse response = createSearchResponse(emptyAggs);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse(emptyAggs));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenResponseHasEmptyHistogramAgg() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
- SearchResponse response = createSearchResponse("time", Collections.emptyList());
- extractor.setNextResponse(response);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2000L), timingStatsReporter);
+
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse("time", Collections.emptyList()));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenResponseHasMultipleTopLevelAggs() {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2000L), timingStatsReporter);
InternalHistogram histogram1 = mock(InternalHistogram.class);
when(histogram1.getName()).thenReturn("hist_1");
@@ -221,8 +208,8 @@ public void testExtractionGivenResponseHasMultipleTopLevelAggs() {
when(histogram2.getName()).thenReturn("hist_2");
InternalAggregations aggs = AggregationTestUtils.createAggs(Arrays.asList(histogram1, histogram2));
- SearchResponse response = createSearchResponse(aggs);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse(aggs));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, extractor::next);
@@ -230,9 +217,10 @@ public void testExtractionGivenResponseHasMultipleTopLevelAggs() {
}
public void testExtractionGivenCancelBeforeNext() {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 4000L);
- SearchResponse response = createSearchResponse("time", Collections.emptyList());
- extractor.setNextResponse(response);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 4000L), timingStatsReporter);
+
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse("time", Collections.emptyList()));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
extractor.cancel();
assertThat(extractor.hasNext(), is(false));
@@ -256,10 +244,10 @@ public void testExtractionGivenCancelHalfWay() throws IOException {
timestamp += 1000L;
}
- TestDataExtractor extractor = new TestDataExtractor(1000L, timestamp + 1);
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, timestamp + 1), timingStatsReporter);
- SearchResponse response = createSearchResponse("time", histogramBuckets);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse("time", histogramBuckets));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(countMatches('{', asString(extractor.next().data().get())), equalTo(2400L));
@@ -277,23 +265,57 @@ public void testExtractionGivenCancelHalfWay() throws IOException {
);
timestamp += 1000L;
}
- response = createSearchResponse("time", histogramBuckets);
- extractor.setNextResponse(response);
+ searchResponse = toActionFuture(createSearchResponse("time", histogramBuckets));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
extractor.cancel();
assertThat(extractor.hasNext(), is(false));
assertThat(extractor.isCancelled(), is(true));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenSearchResponseHasError() {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
- extractor.setNextResponseToError(new SearchPhaseExecutionException("phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY));
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2000L), timingStatsReporter);
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenThrow(
+ new SearchPhaseExecutionException("phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY)
+ );
assertThat(extractor.hasNext(), is(true));
expectThrows(SearchPhaseExecutionException.class, extractor::next);
}
+ public void testGetSummary() {
+ AggregationDataExtractor extractor = new AggregationDataExtractor(client, createContext(1000L, 2300L), timingStatsReporter);
+
+ ArgumentCaptor<SearchRequest> searchRequestCaptor = ArgumentCaptor.forClass(SearchRequest.class);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSummaryResponse(1001L, 2299L, 10L));
+ when(client.execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture())).thenReturn(searchResponse);
+
+ DataExtractor.DataSummary summary = extractor.getSummary();
+ assertThat(summary.earliestTime(), equalTo(1001L));
+ assertThat(summary.latestTime(), equalTo(2299L));
+ assertThat(summary.totalHits(), equalTo(10L));
+
+ String searchRequest = searchRequestCaptor.getValue().toString().replaceAll("\\s", "");
+ assertThat(searchRequest, containsString("\"size\":0"));
+ assertThat(
+ searchRequest,
+ containsString(
+ "\"query\":{\"bool\":{\"filter\":[{\"match_all\":{\"boost\":1.0}},"
+ + "{\"range\":{\"time\":{\"gte\":1000,\"lt\":2300,"
+ + "\"format\":\"epoch_millis\",\"boost\":1.0}}}]"
+ )
+ );
+ assertThat(
+ searchRequest,
+ containsString(
+ "\"aggregations\":{\"earliest_time\":{\"min\":{\"field\":\"time\"}}," + "\"latest_time\":{\"max\":{\"field\":\"time\"}}}}"
+ )
+ );
+ assertThat(searchRequest, not(containsString("\"track_total_hits\":false")));
+ assertThat(searchRequest, not(containsString("\"sort\"")));
+ }
+
private AggregationDataExtractorContext createContext(long start, long end) {
return new AggregationDataExtractorContext(
jobId,
@@ -311,7 +333,13 @@ private AggregationDataExtractorContext createContext(long start, long end) {
);
}
- @SuppressWarnings("unchecked")
+ private <T> ActionFuture<T> toActionFuture(T t) {
+ @SuppressWarnings("unchecked")
+ ActionFuture<T> future = (ActionFuture<T>) mock(ActionFuture.class);
+ when(future.actionGet()).thenReturn(t);
+ return future;
+ }
+
private SearchResponse createSearchResponse(String histogramName, List<InternalHistogram.Bucket> histogramBuckets) {
InternalHistogram histogram = mock(InternalHistogram.class);
when(histogram.getName()).thenReturn(histogramName);
@@ -330,6 +358,17 @@ private SearchResponse createSearchResponse(InternalAggregations aggregations) {
return searchResponse;
}
+ private SearchResponse createSummaryResponse(long start, long end, long totalHits) {
+ SearchResponse searchResponse = mock(SearchResponse.class);
+ when(searchResponse.getHits()).thenReturn(
+ new SearchHits(SearchHits.EMPTY, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1)
+ );
+ when(searchResponse.getAggregations()).thenReturn(
+ InternalAggregations.from(List.of(new Min("earliest_time", start, null, null), new Max("latest_time", end, null, null)))
+ );
+ return searchResponse;
+ }
+
private static String asString(InputStream inputStream) throws IOException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
return reader.lines().collect(Collectors.joining("\n"));
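The rewritten tests drop the TestDataExtractor subclass that used to intercept executeSearchRequest and instead stub the mocked client directly, capturing whatever request the extractor builds. A condensed, framework-only sketch of that stub-and-capture pattern; Service and Request are hypothetical stand-ins, not the Elasticsearch client types:

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.mockito.ArgumentCaptor;

interface Service {
    String execute(String action, Request request);
}

record Request(String body) {}

public class CaptorDemo {
    public static void main(String[] args) {
        Service service = mock(Service.class);
        ArgumentCaptor<Request> captor = ArgumentCaptor.forClass(Request.class);
        // Stub the response while recording the request the code under test built.
        when(service.execute(eq("search"), captor.capture())).thenReturn("ok");

        String result = service.execute("search", new Request("size:0"));

        System.out.println(result);                   // ok
        System.out.println(captor.getValue().body()); // size:0 (assert on this instead of subclassing)
    }
}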
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java
index 6cc432dd4831f..5b9370d53e26e 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractorTests.java
@@ -6,13 +6,16 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation;
-import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.query.QueryBuilder;
@@ -26,12 +29,12 @@
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval;
import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter.DatafeedTimingStatsPersister;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.junit.Before;
+import org.mockito.ArgumentCaptor;
import java.io.BufferedReader;
import java.io.IOException;
@@ -55,13 +58,16 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.stringContainsInOrder;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class CompositeAggregationDataExtractorTests extends ESTestCase {
- private Client testClient;
- private List<SearchRequest> capturedSearchRequests;
+ private Client client;
private String jobId;
private String timeField;
private Set<String> fields;
@@ -72,37 +78,12 @@ public class CompositeAggregationDataExtractorTests extends ESTestCase {
private AggregatedSearchRequestBuilder aggregatedSearchRequestBuilder;
private Map<String, Object> runtimeMappings;
- private class TestDataExtractor extends CompositeAggregationDataExtractor {
-
- private SearchResponse nextResponse;
- private SearchPhaseExecutionException ex;
-
- TestDataExtractor(long start, long end) {
- super(compositeAggregationBuilder, testClient, createContext(start, end), timingStatsReporter, aggregatedSearchRequestBuilder);
- }
-
- @Override
- protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
- capturedSearchRequests.add(searchRequestBuilder.request());
- if (ex != null) {
- throw ex;
- }
- return nextResponse;
- }
-
- void setNextResponse(SearchResponse searchResponse) {
- nextResponse = searchResponse;
- }
-
- void setNextResponseToError(SearchPhaseExecutionException ex) {
- this.ex = ex;
- }
- }
-
@Before
public void setUpTests() {
- testClient = mock(Client.class);
- capturedSearchRequests = new ArrayList<>();
+ client = mock(Client.class);
+ when(client.threadPool()).thenReturn(mock(ThreadPool.class));
+ when(client.threadPool().getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
+
jobId = "test-job";
timeField = "time";
fields = new HashSet<>();
@@ -120,8 +101,8 @@ public void setUpTests() {
.subAggregation(AggregationBuilders.max("time").field("time"))
.subAggregation(AggregationBuilders.avg("responsetime").field("responsetime"));
runtimeMappings = Collections.emptyMap();
- timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class));
- aggregatedSearchRequestBuilder = (searchSourceBuilder) -> new SearchRequestBuilder(testClient).setSource(searchSourceBuilder)
+ timingStatsReporter = mock(DatafeedTimingStatsReporter.class);
+ aggregatedSearchRequestBuilder = (searchSourceBuilder) -> new SearchRequestBuilder(client).setSource(searchSourceBuilder)
.setAllowPartialSearchResults(false)
.setIndices(indices.toArray(String[]::new));
}
@@ -159,10 +140,19 @@ public void testExtraction() throws IOException {
)
);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 4000L);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, 4000L),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
- SearchResponse response = createSearchResponse("buckets", compositeBucket, Map.of("time_bucket", 4000L, "airline", "d"));
- extractor.setNextResponse(response);
+ ArgumentCaptor<SearchRequest> searchRequestCaptor = ArgumentCaptor.forClass(SearchRequest.class);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(
+ createSearchResponse("buckets", compositeBucket, Map.of("time_bucket", 4000L, "airline", "d"))
+ );
+ when(client.execute(eq(TransportSearchAction.TYPE), searchRequestCaptor.capture())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
DataExtractor.Result result = extractor.next();
@@ -175,9 +165,8 @@ public void testExtraction() throws IOException {
{"airline":"c","time":3999,"responsetime":31.0,"doc_count":4} \
{"airline":"b","time":3999,"responsetime":32.0,"doc_count":3}""";
assertThat(asString(stream.get()), equalTo(expectedStream));
- assertThat(capturedSearchRequests.size(), equalTo(1));
- String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
+ String searchRequest = searchRequestCaptor.getValue().toString().replaceAll("\\s", "");
assertThat(searchRequest, containsString("\"size\":0"));
assertThat(
searchRequest,
@@ -194,35 +183,57 @@ public void testExtraction() throws IOException {
}
public void testExtractionGivenResponseHasNullAggs() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, 2000L),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
- SearchResponse response = createSearchResponse(null);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse(null));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenResponseHasEmptyAggs() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, 2000L),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
+
InternalAggregations emptyAggs = AggregationTestUtils.createAggs(Collections.emptyList());
- SearchResponse response = createSearchResponse(emptyAggs);
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(createSearchResponse(emptyAggs));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(1));
+ verify(client).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenCancelBeforeNext() {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 4000L);
- SearchResponse response = createSearchResponse("time", Collections.emptyList(), Collections.emptyMap());
- extractor.setNextResponse(response);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, 4000L),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
+
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(
+ createSearchResponse("time", Collections.emptyList(), Collections.emptyMap())
+ );
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
extractor.cancel();
// Composite aggs should be true because we need to make sure the first search has occurred or not
@@ -245,10 +256,19 @@ public void testExtractionCancelOnFirstPage() throws IOException {
);
}
- TestDataExtractor extractor = new TestDataExtractor(1000L, timestamp + 1000 + 1);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, timestamp + 1000 + 1),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
+
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(
+ createSearchResponse("buckets", buckets, Map.of("time_bucket", 1000L, "airline", "d"))
+ );
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
- SearchResponse response = createSearchResponse("buckets", buckets, Map.of("time_bucket", 1000L, "airline", "d"));
- extractor.setNextResponse(response);
extractor.cancel();
// We should have next right now as we have not yet determined if we have handled a page or not
assertThat(extractor.hasNext(), is(true));
@@ -274,10 +294,18 @@ public void testExtractionGivenCancelHalfWay() throws IOException {
);
}
- TestDataExtractor extractor = new TestDataExtractor(1000L, timestamp + 1000 + 1);
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, timestamp + 1000 + 1),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
- SearchResponse response = createSearchResponse("buckets", buckets, Map.of("time_bucket", 1000L, "airline", "d"));
- extractor.setNextResponse(response);
+ ActionFuture<SearchResponse> searchResponse = toActionFuture(
+ createSearchResponse("buckets", buckets, Map.of("time_bucket", 1000L, "airline", "d"))
+ );
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
assertThat(extractor.hasNext(), is(true));
assertThat(countMatches('{', asString(extractor.next().data().get())), equalTo(10L));
@@ -305,8 +333,10 @@ public void testExtractionGivenCancelHalfWay() throws IOException {
)
);
}
- response = createSearchResponse("buckets", buckets, Map.of("time_bucket", 3000L, "airline", "a"));
- extractor.setNextResponse(response);
+
+ searchResponse = toActionFuture(createSearchResponse("buckets", buckets, Map.of("time_bucket", 3000L, "airline", "a")));
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenReturn(searchResponse);
+
extractor.cancel();
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.isCancelled(), is(true));
@@ -315,12 +345,22 @@ public void testExtractionGivenCancelHalfWay() throws IOException {
// Once we have handled the 6 remaining in that time bucket, we shouldn't finish the page and the extractor should end
assertThat(extractor.hasNext(), is(false));
- assertThat(capturedSearchRequests.size(), equalTo(2));
+
+ verify(client, times(2)).execute(eq(TransportSearchAction.TYPE), any());
}
public void testExtractionGivenSearchResponseHasError() {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
- extractor.setNextResponseToError(new SearchPhaseExecutionException("phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY));
+ CompositeAggregationDataExtractor extractor = new CompositeAggregationDataExtractor(
+ compositeAggregationBuilder,
+ client,
+ createContext(1000L, 2000L),
+ timingStatsReporter,
+ aggregatedSearchRequestBuilder
+ );
+
+ when(client.execute(eq(TransportSearchAction.TYPE), any())).thenThrow(
+ new SearchPhaseExecutionException("phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY)
+ );
assertThat(extractor.hasNext(), is(true));
expectThrows(SearchPhaseExecutionException.class, extractor::next);
@@ -344,7 +384,13 @@ private CompositeAggregationDataExtractorContext createContext(long start, long
);
}
- @SuppressWarnings("unchecked")
+ private <T> ActionFuture<T> toActionFuture(T t) {
+ @SuppressWarnings("unchecked")
+ ActionFuture<T> future = (ActionFuture<T>) mock(ActionFuture.class);
+ when(future.actionGet()).thenReturn(t);
+ return future;
+ }
+
private SearchResponse createSearchResponse(
String aggName,
List<CompositeAggregation.Bucket> buckets,
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java
index a7260d34a0136..878f49dbe77fe 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java
@@ -6,7 +6,6 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.chunked;
-import org.elasticsearch.client.internal.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
@@ -18,7 +17,6 @@
import org.elasticsearch.xpack.core.ml.job.config.DataDescription;
import org.elasticsearch.xpack.core.ml.job.config.Detector;
import org.elasticsearch.xpack.core.ml.job.config.Job;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;
import org.junit.Before;
@@ -31,9 +29,7 @@
public class ChunkedDataExtractorFactoryTests extends ESTestCase {
- private Client client;
private DataExtractorFactory dataExtractorFactory;
- private DatafeedTimingStatsReporter timingStatsReporter;
@Override
protected NamedXContentRegistry xContentRegistry() {
@@ -43,9 +39,7 @@ protected NamedXContentRegistry xContentRegistry() {
@Before
public void setUpMocks() {
- client = mock(Client.class);
dataExtractorFactory = mock(DataExtractorFactory.class);
- timingStatsReporter = mock(DatafeedTimingStatsReporter.class);
}
public void testNewExtractor_GivenAlignedTimes() {
@@ -53,8 +47,8 @@ public void testNewExtractor_GivenAlignedTimes() {
ChunkedDataExtractor dataExtractor = (ChunkedDataExtractor) factory.newExtractor(2000, 5000);
- assertThat(dataExtractor.getContext().start, equalTo(2000L));
- assertThat(dataExtractor.getContext().end, equalTo(5000L));
+ assertThat(dataExtractor.getContext().start(), equalTo(2000L));
+ assertThat(dataExtractor.getContext().end(), equalTo(5000L));
}
public void testNewExtractor_GivenNonAlignedTimes() {
@@ -62,8 +56,8 @@ public void testNewExtractor_GivenNonAlignedTimes() {
ChunkedDataExtractor dataExtractor = (ChunkedDataExtractor) factory.newExtractor(3980, 9200);
- assertThat(dataExtractor.getContext().start, equalTo(4000L));
- assertThat(dataExtractor.getContext().end, equalTo(9000L));
+ assertThat(dataExtractor.getContext().start(), equalTo(4000L));
+ assertThat(dataExtractor.getContext().end(), equalTo(9000L));
}
public void testIntervalTimeAligner() {
@@ -111,13 +105,10 @@ private ChunkedDataExtractorFactory createFactory(long histogramInterval) {
datafeedConfigBuilder.setParsedAggregations(aggs);
datafeedConfigBuilder.setIndices(Arrays.asList("my_index"));
return new ChunkedDataExtractorFactory(
- client,
datafeedConfigBuilder.build(),
- null,
jobBuilder.build(new Date()),
xContentRegistry(),
- dataExtractorFactory,
- timingStatsReporter
+ dataExtractorFactory
);
}
}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
index 7c8d2572461d4..ce6cf92d4bd51 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java
@@ -6,29 +6,13 @@
*/
package org.elasticsearch.xpack.ml.datafeed.extractor.chunked;
-import org.apache.lucene.search.TotalHits;
-import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
-import org.elasticsearch.client.internal.Client;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.SearchHits;
-import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.metrics.Max;
-import org.elasticsearch.search.aggregations.metrics.Min;
import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats;
import org.elasticsearch.xpack.core.ml.datafeed.SearchInterval;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
-import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter.DatafeedTimingStatsPersister;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
+import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor.DataSummary;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;
import org.junit.Before;
import org.mockito.Mockito;
@@ -36,100 +20,62 @@
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class ChunkedDataExtractorTests extends ESTestCase {
- private Client client;
- private List<SearchRequest> capturedSearchRequests;
private String jobId;
- private String timeField;
- private List<String> indices;
- private QueryBuilder query;
private int scrollSize;
private TimeValue chunkSpan;
private DataExtractorFactory dataExtractorFactory;
- private DatafeedTimingStatsReporter timingStatsReporter;
-
- private class TestDataExtractor extends ChunkedDataExtractor {
-
- private SearchResponse nextResponse;
- private SearchPhaseExecutionException ex;
-
- TestDataExtractor(long start, long end) {
- super(client, dataExtractorFactory, createContext(start, end), timingStatsReporter);
- }
-
- TestDataExtractor(long start, long end, boolean hasAggregations, Long histogramInterval) {
- super(client, dataExtractorFactory, createContext(start, end, hasAggregations, histogramInterval), timingStatsReporter);
- }
-
- @Override
- protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) {
- capturedSearchRequests.add(searchRequestBuilder.request());
- if (ex != null) {
- throw ex;
- }
- return nextResponse;
- }
-
- void setNextResponse(SearchResponse searchResponse) {
- nextResponse = searchResponse;
- }
-
- void setNextResponseToError(SearchPhaseExecutionException ex) {
- this.ex = ex;
- }
- }
@Before
public void setUpTests() {
- client = mock(Client.class);
- capturedSearchRequests = new ArrayList<>();
jobId = "test-job";
- timeField = "time";
- indices = Arrays.asList("index-1", "index-2");
scrollSize = 1000;
chunkSpan = null;
dataExtractorFactory = mock(DataExtractorFactory.class);
- timingStatsReporter = new DatafeedTimingStatsReporter(new DatafeedTimingStats(jobId), mock(DatafeedTimingStatsPersister.class));
}
public void testExtractionGivenNoData() throws IOException {
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponse(createSearchResponse(0L, 0L, 0L));
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
+
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(1000L, 2300L), new DataSummary(null, null, 0L));
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenReturn(summaryExtractor);
assertThat(extractor.hasNext(), is(true));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+
+ verify(dataExtractorFactory).newExtractor(1000L, 2300L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
}
public void testExtractionGivenSpecifiedChunk() throws IOException {
chunkSpan = TimeValue.timeValueSeconds(1);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponse(createSearchResponse(10L, 1000L, 2200L));
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
+
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(1000L, 2300L), new DataSummary(1000L, 2300L, 10L));
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
InputStream inputStream3 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
- when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
+ when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(2000L, 2300L), inputStream3);
- when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(2000L, 2300L), inputStream3);
+ when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
DataExtractor.Result result = extractor.next();
@@ -148,46 +94,31 @@ public void testExtractionGivenSpecifiedChunk() throws IOException {
assertThat(result.searchInterval(), equalTo(new SearchInterval(2000L, 2300L)));
assertThat(result.data().isPresent(), is(false));
+ verify(dataExtractorFactory).newExtractor(1000L, 2300L);
verify(dataExtractorFactory).newExtractor(1000L, 2000L);
verify(dataExtractorFactory).newExtractor(2000L, 2300L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
- String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
- assertThat(searchRequest, containsString("\"size\":0"));
- assertThat(
- searchRequest,
- containsString(
- "\"query\":{\"bool\":{\"filter\":[{\"match_all\":{\"boost\":1.0}},"
- + "{\"range\":{\"time\":{\"gte\":1000,\"lt\":2300,"
- + "\"format\":\"epoch_millis\",\"boost\":1.0}}}]"
- )
- );
- assertThat(
- searchRequest,
- containsString(
- "\"aggregations\":{\"earliest_time\":{\"min\":{\"field\":\"time\"}}," + "\"latest_time\":{\"max\":{\"field\":\"time\"}}}}"
- )
- );
- assertThat(searchRequest, not(containsString("\"track_total_hits\":false")));
- assertThat(searchRequest, not(containsString("\"sort\"")));
}
public void testExtractionGivenSpecifiedChunkAndAggs() throws IOException {
chunkSpan = TimeValue.timeValueSeconds(1);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L, true, 1000L);
- // 0 hits with non-empty data is possible with rollups
- extractor.setNextResponse(createSearchResponse(randomFrom(0L, 2L, 10000L), 1000L, 2200L));
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(1000L, 2300L),
+ new DataSummary(1000L, 2200L, randomFrom(0L, 2L, 10000L))
+ );
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenReturn(summaryExtractor);
+
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L, true, 200L));
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
InputStream inputStream3 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
- when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
+ when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(2000L, 2300L), inputStream3);
- when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(2000L, 2300L), inputStream3);
+ when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
DataExtractor.Result result = extractor.next();
@@ -206,47 +137,31 @@ public void testExtractionGivenSpecifiedChunkAndAggs() throws IOException {
assertThat(result.searchInterval(), equalTo(new SearchInterval(2000L, 2300L)));
assertThat(result.data().isPresent(), is(false));
+ verify(dataExtractorFactory).newExtractor(1000L, 2300L);
verify(dataExtractorFactory).newExtractor(1000L, 2000L);
verify(dataExtractorFactory).newExtractor(2000L, 2300L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
- String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
- assertThat(searchRequest, containsString("\"size\":0"));
- assertThat(
- searchRequest,
- containsString(
- "\"query\":{\"bool\":{\"filter\":[{\"match_all\":{\"boost\":1.0}},"
- + "{\"range\":{\"time\":{\"gte\":1000,\"lt\":2300,"
- + "\"format\":\"epoch_millis\",\"boost\":1.0}}}]"
- )
- );
- assertThat(
- searchRequest,
- containsString(
- "\"aggregations\":{\"earliest_time\":{\"min\":{\"field\":\"time\"}}," + "\"latest_time\":{\"max\":{\"field\":\"time\"}}}}"
- )
- );
- assertThat(searchRequest, not(containsString("\"track_total_hits\":false")));
- assertThat(searchRequest, not(containsString("\"sort\"")));
}
public void testExtractionGivenAutoChunkAndAggs() throws IOException {
chunkSpan = null;
- TestDataExtractor extractor = new TestDataExtractor(100_000L, 450_000L, true, 200L);
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(100_000L, 450_000L),
+ new DataSummary(100_000L, 400_000L, randomFrom(0L, 2L, 10000L))
+ );
+ when(dataExtractorFactory.newExtractor(100_000L, 450_000L)).thenReturn(summaryExtractor);
- // 0 hits with non-empty data is possible with rollups
- extractor.setNextResponse(createSearchResponse(randomFrom(0L, 2L, 10000L), 100_000L, 400_000L));
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100_000L, 450_000L, true, 200L));
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
// 200 * 1_000 == 200_000
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(100_000L, 300_000L), inputStream1);
- when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(100_000L, 300_000L), inputStream1);
+ when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(300_000L, 450_000L), inputStream2);
- when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(300_000L, 450_000L), inputStream2);
+ when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
DataExtractor.Result result = extractor.next();
@@ -261,43 +176,47 @@ public void testExtractionGivenAutoChunkAndAggs() throws IOException {
assertThat(result.data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(100_000L, 450_000L);
verify(dataExtractorFactory).newExtractor(100_000L, 300_000L);
verify(dataExtractorFactory).newExtractor(300_000L, 450_000L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndAggsAndNoData() throws IOException {
chunkSpan = null;
- TestDataExtractor extractor = new TestDataExtractor(100L, 500L, true, 200L);
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(100L, 500L), new DataSummary(null, null, 0L));
+ when(dataExtractorFactory.newExtractor(100L, 500L)).thenReturn(summaryExtractor);
- extractor.setNextResponse(createNullSearchResponse());
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100L, 500L, true, 200L));
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(100L, 500L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndScrollSize1000() throws IOException {
chunkSpan = null;
scrollSize = 1000;
- TestDataExtractor extractor = new TestDataExtractor(100000L, 450000L);
// 300K millis * 1000 * 10 / 15K docs = 200000
- extractor.setNextResponse(createSearchResponse(15000L, 100000L, 400000L));
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(100000L, 450000L),
+ new DataSummary(100000L, 400000L, 15000L)
+ );
+ when(dataExtractorFactory.newExtractor(100000L, 450000L)).thenReturn(summaryExtractor);
+
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100000L, 450000L));
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(100_000L, 300_000L), inputStream1);
- when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(100_000L, 300_000L), inputStream1);
+ when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(300_000L, 450_000L), inputStream2);
- when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(300_000L, 450_000L), inputStream2);
+ when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -306,29 +225,31 @@ public void testExtractionGivenAutoChunkAndScrollSize1000() throws IOException {
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(100000L, 450000L);
verify(dataExtractorFactory).newExtractor(100000L, 300000L);
verify(dataExtractorFactory).newExtractor(300000L, 450000L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndScrollSize500() throws IOException {
chunkSpan = null;
scrollSize = 500;
- TestDataExtractor extractor = new TestDataExtractor(100000L, 450000L);
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100000L, 450000L));
- // 300K millis * 500 * 10 / 15K docs = 100000
- extractor.setNextResponse(createSearchResponse(15000L, 100000L, 400000L));
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(100000L, 450000L),
+ new DataSummary(100000L, 400000L, 15000L)
+ );
+ when(dataExtractorFactory.newExtractor(100000L, 450000L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(100_000L, 200_000L), inputStream1);
- when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(100_000L, 200_000L), inputStream1);
+ when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(200_000L, 300_000L), inputStream2);
- when(dataExtractorFactory.newExtractor(200000L, 300000L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(200_000L, 300_000L), inputStream2);
+ when(dataExtractorFactory.newExtractor(200000L, 300000L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -336,28 +257,31 @@ public void testExtractionGivenAutoChunkAndScrollSize500() throws IOException {
assertEquals(inputStream2, extractor.next().data().get());
assertThat(extractor.hasNext(), is(true));
+ verify(dataExtractorFactory).newExtractor(100000L, 450000L);
verify(dataExtractorFactory).newExtractor(100000L, 200000L);
verify(dataExtractorFactory).newExtractor(200000L, 300000L);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkIsLessThanMinChunk() throws IOException {
chunkSpan = null;
scrollSize = 1000;
- TestDataExtractor extractor = new TestDataExtractor(100000L, 450000L);
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100000L, 450000L));
// 30K millis * 1000 * 10 / 150K docs = 2000 < min of 60K
- extractor.setNextResponse(createSearchResponse(150000L, 100000L, 400000L));
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(100000L, 450000L),
+ new DataSummary(100000L, 400000L, 150000L)
+ );
+ when(dataExtractorFactory.newExtractor(100000L, 450000L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(100_000L, 160_000L), inputStream1);
- when(dataExtractorFactory.newExtractor(100000L, 160000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(100_000L, 160_000L), inputStream1);
+ when(dataExtractorFactory.newExtractor(100000L, 160000L)).thenReturn(subExtractor1);
- DataExtractor subExtactor2 = new StubSubExtractor(new SearchInterval(160_000L, 220_000L), inputStream2);
- when(dataExtractorFactory.newExtractor(160000L, 220000L)).thenReturn(subExtactor2);
+ DataExtractor subExtractor2 = new StubSubExtractor(new SearchInterval(160_000L, 220_000L), inputStream2);
+ when(dataExtractorFactory.newExtractor(160000L, 220000L)).thenReturn(subExtractor2);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -365,24 +289,24 @@ public void testExtractionGivenAutoChunkIsLessThanMinChunk() throws IOException
assertEquals(inputStream2, extractor.next().data().get());
assertThat(extractor.hasNext(), is(true));
+ verify(dataExtractorFactory).newExtractor(100000L, 450000L);
verify(dataExtractorFactory).newExtractor(100000L, 160000L);
verify(dataExtractorFactory).newExtractor(160000L, 220000L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndDataTimeSpreadIsZero() throws IOException {
chunkSpan = null;
scrollSize = 1000;
- TestDataExtractor extractor = new TestDataExtractor(100L, 500L);
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100L, 500L));
- extractor.setNextResponse(createSearchResponse(150000L, 300L, 300L));
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(100L, 500L), new DataSummary(300L, 300L, 150000L));
+ when(dataExtractorFactory.newExtractor(100L, 500L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(300L, 500L), inputStream1);
- when(dataExtractorFactory.newExtractor(300L, 500L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(300L, 500L), inputStream1);
+ when(dataExtractorFactory.newExtractor(300L, 500L)).thenReturn(subExtractor1);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -390,24 +314,20 @@ public void testExtractionGivenAutoChunkAndDataTimeSpreadIsZero() throws IOExcep
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(100L, 500L);
verify(dataExtractorFactory).newExtractor(300L, 500L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndTotalTimeRangeSmallerThanChunk() throws IOException {
chunkSpan = null;
scrollSize = 1000;
- TestDataExtractor extractor = new TestDataExtractor(1L, 101L);
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1L, 101L));
// 100 millis * 1000 * 10 / 10 docs = 100000
- extractor.setNextResponse(createSearchResponse(10L, 1L, 101L));
-
InputStream inputStream1 = mock(InputStream.class);
-
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1L, 10L), inputStream1);
- when(dataExtractorFactory.newExtractor(1L, 101L)).thenReturn(subExtactor1);
+ DataExtractor stubExtractor = new StubSubExtractor(new SearchInterval(1L, 101L), new DataSummary(1L, 101L, 10L), inputStream1);
+ when(dataExtractorFactory.newExtractor(1L, 101L)).thenReturn(stubExtractor);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -415,19 +335,21 @@ public void testExtractionGivenAutoChunkAndTotalTimeRangeSmallerThanChunk() thro
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- verify(dataExtractorFactory).newExtractor(1L, 101L);
+ verify(dataExtractorFactory, times(2)).newExtractor(1L, 101L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(1));
}
public void testExtractionGivenAutoChunkAndIntermediateEmptySearchShouldReconfigure() throws IOException {
chunkSpan = null;
scrollSize = 500;
- TestDataExtractor extractor = new TestDataExtractor(100000L, 400000L);
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(100000L, 400000L));
// 300K millis * 500 * 10 / 15K docs = 100000
- extractor.setNextResponse(createSearchResponse(15000L, 100000L, 400000L));
+ DataExtractor summaryExtractor = new StubSubExtractor(
+ new SearchInterval(100000L, 400000L),
+ new DataSummary(100000L, 400000L, 15000L)
+ );
+ when(dataExtractorFactory.newExtractor(100000L, 400000L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
@@ -443,44 +365,30 @@ public void testExtractionGivenAutoChunkAndIntermediateEmptySearchShouldReconfig
assertThat(extractor.hasNext(), is(true));
// Now we have: 200K millis * 500 * 10 / 5K docs = 200000
- extractor.setNextResponse(createSearchResponse(5000, 200000L, 400000L));
-
- // This is the last one
InputStream inputStream2 = mock(InputStream.class);
- DataExtractor subExtractor3 = new StubSubExtractor(new SearchInterval(200_000L, 400_000L), inputStream2);
- when(dataExtractorFactory.newExtractor(200000, 400000)).thenReturn(subExtractor3);
+ DataExtractor newExtractor = new StubSubExtractor(
+ new SearchInterval(300000L, 400000L),
+ new DataSummary(300000L, 400000L, 5000L),
+ inputStream2
+ );
+ when(dataExtractorFactory.newExtractor(300000L, 400000L)).thenReturn(newExtractor);
assertEquals(inputStream2, extractor.next().data().get());
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
- verify(dataExtractorFactory).newExtractor(100000L, 200000L);
- verify(dataExtractorFactory).newExtractor(200000L, 300000L);
- verify(dataExtractorFactory).newExtractor(200000L, 400000L);
+ verify(dataExtractorFactory).newExtractor(100000L, 400000L); // Initial summary
+ verify(dataExtractorFactory).newExtractor(100000L, 200000L); // Chunk 1
+ verify(dataExtractorFactory).newExtractor(200000L, 300000L); // Chunk 2 with no data
+ verify(dataExtractorFactory, times(2)).newExtractor(300000L, 400000L); // Reconfigure and new chunk
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
-
- assertThat(capturedSearchRequests.size(), equalTo(2));
-
- String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
- assertThat(searchRequest, containsString("\"gte\":100000,\"lt\":400000"));
- searchRequest = capturedSearchRequests.get(1).toString().replaceAll("\\s", "");
- assertThat(searchRequest, containsString("\"gte\":300000,\"lt\":400000"));
}
public void testCancelGivenNextWasNeverCalled() {
chunkSpan = TimeValue.timeValueSeconds(1);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponse(createSearchResponse(10L, 1000L, 2200L));
-
- InputStream inputStream1 = mock(InputStream.class);
-
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1);
- when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1);
-
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
assertThat(extractor.hasNext(), is(true));
-
extractor.cancel();
-
assertThat(extractor.isCancelled(), is(true));
assertThat(extractor.hasNext(), is(false));
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
@@ -488,14 +396,16 @@ public void testCancelGivenNextWasNeverCalled() {
public void testCancelGivenCurrentSubExtractorHasMore() throws IOException {
chunkSpan = TimeValue.timeValueSeconds(1);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponse(createSearchResponse(10L, 1000L, 2200L));
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
+
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(1000L, 2300L), new DataSummary(1000L, 2200L, 10L));
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
InputStream inputStream2 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
- when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(1000L, 2000L), inputStream1, inputStream2);
+ when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -509,19 +419,23 @@ public void testCancelGivenCurrentSubExtractorHasMore() throws IOException {
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(1000L, 2300L);
verify(dataExtractorFactory).newExtractor(1000L, 2000L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
}
public void testCancelGivenCurrentSubExtractorIsDone() throws IOException {
chunkSpan = TimeValue.timeValueSeconds(1);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponse(createSearchResponse(10L, 1000L, 2200L));
+
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
+
+ DataExtractor summaryExtractor = new StubSubExtractor(new SearchInterval(1000L, 2300L), new DataSummary(1000L, 2200L, 10L));
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenReturn(summaryExtractor);
InputStream inputStream1 = mock(InputStream.class);
- DataExtractor subExtactor1 = new StubSubExtractor(new SearchInterval(1000L, 3000L), inputStream1);
- when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1);
+ DataExtractor subExtractor1 = new StubSubExtractor(new SearchInterval(1000L, 3000L), inputStream1);
+ when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1);
assertThat(extractor.hasNext(), is(true));
assertEquals(inputStream1, extractor.next().data().get());
@@ -533,66 +447,27 @@ public void testCancelGivenCurrentSubExtractorIsDone() throws IOException {
assertThat(extractor.next().data().isPresent(), is(false));
assertThat(extractor.hasNext(), is(false));
+ verify(dataExtractorFactory).newExtractor(1000L, 2300L);
verify(dataExtractorFactory).newExtractor(1000L, 2000L);
Mockito.verifyNoMoreInteractions(dataExtractorFactory);
}
public void testDataSummaryRequestIsFailed() {
chunkSpan = TimeValue.timeValueSeconds(2);
- TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
- extractor.setNextResponseToError(new SearchPhaseExecutionException("search phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY));
+ DataExtractor extractor = new ChunkedDataExtractor(dataExtractorFactory, createContext(1000L, 2300L));
+ when(dataExtractorFactory.newExtractor(1000L, 2300L)).thenThrow(
+ new SearchPhaseExecutionException("search phase 1", "boom", ShardSearchFailure.EMPTY_ARRAY)
+ );
assertThat(extractor.hasNext(), is(true));
expectThrows(SearchPhaseExecutionException.class, extractor::next);
}
public void testNoDataSummaryHasNoData() {
- ChunkedDataExtractor.DataSummary summary = ChunkedDataExtractor.AggregatedDataSummary.noDataSummary(randomNonNegativeLong());
+ DataSummary summary = new DataSummary(null, null, 0L);
assertFalse(summary.hasData());
}
- private SearchResponse createSearchResponse(long totalHits, long earliestTime, long latestTime) {
- SearchResponse searchResponse = mock(SearchResponse.class);
- when(searchResponse.status()).thenReturn(RestStatus.OK);
- SearchHit[] hits = new SearchHit[(int) totalHits];
- Arrays.fill(hits, SearchHit.unpooled(1));
- SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1);
- when(searchResponse.getHits()).thenReturn(searchHits);
-
- List<InternalAggregation> aggs = new ArrayList<>();
- Min min = mock(Min.class);
- when(min.value()).thenReturn((double) earliestTime);
- when(min.getName()).thenReturn("earliest_time");
- aggs.add(min);
- Max max = mock(Max.class);
- when(max.value()).thenReturn((double) latestTime);
- when(max.getName()).thenReturn("latest_time");
- aggs.add(max);
- InternalAggregations aggregations = InternalAggregations.from(aggs);
- when(searchResponse.getAggregations()).thenReturn(aggregations);
- return searchResponse;
- }
-
- private SearchResponse createNullSearchResponse() {
- SearchResponse searchResponse = mock(SearchResponse.class);
- when(searchResponse.status()).thenReturn(RestStatus.OK);
- SearchHits searchHits = SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1);
- when(searchResponse.getHits()).thenReturn(searchHits);
-
- List<InternalAggregation> aggs = new ArrayList<>();
- Min min = mock(Min.class);
- when(min.value()).thenReturn(Double.POSITIVE_INFINITY);
- when(min.getName()).thenReturn("earliest_time");
- aggs.add(min);
- Max max = mock(Max.class);
- when(max.value()).thenReturn(Double.POSITIVE_INFINITY);
- when(max.getName()).thenReturn("latest_time");
- aggs.add(max);
- InternalAggregations aggregations = InternalAggregations.from(aggs);
- when(searchResponse.getAggregations()).thenReturn(aggregations);
- return searchResponse;
- }
-
private ChunkedDataExtractorContext createContext(long start, long end) {
return createContext(start, end, false, null);
}
@@ -600,32 +475,38 @@ private ChunkedDataExtractorContext createContext(long start, long end) {
private ChunkedDataExtractorContext createContext(long start, long end, boolean hasAggregations, Long histogramInterval) {
return new ChunkedDataExtractorContext(
jobId,
- timeField,
- indices,
- QueryBuilders.matchAllQuery(),
scrollSize,
start,
end,
chunkSpan,
ChunkedDataExtractorFactory.newIdentityTimeAligner(),
- Collections.emptyMap(),
hasAggregations,
- histogramInterval,
- SearchRequest.DEFAULT_INDICES_OPTIONS,
- Collections.emptyMap()
+ histogramInterval
);
}
private static class StubSubExtractor implements DataExtractor {
- final SearchInterval searchInterval;
- List<InputStream> streams = new ArrayList<>();
- boolean hasNext = true;
+
+ private final DataSummary summary;
+ private final SearchInterval searchInterval;
+ private final List<InputStream> streams = new ArrayList<>();
+ private boolean hasNext = true;
StubSubExtractor(SearchInterval searchInterval, InputStream... streams) {
+ this(searchInterval, null, streams);
+ }
+
+ StubSubExtractor(SearchInterval searchInterval, DataSummary summary, InputStream... streams) {
this.searchInterval = searchInterval;
+ this.summary = summary;
Collections.addAll(this.streams, streams);
}
+ @Override
+ public DataSummary getSummary() {
+ return summary;
+ }
+
@Override
public boolean hasNext() {
return hasNext;
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
index f3eab09b7bc2e..d994b14265a26 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java
@@ -31,6 +31,9 @@
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.aggregations.metrics.Min;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
@@ -39,6 +42,7 @@
import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter;
import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter.DatafeedTimingStatsPersister;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
+import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor.DataSummary;
import org.elasticsearch.xpack.ml.extractor.DocValueField;
import org.elasticsearch.xpack.ml.extractor.ExtractedField;
import org.elasticsearch.xpack.ml.extractor.TimeField;
@@ -65,6 +69,7 @@
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -512,6 +517,37 @@ public void testDomainSplitScriptField() throws IOException {
assertThat(capturedClearScrollIds.get(0), equalTo(response2.getScrollId()));
}
+ public void testGetSummary() {
+ ScrollDataExtractorContext context = createContext(1000L, 2300L);
+ TestDataExtractor extractor = new TestDataExtractor(context);
+ extractor.setNextResponse(createSummaryResponse(1001L, 2299L, 10L));
+
+ DataSummary summary = extractor.getSummary();
+ assertThat(summary.earliestTime(), equalTo(1001L));
+ assertThat(summary.latestTime(), equalTo(2299L));
+ assertThat(summary.totalHits(), equalTo(10L));
+
+ assertThat(capturedSearchRequests.size(), equalTo(1));
+ String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", "");
+ assertThat(searchRequest, containsString("\"size\":0"));
+ assertThat(
+ searchRequest,
+ containsString(
+ "\"query\":{\"bool\":{\"filter\":[{\"match_all\":{\"boost\":1.0}},"
+ + "{\"range\":{\"time\":{\"gte\":1000,\"lt\":2300,"
+ + "\"format\":\"epoch_millis\",\"boost\":1.0}}}]"
+ )
+ );
+ assertThat(
+ searchRequest,
+ containsString(
+ "\"aggregations\":{\"earliest_time\":{\"min\":{\"field\":\"time\"}}," + "\"latest_time\":{\"max\":{\"field\":\"time\"}}}}"
+ )
+ );
+ assertThat(searchRequest, not(containsString("\"track_total_hits\":false")));
+ assertThat(searchRequest, not(containsString("\"sort\"")));
+ }
+
private ScrollDataExtractorContext createContext(long start, long end) {
return new ScrollDataExtractorContext(
jobId,
@@ -553,6 +589,17 @@ private SearchResponse createSearchResponse(List timestamps, List
return searchResponse;
}
+ private SearchResponse createSummaryResponse(long start, long end, long totalHits) {
+ SearchResponse searchResponse = mock(SearchResponse.class);
+ when(searchResponse.getHits()).thenReturn(
+ new SearchHits(SearchHits.EMPTY, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1)
+ );
+ when(searchResponse.getAggregations()).thenReturn(
+ InternalAggregations.from(List.of(new Min("earliest_time", start, null, null), new Max("latest_time", end, null, null)))
+ );
+ return searchResponse;
+ }
+
private List<String> getCapturedClearScrollIds() {
return capturedClearScrollRequests.getAllValues().stream().map(r -> r.getScrollIds().get(0)).collect(Collectors.toList());
}
From 71c3f34ce5c50cf04c35c1a65a4c15e78672949e Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 5 Feb 2024 10:19:53 +0100
Subject: [PATCH 012/106] Speedup slicing from ReleasableBytesReference some
more (#105108)
We can speed up the slice operation quite a bit by speeding up `skip` for
the common case and by passing the delegate as the basis for the stream
(this neatly avoids a multi-morphic call to `length` on the bytes
reference). Also, while we're at it, we can speed up the common-case read
operation the same way.
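To make the fast-path idea concrete, here is a minimal sketch of the pattern
this patch applies (the `SlicedInput` class and `skipAcrossSlices` helper are
hypothetical names, not the actual Elasticsearch classes): serve the request
from the current slice when it fits, and push the rare slice-crossing case
into a separate method so the hot method stays small and cheap to inline.

    import java.nio.ByteBuffer;

    /** Sketch of the fast-path/slow-path split; all names are hypothetical. */
    class SlicedInput {
        private final ByteBuffer[] slices; // backing pages of the bytes reference
        private ByteBuffer slice;          // the slice currently being read
        private int sliceIndex;

        SlicedInput(ByteBuffer... slices) {
            this.slices = slices;
            this.slice = slices.length > 0 ? slices[0] : ByteBuffer.allocate(0);
        }

        long skip(long n) {
            if (n <= 0L) {
                return 0L;
            }
            if (n <= slice.remaining()) {
                // common case: the skip stays inside the current slice, so the
                // hot method does no multi-slice bookkeeping and inlines well
                slice.position(slice.position() + (int) n);
                return n;
            }
            return skipAcrossSlices(n); // rare case lives in its own method
        }

        private long skipAcrossSlices(long n) {
            long skipped = 0;
            while (skipped < n) {
                if (slice.remaining() == 0) {
                    if (sliceIndex + 1 >= slices.length) {
                        break; // all slices exhausted
                    }
                    slice = slices[++sliceIndex];
                    continue;
                }
                int step = (int) Math.min(n - skipped, slice.remaining());
                slice.position(slice.position() + step);
                skipped += step;
            }
            return skipped;
        }
    }

The same shape applies to the read path below: the single-slice read is
handled inline, and only page-crossing reads fall through to the
`readFromMultipleSlices` helper.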
---
.../common/bytes/BytesReferenceStreamInput.java | 16 ++++++++++++++++
.../common/bytes/ReleasableBytesReference.java | 2 +-
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
index 2fca882724bbd..1e30579292d00 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
@@ -143,6 +143,14 @@ public int read() throws IOException {
@Override
public int read(final byte[] b, final int bOffset, final int len) throws IOException {
+ if (slice.remaining() >= len) {
+ slice.get(b, bOffset, len);
+ return len;
+ }
+ return readFromMultipleSlices(b, bOffset, len);
+ }
+
+ private int readFromMultipleSlices(byte[] b, int bOffset, int len) throws IOException {
final int length = bytesReference.length();
final int offset = offset();
if (offset >= length) {
@@ -186,6 +194,14 @@ public long skip(long n) throws IOException {
if (n <= 0L) {
return 0L;
}
+ if (n <= slice.remaining()) {
+ slice.position(slice.position() + (int) n);
+ return n;
+ }
+ return skipMultiple(n);
+ }
+
+ private int skipMultiple(long n) throws IOException {
assert offset() <= bytesReference.length() : offset() + " vs " + bytesReference.length();
// definitely >= 0 and <= Integer.MAX_VALUE so casting is ok
final int numBytesSkipped = (int) Math.min(n, bytesReference.length() - offset());
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
index 567f39d968200..e9fe63529e17a 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
@@ -144,7 +144,7 @@ public long ramBytesUsed() {
@Override
public StreamInput streamInput() throws IOException {
assert hasReferences();
- return new BytesReferenceStreamInput(this) {
+ return new BytesReferenceStreamInput(delegate) {
private ReleasableBytesReference retainAndSkip(int len) throws IOException {
if (len == 0) {
return ReleasableBytesReference.empty();
From 6a40c04cc1430c918372cbd8aba327ee2b1ed6ce Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 5 Feb 2024 10:04:24 +0000
Subject: [PATCH 013/106] More guidance in balance settings docs (#105119)
Today the docs on balancing settings describe what the settings all do
but offer little guidance about how to configure them. This commit adds
some extra detail to avoid some common misunderstandings and reorders
the docs a little so that more commonly-adjusted settings are mentioned
earlier.
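Since the reordered docs lead with the settings operators actually tune, it
is worth noting that all of these are dynamic cluster settings. As a hedged
illustration (not part of this commit), one could update such a setting at
runtime with the Java low-level REST client; the client setup is standard,
and the value shown merely restates the default that the updated docs
recommend keeping:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class AdjustRecoverySettings {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // Dynamically set a recovery-concurrency setting; the docs in
                // this commit recommend keeping the default of 2.
                Request request = new Request("PUT", "/_cluster/settings");
                request.setJsonEntity("""
                    {"persistent": {"cluster.routing.allocation.node_concurrent_recoveries": 2}}""");
                Response response = client.performRequest(request);
                System.out.println(response.getStatusLine());
            }
        }
    }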
---
.../cluster/shards_allocation.asciidoc | 127 +++++++++++-------
.../modules/indices/recovery.asciidoc | 5 +-
2 files changed, 84 insertions(+), 48 deletions(-)
diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc
index 5a7aa43155c66..a73a3906bd3fd 100644
--- a/docs/reference/modules/cluster/shards_allocation.asciidoc
+++ b/docs/reference/modules/cluster/shards_allocation.asciidoc
@@ -22,37 +22,54 @@ one of the active allocation ids in the cluster state.
--
+[[cluster-routing-allocation-same-shard-host]]
+`cluster.routing.allocation.same_shard.host`::
+ (<>)
+ If `true`, forbids multiple copies of a shard from being allocated to
+ distinct nodes on the same host, i.e. which have the same network
+ address. Defaults to `false`, meaning that copies of a shard may
+ sometimes be allocated to nodes on the same host. This setting is only
+ relevant if you run multiple nodes on each host.
+
`cluster.routing.allocation.node_concurrent_incoming_recoveries`::
(<>)
- How many concurrent incoming shard recoveries are allowed to happen on a node. Incoming recoveries are the recoveries
- where the target shard (most likely the replica unless a shard is relocating) is allocated on the node. Defaults to `2`.
+ How many concurrent incoming shard recoveries are allowed to happen on a
+ node. Incoming recoveries are the recoveries where the target shard (most
+ likely the replica unless a shard is relocating) is allocated on the node.
+ Defaults to `2`. Increasing this setting may cause shard movements to have
+ a performance impact on other activity in your cluster, but may not make
+ shard movements complete noticeably sooner. We do not recommend adjusting
+ this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_outgoing_recoveries`::
(<>)
- How many concurrent outgoing shard recoveries are allowed to happen on a node. Outgoing recoveries are the recoveries
- where the source shard (most likely the primary unless a shard is relocating) is allocated on the node. Defaults to `2`.
+ How many concurrent outgoing shard recoveries are allowed to happen on a
+ node. Outgoing recoveries are the recoveries where the source shard (most
+ likely the primary unless a shard is relocating) is allocated on the node.
+ Defaults to `2`. Increasing this setting may cause shard movements to have
+ a performance impact on other activity in your cluster, but may not make
+ shard movements complete noticeably sooner. We do not recommend adjusting
+ this setting from its default of `2`.
`cluster.routing.allocation.node_concurrent_recoveries`::
(<>)
- A shortcut to set both `cluster.routing.allocation.node_concurrent_incoming_recoveries` and
- `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults to 2.
-
+ A shortcut to set both
+ `cluster.routing.allocation.node_concurrent_incoming_recoveries` and
+ `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults
+ to `2`. Increasing this setting may cause shard movements to have a
+ performance impact on other activity in your cluster, but may not make
+ shard movements complete noticeably sooner. We do not recommend adjusting
+ this setting from its default of `2`.
`cluster.routing.allocation.node_initial_primaries_recoveries`::
- (<>)
- While the recovery of replicas happens over the network, the recovery of
- an unassigned primary after node restart uses data from the local disk.
- These should be fast so more initial primary recoveries can happen in
- parallel on the same node. Defaults to `4`.
-
-[[cluster-routing-allocation-same-shard-host]]
-`cluster.routing.allocation.same_shard.host`::
- (<>)
- If `true`, forbids multiple copies of a shard from being allocated to
- distinct nodes on the same host, i.e. which have the same network
- address. Defaults to `false`, meaning that copies of a shard may
- sometimes be allocated to nodes on the same host. This setting is only
- relevant if you run multiple nodes on each host.
+ (<>)
+ While the recovery of replicas happens over the network, the recovery of
+ an unassigned primary after node restart uses data from the local disk.
+ These should be fast so more initial primary recoveries can happen in
+ parallel on each node. Defaults to `4`. Increasing this setting may cause
+ shard recoveries to have a performance impact on other activity in your
+ cluster, but may not make shard recoveries complete noticeably sooner. We
+ do not recommend adjusting this setting from its default of `4`.
[[shards-rebalancing-settings]]
==== Shard rebalancing settings
@@ -73,38 +90,44 @@ balancer works independently within each tier.
You can use the following settings to control the rebalancing of shards across
the cluster:
-`cluster.routing.rebalance.enable`::
+`cluster.routing.allocation.allow_rebalance`::
+
--
(<>)
-Enable or disable rebalancing for specific kinds of shards:
+Specify when shard rebalancing is allowed:
-* `all` - (default) Allows shard balancing for all kinds of shards.
-* `primaries` - Allows shard balancing only for primary shards.
-* `replicas` - Allows shard balancing only for replica shards.
-* `none` - No shard balancing of any kind are allowed for any indices.
+
+* `always` - Always allow rebalancing.
+* `indices_primaries_active` - Only when all primaries in the cluster are allocated.
+* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated.
--
-`cluster.routing.allocation.allow_rebalance`::
+`cluster.routing.rebalance.enable`::
+
--
(<>)
-Specify when shard rebalancing is allowed:
+Enable or disable rebalancing for specific kinds of shards:
+* `all` - (default) Allows shard balancing for all kinds of shards.
+* `primaries` - Allows shard balancing only for primary shards.
+* `replicas` - Allows shard balancing only for replica shards.
+* `none` - No shard balancing of any kind is allowed for any indices.
-* `always` - Always allow rebalancing.
-* `indices_primaries_active` - Only when all primaries in the cluster are allocated.
-* `indices_all_active` - (default) Only when all shards (primaries and replicas) in the cluster are allocated.
+Rebalancing is important to ensure the cluster returns to a healthy and fully
+resilient state after a disruption. If you adjust this setting, remember to set
+it back to `all` as soon as possible.
--
`cluster.routing.allocation.cluster_concurrent_rebalance`::
(<>)
Defines the number of concurrent shard rebalances allowed across the whole
cluster. Defaults to `2`. Note that this setting only controls the number of
-concurrent shard relocations due to imbalances in the cluster. This setting does
-not limit shard relocations due to
+concurrent shard relocations due to imbalances in the cluster. This setting
+does not limit shard relocations due to
<> or
-<>.
+<>. Increasing this setting may cause the
+cluster to use additional resources moving shards between nodes, so we
+generally do not recommend adjusting this setting from its default of `2`.
`cluster.routing.allocation.type`::
+
@@ -149,6 +172,12 @@ data stream have an estimated write load of zero.
The following settings control how {es} combines these values into an overall
measure of each node's weight.
+`cluster.routing.allocation.balance.threshold`::
+(float, <>)
+The minimum improvement in weight which triggers a rebalancing shard movement.
+Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing
+shards sooner, leaving the cluster in a more unbalanced state.
+
`cluster.routing.allocation.balance.shard`::
(float, <>)
Defines the weight factor for the total number of shards allocated to each node.
@@ -177,19 +206,25 @@ estimated number of indexing threads needed by the shard. Defaults to `10.0f`.
Raising this value increases the tendency of {es} to equalize the total write
load across nodes ahead of the other balancing variables.
-`cluster.routing.allocation.balance.threshold`::
-(float, <>)
-The minimum improvement in weight which triggers a rebalancing shard movement.
-Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing
-shards sooner, leaving the cluster in a more unbalanced state.
-
[NOTE]
====
-* It is not recommended to adjust the values of the heuristics settings. The
-default values are generally good, and although different values may improve
-the current balance, it is possible that they create problems in the future
-if the cluster or workload changes.
+* If you have a large cluster, it may be unnecessary to keep it in
+a perfectly balanced state at all times. It is less resource-intensive for the
+cluster to operate in a somewhat unbalanced state rather than to perform all
+the shard movements needed to achieve the perfect balance. If so, increase the
+value of `cluster.routing.allocation.balance.threshold` to define the
+acceptable imbalance between nodes. For instance, if you have an average of 500
+shards per node and can accept a difference of 5% (25 typical shards) between
+nodes, set `cluster.routing.allocation.balance.threshold` to `25`.
+
+* We do not recommend adjusting the values of the heuristic weight factor
+settings. The default values work well in all reasonable clusters. Although
+different values may improve the current balance in some ways, it is possible
+that they will create unexpected problems in the future or prevent it from
+gracefully handling an unexpected disruption.
+
* Regardless of the result of the balancing algorithm, rebalancing might
not be allowed due to allocation rules such as forced awareness and allocation
-filtering.
+filtering. Use the <> API to explain the current
+allocation of shards.
====
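The 500-shards, 5% worked example in the note above is simple arithmetic; a hedged sketch of the calculation one might script when picking a value (the helper name is hypothetical):

```java
public class BalanceThresholdExample {
    /**
     * Rule of thumb from the note above: threshold = average shards per node
     * multiplied by the imbalance you are willing to tolerate between nodes.
     */
    static float balanceThreshold(int avgShardsPerNode, double acceptableImbalance) {
        return (float) (avgShardsPerNode * acceptableImbalance);
    }

    public static void main(String[] args) {
        // 500 shards per node, 5% tolerated difference -> threshold of 25.0
        System.out.println(balanceThreshold(500, 0.05));
    }
}
```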
diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc
index 02b70c69876ff..261c3d3fc3f24 100644
--- a/docs/reference/modules/indices/recovery.asciidoc
+++ b/docs/reference/modules/indices/recovery.asciidoc
@@ -38,8 +38,9 @@ This limit applies to each node separately. If multiple nodes in a cluster
perform recoveries at the same time, the cluster's total recovery traffic may
exceed this limit.
+
-If this limit is too high, ongoing recoveries may consume an excess of bandwidth
-and other resources, which can destabilize the cluster.
+If this limit is too high, ongoing recoveries may consume an excess of
+bandwidth and other resources, which can have a performance impact on your
+cluster and in extreme cases may destabilize it.
+
This is a dynamic setting, which means you can set it in each node's
`elasticsearch.yml` config file and you can update it dynamically using the
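Since the setting is dynamic, it can also be changed at runtime. A sketch using the cluster-settings API from the same codebase, assuming a wired `Client` instance; the 40mb figure is illustrative:

```java
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.common.settings.Settings;

public class RecoveryBandwidthExample {
    /**
     * Persistently caps per-node recovery traffic at an illustrative 40mb/s.
     * Because the setting is dynamic, no restart is needed.
     */
    static void limitRecoveryBandwidth(Client client) {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.persistentSettings(Settings.builder().put("indices.recovery.max_bytes_per_sec", "40mb").build());
        client.admin().cluster().updateSettings(request).actionGet();
    }
}
```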
From 931f2c48c916536626b4fbbb86a6595ac4f9b7d2 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Mon, 5 Feb 2024 21:06:09 +1100
Subject: [PATCH 014/106] [Docs] Fix a doc bug for Flush API's force parameter
(#105112)
The `force` parameter defaults to `false`, not `true` as previously documented.
---
docs/reference/indices/flush.asciidoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc
index 1f0a79258bd37..25d39a17af306 100644
--- a/docs/reference/indices/flush.asciidoc
+++ b/docs/reference/indices/flush.asciidoc
@@ -81,7 +81,7 @@ Defaults to `open`.
If `true`,
the request forces a flush
even if there are no changes to commit to the index.
-Defaults to `true`.
+Defaults to `false`.
You can use this parameter
to increment the generation number of the transaction log.
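With the corrected default of `false`, a flush with no pending changes is a no-op unless forced; a hedged sketch of forcing one from Java, assuming a wired `Client` instance:

```java
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.client.internal.Client;

public class ForcedFlushExample {
    /**
     * With force=false (the actual default) a flush with nothing to commit is
     * a no-op; forcing it still rolls the translog generation.
     */
    static void forceFlush(Client client, String index) {
        client.admin().indices().flush(new FlushRequest(index).force(true)).actionGet();
    }
}
```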
From 2a39c32fb0386581e51ac5126bf70d2a2e766d54 Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Mon, 5 Feb 2024 10:38:39 +0000
Subject: [PATCH 015/106] Mute IndexRecoveryIT
testDoNotInfinitelyWaitForMapping (#105125)
For #105122
---
.../java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java | 1 +
1 file changed, 1 insertion(+)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index bd400f9f0f6a1..1c4dbce2ccf32 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -1039,6 +1039,7 @@ public void testHistoryRetention() throws Exception {
assertThat(recoveryState.getTranslog().recoveredOperations(), greaterThan(0));
}
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105122")
public void testDoNotInfinitelyWaitForMapping() {
internalCluster().ensureAtLeastNumDataNodes(3);
createIndex(
From d6bbfc53bbc86e1f6595db1a3c21793ebab5c140 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 5 Feb 2024 11:41:49 +0100
Subject: [PATCH 016/106] Unmute DownsampleActionIT#testRollupNonTSIndex(...)
(#105116)
and add more logging for when the test fails next time.
Relates to #103981
---
.../xpack/ilm/actions/DownsampleActionIT.java | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
index a6fa7cd3ffbc6..8a7ec329e55c3 100644
--- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
+++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
@@ -394,7 +394,6 @@ public void testILMWaitsForTimeSeriesEndTimeToLapse() throws Exception {
}, 30, TimeUnit.SECONDS);
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103981")
public void testRollupNonTSIndex() throws Exception {
createIndex(index, alias, false);
index(client(), index, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5));
@@ -404,10 +403,19 @@ public void testRollupNonTSIndex() throws Exception {
createNewSingletonPolicy(client(), policy, phaseName, new DownsampleAction(fixedInterval, DownsampleAction.DEFAULT_WAIT_TIMEOUT));
updatePolicy(client(), index, policy);
- assertBusy(() -> assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep(phaseName).getKey())));
- String rollupIndex = getRollupIndexName(client(), index, fixedInterval);
- assertNull("Rollup index should not have been created", rollupIndex);
- assertTrue("Source index should not have been deleted", indexExists(index));
+ try {
+ assertBusy(() -> assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep(phaseName).getKey())));
+ String rollupIndex = getRollupIndexName(client(), index, fixedInterval);
+ assertNull("Rollup index should not have been created", rollupIndex);
+ assertTrue("Source index should not have been deleted", indexExists(index));
+ } catch (AssertionError ea) {
+ logger.warn(
+ "--> original index name is [{}], rollup index name is NULL, possible explanation: {}",
+ index,
+ explainIndex(client(), index)
+ );
+ throw ea;
+ }
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101428")
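The shape of the fix above (catch the AssertionError, log a possible explanation, rethrow) generalizes beyond this ILM test; a minimal sketch with hypothetical names:

```java
import java.util.function.Supplier;

public class DiagnosticAssertExample {
    /**
     * Runs assertions; on failure, logs extra context and rethrows the
     * original AssertionError so the stack trace is preserved.
     */
    static void assertWithDiagnostics(Runnable assertions, Supplier<String> diagnostics) {
        try {
            assertions.run();
        } catch (AssertionError e) {
            System.err.println("--> assertion failed, possible explanation: " + diagnostics.get());
            throw e;
        }
    }

    public static void main(String[] args) {
        assertWithDiagnostics(
            () -> { if (2 + 2 != 4) throw new AssertionError("arithmetic broke"); },
            () -> "current sum: " + (2 + 2)
        );
    }
}
```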
From 9bc2a7045ead36870110926d94fb609c6512b35d Mon Sep 17 00:00:00 2001
From: Dmitry Cherniachenko <2sabio@gmail.com>
Date: Mon, 5 Feb 2024 11:48:41 +0100
Subject: [PATCH 017/106] Minor grammar fixes (StreamInput.java) (#99857)
---
.../org/elasticsearch/common/io/stream/StreamInput.java | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index c4f0dc58f5ffd..9e271ee6f9bfc 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -364,7 +364,7 @@ public Text readOptionalText() throws IOException {
}
public Text readText() throws IOException {
- // use StringAndBytes so we can cache the string if its ever converted to it
+ // use StringAndBytes so we can cache the string if it's ever converted to it
int length = readInt();
return new Text(readBytesReference(length));
}
@@ -1271,8 +1271,8 @@ protected int readArraySize() throws IOException {
if (arraySize < 0) {
throwNegative(arraySize);
}
- // lets do a sanity check that if we are reading an array size that is bigger that the remaining bytes we can safely
- // throw an exception instead of allocating the array based on the size. A simple corrutpted byte can make a node go OOM
+ // let's do a sanity check that if we are reading an array size that is bigger than the remaining bytes we can safely
+ // throw an exception instead of allocating the array based on the size. A simple corrupted byte can make a node go OOM
// if the size is large and for perf reasons we allocate arrays ahead of time
ensureCanReadBytes(arraySize);
return arraySize;
@@ -1287,7 +1287,7 @@ private static void throwExceedsMaxArraySize(int arraySize) {
}
/**
- * This method throws an {@link EOFException} if the given number of bytes can not be read from the this stream. This method might
+ * This method throws an {@link EOFException} if the given number of bytes can not be read from the stream. This method might
* be a no-op depending on the underlying implementation if the information of the remaining bytes is not present.
*/
protected abstract void ensureCanReadBytes(int length) throws EOFException;
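The comment being tidied here describes a real defensive pattern: validate a length prefix against the readable bytes before allocating. A self-contained sketch over plain java.io (the names and framing are illustrative, not the StreamInput API):

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class SafeArrayReadDemo {
    /**
     * Reads a length-prefixed byte array, refusing to allocate more bytes
     * than the stream can still provide. A single corrupted length byte
     * can otherwise request a multi-gigabyte allocation and OOM the reader.
     */
    static byte[] readSizedArray(DataInputStream in, int remaining) throws IOException {
        int size = in.readInt();
        if (size < 0) {
            throw new IOException("negative array size [" + size + "]");
        }
        if (size > remaining - Integer.BYTES) {
            throw new EOFException("array size [" + size + "] exceeds remaining bytes");
        }
        byte[] result = new byte[size];
        in.readFully(result);
        return result;
    }

    public static void main(String[] args) throws IOException {
        byte[] frame = {0, 0, 0, 2, 42, 43}; // length prefix 2, then payload
        byte[] payload = readSizedArray(new DataInputStream(new ByteArrayInputStream(frame)), frame.length);
        System.out.println(payload.length); // 2
    }
}
```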
From 9f2d38856d3f689344fe768e10a59a5eb91438c4 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 5 Feb 2024 11:56:36 +0100
Subject: [PATCH 018/106] Simplify EQL logic that references SearchHit
(#105060)
I tried to move this logic to use pooled SearchHit instances but it
turned out to be too complicated in one go, so this change simplifies the
obvious spots:
* ReversePayload is pointless; it just reverses the original payload.
* a number of listeners were unnecessary and could be expressed inline
much more clearly
* moved some "unpooling" to later in the logic so that objects live for a
shorter time and have fewer references to them
---
.../xpack/core/async/AsyncResponse.java | 2 +-
.../xpack/eql/action/EqlSearchResponse.java | 6 ++-
.../eql/execution/payload/EventPayload.java | 11 +++--
.../eql/execution/payload/ReversePayload.java | 44 -------------------
.../eql/execution/sample/SamplePayload.java | 4 +-
.../eql/execution/search/AsEventListener.java | 26 -----------
.../execution/search/BasicQueryClient.java | 6 +--
.../eql/execution/search/ReverseListener.java | 25 -----------
.../execution/sequence/SequencePayload.java | 4 +-
.../execution/sequence/TumblingWindow.java | 3 +-
.../xpack/eql/plan/physical/EsQueryExec.java | 11 +++--
.../eql/action/EqlSearchResponseTests.java | 4 +-
12 files changed, 25 insertions(+), 121 deletions(-)
delete mode 100644 x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/ReversePayload.java
delete mode 100644 x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/AsEventListener.java
delete mode 100644 x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/ReverseListener.java
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResponse.java
index b31544a1921a6..f6a9bd8474838 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResponse.java
@@ -10,7 +10,7 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.RefCounted;
-public interface AsyncResponse> extends Writeable, RefCounted {
+public interface AsyncResponse> extends Writeable, RefCounted {
/**
* When this response will expire as a timestamp in milliseconds since epoch.
*/
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java
index f9f9238b6c4ab..5eef57cbb6c5b 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java
@@ -22,6 +22,7 @@
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.InstantiatingObjectParser;
@@ -43,6 +44,7 @@
import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
+import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex;
public class EqlSearchResponse extends ActionResponse implements ToXContentObject, QlStatusResponse.AsyncStatus {
@@ -260,8 +262,8 @@ private static final class Fields {
private final boolean missing;
- public Event(String index, String id, BytesReference source, Map fetchFields) {
- this(index, id, source, fetchFields, false);
+ public Event(SearchHit hit) {
+ this(qualifiedIndex(hit), hit.getId(), hit.getSourceRef(), hit.getDocumentFields(), false);
}
public Event(String index, String id, BytesReference source, Map fetchFields, Boolean missing) {
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java
index 0749b53c7b1cf..a7845ca62dccc 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java
@@ -9,14 +9,12 @@
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event;
-import org.elasticsearch.xpack.eql.execution.search.RuntimeUtils;
import java.util.ArrayList;
import java.util.List;
-import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex;
-
public class EventPayload extends AbstractPayload {
private final List values;
@@ -24,10 +22,11 @@ public class EventPayload extends AbstractPayload {
public EventPayload(SearchResponse response) {
super(response.isTimedOut(), response.getTook());
- List hits = RuntimeUtils.searchHits(response);
- values = new ArrayList<>(hits.size());
+ SearchHits hits = response.getHits();
+ values = new ArrayList<>(hits.getHits().length);
for (SearchHit hit : hits) {
- values.add(new Event(qualifiedIndex(hit), hit.getId(), hit.getSourceRef(), hit.getDocumentFields()));
+ // TODO: remove unpooled usage
+ values.add(new Event(hit.asUnpooled()));
}
}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/ReversePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/ReversePayload.java
deleted file mode 100644
index 533dc3a992e74..0000000000000
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/ReversePayload.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.eql.execution.payload;
-
-import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.xpack.eql.session.Payload;
-
-import java.util.Collections;
-import java.util.List;
-
-public class ReversePayload implements Payload {
-
- private final Payload delegate;
-
- public ReversePayload(Payload delegate) {
- this.delegate = delegate;
- Collections.reverse(delegate.values());
- }
-
- @Override
- public Type resultType() {
- return delegate.resultType();
- }
-
- @Override
- public boolean timedOut() {
- return delegate.timedOut();
- }
-
- @Override
- public TimeValue timeTook() {
- return delegate.timeTook();
- }
-
- @Override
- public List> values() {
- return delegate.values();
- }
-}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java
index ddd33e58f5448..121f4c208273b 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java
@@ -15,8 +15,6 @@
import java.util.ArrayList;
import java.util.List;
-import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex;
-
class SamplePayload extends AbstractPayload {
private final List values;
@@ -30,7 +28,7 @@ class SamplePayload extends AbstractPayload {
List hits = docs.get(i);
List events = new ArrayList<>(hits.size());
for (SearchHit hit : hits) {
- events.add(new Event(qualifiedIndex(hit), hit.getId(), hit.getSourceRef(), hit.getDocumentFields()));
+ events.add(new Event(hit));
}
values.add(new org.elasticsearch.xpack.eql.action.EqlSearchResponse.Sequence(s.key().asList(), events));
}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/AsEventListener.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/AsEventListener.java
deleted file mode 100644
index 122e28f4e50b8..0000000000000
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/AsEventListener.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.eql.execution.search;
-
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.DelegatingActionListener;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.xpack.eql.execution.payload.EventPayload;
-import org.elasticsearch.xpack.eql.session.Payload;
-
-public class AsEventListener extends DelegatingActionListener {
-
- public AsEventListener(ActionListener listener) {
- super(listener);
- }
-
- @Override
- public void onResponse(SearchResponse response) {
- delegate.onResponse(new EventPayload(response));
- }
-}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java
index ceaf8bcbb6b6f..6cbe5298b5950 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java
@@ -159,13 +159,13 @@ public void fetchHits(Iterable> refs, ActionListener listener) {
- List docs = RuntimeUtils.searchHits(item.getResponse());
// for each doc, find its reference and its position inside the matrix
- for (SearchHit doc : docs) {
+ for (SearchHit doc : item.getResponse().getHits()) {
HitReference docRef = new HitReference(doc);
List positions = referenceToPosition.get(docRef);
positions.forEach(pos -> {
- SearchHit previous = seq.get(pos / listSize).set(pos % listSize, doc);
+ // TODO: stop using unpooled
+ SearchHit previous = seq.get(pos / listSize).set(pos % listSize, doc.asUnpooled());
if (previous != null) {
throw new EqlIllegalArgumentException(
"Overriding sequence match [{}] with [{}]",
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/ReverseListener.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/ReverseListener.java
deleted file mode 100644
index bc6fd8c82b85a..0000000000000
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/ReverseListener.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.eql.execution.search;
-
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.DelegatingActionListener;
-import org.elasticsearch.xpack.eql.execution.payload.ReversePayload;
-import org.elasticsearch.xpack.eql.session.Payload;
-
-public class ReverseListener extends DelegatingActionListener {
-
- public ReverseListener(ActionListener delegate) {
- super(delegate);
- }
-
- @Override
- public void onResponse(Payload response) {
- delegate.onResponse(new ReversePayload(response));
- }
-}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java
index 95e18d54f5a08..45083babddbb4 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java
@@ -15,8 +15,6 @@
import java.util.ArrayList;
import java.util.List;
-import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex;
-
class SequencePayload extends AbstractPayload {
private final List values;
@@ -33,7 +31,7 @@ class SequencePayload extends AbstractPayload {
if (hit == null) {
events.add(Event.MISSING_EVENT);
} else {
- events.add(new Event(qualifiedIndex(hit), hit.getId(), hit.getSourceRef(), hit.getDocumentFields()));
+ events.add(new Event(hit));
}
}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java
index d692bc376de01..35f171806ccb2 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java
@@ -732,8 +732,7 @@ private void doPayload(ActionListener listener) {
if (criteria.get(matcher.firstPositiveStage).descending()) {
Collections.reverse(completed);
}
- SequencePayload payload = new SequencePayload(completed, addMissingEventPlaceholders(listOfHits), false, timeTook());
- return payload;
+ return new SequencePayload(completed, addMissingEventPlaceholders(listOfHits), false, timeTook());
}));
}
diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java
index 4877b4d909a72..6fa61dcd84e48 100644
--- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java
+++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java
@@ -11,10 +11,9 @@
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.xpack.eql.execution.search.AsEventListener;
+import org.elasticsearch.xpack.eql.execution.payload.EventPayload;
import org.elasticsearch.xpack.eql.execution.search.BasicQueryClient;
import org.elasticsearch.xpack.eql.execution.search.QueryRequest;
-import org.elasticsearch.xpack.eql.execution.search.ReverseListener;
import org.elasticsearch.xpack.eql.execution.search.SourceGenerator;
import org.elasticsearch.xpack.eql.querydsl.container.QueryContainer;
import org.elasticsearch.xpack.eql.session.EqlConfiguration;
@@ -24,6 +23,7 @@
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.Source;
+import java.util.Collections;
import java.util.List;
import java.util.Objects;
@@ -71,8 +71,11 @@ public SearchSourceBuilder source(EqlSession session, boolean includeFetchFields
public void execute(EqlSession session, ActionListener listener) {
// endpoint - fetch all source
QueryRequest request = () -> source(session, true).fetchSource(FetchSourceContext.FETCH_SOURCE);
- listener = shouldReverse(request) ? new ReverseListener(listener) : listener;
- new BasicQueryClient(session).query(request, new AsEventListener(listener));
+ new BasicQueryClient(session).query(request, listener.safeMap(shouldReverse(request) ? r -> {
+ var res = new EventPayload(r);
+ Collections.reverse(res.values());
+ return res;
+ } : EventPayload::new));
}
private static boolean shouldReverse(QueryRequest query) {
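The key simplification above is replacing one-off DelegatingActionListener subclasses with an inline transformation of the listener. A minimal sketch of that shape against the ActionListener API, with hypothetical payload types:

```java
import org.elasticsearch.action.ActionListener;

public class ListenerMapExample {
    record RawResponse(int value) {}
    record Payload(int value) {}

    /**
     * Rather than a dedicated DelegatingActionListener subclass, derive the
     * transformed listener inline; safeMap routes mapper failures to onFailure.
     */
    static ActionListener<RawResponse> asPayloadListener(ActionListener<Payload> listener, boolean reverse) {
        return listener.safeMap(r -> new Payload(reverse ? -r.value() : r.value()));
    }
}
```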
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
index 255e94d6bda34..6cb283d11848e 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
@@ -117,7 +117,7 @@ static List randomEvents(XContentType xType) {
if (fetchFields.isEmpty() && randomBoolean()) {
fetchFields = null;
}
- hits.add(new Event(String.valueOf(i), randomAlphaOfLength(10), bytes, fetchFields));
+ hits.add(new Event(String.valueOf(i), randomAlphaOfLength(10), bytes, fetchFields, false));
}
}
}
@@ -297,7 +297,7 @@ private List mutateEvents(List original, TransportVersion version)
}
public void testEmptyIndexAsMissingEvent() throws IOException {
- Event event = new Event("", "", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), null);
+ Event event = new Event("", "", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), null, false);
BytesStreamOutput out = new BytesStreamOutput();
out.setTransportVersion(TransportVersions.V_8_9_X);// 8.9.1
event.writeTo(out);
From e75ca48ece475411f515208761cdb68703259405 Mon Sep 17 00:00:00 2001
From: Ievgen Degtiarenko
Date: Mon, 5 Feb 2024 14:11:46 +0100
Subject: [PATCH 019/106] Fix testListTasksWaitForCompletion (#104391)
This change attempts to fix testListTasksWaitForCompletion by setting up
barriers to verify that the task has started on all nodes and has had a
chance to list all running tasks before canceling the TEST_TASK.
---
.../admin/cluster/node/tasks/TasksIT.java | 113 +++++++++---------
.../tasks/list/TransportListTasksAction.java | 5 -
2 files changed, 58 insertions(+), 60 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
index 884f6dbcd677e..0766b732099c4 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
@@ -48,10 +48,10 @@
import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.test.tasks.MockTaskManagerListener;
import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.XContentType;
@@ -64,6 +64,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
@@ -71,6 +72,7 @@
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
+import java.util.function.Supplier;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
@@ -531,22 +533,32 @@ public void testTasksUnblocking() throws Exception {
);
}
- @TestLogging(
- reason = "https://github.com/elastic/elasticsearch/issues/97923",
- value = "org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction:TRACE"
- )
public void testListTasksWaitForCompletion() throws Exception {
- waitForCompletionTestCase(
- randomBoolean(),
- id -> clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name()).setWaitForCompletion(true).execute(),
- response -> {
- assertThat(response.getNodeFailures(), empty());
- assertThat(response.getTaskFailures(), empty());
- assertThat(response.getTasks(), hasSize(1));
- TaskInfo task = response.getTasks().get(0);
- assertEquals(TEST_TASK_ACTION.name(), task.action());
+ waitForCompletionTestCase(randomBoolean(), id -> {
+ var future = ensureStartedOnAllNodes(
+ "cluster:monitor/tasks/lists[n]",
+ () -> clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name()).setWaitForCompletion(true).execute()
+ );
+
+ // This ensures that a task has progressed to the point of listing all running tasks and subscribing to their updates
+ for (var threadPool : internalCluster().getInstances(ThreadPool.class)) {
+ var max = threadPool.info(ThreadPool.Names.MANAGEMENT).getMax();
+ var executor = threadPool.executor(ThreadPool.Names.MANAGEMENT);
+ var waitForManagementToCompleteAllTasks = new CyclicBarrier(max + 1);
+ for (int i = 0; i < max; i++) {
+ executor.submit(() -> safeAwait(waitForManagementToCompleteAllTasks));
+ }
+ safeAwait(waitForManagementToCompleteAllTasks);
}
- );
+
+ return future;
+ }, response -> {
+ assertThat(response.getNodeFailures(), empty());
+ assertThat(response.getTaskFailures(), empty());
+ assertThat(response.getTasks(), hasSize(1));
+ TaskInfo task = response.getTasks().get(0);
+ assertEquals(TEST_TASK_ACTION.name(), task.action());
+ });
}
public void testGetTaskWaitForCompletionWithoutStoringResult() throws Exception {
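The barrier trick above submits one blocking task per management thread and then joins the same barrier itself, which guarantees that everything queued on that pool beforehand has completed. A standalone sketch of the pattern:

```java
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DrainPoolDemo {
    /**
     * Blocks until all tasks submitted to the pool before this call have run:
     * fill every worker with a barrier-await task, then join the barrier.
     */
    static void drain(ExecutorService executor, int poolSize) throws Exception {
        CyclicBarrier barrier = new CyclicBarrier(poolSize + 1);
        for (int i = 0; i < poolSize; i++) {
            executor.submit(() -> {
                try {
                    barrier.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
        }
        barrier.await(); // trips only once every worker thread is parked here
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.submit(() -> System.out.println("earlier work"));
        drain(pool, 4);
        System.out.println("all earlier work is done");
        pool.shutdown();
    }
}
```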
@@ -582,34 +594,20 @@ private void waitForCompletionTestCase(boolean storeResult, Function wait)
- ActionFuture future = client().execute(TEST_TASK_ACTION, request);
+ ActionFuture future = ensureStartedOnAllNodes(
+ TEST_TASK_ACTION.name() + "[n]",
+ () -> client().execute(TEST_TASK_ACTION, request)
+ );
ActionFuture waitResponseFuture;
- TaskId taskId;
try {
- taskId = waitForTestTaskStartOnAllNodes();
-
- // Wait for the task to start
- assertBusy(() -> clusterAdmin().prepareGetTask(taskId).get());
-
- // Register listeners so we can be sure the waiting started
- CountDownLatch waitForWaitingToStart = new CountDownLatch(1);
- for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
- ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
- @Override
- public void onTaskUnregistered(Task task) {
- waitForWaitingToStart.countDown();
- }
- });
- }
+ var tasks = clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name()).get().getTasks();
+ assertThat(tasks, hasSize(1));
+ var taskId = tasks.get(0).taskId();
+ clusterAdmin().prepareGetTask(taskId).get();
// Spin up a request to wait for the test task to finish
waitResponseFuture = wait.apply(taskId);
-
- /* Wait for the wait to start. This should count down just *before* we wait for completion but after the list/get has got a
- * reference to the running task. Because we unblock immediately after this the task may no longer be running for us to wait
- * on which is fine. */
- waitForWaitingToStart.await();
} finally {
// Unblock the request so the wait for completion request can finish
client().execute(UNBLOCK_TASK_ACTION, new TestTaskPlugin.UnblockTestTasksRequest()).get();
@@ -651,14 +649,15 @@ public void testGetTaskWaitForTimeout() throws Exception {
*/
private void waitForTimeoutTestCase(Function> wait) throws Exception {
// Start blocking test task
- TestTaskPlugin.NodesRequest request = new TestTaskPlugin.NodesRequest("test");
- ActionFuture future = client().execute(TEST_TASK_ACTION, request);
+ ActionFuture future = ensureStartedOnAllNodes(
+ TEST_TASK_ACTION.name() + "[n]",
+ () -> client().execute(TEST_TASK_ACTION, new TestTaskPlugin.NodesRequest("test"))
+ );
try {
- TaskId taskId = waitForTestTaskStartOnAllNodes();
-
- // Wait for the task to start
- assertBusy(() -> clusterAdmin().prepareGetTask(taskId).get());
-
+ var tasks = clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name()).get().getTasks();
+ assertThat(tasks, hasSize(1));
+ var taskId = tasks.get(0).taskId();
+ clusterAdmin().prepareGetTask(taskId).get();
// Spin up a request that should wait for those tasks to finish
// It will timeout because we haven't unblocked the tasks
Iterable extends Throwable> failures = wait.apply(taskId);
@@ -675,17 +674,21 @@ private void waitForTimeoutTestCase(Function wait) throws Exception {
- private TaskId waitForTestTaskStartOnAllNodes() throws Exception {
- assertBusy(() -> {
- List tasks = clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name() + "[n]").get().getTasks();
- assertEquals(internalCluster().size(), tasks.size());
- });
- List task = clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name()).get().getTasks();
- assertThat(task, hasSize(1));
- return task.get(0).taskId();
+ private ActionFuture ensureStartedOnAllNodes(String nodeTaskName, Supplier> taskStarter) {
+ var startedOnAllNodes = new CountDownLatch(internalCluster().size());
+ for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
+ ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
+ @Override
+ public void onTaskRegistered(Task task) {
+ if (Objects.equals(task.getAction(), nodeTaskName)) {
+ startedOnAllNodes.countDown();
+ }
+ }
+ });
+ }
+ var future = taskStarter.get();
+ safeAwait(startedOnAllNodes);
+ return future;
}
public void testTasksListWaitForNoTask() throws Exception {
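The replacement helper waits, via a latch, for a per-node task registration on every node before returning the future. The same synchronization shape detached from the test infrastructure (the listener interface here is hypothetical):

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;
import java.util.function.Supplier;

public class StartOnAllNodesDemo {
    interface TaskManager {
        void addRegistrationListener(Consumer<String> onTaskRegistered);
    }

    /**
     * Registers a listener on every node's task manager, starts the work, and
     * blocks until each node has registered a task with the expected name.
     */
    static <T> T ensureStartedOnAllNodes(List<TaskManager> nodes, String taskName, Supplier<T> starter)
        throws InterruptedException {
        CountDownLatch startedOnAllNodes = new CountDownLatch(nodes.size());
        for (TaskManager node : nodes) {
            node.addRegistrationListener(action -> {
                if (action.equals(taskName)) {
                    startedOnAllNodes.countDown();
                }
            });
        }
        T future = starter.get();
        startedOnAllNodes.await();
        return future;
    }
}
```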
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java
index 62ede5b2f480b..4f8a6b6db2980 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java
@@ -8,8 +8,6 @@
package org.elasticsearch.action.admin.cluster.node.tasks.list;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.FailedNodeException;
@@ -43,8 +41,6 @@
public class TransportListTasksAction extends TransportTasksAction {
- private static final Logger logger = LogManager.getLogger(TransportListTasksAction.class);
-
public static final ActionType TYPE = new ActionType<>("cluster:monitor/tasks/lists");
public static long waitForCompletionTimeout(TimeValue timeout) {
@@ -132,7 +128,6 @@ protected void processTasks(CancellableTask nodeTask, ListTasksRequest request,
}
processedTasks.add(task);
}
- logger.trace("Matched {} tasks of all running {}", processedTasks, taskManager.getTasks().values());
} catch (Exception e) {
allMatchedTasksRemovedListener.onFailure(e);
return;
From 62dc143ab5f8654b40d1988dc74896fbea5ea9b5 Mon Sep 17 00:00:00 2001
From: Jedr Blaszyk
Date: Mon, 5 Feb 2024 14:37:36 +0100
Subject: [PATCH 020/106] [Connectors API] Fix bug with crawler configuration
parsing and sync_now flag (#105024)
---
docs/changelog/105024.yaml | 6 ++
.../335_connector_update_configuration.yml | 24 ++++++-
.../application/connector/Connector.java | 4 +-
.../connector/ConnectorConfiguration.java | 70 ++++++++++++-------
.../connector/syncjob/ConnectorSyncJob.java | 2 +-
.../ConnectorConfigurationTests.java | 40 +++++++++++
.../syncjob/ConnectorSyncJobTests.java | 2 +-
7 files changed, 116 insertions(+), 32 deletions(-)
create mode 100644 docs/changelog/105024.yaml
diff --git a/docs/changelog/105024.yaml b/docs/changelog/105024.yaml
new file mode 100644
index 0000000000000..96268b78ddf5d
--- /dev/null
+++ b/docs/changelog/105024.yaml
@@ -0,0 +1,6 @@
+pr: 105024
+summary: "[Connectors API] Fix bug with crawler configuration parsing and `sync_now`\
+ \ flag"
+area: Application
+type: bug
+issues: []
diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml
index 5a7ab14dc6386..aeac8202a950b 100644
--- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml
+++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml
@@ -185,7 +185,6 @@ setup:
- field: some_field
value: 31
display: numeric
- label: Very important field
---
"Update Connector Configuration - Unknown field type":
@@ -240,3 +239,26 @@ setup:
- constraint: 0
type: unknown_constraint
value: 123
+
+---
+"Update Connector Configuration - Crawler configuration":
+ - do:
+ connector.update_configuration:
+ connector_id: test-connector
+ body:
+ configuration:
+ nextSyncConfig:
+ label: nextSyncConfig
+ value:
+ max_crawl_depth: 3
+ sitemap_discovery_disabled: false
+ seed_urls:
+ - https://elastic.co/
+
+ - match: { result: updated }
+
+ - do:
+ connector.get:
+ connector_id: test-connector
+
+ - match: { configuration.nextSyncConfig.value.max_crawl_depth: 3 }
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java
index db8578e7dfa99..5bae203175d36 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java
@@ -710,8 +710,8 @@ public Builder setSyncCursor(Object syncCursor) {
return this;
}
- public Builder setSyncNow(boolean syncNow) {
- this.syncNow = syncNow;
+ public Builder setSyncNow(Boolean syncNow) {
+ this.syncNow = Objects.requireNonNullElse(syncNow, false);
return this;
}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java
index 8ed7c417a1af1..7d7c7b5fa61f9 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java
@@ -162,8 +162,8 @@ public ConnectorConfiguration(StreamInput in) throws IOException {
.setOptions((List) args[i++])
.setOrder((Integer) args[i++])
.setPlaceholder((String) args[i++])
- .setRequired((boolean) args[i++])
- .setSensitive((boolean) args[i++])
+ .setRequired((Boolean) args[i++])
+ .setSensitive((Boolean) args[i++])
.setTooltip((String) args[i++])
.setType((ConfigurationFieldType) args[i++])
.setUiRestrictions((List) args[i++])
@@ -187,40 +187,42 @@ public ConnectorConfiguration(StreamInput in) throws IOException {
}
throw new XContentParseException("Unsupported token [" + p.currentToken() + "]");
}, DEFAULT_VALUE_FIELD, ObjectParser.ValueType.VALUE);
- PARSER.declareObjectArray(constructorArg(), (p, c) -> ConfigurationDependency.fromXContent(p), DEPENDS_ON_FIELD);
+ PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ConfigurationDependency.fromXContent(p), DEPENDS_ON_FIELD);
PARSER.declareField(
- constructorArg(),
+ optionalConstructorArg(),
(p, c) -> ConfigurationDisplayType.displayType(p.text()),
DISPLAY_FIELD,
- ObjectParser.ValueType.STRING
+ ObjectParser.ValueType.STRING_OR_NULL
);
PARSER.declareString(constructorArg(), LABEL_FIELD);
- PARSER.declareObjectArray(constructorArg(), (p, c) -> ConfigurationSelectOption.fromXContent(p), OPTIONS_FIELD);
+ PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ConfigurationSelectOption.fromXContent(p), OPTIONS_FIELD);
PARSER.declareInt(optionalConstructorArg(), ORDER_FIELD);
- PARSER.declareString(optionalConstructorArg(), PLACEHOLDER_FIELD);
- PARSER.declareBoolean(constructorArg(), REQUIRED_FIELD);
- PARSER.declareBoolean(constructorArg(), SENSITIVE_FIELD);
+ PARSER.declareStringOrNull(optionalConstructorArg(), PLACEHOLDER_FIELD);
+ PARSER.declareBoolean(optionalConstructorArg(), REQUIRED_FIELD);
+ PARSER.declareBoolean(optionalConstructorArg(), SENSITIVE_FIELD);
PARSER.declareStringOrNull(optionalConstructorArg(), TOOLTIP_FIELD);
PARSER.declareField(
- constructorArg(),
- (p, c) -> ConfigurationFieldType.fieldType(p.text()),
+ optionalConstructorArg(),
+ (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? null : ConfigurationFieldType.fieldType(p.text()),
TYPE_FIELD,
- ObjectParser.ValueType.STRING
+ ObjectParser.ValueType.STRING_OR_NULL
);
- PARSER.declareStringArray(constructorArg(), UI_RESTRICTIONS_FIELD);
- PARSER.declareObjectArray(constructorArg(), (p, c) -> ConfigurationValidation.fromXContent(p), VALIDATIONS_FIELD);
- PARSER.declareField(constructorArg(), (p, c) -> {
+ PARSER.declareStringArray(optionalConstructorArg(), UI_RESTRICTIONS_FIELD);
+ PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ConfigurationValidation.fromXContent(p), VALIDATIONS_FIELD);
+ PARSER.declareField(optionalConstructorArg(), (p, c) -> {
if (p.currentToken() == XContentParser.Token.VALUE_STRING) {
return p.text();
} else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) {
return p.numberValue();
} else if (p.currentToken() == XContentParser.Token.VALUE_BOOLEAN) {
return p.booleanValue();
+ } else if (p.currentToken() == XContentParser.Token.START_OBJECT) {
+ return p.map();
} else if (p.currentToken() == XContentParser.Token.VALUE_NULL) {
return null;
}
throw new XContentParseException("Unsupported token [" + p.currentToken() + "]");
- }, VALUE_FIELD, ObjectParser.ValueType.VALUE);
+ }, VALUE_FIELD, ObjectParser.ValueType.VALUE_OBJECT_ARRAY);
}
@Override
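Abstracting away the XContent machinery, the value parser now accepts scalars, explicit null, and whole objects (the crawler overrides). A plain-Java sketch of the same dispatch (a hypothetical helper, not the ObjectParser API):

```java
import java.util.List;
import java.util.Map;

public class ConfigValueParser {
    /**
     * Mirrors the parser change above: accept strings, numbers, booleans,
     * whole objects (e.g. crawler overrides) and explicit null as the
     * configuration value, rejecting anything else.
     */
    static Object parseValue(Object raw) {
        if (raw == null || raw instanceof String || raw instanceof Number || raw instanceof Boolean) {
            return raw;
        }
        if (raw instanceof Map<?, ?> object) {
            return object; // nested objects such as {"max_crawl_depth": 3}
        }
        throw new IllegalArgumentException("Unsupported value type [" + raw.getClass() + "]");
    }

    public static void main(String[] args) {
        System.out.println(parseValue(Map.of("max_crawl_depth", 3, "seed_urls", List.of("https://elastic.co/"))));
        System.out.println(parseValue(3));
        System.out.println(parseValue((Object) null));
    }
}
```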
@@ -231,10 +233,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.field(CATEGORY_FIELD.getPreferredName(), category);
}
builder.field(DEFAULT_VALUE_FIELD.getPreferredName(), defaultValue);
- builder.xContentList(DEPENDS_ON_FIELD.getPreferredName(), dependsOn);
- builder.field(DISPLAY_FIELD.getPreferredName(), display.toString());
+ if (dependsOn != null) {
+ builder.xContentList(DEPENDS_ON_FIELD.getPreferredName(), dependsOn);
+ }
+ if (display != null) {
+ builder.field(DISPLAY_FIELD.getPreferredName(), display.toString());
+ }
builder.field(LABEL_FIELD.getPreferredName(), label);
- builder.xContentList(OPTIONS_FIELD.getPreferredName(), options);
+ if (options != null) {
+ builder.xContentList(OPTIONS_FIELD.getPreferredName(), options);
+ }
if (order != null) {
builder.field(ORDER_FIELD.getPreferredName(), order);
}
@@ -243,10 +251,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
builder.field(REQUIRED_FIELD.getPreferredName(), required);
builder.field(SENSITIVE_FIELD.getPreferredName(), sensitive);
- builder.field(TOOLTIP_FIELD.getPreferredName(), tooltip);
- builder.field(TYPE_FIELD.getPreferredName(), type.toString());
- builder.stringListField(UI_RESTRICTIONS_FIELD.getPreferredName(), uiRestrictions);
- builder.xContentList(VALIDATIONS_FIELD.getPreferredName(), validations);
+ if (tooltip != null) {
+ builder.field(TOOLTIP_FIELD.getPreferredName(), tooltip);
+ }
+ if (type != null) {
+ builder.field(TYPE_FIELD.getPreferredName(), type.toString());
+ }
+ if (uiRestrictions != null) {
+ builder.stringListField(UI_RESTRICTIONS_FIELD.getPreferredName(), uiRestrictions);
+ }
+ if (validations != null) {
+ builder.xContentList(VALIDATIONS_FIELD.getPreferredName(), validations);
+ }
builder.field(VALUE_FIELD.getPreferredName(), value);
}
builder.endObject();
@@ -385,13 +401,13 @@ public Builder setPlaceholder(String placeholder) {
return this;
}
- public Builder setRequired(boolean required) {
- this.required = required;
+ public Builder setRequired(Boolean required) {
+ this.required = Objects.requireNonNullElse(required, false);
return this;
}
- public Builder setSensitive(boolean sensitive) {
- this.sensitive = sensitive;
+ public Builder setSensitive(Boolean sensitive) {
+ this.sensitive = Objects.requireNonNullElse(sensitive, false);
return this;
}
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java
index fb34035e5400b..c531187dbb0a0 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java
@@ -322,7 +322,7 @@ public ConnectorSyncJob(StreamInput in) throws IOException {
STATUS_FIELD,
ObjectParser.ValueType.STRING
);
- PARSER.declareLong(constructorArg(), TOTAL_DOCUMENT_COUNT_FIELD);
+ PARSER.declareLongOrNull(constructorArg(), 0L, TOTAL_DOCUMENT_COUNT_FIELD);
PARSER.declareField(
constructorArg(),
(p, c) -> ConnectorSyncJobTriggerMethod.fromString(p.text()),
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java
index 9b1f9c60d1607..35b21ce676a57 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorConfigurationTests.java
@@ -85,6 +85,46 @@ public void testToXContent() throws IOException {
assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
}
+ public void testToXContentCrawlerConfig_WithNullValue() throws IOException {
+ String content = XContentHelper.stripWhitespace("""
+ {
+ "label": "nextSyncConfig",
+ "value": null
+ }
+ """);
+
+ ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
+ boolean humanReadable = true;
+ BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
+ ConnectorConfiguration parsed;
+ try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
+ parsed = ConnectorConfiguration.fromXContent(parser);
+ }
+ assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
+ }
+
+ public void testToXContentCrawlerConfig_WithCrawlerConfigurationOverrides() throws IOException {
+ String content = XContentHelper.stripWhitespace("""
+ {
+ "label": "nextSyncConfig",
+ "value": {
+ "max_crawl_depth": 3,
+ "sitemap_discovery_disabled": false,
+ "seed_urls": ["https://elastic.co/"]
+ }
+ }
+ """);
+
+ ConnectorConfiguration configuration = ConnectorConfiguration.fromXContentBytes(new BytesArray(content), XContentType.JSON);
+ boolean humanReadable = true;
+ BytesReference originalBytes = toShuffledXContent(configuration, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable);
+ ConnectorConfiguration parsed;
+ try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) {
+ parsed = ConnectorConfiguration.fromXContent(parser);
+ }
+ assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON);
+ }
+
public void testToXContentWithMultipleConstraintTypes() throws IOException {
String content = XContentHelper.stripWhitespace("""
{
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java
index 7b1a0f7d8dcf7..81b05ce25e177 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java
@@ -235,7 +235,7 @@ public void testFromXContent_WithAllNullableFieldsSetToNull_DoesNotThrow() throw
"metadata": {},
"started_at": null,
"status": "canceling",
- "total_document_count": 0,
+ "total_document_count": null,
"trigger_method": "scheduled",
"worker_hostname": null
}
From a34174c2244b93d50687ac89667f6216b5631da7 Mon Sep 17 00:00:00 2001
From: Michael Peterson
Date: Mon, 5 Feb 2024 08:44:43 -0500
Subject: [PATCH 021/106] Query timeouts should not return a 500
 INTERNAL_SERVER_ERROR status code (#104868)
Created a new exception, QueryPhaseTimeoutException, which returns RestStatus 504.
We considered the 408 status code, but decided that the official spec for that
status doesn't match this scenario, so 504 was chosen as the closest fit.
---
.../elasticsearch/ElasticsearchException.java | 7 ++++
.../org/elasticsearch/TransportVersions.java | 1 +
.../search/query/QueryPhase.java | 2 +-
.../query/QueryPhaseTimeoutException.java | 34 +++++++++++++++++++
.../ExceptionSerializationTests.java | 2 ++
5 files changed, 45 insertions(+), 1 deletion(-)
create mode 100644 server/src/main/java/org/elasticsearch/search/query/QueryPhaseTimeoutException.java
diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index 4f29fb3a168b3..656d213e7a1fd 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -37,6 +37,7 @@
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.UnsupportedAggregationOnDownsampledIndex;
+import org.elasticsearch.search.query.QueryPhaseTimeoutException;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentFragment;
@@ -1896,6 +1897,12 @@ private enum ElasticsearchExceptionHandle {
AutoscalingMissedIndicesUpdateException::new,
175,
TransportVersions.MISSED_INDICES_UPDATE_EXCEPTION_ADDED
+ ),
+ QUERY_PHASE_TIMEOUT_EXCEPTION(
+ QueryPhaseTimeoutException.class,
+ QueryPhaseTimeoutException::new,
+ 176,
+ TransportVersions.QUERY_PHASE_TIMEOUT_EXCEPTION_ADDED
);
final Class<? extends ElasticsearchException> exceptionClass;
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 014e1a71d7a0e..cd7f9eb756b91 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -170,6 +170,7 @@ static TransportVersion def(int id) {
public static final TransportVersion KNN_QUERY_NUMCANDS_AS_OPTIONAL_PARAM = def(8_583_00_0);
public static final TransportVersion TRANSFORM_GET_BASIC_STATS = def(8_584_00_0);
public static final TransportVersion NLP_DOCUMENT_CHUNKING_ADDED = def(8_585_00_0);
+ public static final TransportVersion QUERY_PHASE_TIMEOUT_EXCEPTION_ADDED = def(8_586_00_0);
/*
* STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index 01015ec8cc78e..2368eeb18b021 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -210,7 +210,7 @@ static void addCollectorsAndSearch(SearchContext searchContext) throws QueryPhas
if (searcher.timeExceeded()) {
assert timeoutRunnable != null : "TimeExceededException thrown even though timeout wasn't set";
if (searchContext.request().allowPartialSearchResults() == false) {
- throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded");
+ throw new QueryPhaseTimeoutException(searchContext.shardTarget(), "Time exceeded");
}
queryResult.searchTimedOut(true);
}
diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseTimeoutException.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseTimeoutException.java
new file mode 100644
index 0000000000000..1b41f31ea1c82
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseTimeoutException.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchShardTarget;
+
+import java.io.IOException;
+
+/**
+ * Specific instance of QueryPhaseExecutionException that indicates that a search timeout occurred.
+ * Always returns http status 504 (Gateway Timeout)
+ */
+public class QueryPhaseTimeoutException extends QueryPhaseExecutionException {
+ public QueryPhaseTimeoutException(SearchShardTarget shardTarget, String msg) {
+ super(shardTarget, msg);
+ }
+
+ public QueryPhaseTimeoutException(StreamInput in) throws IOException {
+ super(in);
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.GATEWAY_TIMEOUT;
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index 3e0d9193ffed9..9d5c47fbccbc6 100644
--- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -80,6 +80,7 @@
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.UnsupportedAggregationOnDownsampledIndex;
import org.elasticsearch.search.internal.ShardSearchContextId;
+import org.elasticsearch.search.query.QueryPhaseTimeoutException;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotException;
import org.elasticsearch.snapshots.SnapshotId;
@@ -827,6 +828,7 @@ public void testIds() {
ids.put(173, TooManyScrollContextsException.class);
ids.put(174, AggregationExecutionException.InvalidPath.class);
ids.put(175, AutoscalingMissedIndicesUpdateException.class);
+ ids.put(176, QueryPhaseTimeoutException.class);
Map<Class<? extends ElasticsearchException>, Integer> reverse = new HashMap<>();
for (Map.Entry<Integer, Class<? extends ElasticsearchException>> entry : ids.entrySet()) {
From 6054ca36cfa79a1943afde26641240e817239158 Mon Sep 17 00:00:00 2001
From: Jedr Blaszyk
Date: Mon, 5 Feb 2024 15:47:22 +0100
Subject: [PATCH 022/106] [Connector API] Support filtering by name, index name
in list action (#105131)
---
docs/changelog/105131.yaml | 5 ++
.../rest-api-spec/api/connector.list.json | 8 +++
.../test/entsearch/310_connector_list.yml | 64 ++++++++++++++++++-
.../connector/ConnectorIndexService.java | 41 +++++++++++-
.../connector/action/ListConnectorAction.java | 56 ++++++++++++++--
.../action/RestListConnectorAction.java | 6 +-
.../action/TransportListConnectorAction.java | 3 +
.../connector/ConnectorIndexServiceTests.java | 5 +-
...ectorActionRequestBWCSerializingTests.java | 9 ++-
9 files changed, 180 insertions(+), 17 deletions(-)
create mode 100644 docs/changelog/105131.yaml
diff --git a/docs/changelog/105131.yaml b/docs/changelog/105131.yaml
new file mode 100644
index 0000000000000..36993527da583
--- /dev/null
+++ b/docs/changelog/105131.yaml
@@ -0,0 +1,5 @@
+pr: 105131
+summary: "[Connector API] Support filtering by name, index name in list action"
+area: Application
+type: enhancement
+issues: []
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json
index bc8f12a933b1e..562190f6f5cad 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json
@@ -31,6 +31,14 @@
"type": "int",
"default": 100,
"description": "specifies a max number of results to get (default: 100)"
+ },
+ "index_name": {
+ "type": "string",
+ "description": "connector index name(s) to fetch connector documents for"
+ },
+ "connector_name": {
+ "type": "string",
+ "description": "connector name(s) to fetch connector documents for"
}
}
}
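
The two new parameters accept comma-separated values. On the service side the
patch pulls in TermsQueryBuilder and BoolQueryBuilder (see the
ConnectorIndexService diff further below), which suggests the filters compose
roughly as in this hedged sketch; the helper name and the literal field names
here are assumptions for illustration, not the exact implementation:

```java
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;

import java.util.List;

class ConnectorListFilterSketch {
    // Combine optional index-name and connector-name filters into one query;
    // with no filters supplied, fall back to match_all (the prior behavior).
    static QueryBuilder buildListQuery(List<String> indexNames, List<String> connectorNames) {
        BoolQueryBuilder bool = new BoolQueryBuilder();
        if (indexNames != null && indexNames.isEmpty() == false) {
            bool.must(new TermsQueryBuilder("index_name", indexNames));
        }
        if (connectorNames != null && connectorNames.isEmpty() == false) {
            bool.must(new TermsQueryBuilder("name", connectorNames));
        }
        return bool.hasClauses() ? bool : new MatchAllQueryBuilder();
    }
}
```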
diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml
index 52cfcdee0bb85..7aa49297902d5 100644
--- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml
+++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/310_connector_list.yml
@@ -9,7 +9,7 @@ setup:
connector_id: connector-a
body:
index_name: search-1-test
- name: my-connector
+ name: my-connector-1
language: pl
is_native: false
service_type: super-connector
@@ -18,7 +18,7 @@ setup:
connector_id: connector-c
body:
index_name: search-3-test
- name: my-connector
+ name: my-connector-3
language: nl
is_native: false
service_type: super-connector
@@ -27,7 +27,7 @@ setup:
connector_id: connector-b
body:
index_name: search-2-test
- name: my-connector
+ name: my-connector-2
language: en
is_native: true
service_type: super-connector
@@ -106,3 +106,61 @@ setup:
- match: { count: 0 }
+
+---
+"List Connector - filter by index names":
+ - do:
+ connector.list:
+ index_name: search-1-test
+
+ - match: { count: 1 }
+ - match: { results.0.index_name: "search-1-test" }
+
+ - do:
+ connector.list:
+ index_name: search-1-test,search-2-test
+
+ - match: { count: 2 }
+ - match: { results.0.index_name: "search-1-test" }
+ - match: { results.1.index_name: "search-2-test" }
+
+
+---
+"List Connector - filter by index names, illegal name":
+ - do:
+ catch: "bad_request"
+ connector.list:
+ index_name: ~.!$$#index-name$$$
+
+
+---
+"List Connector - filter by connector names":
+ - do:
+ connector.list:
+ connector_name: my-connector-1
+
+ - match: { count: 1 }
+ - match: { results.0.name: "my-connector-1" }
+
+ - do:
+ connector.list:
+ connector_name: my-connector-1,my-connector-2
+
+ - match: { count: 2 }
+ - match: { results.0.name: "my-connector-1" }
+ - match: { results.1.name: "my-connector-2" }
+
+
+---
+"List Connector - filter by index name and name":
+ - do:
+ connector.list:
+ connector_name: my-connector-1,my-connector-2
+ index_name: search-2-test
+
+ - match: { count: 1 }
+ - match: { results.0.index_name: "search-2-test" }
+ - match: { results.0.name: "my-connector-2" }
+
+
+
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java
index fe810bc4ca783..b321a497ab58d 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java
@@ -29,7 +29,9 @@
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.IdsQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
@@ -280,13 +282,21 @@ public void deleteConnector(String connectorId, ActionListener l
*
* @param from From index to start the search from.
* @param size The maximum number of {@link Connector}s to return.
+ * @param indexNames A list of index names to filter the connectors.
+ * @param connectorNames A list of connector names to further filter the search results.
* @param listener The action listener to invoke on response/failure.
*/
- public void listConnectors(int from, int size, ActionListener