From 64e8659845a7d873c9580a997e422e46718e1346 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Wed, 16 Oct 2024 08:58:13 +0100 Subject: [PATCH 01/31] #104411 Add warning headers for ingest pipelines containing special characters (#114837) * Add logs and headers For pipeline creation when name is invalid * Fix YAML tests and add YAML test for warnings * Update docs/changelog/114837.yaml * Changelog entry * Changelog entry * Update docs/changelog/114837.yaml * Changelog entry --- docs/changelog/114837.yaml | 5 +++ .../10_pipeline_with_mustache_templates.yml | 4 +-- .../test/ingest/20_combine_processors.yml | 12 +++---- ...ation_warnings_on_invalid_names_ingest.yml | 28 +++++++++++++++ .../org/elasticsearch/common/Strings.java | 1 + .../elasticsearch/ingest/IngestService.java | 21 +++++++++++ .../ingest/IngestServiceTests.java | 36 ++++++++++++++++--- 7 files changed, 95 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/114837.yaml create mode 100644 qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_deprecation_warnings_on_invalid_names_ingest.yml diff --git a/docs/changelog/114837.yaml b/docs/changelog/114837.yaml new file mode 100644 index 0000000000000..313d88f92282c --- /dev/null +++ b/docs/changelog/114837.yaml @@ -0,0 +1,5 @@ +pr: 114837 +summary: Add warning headers for ingest pipelines containing special characters +area: Ingest Node +type: bug +issues: [ 104411 ] diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml index a8f7e1e5877c8..cc767dfa56597 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml @@ -214,7 +214,7 @@ "Test rolling up json object arrays": - do: ingest.put_pipeline: - id: "_id" + id: "pipeline-id" body: > { "processors": [ @@ -237,7 +237,7 @@ index: index: test id: "1" - pipeline: "_id" + pipeline: "pipeline-id" body: { values_flat : [], values: [ diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_combine_processors.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_combine_processors.yml index 9a7444c4ffc6c..ef790843b7bfb 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_combine_processors.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/20_combine_processors.yml @@ -2,7 +2,7 @@ "Test with date processor": - do: ingest.put_pipeline: - id: "_id" + id: "pipeline-id" body: > { "processors": [ @@ -44,7 +44,7 @@ index: index: test id: "1" - pipeline: "_id" + pipeline: "pipeline-id" body: { log: "89.160.20.128 - - [08/Sep/2014:02:54:42 +0000] \"GET /presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png HTTP/1.1\" 200 175208 \"http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=\" \"Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36\"" } @@ -71,7 
+71,7 @@ "Test with date processor and ECS-v1": - do: ingest.put_pipeline: - id: "_id" + id: "pipeline-id" body: > { "processors": [ @@ -102,7 +102,7 @@ index: index: test id: "1" - pipeline: "_id" + pipeline: "pipeline-id" body: { log: "89.160.20.128 - - [08/Sep/2014:02:54:42 +0000] \"GET /presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png HTTP/1.1\" 200 175208 \"http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=\" \"Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36\"" } @@ -128,7 +128,7 @@ "Test mutate": - do: ingest.put_pipeline: - id: "_id" + id: "pipeline-id" body: > { "processors": [ @@ -188,7 +188,7 @@ index: index: test id: "1" - pipeline: "_id" + pipeline: "pipeline-id" body: { "age" : 33, "eyeColor" : "brown", diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_deprecation_warnings_on_invalid_names_ingest.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_deprecation_warnings_on_invalid_names_ingest.yml new file mode 100644 index 0000000000000..64f5ccc4609ac --- /dev/null +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_deprecation_warnings_on_invalid_names_ingest.yml @@ -0,0 +1,28 @@ +--- +"Test invalid name warnings": + - requires: + cluster_features: [ "ingest.pipeline_name_special_chars_warning" ] + test_runner_features: [ "warnings" ] + reason: verifying deprecation warnings from 9.0 onwards for invalid pipeline names + + - do: + cluster.health: + wait_for_status: green + + - do: + ingest.put_pipeline: + id: "Invalid*-pipeline:id" + body: > + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field1", + "value": "_value" + } + }] + } + warnings: + - "Invalid pipeline id: Invalid*-pipeline:id" + - match: { acknowledged: true } diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 4314d2e16799a..82504b5840792 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -285,6 +285,7 @@ private static String changeFirstCharacterCase(String str, boolean capitalize) { static final Set INVALID_CHARS = Set.of('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ','); public static final String INVALID_FILENAME_CHARS = INVALID_CHARS.stream() + .sorted() .map(c -> "'" + c + "'") .collect(Collectors.joining(",", "[", "]")); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0f63d2a8dcc1b..99ff44a3cd135 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -46,6 +47,8 @@ import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.TriConsumer; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -55,7 +58,9 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.env.Environment; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.grok.MatcherWatchdog; import org.elasticsearch.index.IndexSettings; @@ -97,6 +102,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.core.UpdateForV10.Owner.DATA_MANAGEMENT; /** * Holder class for several ingest related services. @@ -107,7 +113,10 @@ public class IngestService implements ClusterStateApplier, ReportingService taskQueue; private final ClusterService clusterService; @@ -652,12 +661,24 @@ public IngestMetadata execute(IngestMetadata currentIngestMetadata, Collection ingestInfos, String pipelineId, Map pipelineConfig) throws Exception { if (ingestInfos.isEmpty()) { throw new IllegalStateException("Ingest info is empty"); } + try { + MetadataCreateIndexService.validateIndexOrAliasName( + pipelineId, + (pipelineName, error) -> new IllegalArgumentException( + "Pipeline name [" + pipelineName + "] will be disallowed in a future version for the following reason: " + error + ) + ); + } catch (IllegalArgumentException e) { + deprecationLogger.critical(DeprecationCategory.API, "pipeline_name_special_chars", e.getMessage()); + } + Pipeline pipeline = Pipeline.create(pipelineId, pipelineConfig, processorFactories, scriptService); List exceptions = new ArrayList<>(); for (Processor processor : pipeline.flattenAllProcessors()) { diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 3adaf398624de..d83fdbd5dd46b 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.internal.Client; @@ -48,6 +49,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -424,7 +426,7 @@ public void testDelete() { public void testValidateNoIngestInfo() throws Exception { IngestService ingestService = createWithProcessors(); - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", """ + PutPipelineRequest putRequest = putJsonPipelineRequest("pipeline-id", """ {"processors": [{"set" : {"field": "_field", "value": "_value"}}]}"""); var 
pipelineConfig = XContentHelper.convertToMap(putRequest.getSource(), false, putRequest.getXContentType()).v2(); @@ -965,7 +967,7 @@ public void testGetPipelines() { public void testValidateProcessorTypeOnAllNodes() throws Exception { IngestService ingestService = createWithProcessors(); - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", """ + PutPipelineRequest putRequest = putJsonPipelineRequest("pipeline-id", """ { "processors": [ { @@ -1009,7 +1011,7 @@ public void testValidateConfigurationExceptions() { // ordinary validation issues happen at processor construction time throw newConfigurationException("fail_validation", tag, "no_property_name", "validation failure reason"); })); - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", """ + PutPipelineRequest putRequest = putJsonPipelineRequest("pipeline-id", """ { "processors": [ { @@ -1043,7 +1045,7 @@ public void extraValidation() throws Exception { } }; })); - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", """ + PutPipelineRequest putRequest = putJsonPipelineRequest("pipeline-id", """ { "processors": [ { @@ -1067,6 +1069,32 @@ public void extraValidation() throws Exception { assertEquals("fail_extra_validation", e.getMetadata("es.processor_type").get(0)); } + public void testValidatePipelineName() throws Exception { + IngestService ingestService = createWithProcessors(); + for (Character badChar : List.of('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',')) { + PutPipelineRequest putRequest = new PutPipelineRequest( + TimeValue.timeValueSeconds(10), + AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, + "_id", + new BytesArray(""" + {"description":"test processor","processors":[{"set":{"field":"_field","value":"_value"}}]}"""), + XContentType.JSON + ); + var pipelineConfig = XContentHelper.convertToMap(putRequest.getSource(), false, putRequest.getXContentType()).v2(); + DiscoveryNode node1 = DiscoveryNodeUtils.create("_node_id1", buildNewFakeTransportAddress(), Map.of(), Set.of()); + Map ingestInfos = new HashMap<>(); + ingestInfos.put(node1, new IngestInfo(List.of(new ProcessorInfo("set")))); + final String name = randomAlphaOfLength(5) + badChar + randomAlphaOfLength(5); + ingestService.validatePipeline(ingestInfos, name, pipelineConfig); + assertCriticalWarnings( + "Pipeline name [" + + name + + "] will be disallowed in a future version for the following reason: must not contain the following characters" + + " [' ','\"','*',',','/','<','>','?','\\','|']" + ); + } + } + public void testExecuteIndexPipelineExistsButFailedParsing() { IngestService ingestService = createWithProcessors( Map.of("mock", (factories, tag, description, config) -> new AbstractProcessor("mock", "description") { From 5271b20b64bbf17284d344f4bf1adea42ea78f1e Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Wed, 16 Oct 2024 11:16:50 +0300 Subject: [PATCH 02/31] [Failure store - selector syntax] Replace failureOptions with selector options internally. (#114812) **Introduction** > In order to make adoption of failure stores simpler for all users, we are introducing a new syntactical feature to index expression resolution: The selector. > > Selectors, denoted with a :: followed by a recognized suffix will allow users to specify which component of an index abstraction they would like to operate on within an API call. In this case, an index abstraction is a concrete index, data stream, or alias; Any abstraction that can be resolved to a set of indices/shards. 
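(Concretely, and as an illustration only since the expression parsing lands in the follow-up PR: `my-data-stream::data` would address the stream's backing indices, `my-data-stream::failures` its failure store, and a bare `my-data-stream` would keep today's behaviour through the default `::data` selector.)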
We define a component of an index abstraction to be some searchable unit of the index abstraction. > > To start, we will support two components: data and failures. Concrete indices are their own data components, while the data component for index aliases are all of the indices contained therein. For data streams, the data component corresponds to their backing indices. Data stream aliases mirror this, treating all backing indices of the data streams they correspond to as their data component. > > The failure component is only supported by data streams and data stream aliases. The failure component of these abstractions refer to the data streams' failure stores. Indices and index aliases do not have a failure component. For more details and examples see https://github.com/elastic/elasticsearch/pull/113144. All this work has been cherry picked from there. **Purpose of this PR** This PR is replacing the `FailureStoreOptions` with the `SelectorOptions`, there shouldn't be any perceivable change to the user since we kept the query parameter "failure_store" for now. It will be removed in the next PR which will introduce the parsing of the expressions. _The current PR is just a refactoring and does not and should not change any existing behaviour._ --- .../datastreams/DataStreamsSnapshotsIT.java | 4 +- .../IngestFailureStoreMetricsIT.java | 4 +- .../lifecycle/DataStreamLifecycleService.java | 4 +- .../rest/RestGetDataStreamsAction.java | 2 +- .../DataStreamLifecycleServiceTests.java | 20 +- .../org/elasticsearch/TransportVersions.java | 1 + .../admin/indices/get/GetIndexRequest.java | 4 +- .../indices/rollover/RolloverRequest.java | 6 +- .../rollover/TransportRolloverAction.java | 4 +- .../action/bulk/BulkOperation.java | 2 +- .../action/bulk/TransportBulkAction.java | 2 +- .../datastreams/DataStreamsStatsAction.java | 4 +- .../support/IndexComponentSelector.java | 49 +++ .../action/support/IndicesOptions.java | 311 +++++++++++------- .../indices/RestRolloverIndexAction.java | 2 +- .../indices/get/GetIndexRequestTests.java | 5 +- .../MetadataRolloverServiceTests.java | 8 +- .../rollover/RolloverRequestTests.java | 17 +- .../action/support/IndicesOptionsTests.java | 27 +- .../IndexNameExpressionResolverTests.java | 19 +- .../xpack/core/ilm/RolloverStep.java | 2 +- .../core/ilm/WaitForRolloverReadyStep.java | 2 +- .../ilm/WaitForRolloverReadyStepTests.java | 4 +- 23 files changed, 304 insertions(+), 199 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 638e4d813a79a..212b869c6d933 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -138,9 +138,7 @@ public void setup() throws Exception { // Initialize the failure store. 
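// Reader's key for the recurring rewrite in the hunks below (a sketch; the three
// constants are introduced in IndicesOptions.SelectorOptions later in this patch):
//   includeRegularIndices(true).includeFailureIndices(false) -> SelectorOptions.ONLY_DATA (the default)
//   includeRegularIndices(false).includeFailureIndices(true) -> SelectorOptions.ONLY_FAILURES
//   includeRegularIndices(true).includeFailureIndices(true)  -> SelectorOptions.DATA_AND_FAILURE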
RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(b -> b.includeRegularIndices(false).includeFailureIndices(true)) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() ); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index b5d06dc33e035..679ad5b000c8f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -195,9 +195,7 @@ public void testRejectionFromFailureStore() throws IOException { // Initialize failure store. var rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 878583de4861f..7bbf7137d290e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -946,7 +946,7 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); updateMergePolicySettingsRequest.indicesOptions( IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(true, true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build() ); updateMergePolicySettingsRequest.indices(indexName); @@ -1409,7 +1409,7 @@ static RolloverRequest getDefaultRolloverRequest( if (rolloverFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index 3456f4b679474..b61e38297397d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -43,7 +43,7 @@ public class 
RestGetDataStreamsAction extends BaseRestHandler { IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, "verbose" ), - DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(IndicesOptions.FailureStoreOptions.FAILURE_STORE) : Set.of() + DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of() ) ); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 0d5ce54c44b56..d6bf80798764d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -225,17 +225,11 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat( - rolloverBackingIndexRequest.indicesOptions().failureStoreOptions(), - equalTo(new IndicesOptions.FailureStoreOptions(true, false)) - ); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat( - rolloverFailureIndexRequest.indicesOptions().failureStoreOptions(), - equalTo(new IndicesOptions.FailureStoreOptions(false, true)) - ); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1552,17 +1546,11 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat( - rolloverBackingIndexRequest.indicesOptions().failureStoreOptions(), - equalTo(new IndicesOptions.FailureStoreOptions(true, false)) - ); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat( - rolloverFailureIndexRequest.indicesOptions().failureStoreOptions(), - equalTo(new IndicesOptions.FailureStoreOptions(false, true)) - ); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 
4038d5a224850..f89c5a65693f2 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -245,6 +245,7 @@ static TransportVersion def(int id) { public static final TransportVersion QUERY_RULE_TEST_API = def(8_769_00_0); public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); + public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 7ff7066a15fc2..4c5ee08beb192 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -98,9 +98,7 @@ public GetIndexRequest() { super( DataStream.isFailureStoreFeatureFlagEnabled() ? IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .failureStoreOptions( - IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true) - ) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build() : IndicesOptions.strictExpandOpen() ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 1ef9194b51203..fefc41317591b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -138,8 +138,8 @@ public ActionRequestValidationException validate() { ); } - var failureStoreOptions = indicesOptions.failureStoreOptions(); - if (failureStoreOptions.includeRegularIndices() && failureStoreOptions.includeFailureIndices()) { + var selectors = indicesOptions.selectorOptions().defaultSelectors(); + if (selectors.size() > 1) { validationException = addValidationError( "rollover cannot be applied to both regular and failure indices at the same time", validationException @@ -179,7 +179,7 @@ public IndicesOptions indicesOptions() { * @return true of the rollover request targets the failure store, false otherwise. 
*/ public boolean targetsFailureStore() { - return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.failureStoreOptions().includeFailureIndices(); + return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); } public void setIndicesOptions(IndicesOptions indicesOptions) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index d65a66dcc47fb..c5c874f9bcddf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -150,7 +150,7 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState .matchClosed(request.indicesOptions().expandWildcardsClosed()) .build(), IndicesOptions.GatekeeperOptions.DEFAULT, - request.indicesOptions().failureStoreOptions() + request.indicesOptions().selectorOptions() ); return state.blocks() @@ -247,7 +247,7 @@ protected void masterOperation( IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.builder().matchClosed(true).allowEmptyExpressions(false).build(), IndicesOptions.GatekeeperOptions.DEFAULT, - rolloverRequest.indicesOptions().failureStoreOptions() + rolloverRequest.indicesOptions().selectorOptions() ); IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) .clear() diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index f04d07fb690c4..007f274d7f493 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -212,7 +212,7 @@ private void rollOverFailureStores(Runnable runnable) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(false, true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 61adf41a9a276..a3a73415ec4f6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -418,7 +418,7 @@ private void rollOverDataStreams( if (targetFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(false, true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index fbb084e8cd121..1c30303915c8e 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -61,9 +61,7 @@ public Request() { .allowFailureIndices(true) .build() ) - .failureStoreOptions( - IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true).build() - ) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java new file mode 100644 index 0000000000000..65b48db8f5cf3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.support; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * We define as index components the two different sets of indices a data stream could consist of: + * - DATA: represents the backing indices + * - FAILURES: represents the failure indices + * Note: An index is its own DATA component, but it cannot have a FAILURE component. + */ +public enum IndexComponentSelector { + DATA("data"), + FAILURES("failures"); + + private final String key; + + IndexComponentSelector(String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + private static final Map<String, IndexComponentSelector> REGISTRY; + + static { + Map<String, IndexComponentSelector> registry = new HashMap<>(IndexComponentSelector.values().length); + for (IndexComponentSelector value : IndexComponentSelector.values()) { + registry.put(value.getKey(), value); + } + REGISTRY = Collections.unmodifiableMap(registry); + } + + public static IndexComponentSelector getByKey(String key) { + return REGISTRY.get(key); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index d3ea063247704..22d019f80837d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; @@ -48,24 +47,36 @@ * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action * does not support certain options. - * @param failureStoreOptions, applies to all indices already matched and controls the type of indices that will be returned. Currently, - * there are two types, data stream failure indices (only certain data streams have them) and data stream - * backing indices or stand-alone indices.
+ * @param selectorOptions, applies to all resolved expressions, and it specifies the index component that should be included, if there + * is no index component defined on the expression level. */ public record IndicesOptions( ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, GatekeeperOptions gatekeeperOptions, - FailureStoreOptions failureStoreOptions + SelectorOptions selectorOptions ) implements ToXContentFragment { - public IndicesOptions( - ConcreteTargetOptions concreteTargetOptions, - WildcardOptions wildcardOptions, - GatekeeperOptions gatekeeperOptions - ) { - this(concreteTargetOptions, wildcardOptions, gatekeeperOptions, FailureStoreOptions.DEFAULT); - } + /** + * @deprecated this query param will be replaced by the selector `::` on the expression level + */ + @Deprecated + public static final String FAILURE_STORE_QUERY_PARAM = "failure_store"; + /** + * @deprecated this value will be replaced by the selector `::*` on the expression level + */ + @Deprecated + public static final String INCLUDE_ALL = "include"; + /** + * @deprecated this value will be replaced by the selector `::data` on the expression level + */ + @Deprecated + public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude"; + /** + * @deprecated this value will be replaced by the selector `::failures` on the expression level + */ + @Deprecated + public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; public static IndicesOptions.Builder builder() { return new Builder(); @@ -310,7 +321,7 @@ public static Builder builder(WildcardOptions wildcardOptions) { * - The "allow*" flags, which purpose is to enable actions to define certain conditions that need to apply on the concrete indices * they accept. For example, single-index actions will set allowAliasToMultipleIndices to false, while search will not accept a * closed index etc. These options are not configurable by the end-user. - * - The ignoreThrottled flag, which is a depricared flag that will filter out frozen indices. + * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. * @param allowFailureIndices, allow failure indices in the response, true by default @@ -408,87 +419,53 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { } /** - * Applies to all indices already matched and controls the type of indices that will be returned. There are two types, data stream - * failure indices (only certain data streams have them) and data stream backing indices or stand-alone indices. - * @param includeRegularIndices, when true regular or data stream backing indices will be retrieved. - * @param includeFailureIndices, when true data stream failure indices will be included. + * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. 
*/ - public record FailureStoreOptions(boolean includeRegularIndices, boolean includeFailureIndices) - implements - Writeable, - ToXContentFragment { + public record SelectorOptions(EnumSet<IndexComponentSelector> defaultSelectors) implements Writeable { - public static final String FAILURE_STORE = "failure_store"; - public static final String INCLUDE_ALL = "include"; - public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude"; - public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; - - public static final FailureStoreOptions DEFAULT = new FailureStoreOptions(true, false); - - public static FailureStoreOptions read(StreamInput in) throws IOException { - return new FailureStoreOptions(in.readBoolean(), in.readBoolean()); - } - - public static FailureStoreOptions parseParameters(Object failureStoreValue, FailureStoreOptions defaultOptions) { - if (failureStoreValue == null) { - return defaultOptions; - } - FailureStoreOptions.Builder builder = defaultOptions == null - ? new FailureStoreOptions.Builder() - : new FailureStoreOptions.Builder(defaultOptions); - return switch (failureStoreValue.toString()) { - case INCLUDE_ALL -> builder.includeRegularIndices(true).includeFailureIndices(true).build(); - case INCLUDE_ONLY_REGULAR_INDICES -> builder.includeRegularIndices(true).includeFailureIndices(false).build(); - case INCLUDE_ONLY_FAILURE_INDICES -> builder.includeRegularIndices(false).includeFailureIndices(true).build(); - default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE + " value [" + failureStoreValue + "]"); - }; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.field(FAILURE_STORE, displayValue()); - } + public static final SelectorOptions DATA_AND_FAILURE = new SelectorOptions( + EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES) + ); + public static final SelectorOptions ONLY_DATA = new SelectorOptions(EnumSet.of(IndexComponentSelector.DATA)); + public static final SelectorOptions ONLY_FAILURES = new SelectorOptions(EnumSet.of(IndexComponentSelector.FAILURES)); + /** + * Default instance. Uses
<code>::data</code>
as the default selector if none are present in an index expression. + */ + public static final SelectorOptions DEFAULT = ONLY_DATA; - public String displayValue() { - if (includeRegularIndices && includeFailureIndices) { - return INCLUDE_ALL; - } else if (includeRegularIndices) { - return INCLUDE_ONLY_REGULAR_INDICES; - } - return INCLUDE_ONLY_FAILURE_INDICES; + public static SelectorOptions read(StreamInput in) throws IOException { + return new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(includeRegularIndices); - out.writeBoolean(includeFailureIndices); + out.writeEnumSet(defaultSelectors); } public static class Builder { - private boolean includeRegularIndices; - private boolean includeFailureIndices; + private EnumSet defaultSelectors; public Builder() { this(DEFAULT); } - Builder(FailureStoreOptions options) { - includeRegularIndices = options.includeRegularIndices; - includeFailureIndices = options.includeFailureIndices; + Builder(SelectorOptions options) { + defaultSelectors = EnumSet.copyOf(options.defaultSelectors); } - public Builder includeRegularIndices(boolean includeRegularIndices) { - this.includeRegularIndices = includeRegularIndices; + public Builder setDefaultSelectors(IndexComponentSelector first, IndexComponentSelector... remaining) { + defaultSelectors = EnumSet.of(first, remaining); return this; } - public Builder includeFailureIndices(boolean includeFailureIndices) { - this.includeFailureIndices = includeFailureIndices; + public Builder setDefaultSelectors(EnumSet defaultSelectors) { + this.defaultSelectors = EnumSet.copyOf(defaultSelectors); return this; } - public FailureStoreOptions build() { - return new FailureStoreOptions(includeRegularIndices, includeFailureIndices); + public SelectorOptions build() { + assert defaultSelectors.isEmpty() != true : "Default selectors cannot be an empty set"; + return new SelectorOptions(EnumSet.copyOf(defaultSelectors)); } } @@ -496,8 +473,8 @@ public static Builder builder() { return new Builder(); } - public static Builder builder(FailureStoreOptions failureStoreOptions) { - return new Builder(failureStoreOptions); + public static Builder builder(SelectorOptions selectorOptions) { + return new Builder(selectorOptions); } } @@ -550,7 +527,7 @@ private enum Option { ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, GatekeeperOptions.DEFAULT, - FailureStoreOptions.DEFAULT + SelectorOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -570,7 +547,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -589,7 +566,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(SelectorOptions.DATA_AND_FAILURE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -608,7 +585,25 @@ private enum Option { .allowFailureIndices(true) 
.ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) + .build(); + public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() + .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) + .wildcardOptions( + WildcardOptions.builder() + .matchOpen(true) + .matchClosed(false) + .includeHidden(false) + .allowEmptyExpressions(true) + .resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(false) + .ignoreThrottled(false) + ) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -627,7 +622,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -646,7 +641,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -660,7 +655,20 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) + .build(); + public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() + .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) + .wildcardOptions( + WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(false) + .ignoreThrottled(false) + ) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -679,7 +687,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -693,7 +701,20 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) + .build(); + public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() + 
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(true) + .allowClosedIndices(true) + .allowFailureIndices(false) + .ignoreThrottled(false) + ) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -712,7 +733,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(SelectorOptions.DATA_AND_FAILURE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -726,7 +747,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(SelectorOptions.DATA_AND_FAILURE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -745,7 +766,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(SelectorOptions.DATA_AND_FAILURE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -764,7 +785,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -783,7 +804,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -802,7 +823,7 @@ private enum Option { .allowFailureIndices(true) .allowAliasToMultipleIndices(true) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -821,7 +842,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); public 
static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -840,7 +861,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .failureStoreOptions(FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(false)) + .selectorOptions(SelectorOptions.ONLY_DATA) .build(); /** @@ -929,14 +950,14 @@ public boolean ignoreThrottled() { * @return whether regular indices (stand-alone or backing indices) will be included in the response */ public boolean includeRegularIndices() { - return failureStoreOptions().includeRegularIndices(); + return selectorOptions().defaultSelectors().contains(IndexComponentSelector.DATA); } /** * @return whether failure indices (only supported by certain data streams) will be included in the response */ public boolean includeFailureIndices() { - return failureStoreOptions().includeFailureIndices(); + return selectorOptions().defaultSelectors().contains(IndexComponentSelector.FAILURES); } public void writeIndicesOptions(StreamOutput out) throws IOException { @@ -977,8 +998,13 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - failureStoreOptions.writeTo(out); + if (out.getTransportVersion() + .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + out.writeBoolean(includeRegularIndices()); + out.writeBoolean(includeFailureIndices()); + } + if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + out.writeEnumSet(selectorOptions.defaultSelectors); } } @@ -999,16 +1025,30 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti .allowFailureIndices(allowFailureIndices) .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); - FailureStoreOptions failureStoreOptions = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - ? FailureStoreOptions.read(in) - : FailureStoreOptions.DEFAULT; + SelectorOptions selectorOptions = SelectorOptions.DEFAULT; + if (in.getTransportVersion() + .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + // Reading from an older node, which will be sending two booleans that we must read out and ignore. + var includeData = in.readBoolean(); + var includeFailures = in.readBoolean(); + if (includeData && includeFailures) { + selectorOptions = SelectorOptions.DATA_AND_FAILURE; + } else if (includeData) { + selectorOptions = SelectorOptions.ONLY_DATA; + } else { + selectorOptions = SelectorOptions.ONLY_FAILURES; + } + } + if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + selectorOptions = new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); + } return new IndicesOptions( options.contains(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS) ? 
ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcardOptions, gatekeeperOptions, - failureStoreOptions + selectorOptions ); } @@ -1016,7 +1056,7 @@ public static class Builder { private ConcreteTargetOptions concreteTargetOptions; private WildcardOptions wildcardOptions; private GatekeeperOptions gatekeeperOptions; - private FailureStoreOptions failureStoreOptions; + private SelectorOptions selectorOptions; Builder() { this(DEFAULT); @@ -1026,7 +1066,7 @@ public static class Builder { concreteTargetOptions = indicesOptions.concreteTargetOptions; wildcardOptions = indicesOptions.wildcardOptions; gatekeeperOptions = indicesOptions.gatekeeperOptions; - failureStoreOptions = indicesOptions.failureStoreOptions; + selectorOptions = indicesOptions.selectorOptions; } public Builder concreteTargetOptions(ConcreteTargetOptions concreteTargetOptions) { @@ -1054,25 +1094,18 @@ public Builder gatekeeperOptions(GatekeeperOptions.Builder generalOptions) { return this; } - public Builder failureStoreOptions(FailureStoreOptions failureStoreOptions) { - this.failureStoreOptions = failureStoreOptions; + public Builder selectorOptions(SelectorOptions selectorOptions) { + this.selectorOptions = selectorOptions; return this; } - public Builder failureStoreOptions(FailureStoreOptions.Builder failureStoreOptions) { - this.failureStoreOptions = failureStoreOptions.build(); - return this; - } - - public Builder failureStoreOptions(Consumer failureStoreOptionsConfig) { - FailureStoreOptions.Builder failureStoreOptionsBuilder = FailureStoreOptions.builder(failureStoreOptions); - failureStoreOptionsConfig.accept(failureStoreOptionsBuilder); - this.failureStoreOptions = failureStoreOptionsBuilder.build(); + public Builder selectorOptions(SelectorOptions.Builder selectorOptions) { + this.selectorOptions = selectorOptions.build(); return this; } public IndicesOptions build() { - return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); + return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); } } @@ -1171,11 +1204,12 @@ public static IndicesOptions fromOptions( .allowClosedIndices(forbidClosedIndices == false) .ignoreThrottled(ignoreThrottled) .build(); + final SelectorOptions selectorOptions = SelectorOptions.DEFAULT; return new IndicesOptions( ignoreUnavailable ? ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS : ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, wildcards, gatekeeperOptions, - FailureStoreOptions.DEFAULT + selectorOptions ); } @@ -1189,9 +1223,7 @@ public static IndicesOptions fromRequest(RestRequest request, IndicesOptions def request.param(ConcreteTargetOptions.IGNORE_UNAVAILABLE), request.param(WildcardOptions.ALLOW_NO_INDICES), request.param(GatekeeperOptions.IGNORE_THROTTLED), - DataStream.isFailureStoreFeatureFlagEnabled() - ? request.param(FailureStoreOptions.FAILURE_STORE) - : FailureStoreOptions.INCLUDE_ONLY_REGULAR_INDICES, + DataStream.isFailureStoreFeatureFlagEnabled() ? request.param(FAILURE_STORE_QUERY_PARAM) : INCLUDE_ONLY_REGULAR_INDICES, defaultSettings ); } @@ -1207,7 +1239,7 @@ public static IndicesOptions fromMap(Map map, IndicesOptions def map.containsKey(GatekeeperOptions.IGNORE_THROTTLED) ? map.get(GatekeeperOptions.IGNORE_THROTTLED) : map.get("ignoreThrottled"), - map.containsKey(FailureStoreOptions.FAILURE_STORE) ? 
map.get(FailureStoreOptions.FAILURE_STORE) : map.get("failureStore"), + map.containsKey(FAILURE_STORE_QUERY_PARAM) ? map.get(FAILURE_STORE_QUERY_PARAM) : map.get("failureStore"), defaultSettings ); } @@ -1235,7 +1267,7 @@ public static boolean isIndicesOptions(String name) { || "ignoreThrottled".equals(name) || WildcardOptions.ALLOW_NO_INDICES.equals(name) || "allowNoIndices".equals(name) - || (DataStream.isFailureStoreFeatureFlagEnabled() && FailureStoreOptions.FAILURE_STORE.equals(name)) + || (DataStream.isFailureStoreFeatureFlagEnabled() && FAILURE_STORE_QUERY_PARAM.equals(name)) || (DataStream.isFailureStoreFeatureFlagEnabled() && "failureStore".equals(name)); } @@ -1267,26 +1299,51 @@ public static IndicesOptions fromParameters( WildcardOptions wildcards = WildcardOptions.parseParameters(wildcardsString, allowNoIndicesString, defaultSettings.wildcardOptions); GatekeeperOptions gatekeeperOptions = GatekeeperOptions.parseParameter(ignoreThrottled, defaultSettings.gatekeeperOptions); - FailureStoreOptions failureStoreOptions = DataStream.isFailureStoreFeatureFlagEnabled() - ? FailureStoreOptions.parseParameters(failureStoreString, defaultSettings.failureStoreOptions) - : FailureStoreOptions.DEFAULT; + SelectorOptions selectorOptions = DataStream.isFailureStoreFeatureFlagEnabled() + ? parseFailureStoreParameters(failureStoreString, defaultSettings.selectorOptions) + : SelectorOptions.DEFAULT; // note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.fromParameter(ignoreUnavailableString, defaultSettings.concreteTargetOptions)) .wildcardOptions(wildcards) .gatekeeperOptions(gatekeeperOptions) - .failureStoreOptions(failureStoreOptions) + .selectorOptions(selectorOptions) .build(); } + /** + * @deprecated This method parses the query parameter failure_store. 
This is a deprecated param, and it will be replaced + * by the selector suffix, for example `my-data-stream::data` or `my-data-stream::failures` + */ + @Deprecated + private static SelectorOptions parseFailureStoreParameters(Object failureStoreValue, SelectorOptions defaultOptions) { + if (failureStoreValue == null) { + return defaultOptions; + } + return switch (failureStoreValue.toString()) { + case INCLUDE_ALL -> SelectorOptions.DATA_AND_FAILURE; + case INCLUDE_ONLY_REGULAR_INDICES -> SelectorOptions.ONLY_DATA; + case INCLUDE_ONLY_FAILURE_INDICES -> SelectorOptions.ONLY_FAILURES; + default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE_QUERY_PARAM + " value [" + failureStoreValue + "]"); + }; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { concreteTargetOptions.toXContent(builder, params); wildcardOptions.toXContent(builder, params); gatekeeperOptions.toXContent(builder, params); if (DataStream.isFailureStoreFeatureFlagEnabled()) { - failureStoreOptions.toXContent(builder, params); + String displayValue; + if (SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions())) { + displayValue = INCLUDE_ALL; + } else if (SelectorOptions.ONLY_DATA.equals(selectorOptions())) { + displayValue = INCLUDE_ONLY_REGULAR_INDICES; + } else { + displayValue = INCLUDE_ONLY_FAILURE_INDICES; + } + builder.field(FAILURE_STORE_QUERY_PARAM, displayValue); } return builder; } @@ -1295,7 +1352,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par private static final ParseField IGNORE_UNAVAILABLE_FIELD = new ParseField(ConcreteTargetOptions.IGNORE_UNAVAILABLE); private static final ParseField IGNORE_THROTTLED_FIELD = new ParseField(GatekeeperOptions.IGNORE_THROTTLED).withAllDeprecated(); private static final ParseField ALLOW_NO_INDICES_FIELD = new ParseField(WildcardOptions.ALLOW_NO_INDICES); - private static final ParseField FAILURE_STORE_FIELD = new ParseField(FailureStoreOptions.FAILURE_STORE); + private static final ParseField FAILURE_STORE_FIELD = new ParseField(FAILURE_STORE_QUERY_PARAM); public static IndicesOptions fromXContent(XContentParser parser) throws IOException { return fromXContent(parser, null); } @@ -1306,7 +1363,7 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic WildcardOptions.Builder wildcards = defaults == null ? null : WildcardOptions.builder(defaults.wildcardOptions()); GatekeeperOptions.Builder generalOptions = GatekeeperOptions.builder() .ignoreThrottled(defaults != null && defaults.gatekeeperOptions().ignoreThrottled()); - FailureStoreOptions failureStoreOptions = defaults == null ? FailureStoreOptions.DEFAULT : defaults.failureStoreOptions(); + SelectorOptions selectorOptions = defaults == null ? SelectorOptions.DEFAULT : defaults.selectorOptions(); Boolean allowNoIndices = defaults == null ? null : defaults.allowNoIndices(); Boolean ignoreUnavailable = defaults == null ? null : defaults.ignoreUnavailable(); Token token = parser.currentToken() == Token.START_OBJECT ?
parser.currentToken() : parser.nextToken(); @@ -1358,7 +1415,7 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic generalOptions.ignoreThrottled(parser.booleanValue()); } else if (DataStream.isFailureStoreFeatureFlagEnabled() && FAILURE_STORE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - failureStoreOptions = FailureStoreOptions.parseParameters(parser.text(), failureStoreOptions); + selectorOptions = parseFailureStoreParameters(parser.text(), selectorOptions); } else { throw new ElasticsearchParseException( "could not read indices options. Unexpected index option [" + currentFieldName + "]" @@ -1389,7 +1446,7 @@ public static IndicesOptions fromXContent(XContentParser parser, @Nullable Indic .concreteTargetOptions(new ConcreteTargetOptions(ignoreUnavailable)) .wildcardOptions(wildcards) .gatekeeperOptions(generalOptions) - .failureStoreOptions(failureStoreOptions) + .selectorOptions(selectorOptions) .build(); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index ebae4a36c6d3d..942844dd1dd16 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -69,7 +69,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (failureStore) { rolloverIndexRequest.setIndicesOptions( IndicesOptions.builder(rolloverIndexRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(false, true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index 4cfd9b66306ad..a75b50e3a88f4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -82,9 +82,6 @@ public void testIndicesOptions() { ); assertThat(getIndexRequest.indicesOptions().wildcardOptions(), equalTo(IndicesOptions.strictExpandOpen().wildcardOptions())); assertThat(getIndexRequest.indicesOptions().gatekeeperOptions(), equalTo(IndicesOptions.strictExpandOpen().gatekeeperOptions())); - assertThat( - getIndexRequest.indicesOptions().failureStoreOptions(), - equalTo(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true).build()) - ); + assertThat(getIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA_AND_FAILURE)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 848e46f2b3366..b9fdb13958632 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import 
org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.IndicesOptions.FailureStoreOptions; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; @@ -747,14 +747,14 @@ public void testValidation() throws Exception { final String defaultRolloverIndexName; final boolean useDataStream = randomBoolean(); final Metadata.Builder builder = Metadata.builder(); - var failureStoreOptions = FailureStoreOptions.DEFAULT; + var defaultSelectorOptions = IndicesOptions.SelectorOptions.DEFAULT; if (useDataStream) { DataStream dataStream = DataStreamTestHelper.randomInstance() // ensure no replicate data stream .promoteDataStream(); rolloverTarget = dataStream.getName(); if (dataStream.isFailureStoreEnabled() && randomBoolean()) { - failureStoreOptions = new FailureStoreOptions(false, true); + defaultSelectorOptions = IndicesOptions.SelectorOptions.ONLY_FAILURES; sourceIndexName = dataStream.getFailureStoreWriteIndex().getName(); defaultRolloverIndexName = DataStream.getDefaultFailureStoreName( dataStream.getName(), @@ -815,7 +815,7 @@ public void testValidation() throws Exception { true, null, null, - failureStoreOptions.includeFailureIndices() + IndicesOptions.SelectorOptions.ONLY_FAILURES.equals(defaultSelectorOptions) ); newIndexName = newIndexName == null ? defaultRolloverIndexName : newIndexName; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 67bfa0e37dcf5..08e92c833dc85 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -33,7 +34,9 @@ import org.junit.Before; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; +import java.util.Set; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -176,7 +179,12 @@ public void testSerialize() throws Exception { originalRequest.lazy(randomBoolean()); originalRequest.setIndicesOptions( IndicesOptions.builder(originalRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(randomBoolean(), randomBoolean())) + .selectorOptions( + IndicesOptions.SelectorOptions.builder() + .setDefaultSelectors( + EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) + ) + ) .build() ); @@ -188,10 +196,7 @@ public void testSerialize() throws Exception { assertThat(cloneRequest.getNewIndexName(), equalTo(originalRequest.getNewIndexName())); assertThat(cloneRequest.getRolloverTarget(), equalTo(originalRequest.getRolloverTarget())); assertThat(cloneRequest.isLazy(), equalTo(originalRequest.isLazy())); - assertThat( - cloneRequest.indicesOptions().failureStoreOptions(), - equalTo(originalRequest.indicesOptions().failureStoreOptions()) - ); + 
assertThat(cloneRequest.indicesOptions().selectorOptions(), equalTo(originalRequest.indicesOptions().selectorOptions())); for (Map.Entry> entry : cloneRequest.getConditions().getConditions().entrySet()) { Condition condition = originalRequest.getConditions().getConditions().get(entry.getKey()); // here we compare the string representation as there is some information loss when serializing @@ -261,7 +266,7 @@ public void testValidation() { RolloverRequest rolloverRequest = new RolloverRequest("alias-index", "new-index-name"); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(new IndicesOptions.FailureStoreOptions(true, true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build() ); ActionRequestValidationException validationException = rolloverRequest.validate(); diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 437899a1fae1c..1784ab863bf1c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.action.support; import org.elasticsearch.action.support.IndicesOptions.ConcreteTargetOptions; -import org.elasticsearch.action.support.IndicesOptions.FailureStoreOptions; import org.elasticsearch.action.support.IndicesOptions.GatekeeperOptions; import org.elasticsearch.action.support.IndicesOptions.WildcardOptions; import org.elasticsearch.common.bytes.BytesReference; @@ -31,9 +30,11 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import static org.hamcrest.CoreMatchers.equalTo; @@ -57,8 +58,12 @@ public void testSerialization() throws Exception { .allowAliasToMultipleIndices(randomBoolean()) .allowClosedIndices(randomBoolean()) ) - .failureStoreOptions( - FailureStoreOptions.builder().includeRegularIndices(randomBoolean()).includeFailureIndices(randomBoolean()) + .selectorOptions( + IndicesOptions.SelectorOptions.builder() + .setDefaultSelectors( + EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) + ) + .build() ) .build(); @@ -345,9 +350,11 @@ public void testToXContent() throws IOException { randomBoolean() ); GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - FailureStoreOptions failureStoreOptions = new IndicesOptions.FailureStoreOptions(randomBoolean(), randomBoolean()); + IndicesOptions.SelectorOptions selectorOptions = new IndicesOptions.SelectorOptions( + EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) + ); - IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, failureStoreOptions); + IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); XContentType type = randomFrom(XContentType.values()); BytesReference xContentBytes = toXContentBytes(indicesOptions, type); @@ -362,7 +369,15 @@ public void testToXContent() throws IOException { assertThat(map.get("ignore_unavailable"), equalTo(concreteTargetOptions.allowUnavailableTargets())); 
assertThat(map.get("allow_no_indices"), equalTo(wildcardOptions.allowEmptyExpressions())); assertThat(map.get("ignore_throttled"), equalTo(gatekeeperOptions.ignoreThrottled())); - assertThat(map.get("failure_store"), equalTo(failureStoreOptions.displayValue())); + String displayValue; + if (IndicesOptions.SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions)) { + displayValue = "include"; + } else if (IndicesOptions.SelectorOptions.ONLY_DATA.equals(selectorOptions)) { + displayValue = "exclude"; + } else { + displayValue = "only"; + } + assertThat(map.get("failure_store"), equalTo(displayValue)); } public void testFromXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index bddbe259e0ef3..da19bd68e288a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -2319,7 +2319,8 @@ public void testIgnoreThrottled() { new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.DEFAULT, - IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).build() + IndicesOptions.GatekeeperOptions.builder().ignoreThrottled(true).build(), + IndicesOptions.SelectorOptions.DEFAULT ), "ind*", "test-index" @@ -2757,7 +2758,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(4)); @@ -2771,7 +2772,7 @@ public void testDataStreamsWithFailureStore() { // We expect that they will be skipped { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) .build(); @@ -2785,7 +2786,7 @@ public void testDataStreamsWithFailureStore() { // We expect an error { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .build(); FailureIndexNotSupportedException failureIndexNotSupportedException = expectThrows( @@ -2801,7 +2802,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - 
.failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(2)); @@ -2828,7 +2829,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(5)); @@ -2848,7 +2849,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(2)); @@ -2881,7 +2882,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(true).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(5)); @@ -2901,7 +2902,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .failureStoreOptions(IndicesOptions.FailureStoreOptions.builder().includeRegularIndices(false).includeFailureIndices(true)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(2)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java index 3e6c00eeadba4..d648dd1c7edf8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java @@ -127,7 +127,7 @@ public void performAction( if (targetFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java index 7b751994222b1..67f65481ef63e 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java @@ -248,7 +248,7 @@ RolloverRequest createRolloverRequest( if (targetFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) .build() ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index 15958e9396d81..afb17644303bb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -308,8 +308,8 @@ public void onFailure(Exception e) { verify(indicesClient, Mockito.only()).rolloverIndex(requestCaptor.capture(), Mockito.any()); RolloverRequest request = requestCaptor.getValue(); - assertThat(request.indicesOptions().failureStoreOptions().includeFailureIndices(), equalTo(failureStoreIndex)); - assertThat(request.indicesOptions().failureStoreOptions().includeRegularIndices(), not(equalTo(failureStoreIndex))); + assertThat(request.indicesOptions().includeFailureIndices(), equalTo(failureStoreIndex)); + assertThat(request.indicesOptions().includeRegularIndices(), not(equalTo(failureStoreIndex))); } public void testSkipRolloverIfDataStreamIsAlreadyRolledOver() { From 64f2c42a76ac640f5fcf795d90a5b6c006701e94 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 16 Oct 2024 20:07:28 +1100 Subject: [PATCH 03/31] Mute org.elasticsearch.packaging.test.EnrollmentProcessTests test20DockerAutoFormCluster #114885 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 0b24bac278fa6..6817011d399b2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -388,6 +388,9 @@ tests: - class: org.elasticsearch.xpack.enrich.EnrichIT method: testDeleteIsCaseSensitive issue: https://github.com/elastic/elasticsearch/issues/114840 +- class: org.elasticsearch.packaging.test.EnrollmentProcessTests + method: test20DockerAutoFormCluster + issue: https://github.com/elastic/elasticsearch/issues/114885 # Examples: # From ae452becc7f65a677fbd01a2485176b9dbb2ddd4 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Wed, 16 Oct 2024 11:08:17 +0200 Subject: [PATCH 04/31] Document _cat/indices behavior when encountering source only indices (#114884) Closes https://github.com/elastic/elasticsearch/issues/114546 --- docs/reference/cat/indices.asciidoc | 8 ++++++-- .../snapshot-restore/repository-source-only.asciidoc | 3 +++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index cf1cc9f825cb2..b8dda01c2eae0 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -6,8 +6,8 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or {kib} -console. They are _not_ intended for use by applications. For application +cat APIs are only intended for human consumption using the command line or {kib} +console. 
They are _not_ intended for use by applications. For application consumption, use the <>. ==== @@ -50,6 +50,10 @@ indexing and search. As a result, all document counts include hidden To get an accurate count of {es} documents, use the <> or <> APIs. +Note that information such as document count, deleted document count and store size is not shown for +indices restored from <> since these indices +do not contain the relevant data structures from which to retrieve this information. + [[cat-indices-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/snapshot-restore/repository-source-only.asciidoc b/docs/reference/snapshot-restore/repository-source-only.asciidoc index 04e53c42aff9d..3c11d6ca6e59c 100644 --- a/docs/reference/snapshot-restore/repository-source-only.asciidoc +++ b/docs/reference/snapshot-restore/repository-source-only.asciidoc @@ -27,6 +27,9 @@ As a result, indices adopting synthetic source cannot be restored. When you rest * The mapping of the restored index is empty, but the original mapping is available from the type's top-level `meta` element. + * Information such as document count, deleted document count and store size is not available for such indices + since these indices do not contain the relevant data structures from which to retrieve this information. Therefore, + this information is not shown for such indices in APIs such as the <>. ================================================== Before registering a source-only repository, use {kib} or the From 15c1051fb61fdaf2684694f5f82417031fe973f7 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 16 Oct 2024 10:35:22 +0100 Subject: [PATCH 05/31] Inline `MockTransportService#getLocalDiscoNode()` (#114883) This method just delegates to `getLocalNode()`, so we may as well call the more widely-used method with the shorter name directly.
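For reference, the method being inlined was a trivial delegation; the sketch below reproduces the removed code from the MockTransportService hunk in this patch:

    // Trivial delegation, removed by this change; callers use getLocalNode() directly.
    public DiscoveryNode getLocalDiscoNode() {
        return this.getLocalNode();
    }

Every call site can therefore substitute `getLocalNode()` with no change in behaviour.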
--- .../netty4/SimpleNetty4TransportTests.java | 4 +- ...rossClusterSearchUnavailableClusterIT.java | 4 +- .../search/TransportSearchActionTests.java | 2 +- .../tasks/BanFailureLoggingTests.java | 6 +- .../RemoteClusterAwareClientTests.java | 14 ++-- .../transport/RemoteClusterClientTests.java | 6 +- .../RemoteClusterConnectionTests.java | 22 ++--- .../transport/RemoteClusterServiceTests.java | 84 +++++++++---------- .../transport/TransportActionProxyTests.java | 8 +- .../AbstractIndexRecoveryIntegTestCase.java | 8 +- .../test/transport/MockTransportService.java | 5 -- .../AbstractSimpleTransportTestCase.java | 16 ++-- .../exchange/ExchangeServiceTests.java | 2 +- .../enrich/EnrichPolicyResolverTests.java | 2 +- ...ty4ServerTransportAuthenticationTests.java | 2 +- 15 files changed, 90 insertions(+), 95 deletions(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index b55f4eccafca8..cad839bed9555 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -103,7 +103,7 @@ public void testDefaultKeepAliveSettings() throws IOException { MockTransportService serviceD = buildService("TS_D", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY) ) { - try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalDiscoNode(), TestProfiles.LIGHT_PROFILE)) { + try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalNode(), TestProfiles.LIGHT_PROFILE)) { assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); Transport.Connection conn = ((StubbableTransport.WrappedConnection) connection).getConnection(); assertThat(conn, instanceOf(TcpTransport.NodeChannels.class)); @@ -147,7 +147,7 @@ public void testTransportProfile() { MockTransportService serviceD = buildService("TS_D", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY) ) { - try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalDiscoNode(), connectionProfile)) { + try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalNode(), connectionProfile)) { assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); Transport.Connection conn = ((StubbableTransport.WrappedConnection) connection).getConnection(); assertThat(conn, instanceOf(TcpTransport.NodeChannels.class)); diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 7b42292848395..780f3994ce627 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -154,7 +154,7 @@ public void testSearchSkipUnavailable() throws IOException { threadPool ) ) { - DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransport.getLocalNode(); updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); @@ 
-307,7 +307,7 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { threadPool ) ) { - DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransport.getLocalNode(); { // check that skip_unavailable alone cannot be set diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 06434a0c90518..70682cfd41d82 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -486,7 +486,7 @@ private MockTransportService[] startTransport( threadPool ); mockTransportServices[i] = remoteSeedTransport; - DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); + DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalNode(); knownNodes.add(remoteSeedNode); nodes[i] = remoteSeedNode; settingsBuilder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); diff --git a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java index 78d76476d06fc..e5cdecd25ef34 100644 --- a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java @@ -157,13 +157,13 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, parentTransportService.addSendBehavior(sendRequestBehavior); - AbstractSimpleTransportTestCase.connectToNode(parentTransportService, childTransportService.getLocalDiscoNode()); + AbstractSimpleTransportTestCase.connectToNode(parentTransportService, childTransportService.getLocalNode()); final CancellableTask parentTask = (CancellableTask) parentTransportService.getTaskManager() .register("transport", "internal:testAction", new ParentRequest()); parentTransportService.sendChildRequest( - childTransportService.getLocalDiscoNode(), + childTransportService.getLocalNode(), "internal:testAction[c]", new EmptyRequest(), parentTask, @@ -172,7 +172,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, ); try (MockLog mockLog = MockLog.capture(TaskCancellationService.class)) { - for (MockLog.LoggingExpectation expectation : expectations.apply(childTransportService.getLocalDiscoNode())) { + for (MockLog.LoggingExpectation expectation : expectations.apply(childTransportService.getLocalNode())) { mockLog.addExpectation(expectation); } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java index 43dce7d406ba2..d7cf38828b7ba 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -89,7 +89,7 @@ public void testRemoteTaskCancellationOnFailedResponse() throws Exception { ) { remoteTransport.getTaskManager().setTaskCancellationService(new TaskCancellationService(remoteTransport)); Settings.Builder builder = Settings.builder(); - builder.putList("cluster.remote.cluster1.seeds", remoteTransport.getLocalDiscoNode().getAddress().toString()); + builder.putList("cluster.remote.cluster1.seeds", remoteTransport.getLocalNode().getAddress().toString()); try ( 
MockTransportService localService = MockTransportService.createNewService( builder.build(), @@ -163,11 +163,11 @@ public void testSearchShards() throws Exception { MockTransportService seedTransport = startTransport("seed_node", knownNodes); MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes) ) { - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); + knownNodes.add(seedTransport.getLocalNode()); + knownNodes.add(discoverableTransport.getLocalNode()); Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); - builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalNode().getAddress().toString()); try ( MockTransportService service = MockTransportService.createNewService( builder.build(), @@ -216,11 +216,11 @@ public void testSearchShardsThreadContextHeader() { MockTransportService seedTransport = startTransport("seed_node", knownNodes); MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes) ) { - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); + knownNodes.add(seedTransport.getLocalNode()); + knownNodes.add(discoverableTransport.getLocalNode()); Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); - builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalNode().getAddress().toString()); try ( MockTransportService service = MockTransportService.createNewService( builder.build(), diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index ff0742c89bba9..0efaef7015649 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -73,7 +73,7 @@ public void testConnectAndExecuteRequest() throws Exception { remoteSettings ) ) { - DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransport.getLocalNode(); Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) @@ -152,7 +152,7 @@ public void testEnsureWeReconnect() throws Exception { remoteSettings ) ) { - DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransport.getLocalNode(); Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) @@ -251,7 +251,7 @@ public void testQuicklySkipUnavailableClusters() throws Exception { remoteSettings ) ) { - DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransport.getLocalNode(); Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 7a259cf3100f0..21346bb93ef8e 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -311,11 +311,11 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt TransportVersion.current() ) ) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - DiscoveryNode seedNode1 = seedTransport1.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); - knownNodes.add(seedTransport1.getLocalDiscoNode()); + DiscoveryNode seedNode = seedTransport.getLocalNode(); + DiscoveryNode seedNode1 = seedTransport1.getLocalNode(); + knownNodes.add(seedTransport.getLocalNode()); + knownNodes.add(discoverableTransport.getLocalNode()); + knownNodes.add(seedTransport1.getLocalNode()); Collections.shuffle(knownNodes, random()); List seedNodes = addresses(seedNode1, seedNode); Collections.shuffle(seedNodes, random()); @@ -447,9 +447,9 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep seedTransportSettings ) ) { - DiscoveryNode node1 = transport1.getLocalDiscoNode(); - DiscoveryNode node2 = transport3.getLocalDiscoNode(); - DiscoveryNode node3 = transport2.getLocalDiscoNode(); + DiscoveryNode node1 = transport1.getLocalNode(); + DiscoveryNode node2 = transport3.getLocalNode(); + DiscoveryNode node3 = transport2.getLocalNode(); if (hasClusterCredentials) { node1 = node1.withTransportAddress(transport1.boundRemoteAccessAddress().publishAddress()); node2 = node2.withTransportAddress(transport3.boundRemoteAccessAddress().publishAddress()); @@ -645,7 +645,7 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception seedTransportSettings ) ) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + DiscoveryNode seedNode = seedTransport.getLocalNode(); if (hasClusterCredentials) { seedNode = seedNode.withTransportAddress(seedTransport.boundRemoteAccessAddress().publishAddress()); } @@ -725,8 +725,8 @@ public void testNoChannelsExceptREG() throws Exception { TransportVersion.current() ) ) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); + DiscoveryNode seedNode = seedTransport.getLocalNode(); + knownNodes.add(seedTransport.getLocalNode()); try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 11243ba088f8f..3633128c45bfa 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -141,10 +141,10 @@ public void testGroupClusterIndices() throws IOException { TransportVersion.current() ) ) { - DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); - DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode(); - knownNodes.add(cluster1Transport.getLocalDiscoNode()); - knownNodes.add(cluster2Transport.getLocalDiscoNode()); + DiscoveryNode cluster1Seed = cluster1Transport.getLocalNode(); + DiscoveryNode cluster2Seed = cluster2Transport.getLocalNode(); + knownNodes.add(cluster1Transport.getLocalNode()); + knownNodes.add(cluster2Transport.getLocalNode()); Collections.shuffle(knownNodes, random()); try ( @@ -343,10 +343,10 @@ public void 
testGroupIndices() throws IOException { TransportVersion.current() ) ) { - DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); - DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode(); - knownNodes.add(cluster1Transport.getLocalDiscoNode()); - knownNodes.add(cluster2Transport.getLocalDiscoNode()); + DiscoveryNode cluster1Seed = cluster1Transport.getLocalNode(); + DiscoveryNode cluster2Seed = cluster2Transport.getLocalNode(); + knownNodes.add(cluster1Transport.getLocalNode()); + knownNodes.add(cluster2Transport.getLocalNode()); Collections.shuffle(knownNodes, random()); try ( @@ -453,10 +453,10 @@ public void testIncrementallyAddClusters() throws IOException { TransportVersion.current() ) ) { - DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); - DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode(); - knownNodes.add(cluster1Transport.getLocalDiscoNode()); - knownNodes.add(cluster2Transport.getLocalDiscoNode()); + DiscoveryNode cluster1Seed = cluster1Transport.getLocalNode(); + DiscoveryNode cluster2Seed = cluster2Transport.getLocalNode(); + knownNodes.add(cluster1Transport.getLocalNode()); + knownNodes.add(cluster2Transport.getLocalNode()); Collections.shuffle(knownNodes, random()); try ( @@ -526,8 +526,8 @@ public void testDefaultPingSchedule() throws IOException { TransportVersion.current() ) ) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); + DiscoveryNode seedNode = seedTransport.getLocalNode(); + knownNodes.add(seedTransport.getLocalNode()); TimeValue pingSchedule; Settings.Builder settingsBuilder = Settings.builder(); settingsBuilder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString()); @@ -582,10 +582,10 @@ public void testCustomPingSchedule() throws IOException { TransportVersion.current() ) ) { - DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); - DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode(); - knownNodes.add(cluster1Transport.getLocalDiscoNode()); - knownNodes.add(cluster2Transport.getLocalDiscoNode()); + DiscoveryNode cluster1Seed = cluster1Transport.getLocalNode(); + DiscoveryNode cluster2Seed = cluster2Transport.getLocalNode(); + knownNodes.add(cluster1Transport.getLocalNode()); + knownNodes.add(cluster2Transport.getLocalNode()); Collections.shuffle(knownNodes, random()); Settings.Builder settingsBuilder = Settings.builder(); if (randomBoolean()) { @@ -635,8 +635,8 @@ public void testChangeSettings() throws Exception { TransportVersion.current() ) ) { - DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); - knownNodes.add(cluster1Transport.getLocalDiscoNode()); + DiscoveryNode cluster1Seed = cluster1Transport.getLocalNode(); + knownNodes.add(cluster1Transport.getLocalNode()); Collections.shuffle(knownNodes, random()); try ( @@ -716,10 +716,10 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { gateway ) ) { - final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); - final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); - final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode(); - final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode(); + final DiscoveryNode c1N1Node = c1N1.getLocalNode(); + final DiscoveryNode c1N2Node = c1N2.getLocalNode(); + final DiscoveryNode c2N1Node = c2N1.getLocalNode(); + final DiscoveryNode c2N2Node = c2N2.getLocalNode(); knownNodes.add(c1N1Node); knownNodes.add(c1N2Node); knownNodes.add(c2N1Node); @@ -809,10 
+809,10 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { data ) ) { - final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); - final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); - final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode(); - final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode(); + final DiscoveryNode c1N1Node = c1N1.getLocalNode(); + final DiscoveryNode c1N2Node = c1N2.getLocalNode(); + final DiscoveryNode c2N1Node = c2N1.getLocalNode(); + final DiscoveryNode c2N2Node = c2N2.getLocalNode(); knownNodes.add(c1N1Node); knownNodes.add(c1N2Node); knownNodes.add(c2N1Node); @@ -906,10 +906,10 @@ public void testCollectNodes() throws InterruptedException, IOException { settings ) ) { - final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); - final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); - final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode(); - final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode(); + final DiscoveryNode c1N1Node = c1N1.getLocalNode(); + final DiscoveryNode c1N2Node = c1N2.getLocalNode(); + final DiscoveryNode c2N1Node = c2N1.getLocalNode(); + final DiscoveryNode c2N2Node = c2N2.getLocalNode(); knownNodes_c1.add(c1N1Node); knownNodes_c1.add(c1N2Node); knownNodes_c2.add(c2N1Node); @@ -1170,8 +1170,8 @@ public void testReconnectWhenStrategySettingsUpdated() throws Exception { ) ) { - final DiscoveryNode node0 = cluster_node_0.getLocalDiscoNode(); - final DiscoveryNode node1 = cluster_node_1.getLocalDiscoNode(); + final DiscoveryNode node0 = cluster_node_0.getLocalNode(); + final DiscoveryNode node1 = cluster_node_1.getLocalNode(); knownNodes.add(node0); knownNodes.add(node1); Collections.shuffle(knownNodes, random()); @@ -1267,10 +1267,10 @@ public void testSkipUnavailable() { TransportVersion.current() ) ) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + DiscoveryNode seedNode = seedTransport.getLocalNode(); knownNodes.add(seedNode); Settings.Builder builder = Settings.builder(); - builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalNode().getAddress().toString()); try ( MockTransportService service = MockTransportService.createNewService( builder.build(), @@ -1353,8 +1353,8 @@ public void testUseDifferentTransportProfileForCredentialsProtectedRemoteCluster ); MockTransportService c2 = startTransport("cluster_2", knownNodes, VersionInformation.CURRENT, TransportVersion.current()); ) { - final DiscoveryNode c1Node = c1.getLocalDiscoNode().withTransportAddress(c1.boundRemoteAccessAddress().publishAddress()); - final DiscoveryNode c2Node = c2.getLocalDiscoNode(); + final DiscoveryNode c1Node = c1.getLocalNode().withTransportAddress(c1.boundRemoteAccessAddress().publishAddress()); + final DiscoveryNode c2Node = c2.getLocalNode(); final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("cluster.remote.cluster_1.credentials", randomAlphaOfLength(10)); @@ -1441,7 +1441,7 @@ public void testUpdateRemoteClusterCredentialsRebuildsConnectionWithCorrectProfi .build() ) ) { - final DiscoveryNode discoNode = c.getLocalDiscoNode().withTransportAddress(c.boundRemoteAccessAddress().publishAddress()); + final DiscoveryNode discoNode = c.getLocalNode().withTransportAddress(c.boundRemoteAccessAddress().publishAddress()); try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, @@ -1518,8 +1518,8 @@ public 
void testUpdateRemoteClusterCredentialsRebuildsMultipleConnectionsDespite .build() ) ) { - final DiscoveryNode c1DiscoNode = c1.getLocalDiscoNode().withTransportAddress(c1.boundRemoteAccessAddress().publishAddress()); - final DiscoveryNode c2DiscoNode = c2.getLocalDiscoNode().withTransportAddress(c2.boundRemoteAccessAddress().publishAddress()); + final DiscoveryNode c1DiscoNode = c1.getLocalNode().withTransportAddress(c1.boundRemoteAccessAddress().publishAddress()); + final DiscoveryNode c2DiscoNode = c2.getLocalNode().withTransportAddress(c2.boundRemoteAccessAddress().publishAddress()); try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, @@ -1636,7 +1636,7 @@ public void testLogsConnectionResult() throws IOException { assertThatLogger( () -> clusterSettings.applySettings( - Settings.builder().putList("cluster.remote.remote_1.seeds", remote.getLocalDiscoNode().getAddress().toString()).build() + Settings.builder().putList("cluster.remote.remote_1.seeds", remote.getLocalNode().getAddress().toString()).build() ), RemoteClusterService.class, new MockLog.SeenEventExpectation( diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 398bb5f2a9106..46585ac382583 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -85,15 +85,15 @@ public void setUp() throws Exception { threadPool = new TestThreadPool(getClass().getName()); serviceA = buildService(version0, transportVersion0); // this one supports dynamic tracer updates serviceA.taskManager.setTaskCancellationService(new TaskCancellationService(serviceA)); - nodeA = serviceA.getLocalDiscoNode(); + nodeA = serviceA.getLocalNode(); serviceB = buildService(version1, transportVersion1); // this one doesn't support dynamic tracer updates serviceB.taskManager.setTaskCancellationService(new TaskCancellationService(serviceB)); - nodeB = serviceB.getLocalDiscoNode(); + nodeB = serviceB.getLocalNode(); serviceC = buildService(version1, transportVersion1); // this one doesn't support dynamic tracer updates serviceC.taskManager.setTaskCancellationService(new TaskCancellationService(serviceC)); - nodeC = serviceC.getLocalDiscoNode(); + nodeC = serviceC.getLocalNode(); serviceD = buildService(version1, transportVersion1); - nodeD = serviceD.getLocalDiscoNode(); + nodeD = serviceD.getLocalNode(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 568f386a81fd1..ec85feb200984 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -156,11 +156,11 @@ protected void checkTransientErrorsDuringRecoveryAreRetried(String recoveryActio Runnable connectionBreaker = () -> { // Always break connection from source to remote to ensure that actions are retried logger.info("--> closing connections from source node to target node"); - blueTransportService.disconnectFromNode(redTransportService.getLocalDiscoNode()); + blueTransportService.disconnectFromNode(redTransportService.getLocalNode()); if (randomBoolean()) { // 
Sometimes break connection from remote to source to ensure that recovery is re-established logger.info("--> closing connections from target node to source node"); - redTransportService.disconnectFromNode(blueTransportService.getLocalDiscoNode()); + redTransportService.disconnectFromNode(blueTransportService.getLocalNode()); } }; TransientReceiveRejected handlingBehavior = new TransientReceiveRejected(recoveryActionToBlock, recoveryStarted, connectionBreaker); @@ -258,13 +258,13 @@ public void checkDisconnectsWhileRecovering(String recoveryActionToBlock) throws blueMockTransportService.addRequestHandlingBehavior(recoveryActionToBlock, (handler, request, channel, task) -> { logger.info("--> preventing {} response by closing response channel", recoveryActionToBlock); requestFailed.countDown(); - redMockTransportService.disconnectFromNode(blueMockTransportService.getLocalDiscoNode()); + redMockTransportService.disconnectFromNode(blueMockTransportService.getLocalNode()); handler.messageReceived(request, channel, task); }); redMockTransportService.addRequestHandlingBehavior(recoveryActionToBlock, (handler, request, channel, task) -> { logger.info("--> preventing {} response by closing response channel", recoveryActionToBlock); requestFailed.countDown(); - blueMockTransportService.disconnectFromNode(redMockTransportService.getLocalDiscoNode()); + blueMockTransportService.disconnectFromNode(redMockTransportService.getLocalNode()); handler.messageReceived(request, channel, task); }); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index c4e1c6c7a0681..fd376fcd07688 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -830,9 +830,4 @@ protected void doClose() throws IOException { assertTrue(ThreadPool.terminate(testExecutor, 10, TimeUnit.SECONDS)); } } - - public DiscoveryNode getLocalDiscoNode() { - return this.getLocalNode(); - } - } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 34f67ac78a41c..4595fbf286077 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -668,7 +668,7 @@ public void testVoidMessageCompressed() throws Exception { ) .build(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); - connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); + connectToNode(serviceC, serviceA.getLocalNode(), connectionProfile); Future res = submitRequest( serviceC, @@ -725,7 +725,7 @@ public void testHelloWorldCompressed() throws Exception { ) .build(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); - connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); + connectToNode(serviceC, serviceA.getLocalNode(), connectionProfile); Future res = submitRequest( serviceC, @@ -795,8 +795,8 @@ public void testIndexingDataCompression() throws Exception { ) .build(); ConnectionProfile connectionProfile = 
ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); - connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); - connectToNode(serviceA, serviceC.getLocalDiscoNode(), connectionProfile); + connectToNode(serviceC, serviceA.getLocalNode(), connectionProfile); + connectToNode(serviceA, serviceC.getLocalNode(), connectionProfile); TransportResponseHandler responseHandler = new TransportResponseHandler<>() { @Override @@ -821,14 +821,14 @@ public void handleException(TransportException exp) { Future compressed = submitRequest( serviceC, - serviceA.getLocalDiscoNode(), + serviceA.getLocalNode(), "internal:sayHello", new StringMessageRequest(text, -1, true), responseHandler ); Future uncompressed = submitRequest( serviceA, - serviceC.getLocalDiscoNode(), + serviceC.getLocalNode(), "internal:sayHello", new StringMessageRequest(text, -1, false), responseHandler @@ -1049,7 +1049,7 @@ public void onAfter() { ignoringRequestHandler ); serviceB = newService; - nodeB = newService.getLocalDiscoNode(); + nodeB = newService.getLocalNode(); connectToNode(serviceB, nodeA); connectToNode(serviceA, nodeB); } else if (serviceA.nodeConnected(nodeB)) { @@ -3419,7 +3419,7 @@ public void sendRequest( ) { final CountDownLatch latch = new CountDownLatch(1); serviceC.connectToNode( - serviceA.getLocalDiscoNode(), + serviceA.getLocalNode(), ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY), new ActionListener<>() { @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index ab785e739d080..9e07f9c8f5faf 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -441,7 +441,7 @@ public void sendResponse(TransportResponse transportResponse) { PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); sourceHandler.addCompletionListener(sourceCompletionFuture); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomIntBetween(1, 128)); - Transport.Connection connection = node0.getConnection(node1.getLocalDiscoNode()); + Transport.Connection connection = node0.getConnection(node1.getLocalNode()); sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); Exception err = expectThrows( Exception.class, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java index ebad8e6e13b8c..05a7486a18068 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java @@ -448,7 +448,7 @@ EnrichResolution resolvePolicies(Collection clusters, Collection policies) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java index 3d3f96b98d5e5..d294fb50046d6 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java @@ -158,7 +158,7 @@ public TransportRequestHandler interceptHandler( } } ); - DiscoveryNode remoteNode = remoteTransportService.getLocalDiscoNode(); + DiscoveryNode remoteNode = remoteTransportService.getLocalNode(); remoteTransportService.registerRequestHandler( RemoteClusterNodesAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, From 4fd621833b90f0414982bd309428eac4dd2fa34d Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Wed, 16 Oct 2024 11:54:48 +0200 Subject: [PATCH 06/31] Better DataType string checks (#114863) * Use DataType.isString * Add DataType.stringTypes() * Fix shouldHideSignature check --- .../elasticsearch/xpack/esql/core/type/DataType.java | 6 ++++++ .../xpack/esql/core/type/DataTypeConverter.java | 4 +--- .../expression/function/aggregate/CountDistinct.java | 2 +- .../xpack/esql/expression/function/aggregate/Max.java | 2 +- .../xpack/esql/expression/function/aggregate/Min.java | 2 +- .../function/scalar/conditional/Greatest.java | 6 +----- .../expression/function/scalar/conditional/Least.java | 6 +----- .../scalar/convert/AbstractConvertFunction.java | 5 ++--- .../operator/comparison/InsensitiveEqualsMapper.java | 2 +- .../expression/function/AbstractFunctionTestCase.java | 10 +--------- .../esql/expression/function/TestCaseSupplier.java | 5 ++--- .../esql/expression/function/fulltext/MatchTests.java | 9 ++------- .../expression/function/fulltext/QueryStringTests.java | 3 +-- .../function/scalar/convert/ToVersionTests.java | 2 +- .../function/scalar/string/AbstractTrimTests.java | 2 +- .../expression/function/scalar/string/ConcatTests.java | 4 +--- .../function/scalar/string/EndsWithTests.java | 5 ++--- .../expression/function/scalar/string/LocateTests.java | 6 ++---- .../expression/function/scalar/string/RLikeTests.java | 2 +- .../function/scalar/string/StartsWithTests.java | 5 ++--- .../xpack/esql/type/EsqlDataTypeConverterTests.java | 3 +-- 21 files changed, 32 insertions(+), 59 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index b23703c6d8b66..cb1a7b2eb6fe0 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -266,6 +266,8 @@ public enum DataType { .sorted(Comparator.comparing(DataType::typeName)) .toList(); + private static final Collection STRING_TYPES = DataType.types().stream().filter(DataType::isString).toList(); + private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); private static final Map ES_TO_TYPE; @@ -292,6 +294,10 @@ public static Collection types() { return TYPES; } + public static Collection stringTypes() { + return STRING_TYPES; + } + /** * Resolve a type from a name. 
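The DataType hunk above introduces STRING_TYPES as a precomputed constant plus a stringTypes() accessor, so callers stop re-filtering DataType.types() on every call. A minimal sketch of that pattern, assuming nothing beyond the JDK (the Type enum and its constants below are stand-ins, not the real DataType):

    import java.util.Arrays;
    import java.util.List;

    enum Type {
        KEYWORD, TEXT, INTEGER, LONG;

        boolean isString() {
            return this == KEYWORD || this == TEXT;
        }

        // Filtered once during class initialization; every caller shares the
        // same immutable list instead of streaming over values() again.
        private static final List<Type> STRING_TYPES =
            Arrays.stream(values()).filter(Type::isString).toList();

        static List<Type> stringTypes() {
            return STRING_TYPES;
        }
    }

The same shared constant is what lets the test hunks later in this patch replace ad-hoc Arrays.stream(DataType.values()).filter(DataType::isString) pipelines with a single call.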
This name is sometimes user supplied, * like in the case of {@code ::} and is sometimes the name diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java index 78b395503e700..7c91a506697c1 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataTypeConverter.java @@ -30,11 +30,9 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.IP; -import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.NULL; import static org.elasticsearch.xpack.esql.core.type.DataType.SHORT; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTime; @@ -62,7 +60,7 @@ public static Converter converterFor(DataType from, DataType to) { return DefaultConverter.TO_NULL; } // proper converters - if (to == KEYWORD || to == TEXT) { + if (isString(to)) { return conversionToString(from); } if (to == LONG) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 2550e5bdcf515..756000dfbb187 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -209,7 +209,7 @@ public AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.DOUBLE) { return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, precision); } - if (type == DataType.KEYWORD || type == DataType.IP || type == DataType.VERSION || type == DataType.TEXT) { + if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, precision); } throw EsqlIllegalArgumentException.illegalDataType(type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 47d74c71d9cc5..6119b2ce58465 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -128,7 +128,7 @@ public final AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.IP) { return new MaxIpAggregatorFunctionSupplier(inputChannels); } - if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + if (type == DataType.VERSION || DataType.isString(type)) { return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); } throw 
EsqlIllegalArgumentException.illegalDataType(type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index ce69decca8e81..a1492f79da393 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -128,7 +128,7 @@ public final AggregatorFunctionSupplier supplier(List inputChannels) { if (type == DataType.IP) { return new MinIpAggregatorFunctionSupplier(inputChannels); } - if (type == DataType.VERSION || type == DataType.KEYWORD || type == DataType.TEXT) { + if (type == DataType.VERSION || DataType.isString(type)) { return new MinBytesRefAggregatorFunctionSupplier(inputChannels); } throw EsqlIllegalArgumentException.illegalDataType(type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index 9d815d15accdc..d47ebeab4ca6c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -155,11 +155,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.LONG || dataType == DataType.DATETIME) { return new GreatestLongEvaluator.Factory(source(), factories); } - if (dataType == DataType.KEYWORD - || dataType == DataType.TEXT - || dataType == DataType.IP - || dataType == DataType.VERSION - || dataType == DataType.UNSUPPORTED) { + if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { return new GreatestBytesRefEvaluator.Factory(source(), factories); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 435a14d0fef33..81c1419dcf788 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -154,11 +154,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.LONG || dataType == DataType.DATETIME) { return new LeastLongEvaluator.Factory(source(), factories); } - if (dataType == DataType.KEYWORD - || dataType == DataType.TEXT - || dataType == DataType.IP - || dataType == DataType.VERSION - || dataType == DataType.UNSUPPORTED) { + if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { return new LeastBytesRefEvaluator.Factory(source(), factories); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index 5401fcf188d4a..06815d738e82c 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -48,7 +48,6 @@ public abstract class AbstractConvertFunction extends UnaryScalarFunction { // the numeric types convert functions need to handle; the other numeric types are converted upstream to one of these private static final List NUMERIC_TYPES = List.of(DataType.INTEGER, DataType.LONG, DataType.UNSIGNED_LONG, DataType.DOUBLE); - public static final List STRING_TYPES = DataType.types().stream().filter(DataType::isString).toList(); protected AbstractConvertFunction(Source source, Expression field) { super(source, field); @@ -90,9 +89,9 @@ private static String supportedTypesNames(Set types) { NUMERIC_TYPES.forEach(supportTypes::remove); } - if (types.containsAll(STRING_TYPES)) { + if (types.containsAll(DataType.stringTypes())) { supportedTypesNames.add("string"); - STRING_TYPES.forEach(supportTypes::remove); + DataType.stringTypes().forEach(supportTypes::remove); } supportTypes.forEach(t -> supportedTypesNames.add(t.nameUpper().toLowerCase(Locale.ROOT))); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java index d11f5c9b68532..f5704239993f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java @@ -34,7 +34,7 @@ public final ExpressionEvaluator.Factory map(InsensitiveEquals bc, Layout layout var leftEval = toEvaluator(bc.left(), layout); var rightEval = toEvaluator(bc.right(), layout); - if (leftType == DataType.KEYWORD || leftType == DataType.TEXT) { + if (DataType.isString(leftType)) { if (bc.right().foldable() && DataType.isString(rightType)) { BytesRef rightVal = BytesRefs.toBytesRef(bc.right().fold()); Automaton automaton = InsensitiveEquals.automaton(rightVal); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index ca9950a4bfe77..84a41ef040c8e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -1283,13 +1283,6 @@ public void allMemoryReleased() { } } - /** - * All string types (keyword, text, match_only_text, etc). 
- */ - protected static DataType[] strings() { - return DataType.types().stream().filter(DataType::isString).toArray(DataType[]::new); - } - /** * Validate that we know the types for all the test cases already created * @param suppliers - list of suppliers before adding in the illegal type combinations @@ -1316,10 +1309,9 @@ private static boolean isAggregation() { */ private static boolean shouldHideSignature(List argTypes, DataType returnType) { for (DataType dt : DataType.UNDER_CONSTRUCTION.keySet()) { - if (returnType == dt) { + if (returnType == dt || argTypes.contains(dt)) { return true; } - return argTypes.contains(dt); } return false; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index b3942a71edadb..2ba175657b6c2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.versionfield.Version; import org.hamcrest.Matcher; @@ -91,7 +90,7 @@ public static List stringCases( List lhsSuppliers = new ArrayList<>(); List rhsSuppliers = new ArrayList<>(); List suppliers = new ArrayList<>(); - for (DataType type : AbstractConvertFunction.STRING_TYPES) { + for (DataType type : DataType.stringTypes()) { lhsSuppliers.addAll(stringCases(type)); rhsSuppliers.addAll(stringCases(type)); casesCrossProduct( @@ -760,7 +759,7 @@ public static void forUnaryStrings( Function expectedValue, Function> expectedWarnings ) { - for (DataType type : AbstractConvertFunction.STRING_TYPES) { + for (DataType type : DataType.stringTypes()) { unary( suppliers, expectedEvaluatorToString, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java index d37bc89635c1d..967b4d854c325 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.Set; @@ -40,8 +39,8 @@ public static Iterable parameters() { Set supported = Set.of(DataType.KEYWORD, DataType.TEXT); List> supportedPerPosition = List.of(supported, supported); List suppliers = new LinkedList<>(); - for (DataType fieldType : validStringDataTypes()) { - for (DataType queryType : validStringDataTypes()) { + for (DataType fieldType : DataType.stringTypes()) { + for (DataType queryType : DataType.stringTypes()) { suppliers.add( new TestCaseSupplier( "<" + fieldType + "-ES field, " + queryType + ">", @@ -67,10 +66,6 @@ private static String matchTypeErrorSupplier(boolean includeOrdinal, List validStringDataTypes() { - return 
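The shouldHideSignature hunk just above fixes a classic loop bug: the old body ended in an unconditional return, so the method decided after inspecting only the first UNDER_CONSTRUCTION entry. A reduced, runnable illustration of the before/after shapes (the class, method names, and the two example strings are invented for the demo):

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    class EarlyReturnDemo {
        // Bug shape: the bare return makes the loop exit on iteration one.
        static boolean buggy(Set<String> underConstruction, List<String> argTypes) {
            for (String dt : underConstruction) {
                if (argTypes.contains(dt)) {
                    return true;
                }
                return false; // only the first element is ever checked
            }
            return false;
        }

        // Fix shape: fold the check into the condition and keep iterating.
        static boolean fixed(Set<String> underConstruction, List<String> argTypes) {
            for (String dt : underConstruction) {
                if (argTypes.contains(dt)) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Set<String> uc = new LinkedHashSet<>(List.of("type_a", "type_b"));
            List<String> argTypes = List.of("type_b");
            System.out.println(buggy(uc, argTypes)); // false: never reaches type_b
            System.out.println(fixed(uc, argTypes)); // true
        }
    }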
Arrays.stream(DataType.values()).filter(DataType::isString).toList(); - } - private static TestCaseSupplier.TestCase testCase( DataType fieldType, String field, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java index 2dfdb05ec8ecc..b4b4ebcaacde6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.function.Supplier; @@ -36,7 +35,7 @@ public QueryStringTests(@Name("TestCase") Supplier te @ParametersFactory public static Iterable parameters() { List suppliers = new LinkedList<>(); - for (DataType strType : Arrays.stream(DataType.values()).filter(DataType::isString).toList()) { + for (DataType strType : DataType.stringTypes()) { suppliers.add( new TestCaseSupplier( "<" + strType + ">", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java index 46a8086f9479c..57f11331818dc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java @@ -49,7 +49,7 @@ public static Iterable parameters() { ); // But strings that are shaped like versions do parse to valid versions - for (DataType inputType : AbstractConvertFunction.STRING_TYPES) { + for (DataType inputType : DataType.stringTypes()) { TestCaseSupplier.unary( suppliers, read, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index f77a892d8682e..d069f7ffe2298 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -21,7 +21,7 @@ public abstract class AbstractTrimTests extends AbstractScalarFunctionTestCase { static Iterable parameters(String name, boolean trimLeading, boolean trimTrailing) { List suppliers = new ArrayList<>(); - for (DataType type : strings()) { + for (DataType type : DataType.stringTypes()) { suppliers.add(new TestCaseSupplier("no whitespace/" + type, List.of(type), () -> { String text = randomAlphaOfLength(8); return testCase(name, type, text, text); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java index bbe92ae4a6618..2ad953c9296b7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java @@ -58,9 +58,7 @@ public static Iterable parameters() { if (rhs == DataType.NULL || DataType.isRepresentable(rhs) == false) { continue; } - boolean lhsIsString = lhs == DataType.KEYWORD || lhs == DataType.TEXT; - boolean rhsIsString = rhs == DataType.KEYWORD || rhs == DataType.TEXT; - if (lhsIsString && rhsIsString) { + if (DataType.isString(lhs) && DataType.isString(rhs)) { continue; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java index 6d086e2626cb6..1b2e9c41cb25c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/EndsWithTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.hamcrest.Matcher; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.function.Supplier; @@ -33,8 +32,8 @@ public EndsWithTests(@Name("TestCase") Supplier testC @ParametersFactory public static Iterable parameters() { List suppliers = new LinkedList<>(); - for (DataType strType : Arrays.stream(DataType.values()).filter(DataType::isString).toList()) { - for (DataType suffixType : Arrays.stream(DataType.values()).filter(DataType::isString).toList()) { + for (DataType strType : DataType.stringTypes()) { + for (DataType suffixType : DataType.stringTypes()) { suppliers.add( new TestCaseSupplier( "<" + strType + ">, empty <" + suffixType + ">", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java index 207125bed2a19..a10f97c45aa04 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LocateTests.java @@ -35,13 +35,11 @@ public LocateTests(@Name("TestCase") Supplier testCas this.testCase = testCaseSupplier.get(); } - private static final DataType[] STRING_TYPES = new DataType[] { DataType.KEYWORD, DataType.TEXT }; - @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); - for (DataType strType : STRING_TYPES) { - for (DataType substrType : STRING_TYPES) { + for (DataType strType : DataType.stringTypes()) { + for (DataType substrType : DataType.stringTypes()) { suppliers.add( supplier( "", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java index 5a34d850cffe3..dab2fca212ff4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLikeTests.java @@ -69,7 +69,7 @@ static Iterable parameters(Function escapeString, Supp casesForString(cases, "6 bytes, 2 code points", () -> "❗️", false, escapeString, 
optionalPattern); casesForString(cases, "100 random code points", () -> randomUnicodeOfCodepointLength(100), true, escapeString, optionalPattern); for (DataType type : DataType.types()) { - if (type == DataType.KEYWORD || type == DataType.TEXT || type == DataType.NULL) { + if (DataType.isString(type) || type == DataType.NULL) { continue; } if (DataType.isRepresentable(type) == false) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java index 8bc8cf3184a75..60ed3b05ad642 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.function.Supplier; @@ -32,8 +31,8 @@ public StartsWithTests(@Name("TestCase") Supplier tes @ParametersFactory public static Iterable parameters() { List suppliers = new ArrayList<>(); - for (DataType strType : Arrays.stream(DataType.values()).filter(DataType::isString).toList()) { - for (DataType prefixType : Arrays.stream(DataType.values()).filter(DataType::isString).toList()) { + for (DataType strType : DataType.stringTypes()) { + for (DataType prefixType : DataType.stringTypes()) { suppliers.add(new TestCaseSupplier(List.of(strType, prefixType), () -> { String str = randomAlphaOfLength(5); String prefix = randomAlphaOfLength(5); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index 8ad083683f696..babb9fc8c0bd1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -62,8 +62,7 @@ public void testCommonTypeNull() { } public void testCommonTypeStrings() { - List STRINGS = Arrays.stream(DataType.values()).filter(DataType::isString).toList(); - for (DataType dataType1 : STRINGS) { + for (DataType dataType1 : DataType.stringTypes()) { for (DataType dataType2 : DataType.values()) { if (dataType2 == NULL) { assertEqualsCommonType(dataType1, NULL, dataType1); From f13e495d765efee410ff9ff250a4fec52a2ed4ff Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 16 Oct 2024 12:06:12 +0200 Subject: [PATCH 07/31] Fix NPE in AdaptiveAllocationsScalerService (#114880) * Fix NPE in AdaptiveAllocationsScalerService * Update docs/changelog/114880.yaml * Delete docs/changelog/114880.yaml --- .../adaptiveallocations/AdaptiveAllocationsScalerService.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java index 1c3a73a409dd1..9624d619ff20a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java @@ -433,7 +433,8 @@ private void processDeploymentStats(GetDeploymentStatsAction.Response statsRespo public boolean maybeStartAllocation(TrainedModelAssignment assignment) { if (assignment.getAdaptiveAllocationsSettings() != null && assignment.getAdaptiveAllocationsSettings().getEnabled() == Boolean.TRUE - && assignment.getAdaptiveAllocationsSettings().getMinNumberOfAllocations() == 0) { + && (assignment.getAdaptiveAllocationsSettings().getMinNumberOfAllocations() == null + || assignment.getAdaptiveAllocationsSettings().getMinNumberOfAllocations() == 0)) { // Prevent against a flurry of scale up requests. if (deploymentIdsWithInFlightScaleFromZeroRequests.contains(assignment.getDeploymentId()) == false) { From 0fd58394edfe4300f78535ddb5e2e847f243c85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Wed, 16 Oct 2024 13:19:56 +0200 Subject: [PATCH 08/31] ESQL: Fix MvPercentileTests precision issues (#114844) Fixes https://github.com/elastic/elasticsearch/issues/114588 Fixes https://github.com/elastic/elasticsearch/issues/114587 Fixes https://github.com/elastic/elasticsearch/issues/114586 Fixes https://github.com/elastic/elasticsearch/issues/114585 Fixes https://github.com/elastic/elasticsearch/issues/113008 Fixes https://github.com/elastic/elasticsearch/issues/113007 Fixes https://github.com/elastic/elasticsearch/issues/113006 Fixes https://github.com/elastic/elasticsearch/issues/113005 Fixed the long precision issue by allowing a +/-1 range. Also made a minor refactor to simplify using different matchers for different types. --- .../scalar/multivalue/MvPercentileTests.java | 47 ++++++++++++------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java index 29cc959e6a943..0a419d44e3448 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.MultivalueTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matcher; import java.math.BigDecimal; import java.util.ArrayList; @@ -28,6 +29,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -375,27 +377,25 @@ private static TestCaseSupplier makeSupplier( var values = (List) fieldTypedData.data(); var percentile = ((Number) percentileTypedData.data()).doubleValue(); - var expected = calculatePercentile(values, percentile); + var expectedMatcher = makePercentileMatcher(values, percentile); return new TestCaseSupplier.TestCase( List.of(fieldTypedData, percentileTypedData), 
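The "+/-1 range" mentioned in this commit message materializes below as an anyOf matcher over three exact values: interpolating between two longs routes through a double fraction, so the evaluator and the BigDecimal-based test oracle can legitimately land one unit apart. A sketch of the matcher-building idea, assuming only Hamcrest (the ToleranceMatchers class and withinOneOf name are invented):

    import org.hamcrest.Matcher;

    import static org.hamcrest.Matchers.anyOf;
    import static org.hamcrest.Matchers.equalTo;

    class ToleranceMatchers {
        // Accept expected - 1, expected, or expected + 1. The real patch goes
        // through Math.min/Math.max so the window stays well-formed even where
        // expected +/- 1 would overflow at Long.MIN_VALUE or Long.MAX_VALUE.
        static Matcher<Long> withinOneOf(long expected) {
            return anyOf(equalTo(expected - 1), equalTo(expected), equalTo(expected + 1));
        }
    }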
evaluatorString(fieldSupplier.type(), percentileSupplier.type()), fieldSupplier.type(), - expected instanceof Double expectedDouble - ? closeTo(expectedDouble, Math.abs(expectedDouble * 0.0000001)) - : equalTo(expected) + expectedMatcher ); } ); } - private static Number calculatePercentile(List rawValues, double percentile) { + private static Matcher makePercentileMatcher(List rawValues, double percentile) { if (rawValues.isEmpty() || percentile < 0 || percentile > 100) { - return null; + return nullValue(); } if (rawValues.size() == 1) { - return rawValues.get(0); + return equalTo(rawValues.get(0)); } int valueCount = rawValues.size(); @@ -407,49 +407,62 @@ private static Number calculatePercentile(List rawValues, double percent if (rawValues.get(0) instanceof Integer) { var values = rawValues.stream().mapToInt(Number::intValue).sorted().toArray(); + int expected; if (percentile == 0) { - return values[0]; + expected = values[0]; } else if (percentile == 100) { - return values[valueCount - 1]; + expected = values[valueCount - 1]; } else { assert lowerIndex >= 0 && upperIndex < valueCount; var difference = (long) values[upperIndex] - values[lowerIndex]; - return values[lowerIndex] + (int) (fraction * difference); + expected = values[lowerIndex] + (int) (fraction * difference); } + + return equalTo(expected); } if (rawValues.get(0) instanceof Long) { var values = rawValues.stream().mapToLong(Number::longValue).sorted().toArray(); + long expected; if (percentile == 0) { - return values[0]; + expected = values[0]; } else if (percentile == 100) { - return values[valueCount - 1]; + expected = values[valueCount - 1]; } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).longValue(); + expected = calculatePercentile(fraction, BigDecimal.valueOf(values[lowerIndex]), BigDecimal.valueOf(values[upperIndex])) + .longValue(); } + + // Double*bigLong may lose precision, we allow a small range + return anyOf(equalTo(Math.min(expected, expected - 1)), equalTo(expected), equalTo(Math.max(expected, expected + 1))); } if (rawValues.get(0) instanceof Double) { var values = rawValues.stream().mapToDouble(Number::doubleValue).sorted().toArray(); + double expected; if (percentile == 0) { - return values[0]; + expected = values[0]; } else if (percentile == 100) { - return values[valueCount - 1]; + expected = values[valueCount - 1]; } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])).doubleValue(); + expected = calculatePercentile(fraction, new BigDecimal(values[lowerIndex]), new BigDecimal(values[upperIndex])) + .doubleValue(); } + + return closeTo(expected, Math.abs(expected * 0.0000001)); } throw new IllegalArgumentException("Unsupported type: " + rawValues.get(0).getClass()); } private static BigDecimal calculatePercentile(double fraction, BigDecimal lowerValue, BigDecimal upperValue) { - return lowerValue.add(new BigDecimal(fraction).multiply(upperValue.subtract(lowerValue))); + var difference = upperValue.subtract(lowerValue); + return lowerValue.add(new BigDecimal(fraction).multiply(difference)); } private static TestCaseSupplier.TypedData percentileWithType(Number value, DataType type) { From 5faf0cdd90852b36f8978b2e20a53c4f50caa94a Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 16 Oct 2024 13:30:18 +0100 Subject: [PATCH 09/31] Remove the min_compatible_shard_node 
option and associated classes (#114713) Any similar functionality in the future should use capabilities instead --- .../SearchWithMinCompatibleSearchNodeIT.java | 144 ----- .../resources/rest-api-spec/api/search.json | 4 - .../elasticsearch/ElasticsearchException.java | 6 - .../org/elasticsearch/TransportVersions.java | 1 + .../search/AbstractSearchAsyncAction.java | 32 +- .../search/CanMatchPreFilterSearchPhase.java | 32 +- .../action/search/SearchRequest.java | 41 +- .../search/VersionMismatchException.java | 27 - .../rest/action/search/RestSearchAction.java | 11 +- .../ExceptionSerializationTests.java | 3 +- .../SearchQueryThenFetchAsyncActionTests.java | 495 ------------------ .../action/search/SearchRequestTests.java | 28 - .../eql/plugin/TransportEqlSearchAction.java | 22 +- .../fleet/rest/RestFleetSearchAction.java | 12 +- .../xpack/ql/plugin/TransportActionUtils.java | 81 --- .../xpack/sql/execution/search/Querier.java | 4 +- .../sql/plugin/TransportSqlQueryAction.java | 23 +- 17 files changed, 29 insertions(+), 937 deletions(-) delete mode 100644 qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java delete mode 100644 server/src/main/java/org/elasticsearch/action/search/VersionMismatchException.java delete mode 100644 x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plugin/TransportActionUtils.java diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java deleted file mode 100644 index a391ee5a3bd7b..0000000000000 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ -package org.elasticsearch.backwards; - -import org.apache.http.HttpHost; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.core.Strings; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.ObjectPath; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class SearchWithMinCompatibleSearchNodeIT extends ESRestTestCase { - - private static final String BWC_NODES_VERSION = System.getProperty("tests.bwc_nodes_version"); - private static final String NEW_NODES_VERSION = System.getProperty("tests.new_nodes_version"); - - private static String index = "test_min_version"; - private static int numShards; - private static int numReplicas = 1; - private static int numDocs; - private static MixedClusterTestNodes nodes; - private static List allNodes; - - @Before - public void prepareTestData() throws IOException { - nodes = MixedClusterTestNodes.buildNodes(client(), BWC_NODES_VERSION); - numShards = nodes.size(); - numDocs = randomIntBetween(numShards, 16); - allNodes = new ArrayList<>(); - allNodes.addAll(nodes.getBWCNodes()); - allNodes.addAll(nodes.getNewNodes()); - - if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) { - createIndex(index, indexSettings(numShards, numReplicas).build()); - for (int i = 0; i < numDocs; i++) { - Request request = new Request("PUT", index + "/_doc/" + i); - request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); - assertOK(client().performRequest(request)); - } - ensureGreen(index); - } - } - - public void testMinVersionAsNewVersion() throws Exception { - try ( - RestClient client = buildClient( - restClientSettings(), - allNodes.stream().map(MixedClusterTestNode::publishAddress).toArray(HttpHost[]::new) - ) - ) { - Request newVersionRequest = new Request( - "POST", - index + "/_search?min_compatible_shard_node=" + NEW_NODES_VERSION + "&ccs_minimize_roundtrips=false" - ); - assertBusy(() -> { - ResponseException responseException = expectThrows(ResponseException.class, () -> client.performRequest(newVersionRequest)); - assertThat( - responseException.getResponse().getStatusLine().getStatusCode(), - equalTo(RestStatus.INTERNAL_SERVER_ERROR.getStatus()) - ); - assertThat(responseException.getMessage(), containsString(""" - {"error":{"root_cause":[],"type":"search_phase_execution_exception\"""")); - assertThat(responseException.getMessage(), containsString(Strings.format(""" - caused_by":{"type":"version_mismatch_exception",\ - "reason":"One of the shards is incompatible with the required minimum version [%s]\"""", NEW_NODES_VERSION))); - }); - } - } - - public void testMinVersionAsOldVersion() throws Exception { - try ( - RestClient client = buildClient( - restClientSettings(), - allNodes.stream().map(MixedClusterTestNode::publishAddress).toArray(HttpHost[]::new) - ) - ) { - Request oldVersionRequest = new Request( - "POST", - index + "/_search?min_compatible_shard_node=" + BWC_NODES_VERSION + "&ccs_minimize_roundtrips=false" - ); - oldVersionRequest.setJsonEntity(""" - {"query":{"match_all":{}},"_source":false}"""); - assertBusy(() -> { - Response response = 
client.performRequest(oldVersionRequest); - ObjectPath responseObject = ObjectPath.createFromResponse(response); - Map shardsResult = responseObject.evaluate("_shards"); - assertThat(shardsResult.get("total"), equalTo(numShards)); - assertThat(shardsResult.get("successful"), equalTo(numShards)); - assertThat(shardsResult.get("failed"), equalTo(0)); - Map hitsResult = responseObject.evaluate("hits.total"); - assertThat(hitsResult.get("value"), equalTo(numDocs)); - assertThat(hitsResult.get("relation"), equalTo("eq")); - }); - } - } - - public void testCcsMinimizeRoundtripsIsFalse() throws Exception { - try ( - RestClient client = buildClient( - restClientSettings(), - allNodes.stream().map(MixedClusterTestNode::publishAddress).toArray(HttpHost[]::new) - ) - ) { - String version = randomBoolean() ? NEW_NODES_VERSION : BWC_NODES_VERSION; - - Request request = new Request( - "POST", - index + "/_search?min_compatible_shard_node=" + version + "&ccs_minimize_roundtrips=true" - ); - assertBusy(() -> { - ResponseException responseException = expectThrows(ResponseException.class, () -> client.performRequest(request)); - assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); - assertThat(responseException.getMessage(), containsString(""" - {"error":{"root_cause":[{"type":"action_request_validation_exception"\ - """)); - assertThat( - responseException.getMessage(), - containsString( - "\"reason\":\"Validation Failed: 1: " - + "[ccs_minimize_roundtrips] cannot be [true] when setting a minimum compatible shard version;\"" - ) - ); - }); - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index b5dc4d62a2f0f..25b4efd9c4c37 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -237,10 +237,6 @@ "description":"Indicates whether hits.total should be rendered as an integer or an object in the rest search response", "default":false }, - "min_compatible_shard_node":{ - "type":"string", - "description":"The minimum compatible version that all shards involved in search should have for this request to be successful" - }, "include_named_queries_score":{ "type": "boolean", "description":"Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false)", diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 5d04e31069b1c..4119e12d45f6c 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1819,12 +1819,6 @@ private enum ElasticsearchExceptionHandle { 160, TransportVersions.V_7_10_0 ), - VERSION_MISMATCH_EXCEPTION( - org.elasticsearch.action.search.VersionMismatchException.class, - org.elasticsearch.action.search.VersionMismatchException::new, - 161, - TransportVersions.V_7_12_0 - ), AUTHENTICATION_PROCESSING_ERROR( org.elasticsearch.ElasticsearchAuthenticationProcessingError.class, org.elasticsearch.ElasticsearchAuthenticationProcessingError::new, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 
f89c5a65693f2..d1d423dcc5405 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -246,6 +246,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); + public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index caa7453185575..0c585c705dcd0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.OriginalIndices; @@ -234,15 +233,6 @@ public final void run() { } if (shardsIts.size() > 0) { doCheckNoMissingShards(getName(), request, shardsIts); - Version version = request.minCompatibleShardNode(); - if (version != null && Version.CURRENT.minimumCompatibilityVersion().equals(version) == false) { - if (checkMinimumVersion(shardsIts) == false) { - throw new VersionMismatchException( - "One of the shards is incompatible with the required minimum version [{}]", - request.minCompatibleShardNode() - ); - } - } for (int i = 0; i < shardsIts.size(); i++) { final SearchShardIterator shardRoutings = shardsIts.get(i); assert shardRoutings.skip() == false; @@ -260,21 +250,6 @@ void skipShard(SearchShardIterator iterator) { successfulShardExecution(iterator); } - private boolean checkMinimumVersion(GroupShardsIterator shardsIts) { - for (SearchShardIterator it : shardsIts) { - if (it.getTargetNodeIds().isEmpty() == false) { - boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { - Transport.Connection conn = getConnection(it.getClusterAlias(), nodeId); - return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); - }); - if (isCompatible == false) { - return false; - } - } - } - return true; - } - private static boolean assertExecuteOnStartThread() { // Ensure that the current code has the following stacktrace: // AbstractSearchAsyncAction#start -> AbstractSearchAsyncAction#executePhase -> AbstractSearchAsyncAction#performPhaseOnShard @@ -761,12 +736,7 @@ final void onPhaseDone() { // as a tribute to @kimchy aka. 
finishHim() @Override public final Transport.Connection getConnection(String clusterAlias, String nodeId) { - Transport.Connection conn = nodeIdToConnection.apply(clusterAlias, nodeId); - Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { - throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); - } - return conn; + return nodeIdToConnection.apply(clusterAlias, nodeId); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 8ce2cc7b6b19e..8dcfbf5f070a1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.FixedBitSet; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.util.Maps; @@ -133,15 +132,6 @@ private static boolean assertSearchCoordinationThread() { public void run() { assert assertSearchCoordinationThread(); checkNoMissingShards(); - Version version = request.minCompatibleShardNode(); - if (version != null && Version.CURRENT.minimumCompatibilityVersion().equals(version) == false) { - if (checkMinimumVersion(shardsIts) == false) { - throw new VersionMismatchException( - "One of the shards is incompatible with the required minimum version [{}]", - request.minCompatibleShardNode() - ); - } - } runCoordinatorRewritePhase(); } @@ -378,21 +368,6 @@ public CanMatchNodeRequest.Shard buildShardLevelRequest(SearchShardIterator shar ); } - private boolean checkMinimumVersion(GroupShardsIterator shardsIts) { - for (SearchShardIterator it : shardsIts) { - if (it.getTargetNodeIds().isEmpty() == false) { - boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { - Transport.Connection conn = getConnection(new SendingTarget(it.getClusterAlias(), nodeId)); - return conn == null || conn.getNode().getVersion().onOrAfter(request.minCompatibleShardNode()); - }); - if (isCompatible == false) { - return false; - } - } - } - return true; - } - @Override public void start() { if (getNumShards() == 0) { @@ -421,12 +396,7 @@ public void onPhaseFailure(String msg, Exception cause) { } public Transport.Connection getConnection(SendingTarget sendingTarget) { - Transport.Connection conn = nodeIdToConnection.apply(sendingTarget.clusterAlias, sendingTarget.nodeId); - Version minVersion = request.minCompatibleShardNode(); - if (minVersion != null && conn != null && conn.getNode().getVersion().before(minVersion)) { - throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); - } - return conn; + return nodeIdToConnection.apply(sendingTarget.clusterAlias, sendingTarget.nodeId); } private int getNumShards() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9961c3770fa86..5aec2bcd04b26 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -92,9 +92,6 @@ 
public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private boolean ccsMinimizeRoundtrips; - @Nullable - private final Version minCompatibleShardNode; - public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; @@ -112,15 +109,10 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private boolean forceSyntheticSource = false; public SearchRequest() { - this((Version) null); - } - - public SearchRequest(Version minCompatibleShardNode) { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; this.finalReduce = true; - this.minCompatibleShardNode = minCompatibleShardNode; - this.ccsMinimizeRoundtrips = minCompatibleShardNode == null; + this.ccsMinimizeRoundtrips = true; } /** @@ -219,7 +211,6 @@ private SearchRequest( this.localClusterAlias = localClusterAlias; this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; - this.minCompatibleShardNode = searchRequest.minCompatibleShardNode; this.waitForCheckpoints = searchRequest.waitForCheckpoints; this.waitForCheckpointsTimeout = searchRequest.waitForCheckpointsTimeout; this.forceSyntheticSource = searchRequest.forceSyntheticSource; @@ -263,10 +254,8 @@ public SearchRequest(StreamInput in) throws IOException { finalReduce = true; } ccsMinimizeRoundtrips = in.readBoolean(); - if (in.readBoolean()) { - minCompatibleShardNode = Version.readVersion(in); - } else { - minCompatibleShardNode = null; + if (in.getTransportVersion().before(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE) && in.readBoolean()) { + Version.readVersion(in); // and drop on the floor } waitForCheckpoints = in.readMap(StreamInput::readLongArray); waitForCheckpointsTimeout = in.readTimeValue(); @@ -302,9 +291,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(finalReduce); } out.writeBoolean(ccsMinimizeRoundtrips); - out.writeBoolean(minCompatibleShardNode != null); - if (minCompatibleShardNode != null) { - Version.writeVersion(minCompatibleShardNode, out); + if (out.getTransportVersion().before(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE)) { + out.writeBoolean(false); } out.writeMap(waitForCheckpoints, StreamOutput::writeLongArray); out.writeTimeValue(waitForCheckpointsTimeout); @@ -351,14 +339,6 @@ public ActionRequestValidationException validate() { validationException = addValidationError("[preference] cannot be used with point in time", validationException); } } - if (minCompatibleShardNode() != null) { - if (isCcsMinimizeRoundtrips()) { - validationException = addValidationError( - "[ccs_minimize_roundtrips] cannot be [true] when setting a minimum compatible " + "shard version", - validationException - ); - } - } if (pointInTimeBuilder() != null && waitForCheckpoints.isEmpty() == false) { validationException = addValidationError("using [point in time] is not allowed with wait_for_checkpoints", validationException); @@ -401,15 +381,6 @@ long getAbsoluteStartMillis() { return absoluteStartMillis; } - /** - * Returns the minimum compatible shard version the search request needs to run on. If the version is null, then there are no - * restrictions imposed on shards versions part of this search. - */ - @Nullable - public Version minCompatibleShardNode() { - return minCompatibleShardNode; - } - /** * Sets the indices the search will be executed on. 
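The readFrom/writeTo hunks above are the standard wire-compatibility move for deleting a field: peers on older transport versions still put the field on the wire, so the reader must consume and discard it, and the writer must emit a benign placeholder, or the two sides stop agreeing on where the next field begins. Condensed from the hunks above (enclosing class and unrelated fields omitted):

    public SearchRequest(StreamInput in) throws IOException {
        // ...
        if (in.getTransportVersion().before(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE) && in.readBoolean()) {
            Version.readVersion(in); // consume and drop: old peers may still send it
        }
        // ... subsequent fields now line up on both sides
    }

    public void writeTo(StreamOutput out) throws IOException {
        // ...
        if (out.getTransportVersion().before(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE)) {
            out.writeBoolean(false); // old peers still expect the flag; never send a version
        }
        // ...
    }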
*/ @@ -818,7 +789,6 @@ public boolean equals(Object o) { && Objects.equals(localClusterAlias, that.localClusterAlias) && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips - && Objects.equals(minCompatibleShardNode, that.minCompatibleShardNode) && forceSyntheticSource == that.forceSyntheticSource; } @@ -840,7 +810,6 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - minCompatibleShardNode, forceSyntheticSource ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/VersionMismatchException.java b/server/src/main/java/org/elasticsearch/action/search/VersionMismatchException.java deleted file mode 100644 index 69ea4484ae691..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/VersionMismatchException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -public class VersionMismatchException extends ElasticsearchException { - - public VersionMismatchException(String msg, Object... args) { - super(msg, args); - } - - public VersionMismatchException(StreamInput in) throws IOException { - super(in); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index af60979dfe169..80a85d3b9b748 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -10,7 +10,6 @@ package org.elasticsearch.rest.action.search; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; @@ -100,12 +99,10 @@ public Set supportedCapabilities() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - SearchRequest searchRequest; - if (request.hasParam("min_compatible_shard_node")) { - searchRequest = new SearchRequest(Version.fromString(request.param("min_compatible_shard_node"))); - } else { - searchRequest = new SearchRequest(); - } + SearchRequest searchRequest = new SearchRequest(); + // access the BwC param, but just drop it + // this might be set by old clients + request.param("min_compatible_shard_node"); /* * We have to pull out the call to `source().size(size)` because * _update_by_query and _delete_by_query uses this same parsing diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 31739850e2d35..2c6be01c851e4 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ 
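The RestSearchAction hunk above pairs with the REST layer's strictness: BaseRestHandler rejects any request whose parameters were never consumed, so simply deleting the min_compatible_shard_node handling would make requests from older clients fail outright. Reading the parameter and ignoring the value keeps them accepted, condensed here from the hunk above:

    SearchRequest searchRequest = new SearchRequest();
    // access the BwC param, but just drop it; this might be set by old clients
    request.param("min_compatible_shard_node"); // marks the parameter consumed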
b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.bulk.IndexDocFailureStoreStatus; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.action.search.VersionMismatchException; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.client.internal.AbstractClientHeadersTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -816,7 +815,7 @@ public void testIds() { ids.put(158, PeerRecoveryNotFound.class); ids.put(159, NodeHealthCheckFailureException.class); ids.put(160, NoSeedNodeLeftException.class); - ids.put(161, VersionMismatchException.class); + ids.put(161, null); // was org.elasticsearch.action.search.VersionMismatchException.class ids.put(162, ElasticsearchAuthenticationProcessingError.class); ids.put(163, RepositoryConflictException.class); ids.put(164, VersionConflictException.class); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index b63c88f623e21..d279fa5030a8c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -13,26 +13,18 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.RecoverySource; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; @@ -47,24 +39,17 @@ import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.transport.Transport; -import java.util.ArrayList; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static java.util.Collections.singletonList; -import static 
org.elasticsearch.test.VersionUtils.allVersions; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; public class SearchQueryThenFetchAsyncActionTests extends ESTestCase { @@ -257,484 +242,4 @@ public void run() { assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0)); } } - - public void testMinimumVersionSameAsNewVersion() throws Exception { - var newVersion = VersionInformation.CURRENT; - var oldVersion = new VersionInformation( - VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.randomCompatibleVersion(random()) - ); - testMixedVersionsShardsSearch(newVersion, oldVersion, newVersion.nodeVersion()); - } - - public void testMinimumVersionBetweenNewAndOldVersion() throws Exception { - var oldVersion = new VersionInformation( - VersionUtils.getFirstVersion(), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.randomCompatibleVersion(random()) - ); - - var newVersion = new VersionInformation( - VersionUtils.maxCompatibleVersion(VersionUtils.getFirstVersion()), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersion.current() - ); - - var minVersion = VersionUtils.randomVersionBetween( - random(), - allVersions().get(allVersions().indexOf(oldVersion.nodeVersion()) + 1), - newVersion.nodeVersion() - ); - - testMixedVersionsShardsSearch(newVersion, oldVersion, minVersion); - } - - private void testMixedVersionsShardsSearch(VersionInformation oldVersion, VersionInformation newVersion, Version minVersion) - throws Exception { - final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( - 0, - System.nanoTime(), - System::nanoTime - ); - int numConcurrent = randomIntBetween(1, 4); - - Map lookup = new ConcurrentHashMap<>(); - DiscoveryNode newVersionNode = DiscoveryNodeUtils.builder("node1").version(newVersion).build(); - DiscoveryNode oldVersionNode = DiscoveryNodeUtils.builder("node2").version(oldVersion).build(); - lookup.put("node1", new SearchAsyncActionTests.MockConnection(newVersionNode)); - lookup.put("node2", new SearchAsyncActionTests.MockConnection(oldVersionNode)); - - OriginalIndices idx = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS); - ArrayList list = new ArrayList<>(); - ShardRouting routingNewVersionShard = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 0), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - routingNewVersionShard = routingNewVersionShard.initialize(newVersionNode.getId(), "p0", 0); - routingNewVersionShard.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 0), singletonList(routingNewVersionShard), idx)); - - ShardRouting routingOldVersionShard = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 1), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - routingOldVersionShard = routingOldVersionShard.initialize(oldVersionNode.getId(), "p1", 0); - routingOldVersionShard.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 1), singletonList(routingOldVersionShard), idx)); - - 
GroupShardsIterator shardsIter = new GroupShardsIterator<>(list); - final SearchRequest searchRequest = new SearchRequest(minVersion); - searchRequest.setMaxConcurrentShardRequests(numConcurrent); - searchRequest.setBatchedReduceSize(2); - searchRequest.source(new SearchSourceBuilder().size(1)); - searchRequest.allowPartialSearchResults(false); - - SearchTransportService searchTransportService = new SearchTransportService(null, null, null); - SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); - try ( - QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( - searchRequest, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - controller, - task::isCancelled, - task.getProgressListener(), - shardsIter.size(), - exc -> {} - ) - ) { - final List responses = new ArrayList<>(); - SearchQueryThenFetchAsyncAction newSearchAsyncAction = new SearchQueryThenFetchAsyncAction( - logger, - null, - searchTransportService, - (clusterAlias, node) -> lookup.get(node), - Collections.singletonMap("_na_", AliasFilter.EMPTY), - Collections.emptyMap(), - EsExecutors.DIRECT_EXECUTOR_SERVICE, - resultConsumer, - searchRequest, - new ActionListener<>() { - @Override - public void onFailure(Exception e) { - responses.add(e); - } - - public void onResponse(SearchResponse response) { - responses.add(response); - } - - ; - }, - shardsIter, - timeProvider, - new ClusterState.Builder(new ClusterName("test")).build(), - task, - SearchResponse.Clusters.EMPTY, - null - ); - - newSearchAsyncAction.start(); - assertThat(responses, hasSize(1)); - assertThat(responses.get(0), instanceOf(SearchPhaseExecutionException.class)); - SearchPhaseExecutionException e = (SearchPhaseExecutionException) responses.get(0); - assertThat(e.getCause(), instanceOf(VersionMismatchException.class)); - assertThat( - e.getCause().getMessage(), - equalTo("One of the shards is incompatible with the required minimum version [" + minVersion + "]") - ); - } - } - - public void testMinimumVersionSameAsOldVersion() throws Exception { - var newVersion = VersionInformation.CURRENT; - var oldVersion = new VersionInformation( - VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.randomCompatibleVersion(random()) - ); - Version minVersion = oldVersion.nodeVersion(); - - final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( - 0, - System.nanoTime(), - System::nanoTime - ); - AtomicInteger successfulOps = new AtomicInteger(); - - Map lookup = new ConcurrentHashMap<>(); - DiscoveryNode newVersionNode = DiscoveryNodeUtils.builder("node1").version(newVersion).build(); - DiscoveryNode oldVersionNode = DiscoveryNodeUtils.builder("node2").version(oldVersion).build(); - lookup.put("node1", new SearchAsyncActionTests.MockConnection(newVersionNode)); - lookup.put("node2", new SearchAsyncActionTests.MockConnection(oldVersionNode)); - - OriginalIndices idx = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS); - ArrayList list = new ArrayList<>(); - ShardRouting routingNewVersionShard = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 0), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, 
"foobar"), - ShardRouting.Role.DEFAULT - ); - routingNewVersionShard = routingNewVersionShard.initialize(newVersionNode.getId(), "p0", 0); - routingNewVersionShard.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 0), singletonList(routingNewVersionShard), idx)); - - ShardRouting routingOldVersionShard = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 1), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - routingOldVersionShard = routingOldVersionShard.initialize(oldVersionNode.getId(), "p1", 0); - routingOldVersionShard.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 1), singletonList(routingOldVersionShard), idx)); - - GroupShardsIterator shardsIter = new GroupShardsIterator<>(list); - final SearchRequest searchRequest = new SearchRequest(minVersion); - searchRequest.allowPartialSearchResults(false); - searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp"))); - - SearchTransportService searchTransportService = new SearchTransportService(null, null, null) { - @Override - public void sendExecuteQuery( - Transport.Connection connection, - ShardSearchRequest request, - SearchTask task, - ActionListener listener - ) { - int shardId = request.shardId().id(); - QuerySearchResult queryResult = new QuerySearchResult( - new ShardSearchContextId("N/A", 123), - new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), - null - ); - try { - SortField sortField = new SortField("timestamp", SortField.Type.LONG); - if (shardId == 0) { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldDocs( - new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, - new SortField[] { sortField } - ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); - } else if (shardId == 1) { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldDocs( - new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, - new SortField[] { sortField } - ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); - } - queryResult.from(0); - queryResult.size(1); - successfulOps.incrementAndGet(); - queryResult.incRef(); - new Thread(() -> ActionListener.respondAndRelease(listener, queryResult)).start(); - } finally { - queryResult.decRef(); - } - } - }; - SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); - try ( - QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( - searchRequest, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - controller, - task::isCancelled, - task.getProgressListener(), - shardsIter.size(), - exc -> {} - ) - ) { - CountDownLatch latch = new CountDownLatch(1); - SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( - logger, - null, - searchTransportService, - (clusterAlias, node) -> lookup.get(node), - Collections.singletonMap("_na_", AliasFilter.EMPTY), - Collections.emptyMap(), - EsExecutors.DIRECT_EXECUTOR_SERVICE, - resultConsumer, - searchRequest, - null, - 
shardsIter, - timeProvider, - new ClusterState.Builder(new ClusterName("test")).build(), - task, - SearchResponse.Clusters.EMPTY, - null - ) { - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - latch.countDown(); - } - }; - } - }; - - action.start(); - latch.await(); - assertThat(successfulOps.get(), equalTo(2)); - SearchPhaseController.ReducedQueryPhase phase = action.results.reduce(); - assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1)); - assertThat(phase.totalHits().value, equalTo(2L)); - assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); - } - } - - public void testMinimumVersionShardDuringPhaseExecution() throws Exception { - var newVersion = VersionInformation.CURRENT; - var oldVersion = new VersionInformation( - VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.randomCompatibleVersion(random()) - ); - - Version minVersion = newVersion.nodeVersion(); - - final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( - 0, - System.nanoTime(), - System::nanoTime - ); - AtomicInteger successfulOps = new AtomicInteger(); - - Map lookup = new ConcurrentHashMap<>(); - DiscoveryNode newVersionNode1 = DiscoveryNodeUtils.builder("node1").version(newVersion).build(); - DiscoveryNode newVersionNode2 = DiscoveryNodeUtils.builder("node2").version(newVersion).build(); - DiscoveryNode oldVersionNode = DiscoveryNodeUtils.builder("node3").version(oldVersion).build(); - lookup.put("node1", new SearchAsyncActionTests.MockConnection(newVersionNode1)); - lookup.put("node2", new SearchAsyncActionTests.MockConnection(newVersionNode2)); - lookup.put("node3", new SearchAsyncActionTests.MockConnection(oldVersionNode)); - - OriginalIndices idx = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS); - ArrayList list = new ArrayList<>(); - ShardRouting routingNewVersionShard1 = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 0), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - routingNewVersionShard1 = routingNewVersionShard1.initialize(newVersionNode1.getId(), "p0", 0); - routingNewVersionShard1.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 0), singletonList(routingNewVersionShard1), idx)); - - ShardRouting routingNewVersionShard2 = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 1), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - routingNewVersionShard2 = routingNewVersionShard2.initialize(newVersionNode2.getId(), "p1", 0); - routingNewVersionShard2.started(); - list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 1), singletonList(routingNewVersionShard2), idx)); - - GroupShardsIterator shardsIter = new GroupShardsIterator<>(list); - final SearchRequest searchRequest = new SearchRequest(minVersion); - searchRequest.allowPartialSearchResults(false); - searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp"))); - - SearchTransportService searchTransportService = new SearchTransportService(null, null, 
null) { - @Override - public void sendExecuteQuery( - Transport.Connection connection, - ShardSearchRequest request, - SearchTask task, - ActionListener listener - ) { - int shardId = request.shardId().id(); - QuerySearchResult queryResult = new QuerySearchResult( - new ShardSearchContextId("N/A", 123), - new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), - null - ); - try { - SortField sortField = new SortField("timestamp", SortField.Type.LONG); - if (shardId == 0) { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldDocs( - new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, - new SortField[] { sortField } - ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); - } else if (shardId == 1) { - queryResult.topDocs( - new TopDocsAndMaxScore( - new TopFieldDocs( - new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), - new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, - new SortField[] { sortField } - ), - Float.NaN - ), - new DocValueFormat[] { DocValueFormat.RAW } - ); - } - queryResult.from(0); - queryResult.size(1); - successfulOps.incrementAndGet(); - queryResult.incRef(); - new Thread(() -> ActionListener.respondAndRelease(listener, queryResult)).start(); - } finally { - queryResult.decRef(); - } - } - }; - SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); - - CountDownLatch latch = new CountDownLatch(1); - try ( - QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( - searchRequest, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - controller, - task::isCancelled, - task.getProgressListener(), - shardsIter.size(), - exc -> {} - ) - ) { - SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( - logger, - null, - searchTransportService, - (clusterAlias, node) -> lookup.get(node), - Collections.singletonMap("_na_", AliasFilter.EMPTY), - Collections.emptyMap(), - EsExecutors.DIRECT_EXECUTOR_SERVICE, - resultConsumer, - searchRequest, - null, - shardsIter, - timeProvider, - new ClusterState.Builder(new ClusterName("test")).build(), - task, - SearchResponse.Clusters.EMPTY, - null - ) { - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase("test") { - @Override - public void run() { - latch.countDown(); - } - }; - } - }; - ShardRouting routingOldVersionShard = ShardRouting.newUnassigned( - new ShardId(new Index("idx", "_na_"), 2), - true, - RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), - ShardRouting.Role.DEFAULT - ); - SearchShardIterator shardIt = new SearchShardIterator( - null, - new ShardId(new Index("idx", "_na_"), 2), - singletonList(routingOldVersionShard), - idx - ); - routingOldVersionShard = routingOldVersionShard.initialize(oldVersionNode.getId(), "p2", 0); - routingOldVersionShard.started(); - action.start(); - latch.await(); - assertThat(successfulOps.get(), equalTo(2)); - SearchPhaseController.ReducedQueryPhase phase = action.results.reduce(); - assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1)); - assertThat(phase.totalHits().value, equalTo(2L)); - 
assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)); - - SearchShardTarget searchShardTarget = new SearchShardTarget("node3", shardIt.shardId(), null); - SearchActionListener listener = new SearchActionListener(searchShardTarget, 0) { - @Override - public void onFailure(Exception e) {} - - @Override - protected void innerOnResponse(SearchPhaseResult response) {} - }; - Exception e = expectThrows( - VersionMismatchException.class, - () -> action.executePhaseOnShard(shardIt, searchShardTarget, listener) - ); - assertThat(e.getMessage(), equalTo("One of the shards is incompatible with the required minimum version [" + minVersion + "]")); - } - } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 23c956e6e52f2..3079b6d4b0371 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -438,33 +437,6 @@ public QueryBuilder topDocsQuery() { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("using [point in time] is not allowed in a scroll context", validationErrors.validationErrors().get(0)); } - { - // Minimum compatible shard node version with ccs_minimize_roundtrips - SearchRequest searchRequest; - boolean isMinCompatibleShardVersion = randomBoolean(); - if (isMinCompatibleShardVersion) { - searchRequest = new SearchRequest(VersionUtils.randomVersion(random())); - } else { - searchRequest = new SearchRequest(); - } - - boolean shouldSetCcsMinimizeRoundtrips = randomBoolean(); - if (shouldSetCcsMinimizeRoundtrips) { - searchRequest.setCcsMinimizeRoundtrips(true); - } - ActionRequestValidationException validationErrors = searchRequest.validate(); - - if (isMinCompatibleShardVersion && shouldSetCcsMinimizeRoundtrips) { - assertNotNull(validationErrors); - assertEquals(1, validationErrors.validationErrors().size()); - assertEquals( - "[ccs_minimize_roundtrips] cannot be [true] when setting a minimum compatible shard version", - validationErrors.validationErrors().get(0) - ); - } else { - assertNull(validationErrors); - } - } { SearchRequest searchRequest = new SearchRequest().source( new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100)) diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java index 51f92bcda7da4..c0141da2432ce 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java @@ -59,7 +59,6 @@ import static org.elasticsearch.action.ActionListener.wrap; import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; -import static org.elasticsearch.xpack.ql.plugin.TransportActionUtils.executeRequestWithRetryAttempt; public final class TransportEqlSearchAction extends HandledTransportAction implements @@ -236,22 
+235,11 @@ public static void operation( new TaskId(nodeId, task.getId()), task ); - executeRequestWithRetryAttempt( - clusterService, - listener::onFailure, - onFailure -> planExecutor.eql( - cfg, - request.query(), - params, - wrap(r -> listener.onResponse(createResponse(r, task.getExecutionId())), onFailure) - ), - node -> transportService.sendRequest( - node, - EqlSearchAction.NAME, - request, - new ActionListenerResponseHandler<>(listener, EqlSearchResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) - ), - log + planExecutor.eql( + cfg, + request.query(), + params, + wrap(r -> listener.onResponse(createResponse(r, task.getExecutionId())), listener::onFailure) ); } } diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java index a6c369734f0e3..a79424b8b7d59 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.fleet.rest; -import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -57,12 +56,11 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - SearchRequest searchRequest; - if (request.hasParam("min_compatible_shard_node")) { - searchRequest = new SearchRequest(Version.fromString(request.param("min_compatible_shard_node"))); - } else { - searchRequest = new SearchRequest(); - } + SearchRequest searchRequest = new SearchRequest(); + // access the BwC param, but just drop it + // this might be set by old clients + request.param("min_compatible_shard_node"); + String[] indices = searchRequest.indices(); if (indices.length > 1) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plugin/TransportActionUtils.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plugin/TransportActionUtils.java deleted file mode 100644 index 6431c83ee1c2e..0000000000000 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plugin/TransportActionUtils.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.ql.plugin; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.VersionMismatchException; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.xpack.ql.util.Holder; - -import java.util.function.Consumer; - -public final class TransportActionUtils { - - /** - * Execute a *QL request and re-try it in case the first request failed with a {@code VersionMismatchException} - * - * @param clusterService The cluster service instance - * @param onFailure On-failure handler in case the request doesn't fail with a {@code VersionMismatchException} - * @param queryRunner *QL query execution code, typically a Plan Executor running the query - * @param retryRequest Re-trial logic - * @param log Log4j logger - */ - public static void executeRequestWithRetryAttempt( - ClusterService clusterService, - Consumer onFailure, - Consumer> queryRunner, - Consumer retryRequest, - Logger log - ) { - - Holder retrySecondTime = new Holder(false); - queryRunner.accept(e -> { - // the search request likely ran on nodes with different versions of ES - // we will retry on a node with an older version that should generate a backwards compatible _search request - if (e instanceof SearchPhaseExecutionException - && ((SearchPhaseExecutionException) e).getCause() instanceof VersionMismatchException) { - if (log.isDebugEnabled()) { - log.debug("Caught exception type [{}] with cause [{}].", e.getClass().getName(), e.getCause()); - } - DiscoveryNode localNode = clusterService.state().nodes().getLocalNode(); - DiscoveryNode candidateNode = null; - for (DiscoveryNode node : clusterService.state().nodes()) { - // find the first node that's older than the current node - if (node != localNode && node.getVersion().before(localNode.getVersion())) { - candidateNode = node; - break; - } - } - if (candidateNode != null) { - if (log.isDebugEnabled()) { - log.debug( - "Candidate node to resend the request to: address [{}], id [{}], name [{}], version [{}]", - candidateNode.getAddress(), - candidateNode.getId(), - candidateNode.getName(), - candidateNode.getVersion() - ); - } - // re-send the request to the older node - retryRequest.accept(candidateNode); - } else { - retrySecondTime.set(true); - } - } else { - onFailure.accept(e); - } - }); - if (retrySecondTime.get()) { - if (log.isDebugEnabled()) { - log.debug("No candidate node found, likely all were upgraded in the meantime. 
Re-trying the original request."); - } - queryRunner.accept(onFailure); - } - } -} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index b2ce91140de76..06293df4f4559 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.search.ClosePointInTimeRequest; @@ -101,7 +100,6 @@ import static org.elasticsearch.action.ActionListener.wrap; import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.LENIENT; import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.NONE; -import static org.elasticsearch.xpack.sql.proto.VersionCompatibility.INTRODUCING_UNSIGNED_LONG; // TODO: add retry/back-off public class Querier { @@ -202,7 +200,7 @@ public static void closePointInTime(Client client, BytesReference pointInTimeId, public static SearchRequest prepareRequest(SearchSourceBuilder source, SqlConfiguration cfg, boolean includeFrozen, String... indices) { source.timeout(cfg.requestTimeout()); - SearchRequest searchRequest = new SearchRequest(Version.fromId(INTRODUCING_UNSIGNED_LONG.id)); + SearchRequest searchRequest = new SearchRequest(); if (source.pointInTimeBuilder() == null) { searchRequest.indices(indices); searchRequest.indicesOptions( diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 7a76ffe8eb109..41fa66ae36aeb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; @@ -57,7 +56,6 @@ import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.ActionListener.wrap; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; -import static org.elasticsearch.xpack.ql.plugin.TransportActionUtils.executeRequestWithRetryAttempt; import static org.elasticsearch.xpack.sql.plugin.Transports.clusterName; import static org.elasticsearch.xpack.sql.plugin.Transports.username; import static org.elasticsearch.xpack.sql.proto.Mode.CLI; @@ -161,22 +159,11 @@ public static void operation( ); if (Strings.hasText(request.cursor()) == false) { - executeRequestWithRetryAttempt( - clusterService, - listener::onFailure, - onFailure -> planExecutor.sql( - cfg, - request.query(), - request.params(), - wrap(p -> listener.onResponse(createResponseWithSchema(request, p, task)), 
onFailure) - ), - node -> transportService.sendRequest( - node, - SqlQueryAction.NAME, - request, - new ActionListenerResponseHandler<>(listener, SqlQueryResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE) - ), - log + planExecutor.sql( + cfg, + request.query(), + request.params(), + wrap(p -> listener.onResponse(createResponseWithSchema(request, p, task)), listener::onFailure) ); } else { Tuple decoded = Cursors.decodeFromStringWithZone(request.cursor(), planExecutor.writeableRegistry()); From 1a611bd2e3296a19c3b57216084d346577a30ada Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Wed, 16 Oct 2024 14:34:52 +0200 Subject: [PATCH 10/31] Fixes flaky ST_CENTROID_AGG tests (#114892) Even with Kahan summation, we were occasionally getting floating point differences at the 14th decimal point, well beyond anything a GIS use case would care about. --- muted-tests.yml | 6 -- .../aggregate/SpatialCentroidTests.java | 56 +++++++++++++++++-- 2 files changed, 50 insertions(+), 12 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 6817011d399b2..a9a4cdcbe079b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -94,12 +94,6 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/rest-api/watcher/put-watch/line_120} issue: https://github.com/elastic/elasticsearch/issues/99517 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests - method: "testAggregateIntermediate {TestCase= #2}" - issue: https://github.com/elastic/elasticsearch/issues/112461 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroidTests - method: testAggregateIntermediate {TestCase=} - issue: https://github.com/elastic/elasticsearch/issues/112463 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testRejection issue: https://github.com/elastic/elasticsearch/issues/112406 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidTests.java index b79252c694084..15ea029a05554 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroidTests.java @@ -22,13 +22,15 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.BaseMatcher; +import org.hamcrest.Description; +import org.hamcrest.Matcher; -import java.nio.ByteOrder; import java.util.List; import java.util.function.Supplier; import java.util.stream.Stream; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.closeTo; @FunctionName("st_centroid_agg") public class SpatialCentroidTests extends AbstractAggregationTestCase { @@ -74,16 +76,58 @@ private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier count++; } - var expected = new BytesRef( - WellKnownBinary.toWKB(new Point(xSum.value() / count, ySum.value() / count), ByteOrder.LITTLE_ENDIAN) - ); + var expectedX = xSum.value() / count; + var expectedY = ySum.value() / count; return new TestCaseSupplier.TestCase( List.of(fieldTypedData), "SpatialCentroid[field=Attribute[channel=0]]", 
fieldTypedData.type(), - equalTo(expected) + centroidMatches(expectedX, expectedY, 1e-14) ); }); } + + @SuppressWarnings("SameParameterValue") + private static Matcher centroidMatches(double x, double y, double error) { + return new TestCentroidMatcher(x, y, error); + } + + private static class TestCentroidMatcher extends BaseMatcher { + private final double x; + private final double y; + private final Matcher mx; + private final Matcher my; + + private TestCentroidMatcher(double x, double y, double error) { + this.x = x; + this.y = y; + this.mx = closeTo(x, error); + this.my = closeTo(y, error); + } + + @Override + public boolean matches(Object item) { + if (item instanceof BytesRef wkb) { + var point = (Point) WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); + return mx.matches(point.getX()) && my.matches(point.getY()); + } + return false; + } + + @Override + public void describeMismatch(Object item, Description description) { + if (item instanceof BytesRef wkb) { + var point = (Point) WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); + description.appendText("was ").appendValue(point); + } else { + description.appendText("was ").appendValue(item); + } + } + + @Override + public void describeTo(Description description) { + description.appendValue(" POINT (" + x + " " + y + ")"); + } + } } From 1c0e29294d418c466c45530617946d831f6517dc Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Wed, 16 Oct 2024 14:36:24 +0200 Subject: [PATCH 11/31] Fix ST_CENTROID_AGG when no records are aggregated (#114888) This was returning an invalid result `POINT(NaN NaN)` and now instead returns `null`. --- docs/changelog/114888.yaml | 6 +++ .../spatial/CentroidPointAggregator.java | 14 ++++--- .../src/main/resources/spatial.csv-spec | 41 ++++++++++++++++++- .../xpack/esql/action/EsqlCapabilities.java | 5 +++ 4 files changed, 59 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/114888.yaml diff --git a/docs/changelog/114888.yaml b/docs/changelog/114888.yaml new file mode 100644 index 0000000000000..6b99eb82d10f3 --- /dev/null +++ b/docs/changelog/114888.yaml @@ -0,0 +1,6 @@ +pr: 114888 +summary: Fix ST_CENTROID_AGG when no records are aggregated +area: ES|QL +type: bug +issues: + - 106025 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/CentroidPointAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/CentroidPointAggregator.java index 1fc2430393c98..c66c960dd8a99 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/CentroidPointAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/CentroidPointAggregator.java @@ -58,7 +58,7 @@ public static void evaluateIntermediate(CentroidState state, DriverContext drive } public static Block evaluateFinal(CentroidState state, DriverContext driverContext) { - return driverContext.blockFactory().newConstantBytesRefBlockWith(state.encodeCentroidResult(), 1); + return state.toBlock(driverContext.blockFactory()); } public static void combineStates(GroupingCentroidState current, int groupId, GroupingCentroidState state, int statePosition) { @@ -181,10 +181,14 @@ public void add(double x, double dx, double y, double dy, long count) { this.count += count; } - protected BytesRef encodeCentroidResult() { - double x = xSum.value() / count; - double y = ySum.value() / count; - return 
encode(x, y); + protected Block toBlock(BlockFactory blockFactory) { + if (count > 0) { + double x = xSum.value() / count; + double y = ySum.value() / count; + return blockFactory.newConstantBytesRefBlockWith(encode(x, y), 1); + } else { + return blockFactory.newConstantNullBlock(1); + } } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index c5ca405005447..c1c4538c7393d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -616,6 +616,42 @@ location:geo_point | city_location:geo_point | count:long POINT (0 0) | POINT (0 0) | 1 ; +airportCityLocationPointIntersectionCentroidGroups +required_capability: st_intersects + +FROM airports_mp +| WHERE ST_INTERSECTS(location, city_location) +| STATS location=ST_CENTROID_AGG(location), city_location=ST_CENTROID_AGG(city_location), count=COUNT() BY country +; + +location:geo_point | city_location:geo_point | count:long | country:k +POINT (0 0) | POINT (0 0) | 1 | Atlantis +; + +airportCityLocationPointIntersectionNullCentroid +required_capability: st_intersects +required_capability: spatial_centroid_no_records + +FROM airports +| WHERE ST_INTERSECTS(location, city_location) +| STATS location=ST_CENTROID_AGG(location), city_location=ST_CENTROID_AGG(city_location), count=COUNT() +; + +location:geo_point | city_location:geo_point | count:long +null | null | 0 +; + +airportCityLocationPointIntersectionNullCentroidGroups +required_capability: st_intersects + +FROM airports +| WHERE ST_INTERSECTS(location, city_location) +| STATS location=ST_CENTROID_AGG(location), city_location=ST_CENTROID_AGG(city_location), count=COUNT() BY country +; + +location:geo_point | city_location:geo_point | count:long | country:k +; + ############################################### # Tests for ST_DISJOINT on GEO_POINT type @@ -1948,14 +1984,15 @@ wkt:keyword | pt:cartesian_point cartesianCentroidFromAirportsAfterPointContainsPolygonPredicate required_capability: st_contains_within +required_capability: spatial_centroid_no_records FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() ; -centroid:cartesian_point | count:long -POINT (NaN NaN) | 0 +centroid:cartesian_point | count:long +null | 0 ; cartesianPointContainsPolygonPredicate diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 842501744979c..18ebbe6d898af 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -180,6 +180,11 @@ public enum Cap { */ SPATIAL_DISTANCE_PUSHDOWN_ENHANCEMENTS, + /** + * Fix for spatial centroid when no records are found. 
+ */ + SPATIAL_CENTROID_NO_RECORDS, + /** * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input * https://github.com/elastic/elasticsearch/issues/110184 From 7ce484d2ad9282c93e41d390f165b00b408c2212 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:44:43 +1100 Subject: [PATCH 12/31] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} #114902 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a9a4cdcbe079b..df4c964340993 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -385,6 +385,9 @@ tests: - class: org.elasticsearch.packaging.test.EnrollmentProcessTests method: test20DockerAutoFormCluster issue: https://github.com/elastic/elasticsearch/issues/114885 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} + issue: https://github.com/elastic/elasticsearch/issues/114902 # Examples: # From 8935aad6dbee2fcaf62225fcee4334c8a0e6d674 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 17 Oct 2024 00:00:29 +1100 Subject: [PATCH 13/31] Mute org.elasticsearch.xpack.enrich.EnrichRestIT test {p0=enrich/40_synthetic_source/enrich documents over _bulk} #114825 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index df4c964340993..eb070b59f5c90 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -388,6 +388,9 @@ tests: - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} issue: https://github.com/elastic/elasticsearch/issues/114902 +- class: org.elasticsearch.xpack.enrich.EnrichRestIT + method: test {p0=enrich/40_synthetic_source/enrich documents over _bulk} + issue: https://github.com/elastic/elasticsearch/issues/114825 # Examples: # From 8ae5ca468df88049ceeb6c8eda538e4131a325e5 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 16 Oct 2024 16:13:23 +0300 Subject: [PATCH 14/31] Reset array scope tracking for nested objects (#114891) * Reset array scope tracking for nested objects * update * update * update --- muted-tests.yml | 3 -- .../index/mapper/DocumentParserContext.java | 25 ++++++++++------ .../mapper/IgnoredSourceFieldMapperTests.java | 30 +++++++++++++++++++ 3 files changed, 46 insertions(+), 12 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index eb070b59f5c90..2a3d4eac6d358 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -376,9 +376,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/114839 - class: org.elasticsearch.license.LicensingTests issue: https://github.com/elastic/elasticsearch/issues/114865 -- class: org.elasticsearch.datastreams.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT - method: testTermsQuery - issue: https://github.com/elastic/elasticsearch/issues/114873 - class: org.elasticsearch.xpack.enrich.EnrichIT method: testDeleteIsCaseSensitive issue: https://github.com/elastic/elasticsearch/issues/114840 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 2eec14bd1a8d6..ef87ce52fbabf 100644 
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -111,7 +111,7 @@ public int get() { private final Set ignoredFields; private final List ignoredFieldValues; private final List ignoredFieldsMissingValues; - private final boolean inArrayScopeEnabled; + private boolean inArrayScopeEnabled; private boolean inArrayScope; private final Map> dynamicMappers; @@ -376,13 +376,14 @@ public final Collection getIgnoredFieldsMiss * Applies to synthetic source only. */ public final DocumentParserContext maybeCloneForArray(Mapper mapper) throws IOException { - if (canAddIgnoredField() && mapper instanceof ObjectMapper && inArrayScopeEnabled) { - boolean isNested = mapper instanceof NestedObjectMapper; - if ((inArrayScope == false && isNested == false) || (inArrayScope && isNested)) { - DocumentParserContext subcontext = switchParser(parser()); - subcontext.inArrayScope = inArrayScope == false; - return subcontext; - } + if (canAddIgnoredField() + && mapper instanceof ObjectMapper + && mapper instanceof NestedObjectMapper == false + && inArrayScope == false + && inArrayScopeEnabled) { + DocumentParserContext subcontext = switchParser(parser()); + subcontext.inArrayScope = true; + return subcontext; } return this; } @@ -709,12 +710,18 @@ public final DocumentParserContext createNestedContext(NestedObjectMapper nested * Return a new context that has the provided document as the current document. */ public final DocumentParserContext switchDoc(final LuceneDocument document) { - return new Wrapper(this.parent, this) { + DocumentParserContext cloned = new Wrapper(this.parent, this) { @Override public LuceneDocument doc() { return document; } }; + // Disable tracking array scopes for ignored source, as it would be added to the parent doc. + // Nested documents are added to preserve object structure within arrays of objects, so the use + // of ignored source for arrays inside them should be mostly redundant. 
+ cloned.inArrayScope = false; + cloned.inArrayScopeEnabled = false; + return cloned; } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 5eac5acdca286..934744ef3ef96 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -932,6 +932,36 @@ public void testConflictingFieldNameAfterArray() throws IOException { {"path":{"id":0.1,"to":{"id":[1,20,3,10]}}}""", syntheticSource); } + public void testArrayWithNestedObjects() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().startArray("to"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray().endObject(); + b.startObject().startObject("to").field("id", "0.1").endObject().endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":{"to":[{"id":[1,20,3]},{"id":10},{"id":0}]}}""", syntheticSource); + } + public void testArrayWithinArray() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("path"); From 58b588cbcd470e1a1d7f202a82694034faddf75d Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Wed, 16 Oct 2024 15:16:29 +0200 Subject: [PATCH 15/31] ESQL: adapt to new range in ToDatetimeTests (#114605) Two tests shared the same name in `ToDatetimeTests`, so that needed fixing. But then also the ranges in the masked test needed adjusting after the change that added the masking test. 
Fixes #108093 --- .../function/scalar/convert/ToDatetimeTests.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 7799c3c756f23..2852b92ba156e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -134,9 +134,9 @@ public static Iterable parameters() { "ToDatetimeFromStringEvaluator[field=" + read + "]", List.of( new TestCaseSupplier.TypedDataSupplier( - "", - // millis before "0001-01-01T00:00:00.000Z" - () -> new BytesRef(randomDateString(Long.MIN_VALUE, -62135596800001L)), + "", + // millis before "-9999-12-31T23:59:59.999Z" + () -> new BytesRef(randomDateString(Long.MIN_VALUE, -377736739200000L)), DataType.KEYWORD ) ), @@ -154,8 +154,8 @@ public static Iterable parameters() { "ToDatetimeFromStringEvaluator[field=" + read + "]", List.of( new TestCaseSupplier.TypedDataSupplier( - "", - // millis before "0001-01-01T00:00:00.000Z" + "", + // millis after "9999-12-31T23:59:59.999Z" () -> new BytesRef(randomDateString(253402300800000L, Long.MAX_VALUE)), DataType.KEYWORD ) From 0cd306f34c77d9d35ab3d2b054980334aa1f6e21 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:25:55 +0200 Subject: [PATCH 16/31] Fix setOnce in EmbeddingRequestChunker (#114900) --- .../xpack/inference/chunking/EmbeddingRequestChunker.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java index 3ae8dc0550391..c5897f32d6eb8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunker.java @@ -324,7 +324,7 @@ private ElasticsearchStatusException unexpectedResultTypeException(String got, S public void onFailure(Exception e) { var errorResult = new ErrorChunkedInferenceResults(e); for (var pos : positions) { - errors.setOnce(pos.inputIndex(), errorResult); + errors.set(pos.inputIndex(), errorResult); } if (resultCount.incrementAndGet() == totalNumberOfRequests) { From ccf6ab9ab3ca0fe2157a204e98f34bc8e957bfc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 16 Oct 2024 15:47:13 +0200 Subject: [PATCH 17/31] [DOCS] Adds link to tutorial and API docs to trained model autoscaling. (#114904) --- .../inference/service-elser.asciidoc | 20 +++++++++---------- .../semantic-search-semantic-text.asciidoc | 8 +++++--- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index c7217f38d459b..6afc2a2e3ef65 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -80,12 +80,13 @@ Must be a power of 2. Max allowed value is 32. 
[[inference-example-elser]] ==== ELSER service example -The following example shows how to create an {infer} endpoint called -`my-elser-model` to perform a `sparse_embedding` task type. +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. -The request below will automatically download the ELSER model if it isn't -already downloaded and then deploy the model. +NOTE: If you want to optimize your ELSER endpoint for ingest, set the number of threads to `1` (`"num_threads": 1`). +If you want to optimize your ELSER endpoint for search, set the number of threads to greater than `1`. + +The request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. [source,console] ------------------------------------------------------------ @@ -100,7 +101,6 @@ PUT _inference/sparse_embedding/my-elser-model ------------------------------------------------------------ // TEST[skip:TBD] - Example response: [source,console-result] @@ -130,12 +130,12 @@ If using the Python client, you can set the `timeout` parameter to a higher valu [[inference-example-elser-adaptive-allocation]] ==== Setting adaptive allocation for the ELSER service -The following example shows how to create an {infer} endpoint called -`my-elser-model` to perform a `sparse_embedding` task type and configure -adaptive allocations. +NOTE: For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. +To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. + +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type and configure adaptive allocations. -The request below will automatically download the ELSER model if it isn't -already downloaded and then deploy the model. +The request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. [source,console] ------------------------------------------------------------ diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index dbcfbb1b615f9..60692c19c184a 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -50,7 +50,7 @@ PUT _inference/sparse_embedding/my-elser-endpoint <1> be used and ELSER creates sparse vectors. The `inference_id` is `my-elser-endpoint`. <2> The `elser` service is used in this example. -<3> This setting enables and configures adaptive allocations. +<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. 
[NOTE] @@ -284,6 +284,8 @@ query from the `semantic-embedding` index: [discrete] [[semantic-text-further-examples]] -==== Further examples +==== Further examples and reading -If you want to use `semantic_text` in hybrid search, refer to https://colab.research.google.com/github/elastic/elasticsearch-labs/blob/main/notebooks/search/09-semantic-text.ipynb[this notebook] for a step-by-step guide. \ No newline at end of file +* If you want to use `semantic_text` in hybrid search, refer to https://colab.research.google.com/github/elastic/elasticsearch-labs/blob/main/notebooks/search/09-semantic-text.ipynb[this notebook] for a step-by-step guide. +* For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. +* To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. \ No newline at end of file From ff7ea1073b5dabc2f808cc41f3f981a54a806abf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 17 Oct 2024 00:58:01 +1100 Subject: [PATCH 18/31] Mute org.elasticsearch.xpack.inference.DefaultEndPointsIT testInferDeploysDefaultElser #114913 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2a3d4eac6d358..fb48f9e04d5c4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -388,6 +388,9 @@ tests: - class: org.elasticsearch.xpack.enrich.EnrichRestIT method: test {p0=enrich/40_synthetic_source/enrich documents over _bulk} issue: https://github.com/elastic/elasticsearch/issues/114825 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testInferDeploysDefaultElser + issue: https://github.com/elastic/elasticsearch/issues/114913 # Examples: # From 9bf6e3b0baf4296125f2b8d8ab2726f3a4614e3f Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Wed, 16 Oct 2024 16:12:56 +0200 Subject: [PATCH 19/31] Inject the `host.name` field mapping only if required for `logsdb` index mode (#114573) When the index mode is `logsdb`, we check whether a `host.name` field exists in the index sort settings and inject its mapping only when it does. By default `host.name` is required for sorting in LogsDB. Injecting the `host.name` field only when strictly required reduces the chance of errors at mapping or template composition time. A user who overrides the index sort settings without including a `host.name` field can now do so without finding an automatically injected `host.name` field in the mappings. If users override the sort settings and a `host.name` field is not included, we don't need to inject such a field, since sorting no longer requires it. As a result of this change we have the following: * the user does not provide any index sorting configuration: we are responsible for injecting the default sort fields and their mapping (for `logsdb`) * the user explicitly provides a non-empty index sorting configuration: the user is also responsible for providing correct mappings, and we do not modify index sorting or mappings Note also that all sort settings `index.sort.*` are `final`, which means doing this check once, when mappings are merged at template composition time, is enough. 
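A minimal sketch of the decision this change introduces (the `IndexSettings` and `IndexSortConfig` calls are the real APIs exercised by this patch; the class name and the two `CompressedXContent` parameters are placeholders standing in for the `DEFAULT_*` mapping constants in `IndexMode`):

```java
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.index.IndexSettings;

final class DefaultMappingChoiceSketch {
    // Inject host.name into the default mapping only when the primary index
    // sort (default or user-provided) actually sorts on it; otherwise use the
    // timestamp-only mapping and leave host.name entirely to the user.
    static CompressedXContent choose(
        IndexSettings settings,
        CompressedXContent mappingWithHostName, // stands in for DEFAULT_LOGS_TIMESTAMP_MAPPING_WITH_HOSTNAME
        CompressedXContent timestampOnly        // stands in for DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING
    ) {
        boolean sortsOnHostName = settings != null
            && settings.getIndexSortConfig().hasPrimarySortOnField("host.name");
        return sortsOnHostName ? mappingWithHostName : timestampOnly;
    }
}
```

Because all `index.sort.*` settings are `final`, this decision is stable for the lifetime of the index, so evaluating it once at template composition time is safe.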
--- .../metadata/MetadataCreateIndexService.java | 2 +- .../org/elasticsearch/index/IndexMode.java | 71 +- .../index/LogsIndexModeTests.java | 16 +- .../index/mapper/MapperServiceTestCase.java | 8 +- .../test/30_logsdb_default_mapping.yml | 781 ++++++++++++++++++ .../rest-api-spec/test/20_logs_tests.yml | 24 + 6 files changed, 856 insertions(+), 46 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 7f2c076281735..29720e98a6e7b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1373,7 +1373,7 @@ private static void updateIndexMappingsAndBuildSortOrder( MapperService mapperService = indexService.mapperService(); IndexMode indexMode = indexService.getIndexSettings() != null ? indexService.getIndexSettings().getMode() : IndexMode.STANDARD; List allMappings = new ArrayList<>(); - final CompressedXContent defaultMapping = indexMode.getDefaultMapping(); + final CompressedXContent defaultMapping = indexMode.getDefaultMapping(indexService.getIndexSettings()); if (defaultMapping != null) { allMappings.add(defaultMapping); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 2d9e89223d7a6..5908bc22e21e2 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -75,7 +75,7 @@ public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup ma } @Override - public CompressedXContent getDefaultMapping() { + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { return null; } @@ -171,7 +171,7 @@ public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup ma } @Override - public CompressedXContent getDefaultMapping() { + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { return DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING; } @@ -249,8 +249,10 @@ public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup ma } @Override - public CompressedXContent getDefaultMapping() { - return DEFAULT_LOGS_TIMESTAMP_MAPPING; + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { + return indexSettings != null && indexSettings.getIndexSortConfig().hasPrimarySortOnField(HOST_NAME) + ? 
DEFAULT_LOGS_TIMESTAMP_MAPPING_WITH_HOSTNAME + : DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING; } @Override @@ -308,6 +310,8 @@ public String getDefaultCodec() { } }; + private static final String HOST_NAME = "host.name"; + private static void validateTimeSeriesSettings(Map, Object> settings) { settingRequiresTimeSeries(settings, IndexMetadata.INDEX_ROUTING_PATH); settingRequiresTimeSeries(settings, IndexSettings.TIME_SERIES_START_TIME); @@ -324,48 +328,33 @@ protected static String tsdbMode() { return "[" + IndexSettings.MODE.getKey() + "=time_series]"; } - public static final CompressedXContent DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING; + private static CompressedXContent createDefaultMapping(boolean includeHostName) throws IOException { + return new CompressedXContent((builder, params) -> { + builder.startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject(DataStreamTimestampFieldMapper.NAME) + .field("enabled", true) + .endObject() + .startObject("properties") + .startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH) + .field("type", DateFieldMapper.CONTENT_TYPE) + .endObject(); + + if (includeHostName) { + builder.startObject(HOST_NAME).field("type", KeywordFieldMapper.CONTENT_TYPE).field("ignore_above", 1024).endObject(); + } - static { - try { - DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING = new CompressedXContent( - ((builder, params) -> builder.startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject(DataStreamTimestampFieldMapper.NAME) - .field("enabled", true) - .endObject() - .startObject("properties") - .startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH) - .field("type", DateFieldMapper.CONTENT_TYPE) - .field("ignore_malformed", "false") - .endObject() - .endObject() - .endObject()) - ); - } catch (IOException e) { - throw new AssertionError(e); - } + return builder.endObject().endObject(); + }); } - public static final CompressedXContent DEFAULT_LOGS_TIMESTAMP_MAPPING; + private static final CompressedXContent DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING; + + private static final CompressedXContent DEFAULT_LOGS_TIMESTAMP_MAPPING_WITH_HOSTNAME; static { try { - DEFAULT_LOGS_TIMESTAMP_MAPPING = new CompressedXContent( - ((builder, params) -> builder.startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject(DataStreamTimestampFieldMapper.NAME) - .field("enabled", true) - .endObject() - .startObject("properties") - .startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH) - .field("type", DateFieldMapper.CONTENT_TYPE) - .endObject() - .startObject("host.name") - .field("type", KeywordFieldMapper.CONTENT_TYPE) - .field("ignore_above", 1024) - .endObject() - .endObject() - .endObject()) - ); + DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING = createDefaultMapping(false); + DEFAULT_LOGS_TIMESTAMP_MAPPING_WITH_HOSTNAME = createDefaultMapping(true); } catch (IOException e) { throw new AssertionError(e); } @@ -421,7 +410,7 @@ public String getName() { * Get default mapping for this index or {@code null} if there is none. */ @Nullable - public abstract CompressedXContent getDefaultMapping(); + public abstract CompressedXContent getDefaultMapping(IndexSettings indexSettings); /** * Build the {@link FieldMapper} for {@code _id}. 
diff --git a/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java b/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java index 8a66bb1464a5b..23fc788a89bde 100644 --- a/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/LogsIndexModeTests.java @@ -13,14 +13,24 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class LogsIndexModeTests extends ESTestCase { public void testLogsIndexModeSetting() { assertThat(IndexSettings.MODE.get(buildSettings()), equalTo(IndexMode.LOGSDB)); } - public void testSortField() { + public void testDefaultHostNameSortField() { + final IndexMetadata metadata = IndexSettingsTests.newIndexMeta("test", buildSettings()); + assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); + final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(true)); + assertThat(IndexMode.LOGSDB.getDefaultMapping(settings).string(), containsString("host.name")); + } + + public void testCustomSortField() { final Settings sortSettings = Settings.builder() .put(buildSettings()) .put(IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), "agent_id") @@ -29,7 +39,9 @@ public void testSortField() { assertThat(metadata.getIndexMode(), equalTo(IndexMode.LOGSDB)); final IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); assertThat(settings.getMode(), equalTo(IndexMode.LOGSDB)); - assertThat("agent_id", equalTo(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()))); + assertThat(getIndexSetting(settings, IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey()), equalTo("agent_id")); + assertThat(settings.getIndexSortConfig().hasPrimarySortOnField("host.name"), equalTo(false)); + assertThat(IndexMode.LOGSDB.getDefaultMapping(settings).string(), not(containsString("host"))); } public void testSortMode() { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index a9ee0317ce1ee..8bc2666bcfe3b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -302,8 +302,12 @@ public void onRemoval(ShardId shardId, Accountable accountable) {} mapperMetrics ); - if (applyDefaultMapping && indexSettings.getMode().getDefaultMapping() != null) { - mapperService.merge(null, indexSettings.getMode().getDefaultMapping(), MapperService.MergeReason.MAPPING_UPDATE); + if (applyDefaultMapping && indexSettings.getMode().getDefaultMapping(indexSettings) != null) { + mapperService.merge( + null, + indexSettings.getMode().getDefaultMapping(indexSettings), + MapperService.MergeReason.MAPPING_UPDATE + ); } return mapperService; diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml index 52c500c102cee..3f2bca2e4bcd9 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml +++ 
b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/30_logsdb_default_mapping.yml @@ -280,3 +280,784 @@ create logsdb data stream with timestamp object mapping: - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "composable template [logsdb-index-template] template after composition with component templates [logsdb-mappings] is invalid" } + +--- +create logsdb data stream with custom sorting without host.name: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-prod ] + priority: 10000 + template: + settings: + index: + sort.field: [ agent.id ] + sort.order: [ desc ] + mode: logsdb + mappings: + properties: + agent.id: + type: keyword + host.hostname: + type: keyword + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-http-prod + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-http-prod + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.agent.properties.id.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.hostname.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.type: null } + +--- +create logsdb data stream with custom sorting and host object: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-nginx-prod ] + priority: 10000 + template: + settings: + index: + sort.field: [ host.hostname, host.region ] + sort.order: [ desc, desc ] + mode: logsdb + mappings: + properties: + host: + type: object + properties: + ip: + type: ip + hostname: + type: keyword + region: + type: keyword + name: + type: integer + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-nginx-prod + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-nginx-prod + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.ip.type: ip } + - match: { .$backing_index.mappings.properties.host.properties.hostname.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.region.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.type: integer } # Overrides LogsDB injected + +--- +create logsdb data stream 
with custom sorting and dynamically mapped host.name: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-kafka-qa ] + priority: 10000 + template: + settings: + index: + sort.field: [ "agent.id", "@timestamp" ] + sort.order: [ desc, asc ] + mode: logsdb + mappings: + properties: + agent: + type: object + properties: + name: + type: keyword + id: + type: keyword + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-kafka-qa + - is_true: acknowledged + + - do: + bulk: + index: logs-kafka-qa + refresh: true + body: + - { "create": { } } + - { "@timestamp": "2022-01-01T00:00:00", agent.name: "foo", agent.id: "foo-568", host: { id: "db8fdcf1-b1e2-444b-8c6a-0466c61dcce4" } } + - { "create": { } } + - { "@timestamp": "2022-01-01T00:01:00", agent.name: "bar", agent.id: "foo-309", host: { id: "35e1ed10-961e-46c7-83ea-4109c913a1d6" } } + + - do: + indices.get_data_stream: + name: logs-kafka-qa + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.agent.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.agent.properties.id.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name: null } + - match: { .$backing_index.mappings.properties.host.properties.id.type: text } + +--- +create logsdb data stream with custom sorting and host.name object: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-nginx-qa ] + priority: 10000 + template: + settings: + index: + sort.field: [ "host.name.value", "@timestamp" ] + sort.order: [ desc, desc ] + mode: logsdb + mappings: + properties: + host: + type: object + properties: + name: + type: object + properties: + value: + type: keyword + alias: + type: keyword + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-nginx-qa + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-nginx-qa + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.name.properties.value.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.properties.alias.type: keyword } + +--- +create logsdb data stream with default sorting on malformed host.name: + - skip: + features: [ 
"allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-win-prod ] + priority: 10000 + template: + settings: + index: + mode: logsdb + mappings: + properties: + agent: + type: object + properties: + name: + type: keyword + id: + type: keyword + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-win-prod + - is_true: acknowledged + + - do: + bulk: + index: logs-win-prod + refresh: true + body: + - { "create": { } } + - { "@timestamp": "2022-01-01T00:00:00", agent.name: "foo", agent.id: "foo-568", host: { name: 192.168.10.12, id: "e70e91cd-bb3f-43f0-909c-2748e7fdfd54" } } + - { "create": { } } + - { "@timestamp": "2022-01-01T00:01:00", agent.name: "bar", agent.id: "foo-309", host: { name: 192.168.15.17, id: "ad2e3edb-2c4b-4f12-83dd-255691ed614c" } } + + - do: + indices.get_data_stream: + name: logs-win-prod + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.agent.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.agent.properties.id.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.type: keyword } # LogsDB injected + - match: { .$backing_index.mappings.properties.host.properties.name.ignore_above: 1024 } # LogsDB injected + - match: { .$backing_index.mappings.properties.host.properties.id.type: text } + +--- +create logsdb data stream with custom sorting and host.name date field: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-prod ] + priority: 10000 + template: + settings: + index: + sort.field: [ host.name, host.hostname ] + sort.order: [ desc, desc ] + mode: logsdb + mappings: + properties: + host: + type: object + properties: + hostname: + type: keyword + name: + type: date + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-http-prod + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-http-prod + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.hostname.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.type: date } + +--- +create logsdb data stream with custom sorting and missing host.name field mapping: + - skip: + features: [ "allowed_warnings" ] + - requires: 
+ cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-qa ] + priority: 10000 + template: + settings: + index: + sort.field: [ host.name, host.hostname ] + sort.order: [ desc, desc ] + mode: logsdb + mappings: + properties: + host: + type: object + properties: + hostname: + type: keyword + + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-http-qa + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-http-qa + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.hostname.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.ignore_above: 1024 } + +--- +create logsdb data stream with custom sorting and host.name field without doc values: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-dev ] + priority: 10000 + template: + settings: + index: + sort.field: [ "host.name", "@timestamp" ] + sort.order: [ desc, desc ] + mode: logsdb + mappings: + properties: + host: + type: object + properties: + name: + type: keyword + doc_values: false + + data_stream: { } + - is_true: acknowledged + + - do: + catch: bad_request + indices.create_data_stream: + name: logs-http-dev + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "docvalues not found for index sort field:[host.name]" } + +--- +create logsdb data stream with incompatible ignore_above on host.name: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logsdb-index-template-ignore-above] has index patterns [logsdb-ignore-above] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-index-template-ignore-above + body: + index_patterns: [ logsdb-ignore-above ] + priority: 10000 + template: + settings: + index: + sort.field: [ host.name ] + sort.order: [ desc ] + mode: logsdb + mappings: + properties: + host.name: + type: keyword + ignore_above: 128 + data_stream: {} + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logsdb-ignore-above + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logsdb-ignore-above + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + 
indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.ignore_above: 128 } + +--- +create logsdb data stream with no sorting and host.name as text: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logsdb-index-template-non-keyword] has index patterns [logsdb-non-keyword] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-index-template-non-keyword + body: + index_patterns: [ logsdb-non-keyword ] + priority: 10000 + template: + settings: + mode: logsdb + mappings: + properties: + host.name: + type: text + data_stream: {} + - is_true: acknowledged + + - do: + catch: bad_request + indices.create_data_stream: + name: logsdb-non-keyword + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "docvalues not found for index sort field:[host.name]" } + +--- +create logsdb data stream without index sorting and ignore_above on host.name: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logsdb-index-template-ignore-above-override] has index patterns [logsdb-ignore-above-override] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-index-template-ignore-above-override + body: + index_patterns: [ logsdb-ignore-above-override ] + priority: 10000 + template: + settings: + index: + mode: logsdb + mappings: + properties: + host.name: + type: keyword + ignore_above: 128 + data_stream: {} + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logsdb-ignore-above-override + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logsdb-ignore-above-override + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.ignore_above: 128 } + +--- +create logsdb data stream with host.name as alias and sorting on it: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logsdb-index-template-alias] has index patterns [logsdb-alias] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-index-template-alias + body: + index_patterns: [ logsdb-alias ] + template: + settings: + index: + sort.field: [ host.name ] + sort.order: [ desc ] + mode: logsdb + mappings: + properties: + host.name: + type: alias + path: host.hostname + host.hostname: + type: + keyword + data_stream: {} + - do: + catch: bad_request + indices.create_data_stream: + name: logsdb-alias + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "Cannot use alias [host.name] as an index 
sort field" } + +--- +create logsdb data stream with multi-fields on host.name: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logsdb-index-template-multi-fields] has index patterns [logsdb-multi-fields] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-index-template-multi-fields + body: + index_patterns: [ logsdb-multi-fields ] + template: + settings: + index: + sort.field: [ host.name.keyword ] + sort.order: [ asc ] + mode: logsdb + mappings: + properties: + host.name: + type: "text" + fields: + keyword: + type: "keyword" + data_stream: {} + + - do: + indices.create_data_stream: + name: logsdb-multi-fields + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logsdb-multi-fields + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.name.fields.keyword.type: keyword } + +--- +create logsdb data stream with multi-fields on host.name and no sorting: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [ logsdb-no-sort-multi-fields-template ] has index patterns [logsdb-no-sort-multi-fields] matching patterns from existing older templates [global]" + indices.put_index_template: + name: logsdb-no-sort-multi-fields-template + body: + index_patterns: [ logsdb-no-sort-multi-fields ] + template: + settings: + mode: logsdb + mappings: + properties: + host.name: + type: text + fields: + keyword: + type: keyword + data_stream: {} + + - do: + catch: bad_request + indices.create_data_stream: + name: logsdb-no-sort-multi-fields + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "docvalues not found for index sort field:[host.name]" } + +--- +create logsdb data stream with custom empty sorting: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-empty ] + priority: 10000 + template: + settings: + index: + sort.field: [ ] + sort.order: [ ] + mode: logsdb + mappings: + properties: + hostname: + type: keyword + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-http-empty + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-http-empty + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } + - match: { .$backing_index.mappings.properties.host.properties.name.type: keyword } + - match: { .$backing_index.mappings.properties.host.properties.name.ignore_above: 1024 } + +--- 
+create logsdb data stream with custom sorting on timestamp: + - skip: + features: [ "allowed_warnings" ] + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + + - do: + allowed_warnings: + - "index template [logs-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-template] will take precedence during new index creation" + indices.put_index_template: + name: logs-template + body: + index_patterns: [ logs-http-dev ] + priority: 10000 + template: + settings: + index: + sort.field: [ "@timestamp" ] + sort.order: [ "asc" ] + mode: logsdb + mappings: + properties: + hostname: + type: keyword + data_stream: { } + - is_true: acknowledged + + - do: + indices.create_data_stream: + name: logs-http-dev + - is_true: acknowledged + + - do: + indices.get_data_stream: + name: logs-http-dev + + - set: { data_streams.0.indices.0.index_name: backing_index } + - do: + indices.get_mapping: + index: $backing_index + + - match: { .$backing_index.mappings.properties.@timestamp.type: date } diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml index be4de6dca6c76..6bc0cee78be4f 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -163,3 +163,27 @@ Structured log body: fields: ["event.dataset"] - length: { hits.hits: 1 } - match: { hits.hits.0.fields.event\.dataset: ["generic.otel"] } +--- +host.name pass-through: + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: {} + - "@timestamp": 2024-07-18T14:48:33.467654000Z + resource: + attributes: + host.name: localhost + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + query: + term: + host.name: localhost + fields: [ "*" ] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.resource\.attributes\.host\.name: [ "localhost" ] } + - match: { hits.hits.0.fields.host\.name: [ "localhost" ] } From f6a1e36d6be56a5d480765ad2d5f72f4adcaef5b Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Wed, 16 Oct 2024 16:17:41 +0200 Subject: [PATCH 20/31] Replace usages of `_source.mode` in documentation (#114743) We will deprecate the `_source.mode` mapping level configuration in favor of the index-level `index.mapping.source.mode` setting. As a result, we go through the documentation and update it to reflect the introduction of the setting. 
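For readers updating their own snippets, a minimal sketch of the same configuration built programmatically rather than via the REST examples below; this assumes only the `Settings` builder API, with the setting key spelled exactly as in the updated docs:

```java
import org.elasticsearch.common.settings.Settings;

final class SyntheticSourceSettingSketch {
    // Index-level replacement for the mapping-level
    // "_source": { "mode": "synthetic" } block used in the old examples.
    static Settings syntheticSourceIndexSettings() {
        return Settings.builder()
            .put("index.mapping.source.mode", "synthetic")
            .build();
    }
}
```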
--- docs/plugins/mapper-annotated-text.asciidoc | 20 ++++++- .../mapping/fields/synthetic-source.asciidoc | 28 ++++++---- .../types/aggregate-metric-double.asciidoc | 10 +++- docs/reference/mapping/types/boolean.asciidoc | 10 +++- docs/reference/mapping/types/date.asciidoc | 12 ++++- .../mapping/types/date_nanos.asciidoc | 10 +++- .../mapping/types/flattened.asciidoc | 30 +++++++++-- .../mapping/types/geo-point.asciidoc | 10 +++- docs/reference/mapping/types/ip.asciidoc | 10 +++- docs/reference/mapping/types/keyword.asciidoc | 30 +++++++++-- docs/reference/mapping/types/numeric.asciidoc | 20 ++++++- docs/reference/mapping/types/range.asciidoc | 54 ++++++++++++++++--- docs/reference/mapping/types/text.asciidoc | 22 ++++++-- docs/reference/mapping/types/version.asciidoc | 19 ++++--- .../reference/mapping/types/wildcard.asciidoc | 10 +++- 15 files changed, 252 insertions(+), 43 deletions(-) diff --git a/docs/plugins/mapper-annotated-text.asciidoc b/docs/plugins/mapper-annotated-text.asciidoc index e4141e98a2285..9b6eccd136696 100644 --- a/docs/plugins/mapper-annotated-text.asciidoc +++ b/docs/plugins/mapper-annotated-text.asciidoc @@ -167,8 +167,16 @@ duplicates removed. So: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "text": { "type": "annotated_text", @@ -215,8 +223,16 @@ are preserved. ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "text": { "type": "annotated_text", "store": true } } diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index 902b6c26611e5..f8666e2993d6a 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -2,7 +2,7 @@ ==== Synthetic `_source` IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices -(indices that have `index.mode` set to `time_series`). For other indices +(indices that have `index.mode` set to `time_series`). For other indices, synthetic `_source` is in technical preview. Features in technical preview may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA @@ -11,15 +11,19 @@ of official GA features. Though very handy to have around, the source field takes up a significant amount of space on disk. Instead of storing source documents on disk exactly as you send them, Elasticsearch can reconstruct source content on the fly upon retrieval. -Enable this by setting `mode: synthetic` in `_source`: +Enable this by using the value `synthetic` for the index setting `index.mapping.source.mode`: [source,console,id=enable-synthetic-source-example] ---- PUT idx { - "mappings": { - "_source": { - "mode": "synthetic" + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } } } } @@ -38,7 +42,7 @@ properties when used with synthetic `_source`. <> construct synthetic `_source` using existing data, most commonly <> and <>. For these field types, no additional space is needed to store the contents of `_source` field. Due to the storage layout of <>, the -generated `_source` field undergoes <> compared to original document. +generated `_source` field undergoes <> compared to the original document. 
For all other field types, the original value of the field is stored as is, in the same way as the `_source` field in non-synthetic mode. In this case there are no modifications and field data in `_source` is the same as in the original @@ -227,10 +231,16 @@ For instance: ---- PUT idx_keep { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { - "mode": "synthetic" - }, "properties": { "path": { "type": "object", diff --git a/docs/reference/mapping/types/aggregate-metric-double.asciidoc b/docs/reference/mapping/types/aggregate-metric-double.asciidoc index 8e14fba976360..8a4ddffc30bbd 100644 --- a/docs/reference/mapping/types/aggregate-metric-double.asciidoc +++ b/docs/reference/mapping/types/aggregate-metric-double.asciidoc @@ -267,8 +267,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "agg_metric": { "type": "aggregate_metric_double", diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 32f3d13edf581..494c41021dd2a 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -249,8 +249,16 @@ Synthetic source always sorts `boolean` fields. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "bool": { "type": "boolean" } } diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index ca2c23f932fc3..53b17a669ae75 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -130,7 +130,7 @@ The following parameters are accepted by `date` fields: <>:: If `true`, malformed numbers are ignored. If `false` (default), malformed - numbers throw an exception and reject the whole document. Note that this + numbers throw an exception and reject the whole document. Note that this cannot be set if the `script` parameter is used. <>:: @@ -248,8 +248,16 @@ Synthetic source always sorts `date` fields. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "date": { "type": "date" } } diff --git a/docs/reference/mapping/types/date_nanos.asciidoc b/docs/reference/mapping/types/date_nanos.asciidoc index 1a3b390b1690c..e9ec85c470ecf 100644 --- a/docs/reference/mapping/types/date_nanos.asciidoc +++ b/docs/reference/mapping/types/date_nanos.asciidoc @@ -160,8 +160,16 @@ Synthetic source always sorts `date_nanos` fields. 
For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "date": { "type": "date_nanos" } } diff --git a/docs/reference/mapping/types/flattened.asciidoc b/docs/reference/mapping/types/flattened.asciidoc index 0a72ebc98ecef..af6ef3e739d0f 100644 --- a/docs/reference/mapping/types/flattened.asciidoc +++ b/docs/reference/mapping/types/flattened.asciidoc @@ -334,8 +334,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "flattened": { "type": "flattened" } } @@ -367,8 +375,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "flattened": { "type": "flattened" } } @@ -407,8 +423,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "flattened": { "type": "flattened" } } diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 6db05188dfb98..9ba8ea6e46782 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -229,8 +229,16 @@ longitude) and reduces them to their stored precision. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "point": { "type": "geo_point" } } diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index f068916478a78..f85dd78ecbd4a 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -170,8 +170,16 @@ Synthetic source always sorts `ip` fields and removes duplicates. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "ip": { "type": "ip" } } diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index a4be7026dffcd..b94216042427f 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -188,8 +188,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "kwd": { "type": "keyword" } } @@ -218,8 +226,16 @@ are preserved. 
For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "kwd": { "type": "keyword", "store": true } } @@ -248,8 +264,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "kwd": { "type": "keyword", "ignore_above": 3 } } diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index d1e1c037e571e..5bfa1bc7c1240 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -259,8 +259,16 @@ Synthetic source always sorts numeric fields. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "long": { "type": "long" } } @@ -287,8 +295,16 @@ Scaled floats will always apply their scaling factor so: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "f": { "type": "scaled_float", "scaling_factor": 0.01 } } diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 14c5b6098acbe..04341f68c630a 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -249,13 +249,21 @@ of official GA features. `range` fields support <> in their default configuration. Synthetic `_source` cannot be used with <> disabled. -Synthetic source always sorts values and removes duplicates for all `range` fields except `ip_range` . Ranges are sorted by their lower bound and then by upper bound. For example: +Synthetic source always sorts values and removes duplicates for all `range` fields except `ip_range`. Ranges are sorted by their lower bound and then by upper bound. For example: [source,console,id=synthetic-source-range-sorting-example] ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "my_range": { "type": "long_range" } } @@ -316,8 +324,16 @@ For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "my_range": { "type": "ip_range" } } @@ -352,13 +368,21 @@ Will become: // TEST[s/^/{"_source":/ s/\n$/}/] [[range-synthetic-source-inclusive]] -Range field vales are always represented as inclusive on both sides with bounds adjusted accordingly. Default values for range bounds are represented as `null`. This is true even if range bound was explicitly provided. For example: +Range field values are always represented as inclusive on both sides with bounds adjusted accordingly. Default values for range bounds are represented as `null`. This is true even if range bound was explicitly provided. 
For example: [source,console,id=synthetic-source-range-normalization-example] ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "my_range": { "type": "long_range" } } @@ -394,8 +418,16 @@ Default values for range bounds are represented as `null` in synthetic source. T ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "my_range": { "type": "integer_range" } } @@ -429,8 +461,16 @@ Will become: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "my_range": { "type": "date_range" } } diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index c33af69df5607..ca69c93e8f1a8 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -177,15 +177,23 @@ a <> sub-field that supports synthetic `_source` or if the `text` field sets `store` to `true`. Either way, it may not have <>. -If using a sub-`keyword` field then the values are sorted in the same way as +If using a sub-`keyword` field, then the values are sorted in the same way as a `keyword` field's values are sorted. By default, that means sorted with duplicates removed. So: [source,console,id=synthetic-source-text-example-default] ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "text": { "type": "text", @@ -233,8 +241,16 @@ are preserved. ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "text": { "type": "text", "store": true } } diff --git a/docs/reference/mapping/types/version.asciidoc b/docs/reference/mapping/types/version.asciidoc index 8da0fcae80fcd..1600451432bd8 100644 --- a/docs/reference/mapping/types/version.asciidoc +++ b/docs/reference/mapping/types/version.asciidoc @@ -63,16 +63,15 @@ The following parameters are accepted by `version` fields: [discrete] ==== Limitations -This field type isn't optimized for heavy wildcard, regex or fuzzy searches. While those -type of queries work in this field, you should consider using a regular `keyword` field if -you strongly rely on these kind of queries. - +This field type isn't optimized for heavy wildcard, regex, or fuzzy searches. While those +types of queries work in this field, you should consider using a regular `keyword` field if +you strongly rely on these kinds of queries. [[version-synthetic-source]] ==== Synthetic `_source` IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices -(indices that have `index.mode` set to `time_series`). For other indices +(indices that have `index.mode` set to `time_series`). For other indices, synthetic `_source` is in technical preview. Features in technical preview may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA @@ -86,8 +85,16 @@ Synthetic source always sorts `version` fields and removes duplicates. 
For examp ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "versions": { "type": "version" } } diff --git a/docs/reference/mapping/types/wildcard.asciidoc b/docs/reference/mapping/types/wildcard.asciidoc index 79fc953051d54..89a3109a37164 100644 --- a/docs/reference/mapping/types/wildcard.asciidoc +++ b/docs/reference/mapping/types/wildcard.asciidoc @@ -141,8 +141,16 @@ Synthetic source always sorts `wildcard` fields. For example: ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "card": { "type": "wildcard" } } From c76fd004d92be03e120bcf1bcbbe30e05addf717 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Wed, 16 Oct 2024 09:29:41 -0600 Subject: [PATCH 21/31] (Doc+) link video for resolving shards too large (#114915) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * (Doc+) link video for resolving shards too large 👋 howdy, team (cc: @anniegale9538 )! Playing forward https://github.com/elastic/elasticsearch/pull/111254, [this video](https://www.youtube.com/watch?v=sHyNYnwbYro) demonstrates an example resolving shards too large via reindex under [this section](https://www.elastic.co/guide/en/elasticsearch/reference/master/size-your-shards.html#shard-size-recommendation) as it's a top support ask. --------- Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- docs/reference/how-to/size-your-shards.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 5f67014d5bb4a..19848fb0338fe 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -208,6 +208,7 @@ index can be <>. You may then consider setting <> against the destination index for the source index's name to point to it for continuity. +See this https://www.youtube.com/watch?v=sHyNYnwbYro[fixing shard sizes video] for an example troubleshooting walkthrough. [discrete] [[shard-count-recommendation]] From bd754f798fd2c5bc55b24706e3c275e241ba13ca Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Wed, 16 Oct 2024 09:29:57 -0600 Subject: [PATCH 22/31] (Doc+) Cross-link max shards (#114670) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * (Doc+) Cross-link max shards 👋 It appears we have two docs of similar content about max open shards. This one contains the error users search (so is what we linked the error to in https://github.com/elastic/elasticsearch/pull/110993) but the other I believe is a placeholder doc for the health api code. Should maybe consolidate some day but in the mean time at least cross-link. --------- Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- docs/reference/how-to/size-your-shards.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 19848fb0338fe..8770ec373bb18 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -572,6 +572,8 @@ PUT _cluster/settings } ---- +For more information, see <>. 
+ [discrete] [[troubleshooting-max-docs-limit]] ==== Number of documents in the shard cannot exceed [2147483519] From 1b2ffa2651ec813f60045b4e7c2ec42e023aa4e8 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 16 Oct 2024 12:05:54 -0400 Subject: [PATCH 23/31] Fix this log level (#114921) @masseyke noticed this in his review of https://github.com/elastic/elasticsearch/pull/114847. I fixed it in the backport to `8.x` via https://github.com/elastic/elasticsearch/pull/114872, but this PR is needed to get the same fix into `main`. --- .../elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java index f4ae440d171d3..e04014ff693be 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java @@ -617,7 +617,7 @@ public Checksum checksum() throws IOException { } @SuppressWarnings("unchecked") String md5 = ((Map) checksums.get("checksums")).get("md5"); - logger.info("checksum was [{}]", md5); + logger.trace("checksum was [{}]", md5); var matcher = MD5_CHECKSUM_PATTERN.matcher(md5); boolean match = matcher.matches(); From 0c480861700443c93a720a992423aa85cb1d974a Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 16 Oct 2024 10:50:42 -0600 Subject: [PATCH 24/31] Reenable incremental bulk tests (#114922) These tests should be fixed and can be unmuted. The associated github issues have already been closed. --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index fb48f9e04d5c4..69cef9acc8cb9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -259,12 +259,6 @@ tests: - class: org.elasticsearch.xpack.esql.expression.function.aggregate.AvgTests method: "testFold {TestCase= #7}" issue: https://github.com/elastic/elasticsearch/issues/114175 -- class: org.elasticsearch.action.bulk.IncrementalBulkIT - method: testMultipleBulkPartsWithBackoff - issue: https://github.com/elastic/elasticsearch/issues/114181 -- class: org.elasticsearch.action.bulk.IncrementalBulkIT - method: testIncrementalBulkLowWatermarkBackOff - issue: https://github.com/elastic/elasticsearch/issues/114182 - class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT method: testStepInfoPreservedOnAutoRetry issue: https://github.com/elastic/elasticsearch/issues/114220 From 4ca8ef54e53e23670458a0aee7a90b274c8a8cdc Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 17 Oct 2024 04:00:08 +1100 Subject: [PATCH 25/31] Add 8.16 to branches.json --- branches.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/branches.json b/branches.json index e464d6179f2ba..e81d511a88458 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.16" + }, { "branch": "8.x" }, From 2aec12c17383de5da35664d9160904b668944364 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 16 Oct 2024 12:32:03 -0400 Subject: [PATCH 26/31] Bump 8.x to version 8.17.0 --- .backportrc.json | 4 +-- .buildkite/pipelines/intake.template.yml | 1 + .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 ++++++++++ .buildkite/pipelines/periodic.yml | 25 +++++++++++++-- 
.ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 1 + docs/reference/migration/index.asciidoc | 2 ++ .../reference/migration/migrate_8_17.asciidoc | 20 ++++++++++++ docs/reference/release-notes.asciidoc | 2 ++ docs/reference/release-notes/8.17.0.asciidoc | 8 +++++ .../release-notes/highlights.asciidoc | 31 +++---------------- .../main/java/org/elasticsearch/Version.java | 1 + 13 files changed, 81 insertions(+), 33 deletions(-) create mode 100644 docs/reference/migration/migrate_8_17.asciidoc create mode 100644 docs/reference/release-notes/8.17.0.asciidoc diff --git a/.backportrc.json b/.backportrc.json index d2e92817c026b..03f3f892f9227 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,10 +1,10 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.x", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.x", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { "^v9.0.0$" : "main", - "^v8.16.0$" : "8.x", + "^v8.17.0$" : "8.x", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } } diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index f530f237113a9..57412bbe908bc 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -75,6 +75,7 @@ steps: - trigger: elasticsearch-dra-workflow label: Trigger DRA snapshot workflow async: true + branches: "main 8.* 7.17" build: branch: "$BUILDKITE_BRANCH" commit: "$BUILDKITE_COMMIT" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 1bb13c4c10966..1ddb3e82920cd 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.15.3", "8.16.0", "9.0.0"] + BWC_VERSION: ["8.15.3", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index b29747c60617e..03368e7e4a9c0 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -304,6 +304,22 @@ steps: env: BWC_VERSION: 8.16.0 + - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.17.0 + - label: "{{matrix.image}} / 9.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v9.0.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index cbca7f820c7b7..d572dd104d215 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -325,6 +325,25 @@ steps: - signal_reason: agent_stop limit: 3 + - label: 8.17.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.17.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + 
machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + preemptible: true + env: + BWC_VERSION: 8.17.0 + retry: + automatic: + - exit_status: "-1" + limit: 3 + signal_reason: none + - signal_reason: agent_stop + limit: 3 + - label: 9.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v9.0.0#bwcTest timeout_in_minutes: 300 @@ -410,7 +429,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.15.3", "8.16.0", "9.0.0"] + BWC_VERSION: ["8.15.3", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -452,7 +471,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.15.3", "8.16.0", "9.0.0"] + BWC_VERSION: ["8.15.3", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -554,7 +573,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk - if: build.branch == "main" || build.branch == "7.17" + if: build.branch == "main" || build.branch == "8.x" || build.branch == "7.17" - label: check-branch-consistency command: .ci/scripts/run-gradle.sh branchConsistency timeout_in_minutes: 15 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index de0505c61a251..cd1f7d1ae269f 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -16,4 +16,5 @@ BWC_VERSION: - "8.14.3" - "8.15.3" - "8.16.0" + - "8.17.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 24f58abc72493..67ebf0c51ab1f 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,5 @@ BWC_VERSION: - "8.15.3" - "8.16.0" + - "8.17.0" - "9.0.0" diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 0690f60495c97..719588cb4b0d0 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,5 +1,6 @@ include::migration_intro.asciidoc[] +* <> * <> * <> * <> @@ -18,6 +19,7 @@ include::migration_intro.asciidoc[] * <> * <> +include::migrate_8_17.asciidoc[] include::migrate_8_16.asciidoc[] include::migrate_8_15.asciidoc[] include::migrate_8_14.asciidoc[] diff --git a/docs/reference/migration/migrate_8_17.asciidoc b/docs/reference/migration/migrate_8_17.asciidoc new file mode 100644 index 0000000000000..15bc6431c60ba --- /dev/null +++ b/docs/reference/migration/migrate_8_17.asciidoc @@ -0,0 +1,20 @@ +[[migrating-8.17]] +== Migrating to 8.17 +++++ +8.17 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.17. + +See also <> and <>. + +coming::[8.17.0] + + +[discrete] +[[breaking-changes-8.17]] +=== Breaking changes + +There are no breaking changes in {es} 8.17. + diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 6f32b55c49af8..c912b0e62b94d 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <> * <> * <> * <> @@ -72,6 +73,7 @@ This section summarizes the changes in each release. 
-- +include::release-notes/8.17.0.asciidoc[] include::release-notes/8.16.0.asciidoc[] include::release-notes/8.15.1.asciidoc[] include::release-notes/8.15.0.asciidoc[] diff --git a/docs/reference/release-notes/8.17.0.asciidoc b/docs/reference/release-notes/8.17.0.asciidoc new file mode 100644 index 0000000000000..59962fd83e9b7 --- /dev/null +++ b/docs/reference/release-notes/8.17.0.asciidoc @@ -0,0 +1,8 @@ +[[release-notes-8.17.0]] +== {es} version 8.17.0 + +coming[8.17.0] + +Also see <>. + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 1e0018f590ac0..81d46b5773877 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -11,7 +11,8 @@ For detailed information about this release, see the <> and // Add previous release to the list Other versions: -{ref-bare}/8.15/release-highlights.html[8.15] +{ref-bare}/8.16/release-highlights.html[8.16] +| {ref-bare}/8.15/release-highlights.html[8.15] | {ref-bare}/8.14/release-highlights.html[8.14] | {ref-bare}/8.13/release-highlights.html[8.13] | {ref-bare}/8.12/release-highlights.html[8.12] @@ -30,6 +31,8 @@ Other versions: endif::[] +// The notable-highlights tag marks entries that +// should be featured in the Stack Installation and Upgrade Guide: // tag::notable-highlights[] [discrete] @@ -97,29 +100,3 @@ ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput // end::notable-highlights[] -[discrete] -[[esql_multi_value_fields_supported_in_geospatial_predicates]] -=== ESQL: Multi-value fields supported in Geospatial predicates -Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` -of the values in the field should pass the predicate. -For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? -This ambiguity does not exist with the spatial predicates -`ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` -is implied by the predicate itself. -Consider a predicate checking a field named `location` against a test geometry named `shape`: - -* `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape -* `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape - -This works even if the shape argument is itself a complex or compound geometry. - -Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved -with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained -geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value -field is first combined into a single geometry before performing the predicate check. 
- -* `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape -* `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape - -{es-pull}112063[#112063] - diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 4b19d4b428526..48bf08ddfc028 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -187,6 +187,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_16_0 = new Version(8_16_00_99); + public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_9_0_0 = new Version(9_00_00_99); public static final Version CURRENT = V_9_0_0; From 9770ab7ac2da950b916743507abf8f9e73e084c7 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:10:59 -0600 Subject: [PATCH 27/31] (Doc+) troubleshoot ILM videos (#114528) This adds links for our 6 newest [Support Troubleshooting](https://www.youtube.com/playlist?list=PL_mJOmq4zsHbQlfEMEh_30_LuV_hZp-3d) videos, which cover resolving general ILM health and the top five ILM rollover errors, to the existing [Troubleshooting ILM errors](https://www.elastic.co/guide/en/elasticsearch/reference/master/index-lifecycle-error-handling.html) page. As a side quest, it also links the watermark error to [its troubleshooting doc](https://www.elastic.co/guide/en/elasticsearch/reference/master/fix-watermark-errors.html). --- docs/reference/ilm/error-handling.asciidoc | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index f810afc6c2b5f..e8df44653e9c5 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -8,6 +8,9 @@ When this happens, {ilm-init} moves the index to an `ERROR` step. If {ilm-init} cannot resolve the error automatically, execution is halted until you resolve the underlying issues with the policy, index, or cluster. +See this https://www.youtube.com/watch?v=VCIqkji3IwY[{ilm-init} health video] +for an example troubleshooting walkthrough. + For example, you might have a `shrink-index` policy that shrinks an index to four shards once it is at least five days old: @@ -183,6 +186,8 @@ The rollover action then manages setting and updating the alias to Do not explicitly configure this same alias in the aliases section of an index template. +See this https://www.youtube.com/watch?v=Ww5POq4zZtY[resolving `duplicate alias` video] for an example troubleshooting walkthrough. + [discrete] ==== index.lifecycle.rollover_alias [x] does not point to index [y] @@ -191,6 +196,8 @@ Either the index is using the wrong alias or the alias does not exist. Check the `index.lifecycle.rollover_alias` <>. To see what aliases are configured, use <>. +See this https://www.youtube.com/watch?v=NKSe67x7aw8[resolving `not point to index` video] for an example troubleshooting walkthrough. + [discrete] ==== Setting [index.lifecycle.rollover_alias] for index [y] is empty or not defined @@ -198,6 +205,8 @@ The `index.lifecycle.rollover_alias` setting must be configured for the rollover action to work. Update the index settings to set `index.lifecycle.rollover_alias`.
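+As a minimal sketch (the index and alias names below are illustrative only),
+the missing setting can be added through the update index settings API:
+
+[source,console]
+----
+PUT my-index-000001/_settings
+{
+  "index.lifecycle.rollover_alias": "my-alias"
+}
+----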
+See this https://www.youtube.com/watch?v=LRpMC2GS_FQ[resolving `empty or not defined` video] for an example troubleshooting walkthrough. + [discrete] ==== Alias [x] has more than one write index [y,z] @@ -205,6 +214,8 @@ Only one index can be designated as the write index for a particular alias. Use the <> API to set `is_write_index:false` for all but one index. +See this https://www.youtube.com/watch?v=jCUvZCT5Hm4[resolving `more than one write index` video] for an example troubleshooting walkthrough. + [discrete] ==== index name [x] does not match pattern ^.*-\d+ @@ -214,6 +225,8 @@ For example, `my-index` does not match the pattern requirement. Append a numeric value to the index name, for example `my-index-000001`. +See this https://www.youtube.com/watch?v=9sp1zF6iL00[resolving `does not match pattern` video] for an example troubleshooting walkthrough. + [discrete] ==== CircuitBreakingException: [x] data too large, data for [y] @@ -227,8 +240,7 @@ For more information, see <>. This indicates that the cluster is running out of disk space. This can happen when you don't have {ilm} set up to roll over from hot to warm nodes. - -Consider adding nodes, upgrading your hardware, or deleting unneeded indices. +For more information, see <>. [discrete] ==== security_exception: action [] is unauthorized for user [] with roles [], this action is granted by the index privileges [manage_follow_index,manage,all] From f99927e2d42cb4af5b03bd969357dc118df158c0 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Wed, 16 Oct 2024 11:49:08 -0600 Subject: [PATCH 28/31] Make ESQL EnrichPolicyResolver try to do proper connection before sending requests (#114870) * Make ESQL EnrichPolicyResolver try to do proper connection before sending requests * Make ensureConnected be !skipUnavailable --- .../esql/enrich/EnrichPolicyResolver.java | 45 ++++++++++--------- .../enrich/EnrichPolicyResolverTests.java | 4 +- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 447df09942ca8..e67c406e26929 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -25,6 +25,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; @@ -72,12 +73,14 @@ public class EnrichPolicyResolver { private final IndexResolver indexResolver; private final TransportService transportService; private final ThreadPool threadPool; + private final RemoteClusterService remoteClusterService; public EnrichPolicyResolver(ClusterService clusterService, TransportService transportService, IndexResolver indexResolver) { this.clusterService = clusterService; this.transportService = transportService; this.indexResolver = indexResolver; this.threadPool = transportService.getThreadPool(); + this.remoteClusterService = transportService.getRemoteClusterService(); transportService.registerRequestHandler( RESOLVE_ACTION_NAME, threadPool.executor(ThreadPool.Names.SEARCH), @@ -257,22 +260,21 @@ private void
lookupPolicies( // remote clusters if (remotePolicies.isEmpty() == false) { for (String cluster : remoteClusters) { - final Transport.Connection connection; - try { - connection = getRemoteConnection(cluster); - } catch (Exception e) { - refs.acquire().onFailure(e); - return; - } - transportService.sendRequest( - connection, - RESOLVE_ACTION_NAME, - new LookupRequest(cluster, remotePolicies), - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>( - refs.acquire(resp -> lookupResponses.put(cluster, resp)), - LookupResponse::new, - threadPool.executor(ThreadPool.Names.SEARCH) + ActionListener lookupListener = refs.acquire(resp -> lookupResponses.put(cluster, resp)); + getRemoteConnection( + cluster, + lookupListener.delegateFailureAndWrap( + (delegate, connection) -> transportService.sendRequest( + connection, + RESOLVE_ACTION_NAME, + new LookupRequest(cluster, remotePolicies), + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + delegate, + LookupResponse::new, + threadPool.executor(ThreadPool.Names.SEARCH) + ) + ) ) ); } @@ -389,13 +391,16 @@ protected Map availablePolicies() { return metadata == null ? Map.of() : metadata.getPolicies(); } - protected Transport.Connection getRemoteConnection(String cluster) { - return transportService.getRemoteClusterService().getConnection(cluster); + protected void getRemoteConnection(String cluster, ActionListener listener) { + remoteClusterService.maybeEnsureConnectedAndGetConnection( + cluster, + remoteClusterService.isSkipUnavailable(cluster) == false, + listener + ); } public Map> groupIndicesPerCluster(String[] indices) { - return transportService.getRemoteClusterService() - .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, indices) + return remoteClusterService.groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, indices) .entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> Arrays.asList(e.getValue().indices()))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java index 05a7486a18068..39170f1a305df 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java @@ -446,9 +446,9 @@ EnrichResolution resolvePolicies(Collection clusters, Collection listener) { assertThat("Must only called on the local cluster", cluster, equalTo(LOCAL_CLUSTER_GROUP_KEY)); - return transports.get("").getConnection(transports.get(remoteCluster).getLocalNode()); + listener.onResponse(transports.get("").getConnection(transports.get(remoteCluster).getLocalNode())); } static ClusterService mockClusterService(Map policies) { From 8b8796908ac0c7a73566adb4647476b66656119c Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 16 Oct 2024 19:51:18 +0200 Subject: [PATCH 29/31] Enhance empty queue conditional in slicing logic (#114911) With recent changes in Lucene 9.12 around not forking execution when not necessary (see https://github.com/apache/lucene/pull/13472), we have removed the search worker thread pool in #111099. 
The worker thread pool had an unlimited queue, and we feared that we could have much more queueing on the search thread pool if we execute segment level searches on the same thread pool as the shard level searches, because every shard search would take up to a thread per slice when executing the query phase. We then introduced an additional conditional to stop parallelizing when there is a queue. That is perhaps a bit extreme, as it's a decision made when creating the searcher, while a queue may no longer be there once the search is executing. This has caused some benchmark regressions, given that having a queue may be a transient scenario, especially with short-lived segment searches being queued up. We may end up disabling inter-segment concurrency more aggressively than we would want, penalizing requests that do benefit from concurrency. At the same time, we do want to have some kind of protection against rejections of shard searches that would be caused by excessive slicing. When the queue is above a certain size, we can turn off the slicing and effectively disable inter-segment concurrency. With this commit we set that threshold to be the number of threads in the search pool. --- .../search/DefaultSearchContext.java | 2 +- .../search/DefaultSearchContextTests.java | 209 ++++++++++++------ 2 files changed, 148 insertions(+), 63 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 1521b17a81766..8ac35f7c40caa 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -291,7 +291,7 @@ static int determineMaximumNumberOfSlices( ToLongFunction fieldCardinality ) { return executor instanceof ThreadPoolExecutor tpe - && tpe.getQueue().isEmpty() + && tpe.getQueue().size() <= tpe.getMaximumPoolSize() && isParallelCollectionSupportedForResults(resultsType, request.source(), fieldCardinality, enableQueryPhaseParallelCollection) ?
tpe.getMaximumPoolSize() : 1; diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index 0e4945e8bb8d1..a474c1dc38c50 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -81,6 +81,7 @@ import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ThreadPoolExecutor; import java.util.function.Function; import java.util.function.Supplier; import java.util.function.ToLongFunction; @@ -507,10 +508,10 @@ public void testNewIdLoaderWithTsdbAndRoutingPathMatch() throws Exception { } } - public void testDetermineMaximumNumberOfSlices() { + private static ShardSearchRequest createParallelRequest() { IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(new ShardId("index", "uuid", 0)); - ShardSearchRequest parallelReq = new ShardSearchRequest( + return new ShardSearchRequest( OriginalIndices.NONE, new SearchRequest().allowPartialSearchResults(randomBoolean()), indexShard.shardId(), @@ -521,69 +522,74 @@ public void testDetermineMaximumNumberOfSlices() { System.currentTimeMillis(), null ); - ShardSearchRequest singleSliceReq = new ShardSearchRequest( - OriginalIndices.NONE, - new SearchRequest().allowPartialSearchResults(randomBoolean()) - .source(new SearchSourceBuilder().sort(SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME))), - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1f, - System.currentTimeMillis(), - null - ); - int executorPoolSize = randomIntBetween(1, 100); - ExecutorService threadPoolExecutor = EsExecutors.newFixed( - "test", - executorPoolSize, - 0, - Thread::new, - new ThreadContext(Settings.EMPTY), - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK - ); - ExecutorService notThreadPoolExecutor = Executors.newWorkStealingPool(); - ToLongFunction fieldCardinality = name -> -1; + } + + public void testDetermineMaximumNumberOfSlicesNoExecutor() { + ToLongFunction fieldCardinality = name -> { throw new UnsupportedOperationException(); }; assertEquals( - executorPoolSize, + 1, DefaultSearchContext.determineMaximumNumberOfSlices( - threadPoolExecutor, - parallelReq, + null, + createParallelRequest(), SearchService.ResultsType.DFS, - true, + randomBoolean(), fieldCardinality ) ); assertEquals( - executorPoolSize, + 1, DefaultSearchContext.determineMaximumNumberOfSlices( - threadPoolExecutor, - singleSliceReq, - SearchService.ResultsType.DFS, - true, + null, + createParallelRequest(), + SearchService.ResultsType.QUERY, + randomBoolean(), fieldCardinality ) ); + } + + public void testDetermineMaximumNumberOfSlicesNotThreadPoolExecutor() { + ExecutorService notThreadPoolExecutor = Executors.newWorkStealingPool(); + ToLongFunction fieldCardinality = name -> { throw new UnsupportedOperationException(); }; assertEquals( 1, - DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.DFS, true, fieldCardinality) + DefaultSearchContext.determineMaximumNumberOfSlices( + notThreadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.DFS, + randomBoolean(), + fieldCardinality + ) ); assertEquals( - executorPoolSize, + 1, DefaultSearchContext.determineMaximumNumberOfSlices( - threadPoolExecutor, - parallelReq, + notThreadPoolExecutor, + createParallelRequest(), SearchService.ResultsType.QUERY, - 
true, + randomBoolean(), fieldCardinality ) ); + } + + public void testDetermineMaximumNumberOfSlicesEnableQueryPhaseParallelCollection() { + int executorPoolSize = randomIntBetween(1, 100); + ThreadPoolExecutor threadPoolExecutor = EsExecutors.newFixed( + "test", + executorPoolSize, + 0, + Thread::new, + new ThreadContext(Settings.EMPTY), + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + ToLongFunction fieldCardinality = name -> -1; assertEquals( - 1, + executorPoolSize, DefaultSearchContext.determineMaximumNumberOfSlices( threadPoolExecutor, - singleSliceReq, + createParallelRequest(), SearchService.ResultsType.QUERY, true, fieldCardinality @@ -592,54 +598,133 @@ public void testDetermineMaximumNumberOfSlices() { assertEquals( 1, DefaultSearchContext.determineMaximumNumberOfSlices( - notThreadPoolExecutor, - parallelReq, - SearchService.ResultsType.DFS, - true, + threadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.QUERY, + false, fieldCardinality ) ); - assertEquals( executorPoolSize, DefaultSearchContext.determineMaximumNumberOfSlices( threadPoolExecutor, - parallelReq, + createParallelRequest(), SearchService.ResultsType.DFS, - false, + randomBoolean(), fieldCardinality ) ); - assertEquals( + } + + public void testDetermineMaximumNumberOfSlicesSingleSortByField() { + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.shardId()).thenReturn(new ShardId("index", "uuid", 0)); + ShardSearchRequest singleSliceReq = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()) + .source(new SearchSourceBuilder().sort(SortBuilders.fieldSort(FieldSortBuilder.DOC_FIELD_NAME))), + indexShard.shardId(), + 0, 1, - DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.DFS, false, fieldCardinality) + AliasFilter.EMPTY, + 1f, + System.currentTimeMillis(), + null ); + ToLongFunction fieldCardinality = name -> { throw new UnsupportedOperationException(); }; + int executorPoolSize = randomIntBetween(1, 100); + ThreadPoolExecutor threadPoolExecutor = EsExecutors.newFixed( + "test", + executorPoolSize, + 0, + Thread::new, + new ThreadContext(Settings.EMPTY), + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + // DFS concurrency does not rely on slices, hence it kicks in regardless of the request (supportsParallelCollection is not called) assertEquals( - 1, + executorPoolSize, DefaultSearchContext.determineMaximumNumberOfSlices( threadPoolExecutor, - parallelReq, - SearchService.ResultsType.QUERY, - false, + singleSliceReq, + SearchService.ResultsType.DFS, + true, fieldCardinality ) ); - assertEquals( - 1, - DefaultSearchContext.determineMaximumNumberOfSlices(null, parallelReq, SearchService.ResultsType.QUERY, false, fieldCardinality) - ); assertEquals( 1, DefaultSearchContext.determineMaximumNumberOfSlices( - notThreadPoolExecutor, - parallelReq, - SearchService.ResultsType.DFS, - false, + threadPoolExecutor, + singleSliceReq, + SearchService.ResultsType.QUERY, + true, fieldCardinality ) ); } + public void testDetermineMaximumNumberOfSlicesWithQueue() { + int executorPoolSize = randomIntBetween(1, 100); + ThreadPoolExecutor threadPoolExecutor = EsExecutors.newFixed( + "test", + executorPoolSize, + 1000, + Thread::new, + new ThreadContext(Settings.EMPTY), + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + ToLongFunction fieldCardinality = name -> { throw new UnsupportedOperationException(); }; + + for (int i = 0; i < executorPoolSize; i++) { + 
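+            // with at most pool-size tasks queued, the queue size stays at or below the maximum pool size,
+            // so the relaxed conditional should keep parallel slicing enabled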
assertTrue(threadPoolExecutor.getQueue().offer(() -> {})); + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.DFS, + true, + fieldCardinality + ) + ); + assertEquals( + executorPoolSize, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.QUERY, + true, + fieldCardinality + ) + ); + } + for (int i = 0; i < 100; i++) { + assertTrue(threadPoolExecutor.getQueue().offer(() -> {})); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.DFS, + true, + fieldCardinality + ) + ); + assertEquals( + 1, + DefaultSearchContext.determineMaximumNumberOfSlices( + threadPoolExecutor, + createParallelRequest(), + SearchService.ResultsType.QUERY, + true, + fieldCardinality + ) + ); + } + } + public void testIsParallelCollectionSupportedForResults() { SearchSourceBuilder searchSourceBuilderOrNull = randomBoolean() ? null : new SearchSourceBuilder(); ToLongFunction fieldCardinality = name -> -1; From 33ea3116c9295012ceddbdddb5310918973a4753 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 16 Oct 2024 13:06:06 -0500 Subject: [PATCH 30/31] Reducing error-level stack trace logging for normal events in GeoIpDownloader (#114924) --- docs/changelog/114924.yaml | 5 +++ .../ingest/geoip/GeoIpDownloader.java | 12 +++++-- .../ingest/geoip/GeoIpDownloaderTests.java | 34 ++++++------------- 3 files changed, 25 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/114924.yaml diff --git a/docs/changelog/114924.yaml b/docs/changelog/114924.yaml new file mode 100644 index 0000000000000..536f446ef790d --- /dev/null +++ b/docs/changelog/114924.yaml @@ -0,0 +1,5 @@ +pr: 114924 +summary: Reducing error-level stack trace logging for normal events in `GeoIpDownloader` +area: Ingest Node +type: bug +issues: [] diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index dcaa8f6f2fb03..ae562d3c7359a 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -139,11 +138,18 @@ void updateDatabases() throws IOException { if (geoipIndex != null) { logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX); if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) { - throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active"); + logger.debug( + "Not updating geoip database because not all primary shards of the [" + DATABASES_INDEX + "] index are active." 
+ ); + return; } var blockException = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, geoipIndex.getWriteIndex().getName()); if (blockException != null) { - throw blockException; + logger.debug( + "Not updating geoip database because there is a write block on the " + geoipIndex.getWriteIndex().getName() + " index", + blockException + ); + return; } } if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index e73f0a36cc632..5698328792787 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.ingest.geoip; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -25,11 +24,9 @@ import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -583,37 +580,28 @@ void processDatabase(Map databaseInfo) { assertFalse(it.hasNext()); } - public void testUpdateDatabasesWriteBlock() { + public void testUpdateDatabasesWriteBlock() throws IOException { + /* + * Here we make sure that we bail out before making an httpClient request if there is a write block on the .geoip_databases index + */ ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); state = ClusterState.builder(state) .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) .build(); when(clusterService.state()).thenReturn(state); - var e = expectThrows(ClusterBlockException.class, () -> geoIpDownloader.updateDatabases()); - assertThat( e.getMessage(), equalTo( "index [" + geoIpIndex + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " + "index has read-only-allow-delete block; for more information, see " + ReferenceDocs.FLOOD_STAGE_WATERMARK + "];" ) ); + geoIpDownloader.updateDatabases(); verifyNoInteractions(httpClient); } - public void testUpdateDatabasesIndexNotReady() { + public void testUpdateDatabasesIndexNotReady() throws IOException { + /* + * Here we make sure that we bail out before making an httpClient request if there are unallocated shards on the .geoip_databases + * index + */ ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true); - var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); - state = ClusterState.builder(state) .blocks(new
ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) - .build(); when(clusterService.state()).thenReturn(state); - var e = expectThrows(ElasticsearchException.class, () -> geoIpDownloader.updateDatabases()); - assertThat(e.getMessage(), equalTo("not all primary shards of [.geoip_databases] index are active")); + geoIpDownloader.updateDatabases(); verifyNoInteractions(httpClient); } From e14418489685689d91c42c3477063af122a17b45 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 16 Oct 2024 12:18:35 -0600 Subject: [PATCH 31/31] Standardize error code when bulk body is invalid (#114869) Currently the incremental and non-incremental bulk variations will return different error codes when the json body provided is invalid. This commit ensures both versions return status code 400. Additionally, this renames the incremental rest tests to bulk tests and ensures that all tests work with both bulk API versions. We set these tests to randomize which version of the API we test on each run. A quick manual check of the standardized behavior is sketched after the final diff below. --- docs/changelog/114869.yaml | 5 +++ ...ementalBulkRestIT.java => BulkRestIT.java} | 42 ++++++++++++++---- .../rest/action/document/RestBulkAction.java | 43 ++++++++++++------- 3 files changed, 65 insertions(+), 25 deletions(-) create mode 100644 docs/changelog/114869.yaml rename qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/{IncrementalBulkRestIT.java => BulkRestIT.java} (81%) diff --git a/docs/changelog/114869.yaml b/docs/changelog/114869.yaml new file mode 100644 index 0000000000000..755418e7ce4d9 --- /dev/null +++ b/docs/changelog/114869.yaml @@ -0,0 +1,5 @@ +pr: 114869 +summary: Standardize error code when bulk body is invalid +area: CRUD +type: bug +issues: [] diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IncrementalBulkRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java similarity index 81% rename from qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IncrementalBulkRestIT.java rename to qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java index da05011696274..369d0824bdb28 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IncrementalBulkRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java @@ -9,6 +9,8 @@ package org.elasticsearch.http; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -19,24 +21,30 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; +import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) -public class IncrementalBulkRestIT extends HttpSmokeTestCase { +public class BulkRestIT extends HttpSmokeTestCase { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) - .put(IncrementalBulkService.INCREMENTAL_BULK.getKey(), true) +
.put(IncrementalBulkService.INCREMENTAL_BULK.getKey(), seventyFivePercentOfTheTime()) .build(); } + private static boolean seventyFivePercentOfTheTime() { + return (randomBoolean() && randomBoolean()) == false; + } + public void testBulkUriMatchingDoesNotMatchBulkCapabilitiesApi() throws IOException { Request request = new Request("GET", "/_capabilities?method=GET&path=%2F_bulk&capabilities=failure_store_status&pretty"); Response response = getRestClient().performRequest(request); @@ -51,6 +59,26 @@ public void testBulkMissingBody() throws IOException { assertThat(responseException.getMessage(), containsString("request body is required")); } + public void testBulkInvalidIndexNameString() throws IOException { + Request request = new Request("POST", "/_bulk"); + + byte[] bytes1 = "{\"create\":{\"_index\":\"".getBytes(StandardCharsets.UTF_8); + byte[] bytes2 = new byte[] { (byte) 0xfe, (byte) 0xfe, (byte) 0xff, (byte) 0xff }; + byte[] bytes3 = "\",\"_id\":\"1\"}}\n{\"field\":1}\n\r\n".getBytes(StandardCharsets.UTF_8); + byte[] bulkBody = new byte[bytes1.length + bytes2.length + bytes3.length]; + System.arraycopy(bytes1, 0, bulkBody, 0, bytes1.length); + System.arraycopy(bytes2, 0, bulkBody, bytes1.length, bytes2.length); + System.arraycopy(bytes3, 0, bulkBody, bytes1.length + bytes2.length, bytes3.length); + + request.setEntity(new ByteArrayEntity(bulkBody, ContentType.APPLICATION_JSON)); + + ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus())); + assertThat(responseException.getMessage(), containsString("could not parse bulk request body")); + assertThat(responseException.getMessage(), containsString("json_parse_exception")); + assertThat(responseException.getMessage(), containsString("Invalid UTF-8")); + } + public void testBulkRequestBodyImproperlyTerminated() throws IOException { Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk"); // missing final line of the bulk body. 
cannot process @@ -61,10 +89,10 @@ public void testBulkRequestBodyImproperlyTerminated() throws IOException { ); ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); - assertThat(responseException.getMessage(), containsString("could not parse bulk request body")); + assertThat(responseException.getMessage(), containsString("The bulk request must be terminated by a newline")); } - public void testIncrementalBulk() throws IOException { + public void testBulkRequest() throws IOException { Request createRequest = new Request("PUT", "/index_name"); createRequest.setJsonEntity(""" { @@ -81,7 +109,6 @@ public void testIncrementalBulk() throws IOException { Request firstBulkRequest = new Request("POST", "/index_name/_bulk"); - // index documents for the rollup job String bulkBody = "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n" + "{\"field\":1}\n" + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n" @@ -113,7 +140,6 @@ public void testBulkWithIncrementalDisabled() throws IOException { Request firstBulkRequest = new Request("POST", "/index_name/_bulk"); - // index documents for the rollup job String bulkBody = "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n" + "{\"field\":1}\n" + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n" @@ -137,7 +163,7 @@ public void testBulkWithIncrementalDisabled() throws IOException { } } - public void testIncrementalMalformed() throws IOException { + public void testMalformedActionLineBulk() throws IOException { Request createRequest = new Request("PUT", "/index_name"); createRequest.setJsonEntity(""" { @@ -154,7 +180,6 @@ public void testIncrementalMalformed() throws IOException { Request bulkRequest = new Request("POST", "/index_name/_bulk"); - // index documents for the rollup job final StringBuilder bulk = new StringBuilder(); bulk.append("{\"index\":{\"_index\":\"index_name\"}}\n"); bulk.append("{\"field\":1}\n"); @@ -170,7 +195,6 @@ public void testIncrementalMalformed() throws IOException { private static void sendLargeBulk() throws IOException { Request bulkRequest = new Request("POST", "/index_name/_bulk"); - // index documents for the rollup job final StringBuilder bulk = new StringBuilder(); bulk.append("{\"delete\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"); int updates = 0; diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 03694c7442d4d..1e80e6de60d65 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -104,19 +104,23 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC boolean defaultRequireDataStream = request.paramAsBoolean(DocWriteRequest.REQUIRE_DATA_STREAM, false); bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.setRefreshPolicy(request.param("refresh")); - bulkRequest.add( - request.requiredContent(), - defaultIndex, - defaultRouting, - defaultFetchSourceContext, - defaultPipeline, - defaultRequireAlias, - defaultRequireDataStream, - defaultListExecutedPipelines, - allowExplicitIndex, - request.getXContentType(), - request.getRestApiVersion() - ); + try { + bulkRequest.add( + request.requiredContent(), + defaultIndex, + 
defaultRouting, + defaultFetchSourceContext, + defaultPipeline, + defaultRequireAlias, + defaultRequireDataStream, + defaultListExecutedPipelines, + allowExplicitIndex, + request.getXContentType(), + request.getRestApiVersion() + ); + } catch (Exception e) { + return channel -> new RestToXContentListener<>(channel).onFailure(parseFailureException(e)); + } return channel -> client.bulk(bulkRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } else { @@ -127,6 +131,15 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } } + private static Exception parseFailureException(Exception e) { + if (e instanceof IllegalArgumentException) { + return e; + } else { + // TODO: Maybe improve in follow-up to be XContentParseException and include line number and column + return new ElasticsearchParseException("could not parse bulk request body", e); + } + } + static class ChunkHandler implements BaseRestHandler.RequestBodyChunkConsumer { private final boolean allowExplicitIndex; @@ -219,9 +232,7 @@ public void handleChunk(RestChannel channel, ReleasableBytesReference chunk, boo } catch (Exception e) { shortCircuit(); - new RestToXContentListener<>(channel).onFailure( - new ElasticsearchParseException("could not parse bulk request body", e) - ); + new RestToXContentListener<>(channel).onFailure(parseFailureException(e)); return; } }
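As a usage illustration of the standardized behavior (a sketch only: the index name is made up, and the second body line is deliberately invalid JSON), both the incremental and the non-incremental bulk handlers should now answer a request like the following with status 400 rather than differing codes:

----
POST /_bulk
{"index":{"_index":"my-test-index"}}
{this is not valid json}
----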