From a577538da6a061a71c713805638e37d9123c1b0a Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Mon, 29 Apr 2024 13:11:13 +0100
Subject: [PATCH 01/19] [ML] Add start model import message (#107941)

The new log message is "[model_id] starting model import"

---
 .../ml/packageloader/action/ModelImporter.java     | 15 +--------------
 .../action/TransportLoadTrainedModelPackage.java   |  2 ++
 .../TransportLoadTrainedModelPackageTests.java     | 10 +++++++---
 3 files changed, 10 insertions(+), 17 deletions(-)

diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java
index 69cb974a4514f..33d5d5982d2b0 100644
--- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java
+++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java
@@ -10,7 +10,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchStatusException;
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
@@ -19,8 +18,6 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.TaskCancelledException;
-import org.elasticsearch.xpack.core.common.notifications.Level;
-import org.elasticsearch.xpack.core.ml.action.AuditMlNotificationAction;
 import org.elasticsearch.xpack.core.ml.action.PutTrainedModelDefinitionPartAction;
 import org.elasticsearch.xpack.core.ml.action.PutTrainedModelVocabularyAction;
 import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ModelPackageConfig;
@@ -59,7 +56,7 @@ public void doImport() throws URISyntaxException, IOException, ElasticsearchStat
         if (Strings.isNullOrEmpty(config.getVocabularyFile()) == false) {
             uploadVocabulary();
 
-            writeDebugNotification(modelId, format("imported model vocabulary [%s]", config.getVocabularyFile()));
+            logger.debug(() -> format("[%s] imported model vocabulary [%s]", modelId, config.getVocabularyFile()));
         }
 
         URI uri = ModelLoaderUtils.resolvePackageLocation(
@@ -152,14 +149,4 @@ private void ex
         client.execute(action, request).actionGet();
     }
-
-    private void writeDebugNotification(String modelId, String message) {
-        client.execute(
-            AuditMlNotificationAction.INSTANCE,
-            new AuditMlNotificationAction.Request(AuditMlNotificationAction.AuditType.INFERENCE, modelId, message, Level.INFO),
-            ActionListener.noop()
-        );
-
-        logger.debug(() -> format("[%s] %s", modelId, message));
-    }
 }
diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java
index f31f01b7c2aae..b0544806d52bd 100644
--- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java
+++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java
@@ -141,6 +141,8 @@ static void importModel(
         try {
             final long relativeStartNanos = System.nanoTime();
 
+            logAndWriteNotificationAtInfo(auditClient, modelId, "starting model import");
+
             modelImporter.doImport();
 
             final long totalRuntimeNanos = System.nanoTime() - relativeStartNanos;
diff --git a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java
index a4d7245acba6f..1e10ea48d03db 100644
--- a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java
+++ b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java
@@ -32,6 +32,7 @@
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 public class TransportLoadTrainedModelPackageTests extends ESTestCase {
@@ -53,7 +54,9 @@ public void testSendsFinishedUploadNotification() {
         );
 
         var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class);
-        verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any());
+        // 2 notifications - the start and finish messages
+        verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any());
+        // Only the last message is captured
         assertThat(notificationArg.getValue().getMessage(), CoreMatchers.containsString("finished model import after"));
     }
 
@@ -145,8 +148,9 @@ private void assertNotificationAndOnFailure(Exception thrownException, Elasticse
         TransportLoadTrainedModelPackage.importModel(client, taskManager, createRequestWithWaiting(), uploader, listener, task);
 
         var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class);
-        verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any());
-        assertThat(notificationArg.getValue().getMessage(), is(message));
+        // 2 notifications - the starting message and the failure
+        verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any());
+        assertThat(notificationArg.getValue().getMessage(), is(message)); // the last message is captured
 
         var receivedException = (ElasticsearchStatusException) failureRef.get();
         assertThat(receivedException.toString(), is(onFailureException.toString()));

From 4b5c5e2dedaa902363dd1ff434b3c789fb29ed08 Mon Sep 17 00:00:00 2001
From: Bogdan Pintea
Date: Mon, 29 Apr 2024 14:27:20 +0200
Subject: [PATCH 02/19] Update BUCKET docs in source (#108005)

This applies changes proposed in a review to the source, so that they're
synchronized to the generated output.
---
 .../esql/functions/examples/bucket.asciidoc   |  2 +-
 .../functions/kibana/definition/bucket.json   |  3 ++-
 .../expression/function/grouping/Bucket.java  | 23 +++++++++++++------
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc
index f66f737b7d4b5..e1bba0529d7db 100644
--- a/docs/reference/esql/functions/examples/bucket.asciidoc
+++ b/docs/reference/esql/functions/examples/bucket.asciidoc
@@ -108,7 +108,6 @@ include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg]
 |===
 include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg-result]
 |===
-
 `BUCKET` may be used in both the aggregating and grouping part of the
 <> command provided that in the aggregating part the function is
 referenced by an alias defined in the
@@ -121,3 +120,4 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression]
 |===
 include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result]
 |===
+
diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json
index 986c0e8f91d33..7141ca4c27443 100644
--- a/docs/reference/esql/functions/kibana/definition/bucket.json
+++ b/docs/reference/esql/functions/kibana/definition/bucket.json
@@ -943,6 +943,7 @@
     "FROM employees\n| STATS COUNT(*) by bs = BUCKET(salary, 20, 25324, 74999)\n| SORT bs",
     "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b",
     "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())",
-    "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket"
+    "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket",
+    "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2"
   ]
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java
index 218d469d626f9..32073d830841f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java
@@ -93,7 +93,7 @@ public class Bucket extends GroupingFunction implements Validatable, TwoOptional
     @Example(
         description = """
             `BUCKET` can work in two modes: one in which the size of the bucket is computed
-            based on a buckets count recommendation (four parameters) and a range and
+            based on a buckets count recommendation (four parameters) and a range, and
             another in which the bucket size is provided directly (two parameters).

            Using a target number of buckets, a start of a range, and an end of a range,
@@ -127,8 +127,8 @@ another in which the bucket size is provided directly (two parameters).
     @Example(description = """
         If the desired bucket size is known in advance, simply provide it as the second
         argument, leaving the range out:""", file = "bucket", tag = "docsBucketWeeklyHistogramWithSpan", explanation = """
-        NOTE: When providing the bucket size as the second parameter, its type must be
-        of a time duration or date period type."""),
+        NOTE: When providing the bucket size as the second parameter, it must be a time
+        duration or date period."""),
     @Example(
         description = "`BUCKET` can also operate on numeric fields. For example, to create a salary histogram:",
         file = "bucket",
@@ -138,10 +138,10 @@ another in which the bucket size is provided directly (two parameters).
         You have to find the `min` and `max` separately. {esql} doesn't yet have an easy way to do that automatically."""
     ),
     @Example(description = """
-        If the desired bucket size is known in advance, simply provide it as the second
-        argument, leaving the range out:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """
-        NOTE: When providing the bucket size as the second parameter, its type must be
-        of a floating type."""),
+        The range can be omitted if the desired bucket size is known in advance. Simply
+        provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """
+        NOTE: When providing the bucket size as the second parameter, it must be
+        of a floating point type."""),
     @Example(
         description = "Create hourly buckets for the last 24 hours, and calculate the number of events per hour:",
         file = "bucket",
@@ -151,6 +151,15 @@ another in which the bucket size is provided directly (two parameters).
         description = "Create monthly buckets for the year 1985, and calculate the average salary by hiring month",
         file = "bucket",
         tag = "bucket_in_agg"
+    ),
+    @Example(
+        description = """
+            `BUCKET` may be used in both the aggregating and grouping part of the
+            <> command provided that in the aggregating
+            part the function is referenced by an alias defined in the
+            grouping part, or that it is invoked with the exact same expression:""",
+        file = "bucket",
+        tag = "reuseGroupingFunctionWithExpression"
+    )
 }
 )
 public Bucket(

From 89a6c7e6285aa275ca6f38174693c647d8d88075 Mon Sep 17 00:00:00 2001
From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com>
Date: Mon, 29 Apr 2024 15:37:29 +0300
Subject: [PATCH 03/19] [TEST] ignore transient error in overlapping downsample (#108001)

* Add retries in concurrent downsampling action

* [TEST] ignore transient error in overlapping downsample

---
 .../downsample/DownsampleActionSingleNodeTests.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
index d68f6e8d11f81..80bb0368a1afc 100644
--- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
+++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
@@ -555,8 +555,13 @@ public void onFailure(Exception e) {
             }
         });
 
-        // Downsample with retries, in case the downsampled index is not ready.
- assertBusy(() -> downsample(sourceIndex, downsampleIndex, config), 120, TimeUnit.SECONDS); + assertBusy(() -> { + try { + client().execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)); + } catch (ElasticsearchException e) { + fail("transient failure due to overlapping downsample operations"); + } + }); // We must wait until the in-progress downsample ends, otherwise data will not be cleaned up assertBusy(() -> assertTrue("In progress downsample did not complete", downsampleListener.success), 60, TimeUnit.SECONDS); From b0283eb6cf14cbc5227dd5158c462ada598802af Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 29 Apr 2024 15:50:59 +0300 Subject: [PATCH 04/19] Conditionally display effective retention (#107964) --- .../get/GetComponentTemplateAction.java | 9 +++++++- .../get/GetComposableIndexTemplateAction.java | 13 +++++++----- .../post/SimulateIndexTemplateResponse.java | 20 +++++++----------- .../datastreams/GetDataStreamAction.java | 8 ++++++- .../ExplainDataStreamLifecycleAction.java | 8 ++++++- .../GetDataStreamLifecycleAction.java | 7 ++++++- .../cluster/metadata/DataStreamLifecycle.java | 13 ++++++++++++ .../metadata/DataStreamLifecycleTests.java | 21 +++++++++++++++++++ 8 files changed, 77 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3bf9c3715b29a..8ef1df3d29a58 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -196,7 +197,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), componentTemplate.getKey()); builder.field(COMPONENT_TEMPLATE.getPreferredName()); - componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + componentTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index f2fcbeff73c37..07ebfe123c98f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -157,10 +158,6 @@ public Map indexTemplates() { return indexTemplates; } - public RolloverConfiguration getRolloverConfiguration() { - return rolloverConfiguration; - } - public DataStreamGlobalRetention getGlobalRetention() { return globalRetention; } @@ -199,7 +196,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), indexTemplate.getKey()); builder.field(INDEX_TEMPLATE.getPreferredName()); - indexTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + indexTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 52d40626f97ed..6985e86fb287a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,22 +70,10 @@ public SimulateIndexTemplateResponse( this.globalRetention = globalRetention; } - public Template getResolvedTemplate() { - return resolvedTemplate; - } - - public Map> getOverlappingTemplates() { - return overlappingTemplates; - } - public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } - public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; - } - public SimulateIndexTemplateResponse(StreamInput in) throws IOException { super(in); resolvedTemplate = in.readOptionalWriteable(Template::new); @@ -132,7 +121,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); if (this.resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); - this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration, globalRetention); + this.resolvedTemplate.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index f2a581472303b..01ce7cbd3346b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -20,6 +20,7 @@ import 
org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -546,7 +547,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.startArray(DATA_STREAMS_FIELD.getPreferredName()); for (DataStreamInfo dataStream : dataStreams) { - dataStream.toXContent(builder, params, rolloverConfiguration, globalRetention); + dataStream.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endArray(); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 17d33ae9167fd..36fc66c67c842 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -212,7 +213,12 @@ public Iterator toXContentChunked(ToXContent.Params outerP return builder; }), Iterators.map(indices.iterator(), explainIndexDataLifecycle -> (builder, params) -> { builder.field(explainIndexDataLifecycle.getIndex()); - explainIndexDataLifecycle.toXContent(builder, outerParams, rolloverConfiguration, globalRetention); + explainIndexDataLifecycle.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + rolloverConfiguration, + globalRetention + ); return builder; }), Iterators.single((builder, params) -> { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index 1c9dbb0575a1d..c7384e7003963 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -174,7 +174,12 @@ public XContentBuilder toXContent( builder.field(NAME_FIELD.getPreferredName(), dataStreamName); if (lifecycle != null) { builder.field(LIFECYCLE_FIELD.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention); + lifecycle.toXContent( + builder, + org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 9e23ffed6e8c5..9c89945046126 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,11 +24,13 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -355,6 +357,17 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE return PARSER.parse(parser, null); } + /** + * Adds a retention param to signal that this serialisation should include the effective retention metadata + */ + public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { + boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); + return new DelegatingMapParams( + Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), + params + ); + } + public static Builder newBuilder(DataStreamLifecycle lifecycle) { return new Builder().dataRetention(lifecycle.getDataRetention()) .downsampling(lifecycle.getDownsampling()) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index f6f915b0e1a3d..a2b18c3328fd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -38,6 +39,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; +import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -343,6 +345,25 @@ public void testEffectiveRetention() { } } + public void testEffectiveRetentionParams() { + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) + ); + 
assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) + ); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + } + } + @Nullable public static DataStreamLifecycle randomLifecycle() { return DataStreamLifecycle.newBuilder() From e4f0b193c6e56373be027ede79b4eb63c0b0336b Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 29 Apr 2024 16:35:46 +0200 Subject: [PATCH 05/19] Optimise BinaryRangeAggregator for single value fields (#108016) this commit optimise range aggregations over IP for single value fields. --- docs/changelog/108016.yaml | 5 ++ .../bucket/range/BinaryRangeAggregator.java | 73 +++++++++++++------ 2 files changed, 56 insertions(+), 22 deletions(-) create mode 100644 docs/changelog/108016.yaml diff --git a/docs/changelog/108016.yaml b/docs/changelog/108016.yaml new file mode 100644 index 0000000000000..0aa3f86a6f859 --- /dev/null +++ b/docs/changelog/108016.yaml @@ -0,0 +1,5 @@ +pr: 108016 +summary: Optimise `BinaryRangeAggregator` for single value fields +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index 51901b422c861..1a793ecd80b11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -7,9 +7,13 @@ */ package org.elasticsearch.search.aggregations.bucket.range; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -130,9 +134,9 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBase { - final long[] froms, tos, maxTos; - final SortedSetDocValues values; - final LeafBucketCollector sub; + private final long[] froms, tos, maxTos; + private final DocCollector collector; + private final LeafBucketCollector sub; SortedSetRangeLeafCollector(SortedSetDocValues values, Range[] ranges, LeafBucketCollector sub) throws IOException { super(sub, values); @@ -141,7 +145,23 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final SortedDocValues singleton = DocValues.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.ordValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + int lo = 0; + for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { + 
lo = collect(doc, ord, bucket, lo); + } + } + }; + } this.sub = sub; froms = new long[ranges.length]; tos = new long[ranges.length]; // inclusive @@ -174,12 +194,7 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - int lo = 0; - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { - lo = collect(doc, ord, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, long ord, long bucket, int lowBound) throws IOException { @@ -236,10 +251,10 @@ private int collect(int doc, long ord, long bucket, int lowBound) throws IOExcep abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollectorBase { - final Range[] ranges; - final BytesRef[] maxTos; - final SortedBinaryDocValues values; - final LeafBucketCollector sub; + private final Range[] ranges; + private final BytesRef[] maxTos; + private final DocCollector collector; + private final LeafBucketCollector sub; SortedBinaryRangeLeafCollector(SortedBinaryDocValues values, Range[] ranges, LeafBucketCollector sub) { super(sub, values); @@ -248,7 +263,22 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.binaryValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + for (int i = 0, lo = 0; i < values.docValueCount(); ++i) { + lo = collect(doc, values.nextValue(), bucket, lo); + } + } + }; + } this.sub = sub; this.ranges = ranges; maxTos = new BytesRef[ranges.length]; @@ -266,13 +296,7 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - final int valuesCount = values.docValueCount(); - for (int i = 0, lo = 0; i < valuesCount; ++i) { - final BytesRef value = values.nextValue(); - lo = collect(doc, value, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, BytesRef value, long bucket, int lowBound) throws IOException { @@ -327,6 +351,11 @@ private int collect(int doc, BytesRef value, long bucket, int lowBound) throws I protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException; } + @FunctionalInterface + private interface DocCollector { + void collect(int doc, long bucket) throws IOException; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( From ee262954eedd3db27f75f5bb450b3422f965e1f8 Mon Sep 17 00:00:00 2001 From: eyalkoren <41850454+eyalkoren@users.noreply.github.com> Date: Mon, 29 Apr 2024 17:41:34 +0300 Subject: [PATCH 06/19] Adding aggregations support for the `_ignored` field (#101373) Enables aggregations on the _ignored metadata field replacing the stored field with doc values. 
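
For example, with doc values in place, a terms aggregation over the ignored
field names (the same query the new YAML and rolling-upgrade tests issue)
becomes possible:

    GET _search
    {
      "aggs": {
        "ignored_terms": {
          "terms": {
            "field": "_ignored"
          }
        }
      }
    }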
--- docs/changelog/101373.yaml | 6 + .../mapping/fields/ignored-field.asciidoc | 17 + docs/reference/search/profile.asciidoc | 4 +- .../aggregations/ignored_metadata_field.yml | 302 ++++++++++++++++++ .../datastreams/LogsDataStreamIT.java | 4 +- .../rest-api-spec/test/30_inner_hits.yml | 4 +- .../ICUCollationKeywordFieldMapperTests.java | 4 +- .../IgnoredMetaFieldRollingUpgradeIT.java | 210 ++++++++++++ .../rest-api-spec/test/search/370_profile.yml | 6 +- .../index/mapper/IgnoredMetadataFieldIT.java | 164 ++++++++++ .../elasticsearch/index/IndexVersions.java | 1 + .../index/get/ShardGetService.java | 42 +++ .../index/mapper/IgnoredFieldMapper.java | 72 ++++- .../mapper/CompletionFieldMapperTests.java | 4 +- .../index/mapper/IgnoredFieldMapperTests.java | 43 ++- .../index/mapper/IgnoredFieldTypeTests.java | 8 + .../index/mapper/KeywordFieldMapperTests.java | 4 +- .../index/mapper/MapperScriptTestCase.java | 2 +- .../mapper/WildcardFieldMapperTests.java | 4 +- 19 files changed, 867 insertions(+), 34 deletions(-) create mode 100644 docs/changelog/101373.yaml create mode 100644 modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java diff --git a/docs/changelog/101373.yaml b/docs/changelog/101373.yaml new file mode 100644 index 0000000000000..53b5680301c79 --- /dev/null +++ b/docs/changelog/101373.yaml @@ -0,0 +1,6 @@ +pr: 101373 +summary: Adding aggregations support for the `_ignored` field +area: Search +type: feature +issues: + - 59946 diff --git a/docs/reference/mapping/fields/ignored-field.asciidoc b/docs/reference/mapping/fields/ignored-field.asciidoc index 5fd6c478438ab..48f8626c5ab0b 100644 --- a/docs/reference/mapping/fields/ignored-field.asciidoc +++ b/docs/reference/mapping/fields/ignored-field.asciidoc @@ -43,3 +43,20 @@ GET _search } } -------------------------------------------------- + +Since 8.15.0, the `_ignored` field supports aggregations as well. 
+For example, the below query finds all fields that got ignored: + +[source,console] +-------------------------------------------------- +GET _search +{ + "aggs": { + "ignored_fields": { + "terms": { + "field": "_ignored" + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 48c65ed0abc7b..3fed14231808c 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -194,7 +194,7 @@ The API returns the following result: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { @@ -1051,7 +1051,7 @@ And here is the fetch profile: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml new file mode 100644 index 0000000000000..fd15d24a5f3ca --- /dev/null +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml @@ -0,0 +1,302 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + city: + type: keyword + ignore_above: 10 + email: + type: keyword + ignore_above: 20 + date_of_birth: + type: date + format: "dd-MM-yyyy" + ignore_malformed: true + newsletter: + type: boolean + ignore_malformed: true + ip_address: + type: ip + ignore_malformed: true + products: + type: keyword + ignore_above: 12 + total_price: + type: double + ignore_malformed: true + location: + type: geo_point + ignore_malformed: true + order_datetime: + type: date + format: "yyyy-MM-dd HH:mm:ss" + ignore_malformed: true + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { "_id": "001" } } + - { "city": "Milano", email: "alice@gmail.com", date_of_birth: "12-03-1990", newsletter: true, ip_address: "130.34.45.202", products: ["it-002-4567", "it-001-6679"], total_price: "57.99", location: [45.46, 9.16], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "002" } } + - { "city": "Roma", email: "bob@gmail.com", date_of_birth: "15-05-1991", newsletter: false, ip_address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", products: [ "it-002-112467", "it-002-5579" ], total_price: "10.99", location: [ -44.78, 19.20 ], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "003" } } + - { "city": "Venezia", email: "alice@gmail.com", date_of_birth: "01-09-1994", newsletter: false, ip_address: "fe80::1", products: [ "it-002", "it-003-17171717" ], total_price: "-12.99", location: [ 182.22, "20.12" ], order_datetime: "2021-05-02" } + - { "index": { "_id": "004" } } + - { "city": "Cortina d'Ampezzo", email: "a-very-long-email-address-that-should-be-ignored@gmail.com", date_of_birth: "05-06-1989", newsletter: t, ip_address: "::1", products: [ "it101020203030", "it" ], total_price: "57", location: [ 0, 9.16 ], order_datetime: "2021-05-01-20:01:37" } + - { "index": { "_id": "005" } } + - { "city": "Cortina d'Ampezzo", email: "dave@gmail.com", date_of_birth: "12-03-1990 12:30:45", newsletter: t, ip_address: "130.999.36.201", products: [ "it-002-2213", "it-001-7709" ], total_price: "twentytree/12", location: [ "45.33, 8.20" ], order_datetime: 
"20210501 20:01:37" } + - { "index": { "_id": "006" } } + - { "city": "Milano", email: "eric@gmail.com", date_of_birth: "19-12-90", newsletter: f, ip_address: "130.34.45", products: [ "it-002-555", "it-001-5589990000" ], total_price: "", location: [ "45.99", "9.16" ], order_datetime: "2021-05-01 20:01:37.123" } + - { "index": { "_id": "007" } } + - { "city": "Venezia", email: "luke-skywalker@gmail.com", date_of_birth: "20/03/1992", newsletter: f, ip_address: "130..45.202", products: [ "it-002-1234", "it-001-1213" ], total_price: "57.99.12", location: [ 45, 20 ], order_datetime: "2021-05-03 19:38:22" } + - { "index": { "_id": "008" } } + - { "city": "Firenze", email: "bob@gmail.com", date_of_birth: "02311988", newsletter: "", ip_address: ":::1", products: ["", ""], total_price: "0.0", location: [ 46.22, 11.22 ], order_datetime: "2021-05-03 20:01" } + - { "index": { "_id": "009" } } + - { "city": "Firenze", email: "tom@gmail.com", date_of_birth: "16-11-1990", newsletter: "not_sure", ip_address: "2001:0db8::1234:5678::", products: "it-002-4567", total_price: "0,99", location: [ 18.18, 19.19 ], order_datetime: "2021-05-03 20-01-55" } + - { "index": { "_id": "010" } } + - { "city": "Cortina d'Ampezzo", email: "alice@gmail.com", date_of_birth: "18-12-1992", newsletter: "false", ip_address: ":::1", products: "it-002-1890994567", total_price: "14,27", location: [ 45.46-9.16 ], order_datetime: "2021-05-01 20:05:37" } + - { "index": { "_id": "011" } } + - { "city": "Roma", email: "paul@gmail.com", date_of_birth: "17.15.1990", newsletter: "true", ip_address: "", products: [ "it-002-1019", "it-001-5578", "it-009-9901256" ], total_price: "49.99", location: 45.22, order_datetime: "2021-05-01T20:02:00" } + +--- +"terms aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 9 } + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.3.key: "date_of_birth" } + - match: { aggregations.ignored_terms.buckets.3.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.4.key: "newsletter" } + - match: { aggregations.ignored_terms.buckets.4.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.5.key: "total_price" } + - match: { aggregations.ignored_terms.buckets.5.doc_count: 4 } + - match: { aggregations.ignored_terms.buckets.6.key: "city" } + - match: { aggregations.ignored_terms.buckets.6.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.7.key: "location" } + - match: { aggregations.ignored_terms.buckets.7.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.8.key: "email" } + - match: { aggregations.ignored_terms.buckets.8.doc_count: 2 } + +--- +"terms aggregation on _ignored metadata field with top hits": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + size: 3 + aggs: + 
top_by_datetime: + top_hits: + sort: + - order_datetime: { order: desc } + size: 1 + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 3 } + + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.0.top_by_datetime.hits.hits.0._ignored: ["date_of_birth", "email", "ip_address", "newsletter", "total_price"]} + + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.top_by_datetime.hits.hits.0._ignored: ["order_datetime", "products"]} + + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.2.top_by_datetime.hits.hits.0._ignored: ["city", "ip_address", "location", "products", "total_price"]} + +--- +"date histogram aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_histo: + date_histogram: + field: order_datetime + calendar_interval: day + aggs: + ignored_terms: + terms: + field: _ignored + size: 2 + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_histo.buckets: 3 } + + - match: { aggregations.order_datetime_histo.buckets.0.key_as_string: "2021-05-01 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.0.doc_count: 3 } + - match: { aggregations.order_datetime_histo.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + + - match: { aggregations.order_datetime_histo.buckets.1.key_as_string: "2021-05-02 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.1.doc_count: 0 } + - length: { aggregations.order_datetime_histo.buckets.1.ignored_terms.buckets: 0 } + + - match: { aggregations.order_datetime_histo.buckets.2.key_as_string: "2021-05-03 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.2.doc_count: 1 } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + +--- +"cardinality aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_cardinality: + cardinality: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_cardinality.value: 9 } + +--- +"value count aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_value_count: + value_count: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_value_count.value: 42 } + +--- +"date range aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_range: + date_range: + field: order_datetime + format: "dd-MM-yyyy" + ranges: + - to: "03-05-2021" + - from: "02-05-2021" + aggs: + ignored_terms: + 
terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_range.buckets: 2 } + + - match: { aggregations.order_datetime_range.buckets.0.to_as_string: "03-05-2021" } + - match: { aggregations.order_datetime_range.buckets.0.doc_count: 3 } + - length: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.1: { key: "city", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.3: { key: "location", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + + - match: { aggregations.order_datetime_range.buckets.1.from_as_string: "02-05-2021" } + - match: { aggregations.order_datetime_range.buckets.1.doc_count: 1 } + - length: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.3: { key: "newsletter", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + +--- +"random sampler aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + sample: + random_sampler: + probability: 1.0 # make sure buckets count is consistent + seed: 43 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.sample.ignored_terms.buckets: 9 } + - match: { aggregations.sample.ignored_terms.buckets.0: { key: "ip_address", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.1: { key: "order_datetime", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.2: { key: "products", doc_count: 6 } } + - match: { aggregations.sample.ignored_terms.buckets.3: { key: "date_of_birth", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.4: { key: "newsletter", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.5: { key: "total_price", doc_count: 4 } } + - match: { aggregations.sample.ignored_terms.buckets.6: { key: "city", doc_count: 3 } } + - match: { aggregations.sample.ignored_terms.buckets.7: { key: "location", doc_count: 3 } } + - match: { aggregations.sample.ignored_terms.buckets.8: { key: "email", doc_count: 2 } } + +--- +"filter aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + features: close_to + - do: + search: + body: + size: 0 + aggs: + total: + sum: + field: total_price + filter_ignored: + filter: + term: + _ignored: "email" + + - match: { hits.total.value: 11 } + - close_to: { aggregations.total.value: { value: 162.98, 
error: 0.01 } } + - match: { aggregations.filter_ignored.doc_count: 2 } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index eed06c5c69332..2370cca08b23e 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -19,6 +19,7 @@ import java.util.Map; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -550,9 +551,8 @@ public void testNoSubobjects() throws Exception { // "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" assertThat(fields.get("test.start-timestamp"), is(List.of("not a date"))); assertThat(ignored.size(), is(2)); - assertThat(ignored.get(0), is("vulnerability.textual_score")); + assertThat(ignored, containsInAnyOrder("test.start_timestamp", "vulnerability.textual_score")); // the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type - assertThat(ignored.get(1), is("test.start_timestamp")); assertThat(ignoredFieldValues.get("test.start_timestamp").size(), is(1)); assertThat(ignoredFieldValues.get("test.start_timestamp"), is(List.of("not a date"))); assertThat(ignoredFieldValues.get("vulnerability.textual_score").size(), is(1)); diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index eff9a9beb35bc..40d646cc645f5 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -120,7 +120,7 @@ teardown: --- profile fetch: - skip: - version: ' - 8.13.99' + version: ' - 8.14.99' reason: fetch fields and stored_fields using ValueFetcher - do: @@ -140,7 +140,7 @@ profile fetch: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java index 3a48a6bcce4e0..99017733dd989 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java @@ -281,8 +281,8 @@ public void testIgnoreAbove() throws IOException { fields = doc.rootDoc().getFields("field"); assertThat(fields, empty()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", 
fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testUpdateIgnoreAbove() throws IOException { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java new file mode 100644 index 0000000000000..874fac615b9b1 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +public class IgnoredMetaFieldRollingUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + private static final String TERMS_AGG_QUERY = Strings.format(""" + { + "aggs": { + "ignored_terms": { + "terms": { + "field": "_ignored" + } + } + } + }"""); + + public IgnoredMetaFieldRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testAggregation() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("index-old-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-old-agg", "foofoo", "1024.12.321.777", "1")), RestStatus.CREATED); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + assertTermsAggIgnoredMetadataField("index-old-agg"); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus(client().performRequest(createNewIndex("index-new-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-new-agg", "barbar", "555.222.111.000", "2")), RestStatus.CREATED); + + assertTermsAggIgnoredMetadataField("index-*"); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + assertTermsAggIgnoredMetadataField("index-old-agg"); + } + assertTermsAggIgnoredMetadataField("index-new-agg"); + } + } + + public void testIgnoredMetaFieldGetWithIgnoredQuery() throws IOException { + if (isOldCluster()) { + 
assertRestStatus(client().performRequest(createNewIndex("old-get-ignored-index")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "foofoo", "192.168.10.1234", "1")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "1")); + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "barbar", "192.168.256.256", "2")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "2")); + // NOTE: here we are reading documents from an index created by an older version of Elasticsearch where the _ignored + // field could be stored depending on the version of Elasticsearch which created the index. The mapper for the _ignored field + // will keep the stored field if necessary to avoid mixing documents where the _ignored field is stored and documents where it + // is not, in the same index. + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + + // NOTE: The stored field is dropped only once a new index is created by a new version of Elasticsearch. + final String newVersionIndexName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument(newVersionIndexName, "foobar", "192.168.777", "3")), RestStatus.CREATED); + final Map docFromNewIndex = entityAsMap(getWithIgnored(newVersionIndexName, "3")); + assertThat(((List) docFromNewIndex.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + } + + public void testIgnoredMetaFieldGetWithoutIgnoredQuery() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("old-get-index")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "foofoo", "192.168.169.300", "1")), RestStatus.CREATED); + final Map doc = entityAsMap(get("old-get-index", "1")); + if (getOldClusterIndexVersion().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertNull(doc.get(IgnoredFieldMapper.NAME)); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + final Map doc1 = entityAsMap(get("old-get-index", "1")); + assertNull(doc1.get(IgnoredFieldMapper.NAME)); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "barbar", "192.168.0.1234", "2")), RestStatus.CREATED); + final Map doc2 = entityAsMap(get("old-get-index", "2")); + assertNull(doc2.get(IgnoredFieldMapper.NAME)); + + final String newVersionIndexName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + // NOTE: new Elasticsearch version does not used stored field for _ignored due to writing an index created by the new version + assertRestStatus( + client().performRequest(indexDocument(newVersionIndexName, "foobar", "263.192.168.12", "3")), + RestStatus.CREATED + ); + final Map docFromNewIndex = entityAsMap(get(newVersionIndexName, "3")); + assertNull(docFromNewIndex.get(IgnoredFieldMapper.NAME)); 
+ } + } + + private static Response getWithIgnored(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId + "?stored_fields=_ignored")); + } + + private static Response get(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId)); + } + + private static Request waitForClusterStatus(final String statusColor, final String timeoutSeconds) { + final Request waitForGreen = new Request("GET", "/_cluster/health"); + waitForGreen.addParameter("wait_for_status", statusColor); + waitForGreen.addParameter("timeout", timeoutSeconds); + waitForGreen.addParameter("level", "shards"); + return waitForGreen; + } + + private static void assertRestStatus(final Response indexDocumentResponse, final RestStatus restStatus) { + assertThat(indexDocumentResponse.getStatusLine().getStatusCode(), Matchers.equalTo(restStatus.getStatus())); + } + + private static Request createNewIndex(final String indexName) throws IOException { + final Request createIndex = new Request("PUT", "/" + indexName); + final XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("mappings") + .startObject("properties") + .startObject("keyword") + .field("type", "keyword") + .field("ignore_above", 3) + .endObject() + .startObject("ip_address") + .field("type", "ip") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndex.setJsonEntity(Strings.toString(mappings)); + return createIndex; + } + + private static Request indexDocument(final String indexName, final String keywordValue, final String ipAddressValue, final String docId) + throws IOException { + final Request indexRequest = new Request("POST", "/" + indexName + "/_doc/" + docId); + final XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .field("keyword", keywordValue) + .field("ip_address", ipAddressValue) + .endObject(); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity(Strings.toString(doc)); + return indexRequest; + } + + @SuppressWarnings("unchecked") + private static void assertTermsAggIgnoredMetadataField(final String indexPattern) throws IOException { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Response aggResponse = client().performRequest(aggRequest); + final Map aggResponseEntityAsMap = entityAsMap(aggResponse); + final Map aggregations = (Map) aggResponseEntityAsMap.get("aggregations"); + final Map ignoredTerms = (Map) aggregations.get("ignored_terms"); + final List> buckets = (List>) ignoredTerms.get("buckets"); + assertThat(buckets.stream().map(bucket -> bucket.get("key")).toList(), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + + private static void assertTermsAggIgnoredMetadataFieldException(final String indexPattern, final String exceptionMessage) { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Exception responseException = assertThrows(ResponseException.class, () -> client().performRequest(aggRequest)); + assertThat(responseException.getMessage(), Matchers.containsString(exceptionMessage)); + } + +} diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml index 817c62dbdd12d..7625f19557e9b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -41,7 +41,7 @@ fetch fields: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 2 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } @@ -74,7 +74,7 @@ fetch source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 3 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } @@ -139,7 +139,7 @@ fetch nested source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java new file mode 100644 index 0000000000000..cfe5a2b69c6da --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.junit.Before; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.hasSize; + +@SuppressWarnings("resource") +public class IgnoredMetadataFieldIT extends ESSingleNodeTestCase { + + public static final String NUMERIC_FIELD_NAME = "numeric_field"; + public static final String DATE_FIELD_NAME = "date_field"; + public static final String TEST_INDEX = "test-index"; + public static final String CORRECT_FIELD_TYPE_DOC_ID = "1"; + public static final String WRONG_FIELD_TYPE_DOC_ID = "2"; + + @Before + public void createTestIndex() throws Exception { + CreateIndexResponse createIndexResponse = null; + try { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject(NUMERIC_FIELD_NAME) + .field("type", "long") + .field("ignore_malformed", true) + .endObject() + .startObject(DATE_FIELD_NAME) + .field("type", "date") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndexResponse = indicesAdmin().prepareCreate(TEST_INDEX).setMapping(mapping).get(); + assertAcked(createIndexResponse); + indexTestDoc(NUMERIC_FIELD_NAME, CORRECT_FIELD_TYPE_DOC_ID, "42"); + indexTestDoc(NUMERIC_FIELD_NAME, WRONG_FIELD_TYPE_DOC_ID, "forty-two"); + } finally { + if (createIndexResponse != null) { + createIndexResponse.decRef(); + } + } + } + + public void testIgnoredMetadataFieldFetch() { + SearchResponse searchResponse1 = null; + SearchResponse searchResponse2 = null; + try { + searchResponse1 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(CORRECT_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse1, 1); + SearchHit hit = searchResponse1.getHits().getAt(0); + DocumentField numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals(42, (long) numericField.getValue()); + DocumentField ignoredField = hit.field(IgnoredFieldMapper.NAME); + assertNull(ignoredField); + + searchResponse2 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(WRONG_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse2, 1); + hit = searchResponse2.getHits().getAt(0); + numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals("forty-two", numericField.getIgnoredValues().get(0)); + ignoredField = 
hit.field(IgnoredFieldMapper.NAME); + assertNotNull(ignoredField); + assertEquals(NUMERIC_FIELD_NAME, ignoredField.getValue()); + } finally { + if (searchResponse1 != null) { + searchResponse1.decRef(); + } + if (searchResponse2 != null) { + searchResponse2.decRef(); + } + } + } + + public void testIgnoredMetadataFieldAggregation() { + SearchResponse avgSearch = null; + SearchResponse termsSearch = null; + try { + indexTestDoc(NUMERIC_FIELD_NAME, "correct-44", "44"); + avgSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(avg("numeric-field-aggs").field(NUMERIC_FIELD_NAME)) + .get(); + assertTrue(avgSearch.hasAggregations()); + InternalAvg avg = avgSearch.getAggregations().get("numeric-field-aggs"); + assertNotNull(avg); + assertEquals(43.0, avg.getValue(), 0.0); + + indexTestDoc(NUMERIC_FIELD_NAME, "wrong-44", "forty-four"); + indexTestDoc(DATE_FIELD_NAME, "wrong-date", "today"); + termsSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(terms("ignored-field-aggs").field(IgnoredFieldMapper.NAME)) + .get(); + assertTrue(termsSearch.hasAggregations()); + StringTerms terms = termsSearch.getAggregations().get("ignored-field-aggs"); + assertNotNull(terms); + assertThat(terms.getBuckets(), hasSize(2)); + StringTerms.Bucket numericFieldBucket = terms.getBucketByKey(NUMERIC_FIELD_NAME); + assertEquals(NUMERIC_FIELD_NAME, numericFieldBucket.getKeyAsString()); + assertEquals(2, numericFieldBucket.getDocCount()); + StringTerms.Bucket dateFieldBucket = terms.getBucketByKey(DATE_FIELD_NAME); + assertEquals(DATE_FIELD_NAME, dateFieldBucket.getKeyAsString()); + assertEquals(1, dateFieldBucket.getDocCount()); + } finally { + if (avgSearch != null) { + avgSearch.decRef(); + } + if (termsSearch != null) { + termsSearch.decRef(); + } + } + } + + private void indexTestDoc(String testField, String docId, String testValue) { + DocWriteResponse docWriteResponse = null; + try { + docWriteResponse = client().prepareIndex(TEST_INDEX) + .setId(docId) + .setSource(testField, testValue) + .setRefreshPolicy(IMMEDIATE) + .get(); + assertEquals(RestStatus.CREATED, docWriteResponse.status()); + } finally { + if (docWriteResponse != null) { + docWriteResponse.decRef(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6edd43683519e..78f07c8a137b9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -104,6 +104,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, Version.LUCENE_9_10_0); public static final IndexVersion TIME_SERIES_ROUTING_HASH_IN_ID = def(8_504_00_0, Version.LUCENE_9_10_0); public static final IndexVersion DEFAULT_DENSE_VECTOR_TO_INT8_HNSW = def(8_505_00_0, Version.LUCENE_9_10_0); + public static final IndexVersion DOC_VALUES_FOR_IGNORED_META_FIELD = def(8_505_00_1, Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3758858a5b10a..3e191d0ab1e25 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.get; +import org.apache.lucene.index.SortedSetDocValues; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -17,10 +18,13 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; @@ -35,6 +39,8 @@ import org.elasticsearch.search.lookup.Source; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -308,6 +314,7 @@ private GetResult innerGetFetch( } // put stored fields into result objects + final IndexVersion indexVersion = indexSettings.getIndexVersionCreated(); if (leafStoredFieldLoader.storedFields().isEmpty() == false) { Set needed = new HashSet<>(); if (storedFields != null) { @@ -320,6 +327,10 @@ private GetResult innerGetFetch( if (false == needed.contains(entry.getKey())) { continue; } + if (IgnoredFieldMapper.NAME.equals(entry.getKey()) + && indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + continue; + } MappedFieldType ft = mapperService.fieldType(entry.getKey()); if (ft == null) { continue; // user asked for a non-existent field, ignore it @@ -333,6 +344,21 @@ private GetResult innerGetFetch( } } + // NOTE: when _ignored is requested via `stored_fields` we need to load it from doc values instead of loading it from stored fields. + // The _ignored field used to be stored, but as a result of supporting aggregations on it, it moved from using a stored field to + // using doc values. 
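+ // As a hypothetical example (the index name and document id below are illustrative, not part of this change):
+ //   GET /my-index/_doc/1?stored_fields=_ignored
+ // against an index created on or after DOC_VALUES_FOR_IGNORED_META_FIELD is answered by the branch
+ // below, which rebuilds the _ignored DocumentField from SortedSetDocValues via loadIgnoredMetadataField.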
+ if (indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) + && storedFields != null + && Arrays.asList(storedFields).contains(IgnoredFieldMapper.NAME)) { + final DocumentField ignoredDocumentField = loadIgnoredMetadataField(docIdAndVersion); + if (ignoredDocumentField != null) { + if (metadataFields == null) { + metadataFields = new HashMap<>(); + } + metadataFields.put(IgnoredFieldMapper.NAME, ignoredDocumentField); + } + } + BytesReference sourceBytes = null; if (mapperService.mappingLookup().isSourceEnabled() && fetchSourceContext.fetchSource()) { Source source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }) @@ -357,6 +383,22 @@ private GetResult innerGetFetch( ); } + private static DocumentField loadIgnoredMetadataField(final DocIdAndVersion docIdAndVersion) throws IOException { + final SortedSetDocValues ignoredDocValues = docIdAndVersion.reader.getContext() + .reader() + .getSortedSetDocValues(IgnoredFieldMapper.NAME); + if (ignoredDocValues == null + || ignoredDocValues.advanceExact(docIdAndVersion.docId) == false + || ignoredDocValues.docValueCount() <= 0) { + return null; + } + final List ignoredValues = new ArrayList<>(ignoredDocValues.docValueCount()); + for (int i = 0; i < ignoredDocValues.docValueCount(); i++) { + ignoredValues.add(ignoredDocValues.lookupOrd(ignoredDocValues.nextOrd()).utf8ToString()); + } + return new DocumentField(IgnoredFieldMapper.NAME, ignoredValues); + } + private static StoredFieldLoader buildStoredFieldLoader(String[] fields, FetchSourceContext fetchSourceContext, SourceLoader loader) { Set fieldsToLoad = new HashSet<>(); if (fields != null && fields.length > 0) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java index 4347bcfd8be3b..7da7992f9a9ca 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -9,10 +9,21 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.KeywordDocValuesField; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.util.Collections; @@ -30,14 +41,20 @@ public static class Defaults { } public static final IgnoredFieldType FIELD_TYPE = new IgnoredFieldType(); + private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(FIELD_TYPE); - private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(); + public static final LegacyIgnoredFieldType LEGACY_FIELD_TYPE = new LegacyIgnoredFieldType(); + private static final IgnoredFieldMapper LEGACY_INSTANCE = new IgnoredFieldMapper(LEGACY_FIELD_TYPE); - public static final TypeParser PARSER = new 
FixedTypeParser(c -> INSTANCE); + public static final TypeParser PARSER = new FixedTypeParser(c -> getInstance(c.indexVersionCreated())); - public static final class IgnoredFieldType extends StringFieldType { + private static MetadataFieldMapper getInstance(IndexVersion indexVersion) { + return indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) ? INSTANCE : LEGACY_INSTANCE; + } - private IgnoredFieldType() { + public static final class LegacyIgnoredFieldType extends StringFieldType { + + private LegacyIgnoredFieldType() { super(NAME, true, true, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); } @@ -46,6 +63,11 @@ public String typeName() { return CONTENT_TYPE; } + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new StoredValueFetcher(context.lookup(), NAME); + } + @Override public Query existsQuery(SearchExecutionContext context) { // This query is not performance sensitive, it only helps assess @@ -54,21 +76,53 @@ public Query existsQuery(SearchExecutionContext context) { // field is bounded by the number of fields in the mappings. return new TermRangeQuery(name(), null, null, true, true); } + } + + public static final class IgnoredFieldType extends StringFieldType { + + private IgnoredFieldType() { + super(NAME, true, false, true, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - return new StoredValueFetcher(context.lookup(), NAME); + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this, FielddataOperation.SEARCH)); + } + + public Query existsQuery(SearchExecutionContext context) { + return new FieldExistsQuery(name()); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + CoreValuesSourceType.KEYWORD, + (dv, n) -> new KeywordDocValuesField(FieldData.toString(dv), n) + ); } } - private IgnoredFieldMapper() { - super(FIELD_TYPE); + private IgnoredFieldMapper(StringFieldType fieldType) { + super(fieldType); } @Override public void postParse(DocumentParserContext context) { - for (String field : context.getIgnoredFields()) { - context.doc().add(new StringField(NAME, field, Field.Store.YES)); + if (context.indexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new SortedSetDocValuesField(NAME, new BytesRef(ignoredField))); + context.doc().add(new StringField(NAME, ignoredField, Field.Store.NO)); + } + } else { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new StringField(NAME, ignoredField, Field.Store.YES)); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 982a7ed6afaa5..f574d95304c0a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -692,8 +692,8 @@ public void testFieldValueValidation() throws Exception { assertThat(doc.docs().size(), equalTo(1)); assertNull(doc.docs().get(0).get("field")); 
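// On indices created on or after DOC_VALUES_FOR_IGNORED_META_FIELD, postParse (above) indexes each
// ignored field name twice: once as a SortedSetDocValuesField and once as a non-stored StringField.
// That is why the updated assertions below expect two "_ignored" entries where there used to be one.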
assertNotNull(doc.docs().get(0).getField("_ignored")); - IndexableField ignoredFields = doc.docs().get(0).getField("_ignored"); - assertThat(ignoredFields.stringValue(), equalTo("field")); + List ignoredFields = doc.docs().get(0).getFields("_ignored"); + assertTrue(ignoredFields.stream().anyMatch(field -> "field".equals(field.stringValue()))); // null inputs are ignored ParsedDocument nullDoc = defaultMapper.parse(source(b -> b.nullField("field"))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java index 7eff2dc73d76f..477f75be4c5a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java @@ -8,13 +8,17 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -22,8 +26,6 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class IgnoredFieldMapperTests extends MetadataMapperTestCase { @@ -54,9 +56,19 @@ public void testDefaults() throws IOException { ); ParsedDocument document = mapper.parse(source(b -> b.field("field", "value"))); List fields = document.rootDoc().getFields(IgnoredFieldMapper.NAME); - assertEquals(1, fields.size()); - assertEquals(IndexOptions.DOCS, fields.get(0).fieldType().indexOptions()); - assertTrue(fields.get(0).fieldType().stored()); + assertEquals(2, fields.size()); + IndexableField stringField = fields.stream() + .filter(field -> DocValuesType.NONE == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.DOCS, stringField.fieldType().indexOptions()); + assertEquals("field", stringField.stringValue()); + assertEquals(DocValuesType.NONE, stringField.fieldType().docValuesType()); + IndexableField docValues = fields.stream() + .filter(field -> DocValuesType.SORTED_SET == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.NONE, docValues.fieldType().indexOptions()); } public void testFetchIgnoredFieldValue() throws IOException { @@ -65,8 +77,7 @@ public void testFetchIgnoredFieldValue() throws IOException { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value"))).rootDoc()); }, iw -> { SearchLookup lookup = new SearchLookup(mapperService::fieldType, fieldDataLookup(mapperService), (ctx, doc) -> null); - SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); - when(searchExecutionContext.lookup()).thenReturn(lookup); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(mapperService); IgnoredFieldMapper.IgnoredFieldType ft = (IgnoredFieldMapper.IgnoredFieldType) mapperService.fieldType("_ignored"); ValueFetcher valueFetcher = 
ft.valueFetcher(searchExecutionContext, null); IndexSearcher searcher = newSearcher(iw); @@ -76,4 +87,22 @@ public void testFetchIgnoredFieldValue() throws IOException { }); } + public void testIgnoredFieldType() throws IOException { + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.FIRST_DETACHED_INDEX_VERSION, + IndexVersion.current() + ); + boolean afterIntroducingDocValues = version.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + boolean beforeRemovingStoredField = version.before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + MapperService mapperService = createMapperService(version, fieldMapping(b -> b.field("type", "keyword").field("ignore_above", 3))); + withLuceneIndex(mapperService, iw -> { + iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value_to_ignore"))).rootDoc()); + }, iw -> { + MappedFieldType mappedFieldType = mapperService.fieldType(IgnoredFieldMapper.NAME); + assertEquals("version = " + version, afterIntroducingDocValues, mappedFieldType.hasDocValues()); + assertEquals("version = " + version, beforeRemovingStoredField, mappedFieldType.isStored()); + }); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java index 52475e7b059b5..520fe8e5ac582 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; @@ -60,4 +61,11 @@ public void testWildcardQuery() { ); assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); } + + public void testExistsQuery() { + MappedFieldType ft = IgnoredFieldMapper.FIELD_TYPE; + + Query expected = new FieldExistsQuery(IgnoredFieldMapper.NAME); + assertEquals(expected, ft.existsQuery(MOCK_CONTEXT)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 4824bd337f5b0..e06ed1736cca2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -234,8 +234,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testNullValue() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java index 368de3e4d6e58..68d326be0dc83 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java @@ -139,7 +139,7 @@ public final void 
testOnScriptErrorContinue() throws IOException { ParsedDocument doc = mapper.parse(source(b -> b.field("message", "this is some text"))); assertThat(doc.rootDoc().getFields("message_error"), hasSize(0)); - assertThat(doc.rootDoc().getField("_ignored").stringValue(), equalTo("message_error")); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "message_error".equals(field.stringValue()))); } public final void testRejectScriptErrors() throws IOException { diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index a17cb7474a681..98f5daec730bb 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -206,8 +206,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testBWCIndexVersion() throws IOException { From 0a9ab8eef2978087d42b3a248e35cb818fbb8dc6 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 29 Apr 2024 11:14:05 -0400 Subject: [PATCH 07/19] remove ComparisonMapper (#107896) More follow-up work from #105217. Since the binary comparison operators now implement EvaluatorMapper, there is no need for ComparisonMapper going forward. --- .../xpack/esql/evaluator/EvalMapper.java | 7 - .../evaluator/mapper/EvaluatorMapper.java | 18 ++ .../operator/comparison/ComparisonMapper.java | 182 ------------------ .../operator/comparison/InMapper.java | 5 +- 4 files changed, 20 insertions(+), 192 deletions(-) delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index c26f722d9f765..096dcc183eaf4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEqualsMapper; import org.elasticsearch.xpack.esql.planner.Layout; @@ -40,12 +39,6 @@ public final class EvalMapper { private static final List<ExpressionMapper<?>> MAPPERS = List.of( - ComparisonMapper.EQUALS, - ComparisonMapper.NOT_EQUALS, - ComparisonMapper.GREATER_THAN, - ComparisonMapper.GREATER_THAN_OR_EQUAL, - ComparisonMapper.LESS_THAN, - ComparisonMapper.LESS_THAN_OR_EQUAL, InMapper.IN_MAPPER, new InsensitiveEqualsMapper(), new BooleanLogic(), diff --git
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java index e536547e006fd..7a084649ac4fa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java @@ -15,6 +15,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.ql.expression.Expression; import java.util.function.Function; @@ -27,6 +28,22 @@ */ public interface EvaluatorMapper { /** + *
<p>
+ * Note for implementors:
+ * If you are implementing this function, you should call the passed-in
+ * lambda on your children, after doing any other manipulation (casting,
+ * etc.) necessary.
+ * </p>
+ * <p>
+ * Note for Callers:
+ * If you are attempting to call this method, and you have an
+ * {@link Expression} and a {@link org.elasticsearch.xpack.esql.planner.Layout},
+ * you likely want to call {@link org.elasticsearch.xpack.esql.evaluator.EvalMapper#toEvaluator(Expression, Layout)}
+ * instead. On the other hand, if you already have something that
+ * looks like the parameter for this method, you should call this method
+ * with that function.
+ * </p>
+ * <p>
* Build an {@link ExpressionEvaluator.Factory} for the tree of
* expressions rooted at this node. This is only guaranteed to return
* a sensible evaluator if this node has a valid type. If this node
@@ -35,6 +52,7 @@
* If {@linkplain Expression#typeResolved} returns an error then
* this method may throw. Or return an evaluator that produces
* garbage. Or return an evaluator that throws when run.
+ * </p>
*/ ExpressionEvaluator.Factory toEvaluator(Function toEvaluator); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java deleted file mode 100644 index 85b30032c1070..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; - -import org.elasticsearch.common.TriFunction; -import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; -import org.elasticsearch.xpack.esql.planner.Layout; -import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; - -import static org.elasticsearch.xpack.esql.evaluator.EvalMapper.toEvaluator; - -public abstract class ComparisonMapper extends ExpressionMapper { - public static final ExpressionMapper EQUALS = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new EqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper NOT_EQUALS = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new NotEqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper GREATER_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanLongsEvaluator.Factory::new, - 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper GREATER_THAN_OR_EQUAL = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN_OR_EQUAL = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - private final TriFunction ints; - private final TriFunction longs; - private final TriFunction doubles; - private final TriFunction keywords; - private final TriFunction bools; - private final EvaluatorFunctionWithType geometries; - - @FunctionalInterface - private interface EvaluatorFunctionWithType { - ExpressionEvaluator.Factory apply(Source s, ExpressionEvaluator.Factory t, ExpressionEvaluator.Factory u, T dataType); - } - - private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools, - EvaluatorFunctionWithType geometries - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = geometries; - } - - private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = (source, lhs, rhs) -> { throw EsqlIllegalArgumentException.illegalDataType(DataTypes.BOOLEAN); }; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - @Override - public final ExpressionEvaluator.Factory 
map(BinaryComparison bc, Layout layout) { - DataType leftType = bc.left().dataType(); - if (leftType.isNumeric()) { - DataType type = EsqlDataTypeRegistry.INSTANCE.commonType(leftType, bc.right().dataType()); - if (type == DataTypes.INTEGER) { - return castToEvaluator(bc, layout, DataTypes.INTEGER, ints); - } - if (type == DataTypes.LONG) { - return castToEvaluator(bc, layout, DataTypes.LONG, longs); - } - if (type == DataTypes.DOUBLE) { - return castToEvaluator(bc, layout, DataTypes.DOUBLE, doubles); - } - if (type == DataTypes.UNSIGNED_LONG) { - // using the long comparators will work on UL as well - return castToEvaluator(bc, layout, DataTypes.UNSIGNED_LONG, longs); - } - } - var leftEval = toEvaluator(bc.left(), layout); - var rightEval = toEvaluator(bc.right(), layout); - if (leftType == DataTypes.KEYWORD || leftType == DataTypes.TEXT || leftType == DataTypes.IP || leftType == DataTypes.VERSION) { - return keywords.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.BOOLEAN) { - return bools.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.DATETIME) { - return longs.apply(bc.source(), leftEval, rightEval); - } - if (EsqlDataTypes.isSpatial(leftType)) { - return geometries.apply(bc.source(), leftEval, rightEval, leftType); - } - throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); - } - - public static ExpressionEvaluator.Factory castToEvaluator( - BinaryOperator op, - Layout layout, - DataType required, - TriFunction factory - ) { - var lhs = Cast.cast(op.source(), op.left().dataType(), required, toEvaluator(op.left(), layout)); - var rhs = Cast.cast(op.source(), op.right().dataType(), required, toEvaluator(op.right(), layout)); - return factory.apply(op.source(), lhs, rhs); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 7b4e867adad91..cea88d3598c2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -16,6 +16,7 @@ import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.planner.Layout; @@ -24,8 +25,6 @@ import java.util.BitSet; import java.util.List; -import static org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper.EQUALS; - public class InMapper extends ExpressionMapper { public static final InMapper IN_MAPPER = new InMapper(); @@ -38,7 +37,7 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { List listEvaluators = new ArrayList<>(in.list().size()); in.list().forEach(e -> { Equals eq = new Equals(in.source(), in.value(), e); - ExpressionEvaluator.Factory eqEvaluator = ((ExpressionMapper) EQUALS).map(eq, layout); + ExpressionEvaluator.Factory eqEvaluator = EvalMapper.toEvaluator(eq, layout); listEvaluators.add(eqEvaluator); }); return dvrCtx -> new InExpressionEvaluator(dvrCtx, 
listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); From 7eae95620b41c8c42a647b059b096703b4d510f4 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 29 Apr 2024 11:15:43 -0400 Subject: [PATCH 08/19] [ci] Move multi-node tests from check part2 to part5 (#107553) --- .buildkite/pipelines/intake.template.yml | 8 ++++++++ .buildkite/pipelines/intake.yml | 8 ++++++++ .buildkite/pipelines/lucene-snapshot/run-tests.yml | 8 ++++++++ .buildkite/pipelines/periodic-platform-support.yml | 2 ++ .buildkite/pipelines/periodic.template.yml | 2 ++ .buildkite/pipelines/periodic.yml | 2 ++ .buildkite/pipelines/pull-request/part-5-arm.yml | 13 +++++++++++++ .buildkite/pipelines/pull-request/part-5-fips.yml | 11 +++++++++++ .../pipelines/pull-request/part-5-windows.yml | 14 ++++++++++++++ .buildkite/pipelines/pull-request/part-5.yml | 11 +++++++++++ build.gradle | 2 ++ 11 files changed, 81 insertions(+) create mode 100644 .buildkite/pipelines/pull-request/part-5-arm.yml create mode 100644 .buildkite/pipelines/pull-request/part-5-fips.yml create mode 100644 .buildkite/pipelines/pull-request/part-5-windows.yml create mode 100644 .buildkite/pipelines/pull-request/part-5.yml diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 66b989d94455c..f530f237113a9 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 8103b40cbaff0..b1f05ea23da4c 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -41,6 +41,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index a5d3c4e5f7935..c76c54a56494e 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git 
a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index f454d20fc542e..d8c5d55fc7e4f 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -48,6 +48,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -72,6 +73,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: aws diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index b102208dd7cce..fda4315926b6b 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -50,6 +50,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -92,6 +93,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3748f4941420e..fa37d37d9de9a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -391,6 +391,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -433,6 +434,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/pull-request/part-5-arm.yml b/.buildkite/pipelines/pull-request/part-5-arm.yml new file mode 100644 index 0000000000000..7bc3a6157155b --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-arm.yml @@ -0,0 +1,13 @@ +config: + allow-labels: "test-arm" +steps: + - label: part-5-arm + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart5 + timeout_in_minutes: 300 + agents: + provider: aws + imagePrefix: elasticsearch-ubuntu-2004-aarch64 + instanceType: m6g.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 diff --git a/.buildkite/pipelines/pull-request/part-5-fips.yml b/.buildkite/pipelines/pull-request/part-5-fips.yml new file mode 100644 index 0000000000000..4e193ac751086 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-fips.yml @@ -0,0 +1,11 @@ +config: + allow-labels: "Team:Security" +steps: + - label: part-5-fips + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-5-windows.yml b/.buildkite/pipelines/pull-request/part-5-windows.yml new file mode 100644 index 0000000000000..4e16a8ef73238 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-windows.yml @@ -0,0 +1,14 @@ +config: + allow-labels: "test-windows" +steps: + - label: part-5-windows + command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-windows-2022 + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: checkPart5 diff --git a/.buildkite/pipelines/pull-request/part-5.yml b/.buildkite/pipelines/pull-request/part-5.yml new file mode 100644 index 0000000000000..306ce7533d0ed --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5.yml @@ -0,0 +1,11 @@ +config: + skip-target-branches: "7.17" +steps: + - label: part-5 + command: .ci/scripts/run-gradle.sh 
-Dignore.tests.seed checkPart5
+ timeout_in_minutes: 300
+ agents:
+ provider: gcp
+ image: family/elasticsearch-ubuntu-2004
+ machineType: custom-32-98304
+ buildDirectory: /dev/shm/bk
diff --git a/build.gradle b/build.gradle index 16c6fce28fe4b..1d9757f32543d 100644 --- a/build.gradle +++ b/build.gradle @@ -287,6 +287,8 @@ allprojects { tasks.register('checkPart4') { dependsOn 'check' } } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } + } else if (project.path.contains("multi-node")) { + tasks.register('checkPart5') { dependsOn 'check' } } else { tasks.register('checkPart2') { dependsOn 'check' } } From d6357452dc8b7a663c1eb1371a16949bcc7875b8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 29 Apr 2024 11:35:24 -0400 Subject: [PATCH 09/19] ESQL: Add key names to description of hash lookup (#108012) When I was debugging further work on our new hash stuff I found myself really wanting the key names on the `description` and `toString` of the hash lookup operator. This adds it. --- .../compute/operator/HashLookupOperator.java | 52 +++++++++---------- .../elasticsearch/compute/OperatorTests.java | 8 ++- .../operator/HashLookupOperatorTests.java | 11 ++-- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java index 2b77003f11a4f..f821f2a37d1cf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java @@ -23,19 +23,26 @@ import java.util.List; public class HashLookupOperator extends AbstractPageMappingToIteratorOperator { + public record Key(String name, Block block) { + @Override + public String toString() { + return "{name=" + + name + + ", type=" + + block.elementType() + + ", positions=" + + block.getPositionCount() + + ", size=" + + ByteSizeValue.ofBytes(block.ramBytesUsed()) + + "}"; + } + } + /** * Factory for {@link HashLookupOperator}. Its received {@link Block}s * are never closed, so we need to build them from a non-tracking factory.
*/ - public static class Factory implements Operator.OperatorFactory { - private final Block[] keys; - private final int[] blockMapping; - - public Factory(Block[] keys, int[] blockMapping) { - this.keys = keys; - this.blockMapping = blockMapping; - } - + public record Factory(Key[] keys, int[] blockMapping) implements Operator.OperatorFactory { @Override public Operator get(DriverContext driverContext) { return new HashLookupOperator(driverContext.blockFactory(), keys, blockMapping); @@ -43,30 +50,23 @@ public Operator get(DriverContext driverContext) { @Override public String describe() { - StringBuilder b = new StringBuilder(); - b.append("HashLookup[keys=["); - for (int k = 0; k < keys.length; k++) { - Block key = keys[k]; - if (k != 0) { - b.append(", "); - } - b.append("{type=").append(key.elementType()); - b.append(", positions=").append(key.getPositionCount()); - b.append(", size=").append(ByteSizeValue.ofBytes(key.ramBytesUsed())).append("}"); - } - b.append("], mapping=").append(Arrays.toString(blockMapping)).append("]"); - return b.toString(); + return "HashLookup[keys=" + Arrays.toString(keys) + ", mapping=" + Arrays.toString(blockMapping) + "]"; } } + private final List keys; private final BlockHash hash; private final int[] blockMapping; - public HashLookupOperator(BlockFactory blockFactory, Block[] keys, int[] blockMapping) { + public HashLookupOperator(BlockFactory blockFactory, Key[] keys, int[] blockMapping) { this.blockMapping = blockMapping; + this.keys = new ArrayList<>(keys.length); + Block[] blocks = new Block[keys.length]; List groups = new ArrayList<>(keys.length); for (int k = 0; k < keys.length; k++) { - groups.add(new BlockHash.GroupSpec(k, keys[k].elementType())); + this.keys.add(keys[k].name); + blocks[k] = keys[k].block; + groups.add(new BlockHash.GroupSpec(k, keys[k].block.elementType())); } /* * Force PackedValuesBlockHash because it assigned ordinals in order @@ -83,7 +83,7 @@ public HashLookupOperator(BlockFactory blockFactory, Block[] keys, int[] blockMa boolean success = false; try { final int[] lastOrd = new int[] { -1 }; - hash.add(new Page(keys), new GroupingAggregatorFunction.AddInput() { + hash.add(new Page(blocks), new GroupingAggregatorFunction.AddInput() { @Override public void add(int positionOffset, IntBlock groupIds) { // TODO support multiple rows with the same keys @@ -128,7 +128,7 @@ protected ReleasableIterator receive(Page page) { @Override public String toString() { - return "HashLookup[hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; + return "HashLookup[keys=" + keys + ", hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 805f26e9ef280..64afb14d22326 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -370,7 +370,13 @@ public void testHashLookup() { var driver = new Driver( driverContext, new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100), - List.of(new HashLookupOperator(driverContext.blockFactory(), new Block[] { primesBlock }, new int[] { 0 })), + List.of( + new HashLookupOperator( + driverContext.blockFactory(), + new HashLookupOperator.Key[] { new HashLookupOperator.Key("primes", primesBlock) }, + new int[] { 0 } + ) + ), new 
PageConsumerOperator(page -> { try { BlockTestUtils.readInto(actualValues, page.getBlock(0)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index ec69297718237..e2af5e82f0d46 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; @@ -31,18 +30,22 @@ protected void assertSimpleOutput(List input, List results) { @Override protected Operator.OperatorFactory simple() { return new HashLookupOperator.Factory( - new Block[] { TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() }, + new HashLookupOperator.Key[] { + new HashLookupOperator.Key( + "foo", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() + ) }, new int[] { 0 } ); } @Override protected String expectedDescriptionOfSimple() { - return "HashLookup[keys=[{type=LONG, positions=3, size=96b}], mapping=[0]]"; + return "HashLookup[keys=[{name=foo, type=LONG, positions=3, size=96b}], mapping=[0]]"; } @Override protected String expectedToStringOfSimple() { - return "HashLookup[hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; + return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; } } From 3e16d0ae559b9bc370fa45da5748bad69f125609 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Mon, 29 Apr 2024 11:46:13 -0400 Subject: [PATCH 10/19] [Transform] Remove remote cluster from local test (#108014) This test can fail when we randomly add remote clusters. We would need to fix our assertion logic, but `testDisablePitWhenThereIsRemoteIndexInSource` already covers the test case when there is a remote cluster and pit disabled. --- .../transform/transforms/ClientTransformIndexerTests.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 04263277d6615..062c951f67c96 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -308,11 +308,6 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { public void testDisablePit() throws InterruptedException { TransformConfig.Builder configBuilder = new TransformConfig.Builder(TransformConfigTests.randomTransformConfig()); - if (randomBoolean()) { - // TransformConfigTests.randomTransformConfig never produces remote indices in the source. - // We need to explicitly set the remote index here for coverage. 
-            configBuilder.setSource(new SourceConfig("remote-cluster:remote-index"));
-        }
         TransformConfig config = configBuilder.build();
         boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false;

From 67630917a58c4f5771a47b58338df0afbec547d0 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Mon, 29 Apr 2024 12:35:23 -0400
Subject: [PATCH 11/19] ESQL: Add ColumnLoadOperator (#108020)

This adds an operator to look up values from fixed columns, which will be
useful in making inline LOOKUP operations.

---
 .../compute/operator/ColumnLoadOperator.java | 74 ++++++++++++++++
 .../operator/ColumnLoadOperatorTests.java    | 84 +++++++++++++++++++
 .../operator/HashLookupOperatorTests.java    | 48 ++++++++++-
 .../compute/operator/OperatorTestCase.java   |  6 +-
 4 files changed, 206 insertions(+), 6 deletions(-)
 create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java
 create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java

diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java
new file mode 100644
index 0000000000000..4e06c1f0f4b69
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.operator;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.core.ReleasableIterator;
+
+/**
+ * {@link Block#lookup Looks up} values from a provided {@link Block} and
+ * merges them into each {@link Page}.
+ */
+public class ColumnLoadOperator extends AbstractPageMappingToIteratorOperator {
+    public record Values(String name, Block block) {
+        @Override
+        public String toString() {
+            return name + ":" + block.elementType();
+        }
+    }
+
+    /**
+     * Factory for {@link ColumnLoadOperator}. Its received {@link Block}s
+     * are never closed, so we need to build them from a non-tracking factory.
+     */
+    public record Factory(Values values, int positionsOrd) implements OperatorFactory {
+        @Override
+        public Operator get(DriverContext driverContext) {
+            return new ColumnLoadOperator(values, positionsOrd);
+        }
+
+        @Override
+        public String describe() {
+            return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]";
+        }
+    }
+
+    private final Values values;
+    private final int positionsOrd;
+
+    public ColumnLoadOperator(Values values, int positionsOrd) {
+        this.values = values;
+        this.positionsOrd = positionsOrd;
+    }
+
+    /**
+     * The target size of each loaded block.
+     * TODO target the size more intelligently
+     */
+    static final ByteSizeValue TARGET_BLOCK_SIZE = ByteSizeValue.ofKb(10);
+
+    @Override
+    protected ReleasableIterator receive(Page page) {
+        // TODO tracking is complex for values
+        /*
+         * values is likely shared across many threads so tracking it is complex.
+         * Lookup will incRef it on the way in and decrement the ref on the way
+         * out but it's not really clear what the right way to get all that thread
+         * safe is.
For now we can ignore this because we're not actually tracking + * the memory of the block. + */ + return appendBlocks(page, values.block.lookup(page.getBlock(positionsOrd), TARGET_BLOCK_SIZE)); + } + + @Override + public String toString() { + return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java new file mode 100644 index 0000000000000..c606e4fd4c736 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class ColumnLoadOperatorTests extends OperatorTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceIntBlockSourceOperator(blockFactory, IntStream.range(0, size).map(l -> between(0, 4))); + } + + @Override + protected void assertSimpleOutput(List input, List results) { + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int loadedIdx = 0; + IntBlock keys = null; + int keysOffset = 0; + LongBlock loaded = null; + int loadedOffset = 0; + int p = 0; + while (p < count) { + if (keys == null) { + keys = input.get(keysIdx++).getBlock(0); + } + if (loaded == null) { + loaded = results.get(loadedIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(loaded.getValueCount(p - loadedOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int loadedStart = loaded.getFirstValueIndex(p - loadedOffset); + for (int k = keysStart, l = loadedStart; k < keysStart + valueCount; k++, l++) { + assertThat(loaded.getLong(l), equalTo(3L * keys.getInt(k))); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - loadedOffset == loaded.getPositionCount()) { + loadedOffset += loaded.getPositionCount(); + loaded = null; + } + } + } + + @Override + protected Operator.OperatorFactory simple() { + return new ColumnLoadOperator.Factory( + new ColumnLoadOperator.Values( + "values", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 0, 3, 6, 9, 12 }, 5).asBlock() + ), + 0 + ); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "ColumnLoad[values=values:LONG, positions=0]"; + } + + @Override + protected String expectedToStringOfSimple() { + return expectedDescriptionOfSimple(); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index e2af5e82f0d46..711800197aa03 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; @@ -24,7 +26,45 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected void assertSimpleOutput(List input, List results) { - assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(input.stream().mapToInt(Page::getPositionCount).sum())); + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int ordsIdx = 0; + LongBlock keys = null; + int keysOffset = 0; + IntBlock ords = null; + int ordsOffset = 0; + int p = 0; + while (p < count) { + if (keys == null) { + keys = input.get(keysIdx++).getBlock(0); + } + if (ords == null) { + ords = results.get(ordsIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(ords.getValueCount(p - ordsOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int ordsStart = ords.getFirstValueIndex(p - ordsOffset); + for (int k = keysStart, l = ordsStart; k < keysStart + valueCount; k++, l++) { + assertThat(ords.getInt(l), equalTo(switch ((int) keys.getLong(k)) { + case 1 -> 0; + case 7 -> 1; + case 14 -> 2; + case 20 -> 3; + default -> null; + })); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - ordsOffset == ords.getPositionCount()) { + ordsOffset += ords.getPositionCount(); + ords = null; + } + } } @Override @@ -33,7 +73,7 @@ protected Operator.OperatorFactory simple() { new HashLookupOperator.Key[] { new HashLookupOperator.Key( "foo", - TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 14, 20 }, 4).asBlock() ) }, new int[] { 0 } ); @@ -41,11 +81,11 @@ protected Operator.OperatorFactory simple() { @Override protected String expectedDescriptionOfSimple() { - return "HashLookup[keys=[{name=foo, type=LONG, positions=3, size=96b}], mapping=[0]]"; + return "HashLookup[keys=[{name=foo, type=LONG, positions=4, size=104b}], mapping=[0]]"; } @Override protected String expectedToStringOfSimple() { - return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; + return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=4, size=544b}, mapping=[0]]"; } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index eebcbc091d3ea..be792a0ef2612 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; @@ -112,7 +113,7 @@ public final void testSimpleCircuitBreaking() { private void runWithLimit(Operator.OperatorFactory factory, List input, ByteSizeValue limit) { BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); + MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); DriverContext driverContext = new DriverContext(bigArrays, blockFactory); List localInput = CannedSourceOperator.deepCopyOf(blockFactory, input); boolean driverStarted = false; @@ -125,7 +126,8 @@ private void runWithLimit(Operator.OperatorFactory factory, List input, By // if drive hasn't even started then we need to release the input pages manually Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(localInput.iterator(), p -> p::releaseBlocks))); } - assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); + blockFactory.ensureAllBlocksAreReleased(); + assertThat(breaker.getUsed(), equalTo(0L)); } } From b6ccb842cabd0d5b518c6bb6ab1997f1ab1f3152 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 29 Apr 2024 12:25:37 -0700 Subject: [PATCH 12/19] Widen numeric counter types (#108036) We should widen the numeric root types before converting them into counter types. 
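For illustration only, here is an editor's sketch (not part of the patch) of
the resolution rule this change establishes. The switch arms assume the
standard small-numeric widenings performed by `widenSmallNumericTypes`
(`byte`/`short` to `integer`; `half_float`/`float`/`scaled_float` to
`double`); the real logic lives in `EsqlDataTypes.getCounterType` below.

```java
// Sketch: resolve a time-series counter field's type name by widening the
// numeric root type first, then prefixing "counter_". Only long, integer
// and double have counter forms, so e.g. "float" must widen to "double"
// before the prefix is applied.
class CounterTypeNames {
    static String counterTypeName(String rootTypeName) {
        String widened = switch (rootTypeName) {
            case "byte", "short" -> "integer";                      // assumed small-int widening
            case "half_float", "float", "scaled_float" -> "double"; // assumed small-float widening
            default -> rootTypeName;                                // long, integer, double pass through
        };
        return "counter_" + widened;
    }

    public static void main(String[] args) {
        System.out.println(counterTypeName("float")); // counter_double, not counter_float
        System.out.println(counterTypeName("long"));  // counter_long
    }
}
```

The `message_in` field added to the TSDB test mapping below exercises exactly
this path: a `float` counter that must resolve to `counter_double`.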
Relates #107877 --- .../src/main/resources/tsdb-mapping.json | 4 ++++ .../xpack/esql/type/EsqlDataTypes.java | 7 ++++++- .../xpack/esql/analysis/AnalyzerTests.java | 20 +++++++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json index c3bba9724602b..dd4073d5dc7cf 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json @@ -23,6 +23,10 @@ "bytes_out": { "type": "long", "time_series_metric": "counter" + }, + "message_in": { + "type": "float", + "time_series_metric": "counter" } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index 912c17dae0865..44f6844544698 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -249,7 +249,12 @@ public static DataType widenSmallNumericTypes(DataType type) { } public static DataType getCounterType(String typeName) { - return fromTypeName("counter_" + typeName); + final DataType rootType = widenSmallNumericTypes(fromName(typeName)); + if (rootType == UNSUPPORTED) { + return rootType; + } + assert rootType == LONG || rootType == INTEGER || rootType == DOUBLE : rootType; + return fromTypeName("counter_" + rootType.typeName()); } public static boolean isCounterType(DataType dt) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 1f32a5a76f3e8..3757720cc203a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -59,6 +60,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; @@ -69,6 +72,7 @@ import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzer; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.tsdbIndexResolution; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -1626,6 +1630,22 @@ public void testChainedEvalFieldsUse() { assertProjection(query + " | keep x*", IntStream.range(0, additionalEvals + 3).mapToObj(v -> "x" + v).toArray(String[]::new)); } + public void testCounterTypes() { + var query = "FROM test 
| KEEP network.* | LIMIT 10"; + Analyzer analyzer = analyzer(tsdbIndexResolution()); + LogicalPlan plan = analyze(query, analyzer); + var limit = as(plan, Limit.class); + var attributes = limit.output().stream().collect(Collectors.toMap(NamedExpression::name, a -> a)); + assertThat( + attributes.keySet(), + equalTo(Set.of("network.connections", "network.bytes_in", "network.bytes_out", "network.message_in")) + ); + assertThat(attributes.get("network.connections").dataType(), equalTo(DataTypes.LONG)); + assertThat(attributes.get("network.bytes_in").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.bytes_out").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.message_in").dataType(), equalTo(EsqlDataTypes.COUNTER_DOUBLE)); + } + public void testMissingAttributeException_InChainedEval() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test From 32deb7fa465913aac8fdbb6b6eecc848706f17fd Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 13:25:47 -0600 Subject: [PATCH 13/19] Decouple API key version from node version (#104156) This commit decouples the version stored in API key documents from the node version, as part of the broader effort to stop relying on node version for product logic. The actual data format does not change - the version is stored as an integer before and after this commit, it's just determined by a manually set `ApiKey.Version` class rather than being derived from the node version. --- .../ComponentVersionsNodesInfoIT.java | 2 +- .../core/src/main/java/module-info.java | 3 ++- .../core/security/action/apikey/ApiKey.java | 24 +++++++++++++++++++ ...n.cluster.node.info.ComponentVersionNumber | 1 + .../xpack/security/authc/ApiKeyService.java | 20 +++++++--------- .../security/authc/ApiKeyServiceTests.java | 17 +++++++------ 6 files changed, 47 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java index 32024ff03ed15..1202f828059f6 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java @@ -29,7 +29,7 @@ public void testNodesInfoComponentVersions() { assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat( response.getNodesMap().get(server1NodeId).getComponentVersions().keySet(), - containsInAnyOrder("transform_config_version", "ml_config_version") + containsInAnyOrder("transform_config_version", "ml_config_version", "api_key_version") ); } } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index 77def0fd12459..070df2efc2629 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -232,7 +232,8 @@ provides org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber with org.elasticsearch.xpack.core.ml.MlConfigVersionComponent, - org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent; + org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent, + org.elasticsearch.xpack.core.security.action.apikey.ApiKey.VersionComponent; provides org.elasticsearch.features.FeatureSpecification with 
org.elasticsearch.xpack.core.XPackFeatures; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 57cf816a46072..cee63c16229e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; @@ -81,6 +83,28 @@ public String value() { } } + public record Version(int version) implements VersionId { + @Override + public int id() { + return version; + } + } + + public static class VersionComponent implements ComponentVersionNumber { + + @Override + public String componentId() { + return "api_key_version"; + } + + @Override + public VersionId versionNumber() { + return CURRENT_API_KEY_VERSION; + } + } + + public static final ApiKey.Version CURRENT_API_KEY_VERSION = new ApiKey.Version(8_13_00_99); + private final String name; private final String id; private final Type type; diff --git a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber index 078217faee53a..568483f03f756 100644 --- a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber +++ b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber @@ -1,2 +1,3 @@ org.elasticsearch.xpack.core.ml.MlConfigVersionComponent org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent +org.elasticsearch.xpack.core.security.action.apikey.ApiKey$VersionComponent diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 1a5b1ab39cd83..fa9b53c5af935 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -14,7 +14,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; @@ -415,7 +414,6 @@ private void createApiKeyAndIndexIt( final Instant expiration = getApiKeyExpiration(created, request.getExpiration()); final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); assert ApiKey.Type.CROSS_CLUSTER != request.getType() || API_KEY_SECRET_LENGTH == apiKey.length(); - final Version version = clusterService.state().nodes().getMinNodeVersion(); computeHashForApiKey(apiKey, listener.delegateFailure((l, apiKeyHashChars) -> { try ( @@ -428,7 +426,7 @@ private void createApiKeyAndIndexIt( 
expiration, request.getRoleDescriptors(), request.getType(), - version, + ApiKey.CURRENT_API_KEY_VERSION, request.getMetadata() ) ) { @@ -712,7 +710,7 @@ static XContentBuilder newDocument( Instant expiration, List keyRoleDescriptors, ApiKey.Type type, - Version version, + ApiKey.Version version, @Nullable Map metadata ) throws IOException { final XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -727,7 +725,7 @@ static XContentBuilder newDocument( addRoleDescriptors(builder, keyRoleDescriptors); addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", name).field("version", version.id).field("metadata_flattened", metadata); + builder.field("name", name).field("version", version.version()).field("metadata_flattened", metadata); addCreator(builder, authentication); return builder.endObject(); @@ -742,7 +740,7 @@ static XContentBuilder newDocument( static XContentBuilder maybeBuildUpdatedDocument( final String apiKeyId, final ApiKeyDoc currentApiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors, @@ -779,7 +777,7 @@ static XContentBuilder maybeBuildUpdatedDocument( addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.id); + builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.version()); assert currentApiKeyDoc.metadataFlattened == null || MetadataUtils.containsReservedMetadata( @@ -807,12 +805,12 @@ static XContentBuilder maybeBuildUpdatedDocument( private static boolean isNoop( final String apiKeyId, final ApiKeyDoc apiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors ) throws IOException { - if (apiKeyDoc.version != targetDocVersion.id) { + if (apiKeyDoc.version != targetDocVersion.version()) { return false; } @@ -1468,8 +1466,8 @@ private IndexRequest maybeBuildIndexRequest( currentVersionedDoc.primaryTerm() ); } - final var targetDocVersion = clusterService.state().nodes().getMinNodeVersion(); - final var currentDocVersion = Version.fromId(currentVersionedDoc.doc().version); + final var targetDocVersion = ApiKey.CURRENT_API_KEY_VERSION; + final var currentDocVersion = new ApiKey.Version(currentVersionedDoc.doc().version); assert currentDocVersion.onOrBefore(targetDocVersion) : "current API key doc version must be on or before target version"; if (logger.isDebugEnabled() && currentDocVersion.before(targetDocVersion)) { logger.debug( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0cb7a270099ad..b3ec3ef117c3e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -67,7 +67,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -1161,7 +1160,7 
@@ private static Tuple, Map> newApiKeyDocument
             Instant.now().plus(expiry),
             keyRoles,
             type,
-            Version.CURRENT,
+            ApiKey.CURRENT_API_KEY_VERSION,
             metadataMap
         );
         Map keyMap = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2();
@@ -2368,7 +2367,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException {
         final long now = randomMillisUpToYear9999();
         when(clock.instant()).thenReturn(Instant.ofEpochMilli(now));
         final Map oldMetadata = ApiKeyTests.randomMetadata();
-        final Version oldVersion = VersionUtils.randomVersion(random());
+        final ApiKey.Version oldVersion = new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version()));
         final ApiKeyDoc oldApiKeyDoc = ApiKeyDoc.fromXContent(
             XContentHelper.createParser(
                 XContentParserConfiguration.EMPTY,
@@ -2419,8 +2418,8 @@ public void testMaybeBuildUpdatedDocument() throws IOException {
         final Map newMetadata = changeMetadata
             ? randomValueOtherThanMany(md -> md == null || md.equals(oldMetadata), ApiKeyTests::randomMetadata)
             : (randomBoolean() ? oldMetadata : null);
-        final Version newVersion = changeVersion
-            ? randomValueOtherThan(oldVersion, () -> VersionUtils.randomVersion(random()))
+        final ApiKey.Version newVersion = changeVersion
+            ? randomValueOtherThan(oldVersion, ApiKeyServiceTests::randomApiKeyVersion)
             : oldVersion;
         final Authentication newAuthentication = changeCreator
             ? randomValueOtherThanMany(
@@ -2468,7 +2467,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException {
         assertEquals(oldApiKeyDoc.hash, updatedApiKeyDoc.hash);
         assertEquals(oldApiKeyDoc.creationTime, updatedApiKeyDoc.creationTime);
         assertEquals(oldApiKeyDoc.invalidated, updatedApiKeyDoc.invalidated);
-        assertEquals(newVersion.id, updatedApiKeyDoc.version);
+        assertEquals(newVersion.version(), updatedApiKeyDoc.version);
         final var actualUserRoles = service.parseRoleDescriptorsBytes(
             "",
             updatedApiKeyDoc.limitedByRoleDescriptorsBytes,
@@ -2991,7 +2990,7 @@ public static Authentication createApiKeyAuthentication(
             Instant.now().plus(Duration.ofSeconds(3600)),
             keyRoles,
             ApiKey.Type.REST,
-            Version.CURRENT,
+            ApiKey.CURRENT_API_KEY_VERSION,
             randomBoolean() ? null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8))
         );
         final ApiKeyDoc apiKeyDoc = ApiKeyDoc.fromXContent(
@@ -3207,4 +3206,8 @@ private static Authenticator.Context getAuthenticatorContext(ThreadContext threa
             mock(Realms.class)
         );
     }
+
+    private static ApiKey.Version randomApiKeyVersion() {
+        return new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version()));
+    }
 }

From a451511e3a127e0c9f2b8c91d67a7d2c4e66c44f Mon Sep 17 00:00:00 2001
From: Michael Peterson
Date: Mon, 29 Apr 2024 15:53:47 -0400
Subject: [PATCH 14/19] Change skip_unavailable default value to true (#105792)

In order to improve the experience of cross-cluster search, we are changing
the default value of the remote cluster `skip_unavailable` setting from
`false` to `true`. This setting causes any cross-cluster _search (or
_async_search) to entirely fail when any remote cluster with
`skip_unavailable=false` is either unavailable (connection to it fails) or
the search on it fails on all shards.

Setting `skip_unavailable=true` allows partial results from other clusters
to be returned. In that case, the search response cluster metadata will show
a `skipped` status, so the user can see that no data came in from that
cluster.
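For example (an abbreviated, hand-written fragment for illustration; the
counts are made up), a search that skipped one unavailable remote reports it
in the `_clusters` section of the response:

```
"_clusters": {
  "total": 2,
  "successful": 1,
  "skipped": 1
}
```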
Kibana also now leverages this metadata in the cross-cluster search
responses to allow users to see how many clusters returned data and drill
down into which clusters did not (including failure messages).

Currently, the user/admin has to specifically set the value to `true` in
the configs, like so:

```
cluster:
  remote:
    remote1:
      seeds: 10.10.10.10:9300
      skip_unavailable: true
```

even though that is probably what search admins want in the vast majority
of cases. Setting `skip_unavailable=false` should be a conscious (and
probably rare) choice by an Elasticsearch admin that a particular cluster's
results are so essential to a search (or visualization in dashboard or
Discover panel) that no results at all should be shown if it cannot return
any results.

---
 docs/changelog/105792.yaml                    | 18 ++++
 docs/reference/ccr/getting-started.asciidoc   |  2 +-
 .../cluster/remote-clusters-connect.asciidoc  | 18 ++--
 .../cluster/remote-clusters-settings.asciidoc | 17 +++-
 .../search-across-clusters.asciidoc           | 54 ++++++----
 .../index/reindex/CrossClusterReindexIT.java  |  6 ++
 .../rest/yaml/CcsCommonYamlTestSuiteIT.java   |  1 +
 .../yaml/RcsCcsCommonYamlTestSuiteIT.java     |  1 +
 qa/multi-cluster-search/build.gradle          |  1 +
 .../test/multi_cluster/10_basic.yml           |  2 +-
 .../test/multi_cluster/20_info.yml            | 16 +--
 .../transport/RemoteClusterService.java       |  2 +-
 .../search/TransportSearchActionTests.java    | 40 +++++---
 .../transport/RemoteClusterClientTests.java   |  1 +
 .../transport/RemoteClusterServiceTests.java  |  2 +-
 .../transport/RemoteClusterSettingsTests.java |  2 +-
 .../RemoteClusterSecurityApiKeyRestIT.java    |  4 +-
 .../RemoteClusterSecurityEsqlIT.java          | 18 +++-
 ...lusterSecurityFcActionAuthorizationIT.java | 54 +++++++---
 ...ecurityLicensingAndFeatureUsageRestIT.java |  2 +
 .../RemoteClusterSecurityRestIT.java          | 99 +++++++++++++------
 .../legacy-with-basic-license/build.gradle    |  1 +
 22 files changed, 254 insertions(+), 107 deletions(-)
 create mode 100644 docs/changelog/105792.yaml

diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml
new file mode 100644
index 0000000000000..2ad5aa970c214
--- /dev/null
+++ b/docs/changelog/105792.yaml
@@ -0,0 +1,18 @@
+pr: 105792
+summary: "Change `skip_unavailable` remote cluster setting default value to true"
+area: Search
+type: breaking
+issues: []
+breaking:
+  title: "Change `skip_unavailable` remote cluster setting default value to true"
+  area: Cluster and node setting
+  details: The default value of the `skip_unavailable` setting is now set to true.
+    All existing and future remote clusters that do not define this setting will use the new default.
+    This setting only affects cross-cluster searches using the _search or _async_search API.
+  impact: Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless
+    skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API.
+    Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked
+    as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a
+    search returns a failure when a particular remote cluster is not available, `skip_unavailable` must now be
+    set explicitly.
+ notable: false diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index d30cd43a4db5e..a9fe8be93d018 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -147,7 +147,7 @@ cluster with cluster alias `leader`. "num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, + "skip_unavailable" : true, "mode" : "sniff" } } diff --git a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc index 7fb345660e086..5344cb97465d7 100644 --- a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc @@ -37,7 +37,7 @@ clusters on individual nodes in the local cluster, define static settings in `elasticsearch.yml` for each node. The following request adds a remote cluster with an alias of `cluster_one`. This -_cluster alias_ is a unique identifier that represents the connection to the +_cluster alias_ is a unique identifier that represents the connection to the remote cluster and is used to distinguish between local and remote indices. [source,console,subs=attributes+] @@ -60,7 +60,7 @@ PUT /_cluster/settings // TEST[setup:host] // TEST[s/127.0.0.1:\{remote-interface-default-port\}/\${transport_host}/] <1> The cluster alias of this remote cluster is `cluster_one`. -<2> Specifies the hostname and {remote-interface} port of a seed node in the +<2> Specifies the hostname and {remote-interface} port of a seed node in the remote cluster. You can use the <> to verify that @@ -86,7 +86,7 @@ cluster with the cluster alias `cluster_one`: "num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, <2> + "skip_unavailable" : true, <2> ifeval::["{trust-mechanism}"=="api-key"] "cluster_credentials": "::es_redacted::", <3> endif::[] @@ -103,7 +103,7 @@ connected to. <2> Indicates whether to skip the remote cluster if searched through {ccs} but no nodes are available. ifeval::["{trust-mechanism}"=="api-key"] -<3> If present, indicates the remote cluster has connected using API key +<3> If present, indicates the remote cluster has connected using API key authentication. endif::[] @@ -187,7 +187,7 @@ PUT _cluster/settings You can delete a remote cluster from the cluster settings by passing `null` values for each remote cluster setting. The following request removes -`cluster_two` from the cluster settings, leaving `cluster_one` and +`cluster_two` from the cluster settings, leaving `cluster_one` and `cluster_three` intact: [source,console] @@ -212,15 +212,15 @@ PUT _cluster/settings ===== Statically configure remote clusters If you specify settings in `elasticsearch.yml`, only the nodes with -those settings can connect to the remote cluster and serve remote cluster +those settings can connect to the remote cluster and serve remote cluster requests. -NOTE: Remote cluster settings that are specified using the +NOTE: Remote cluster settings that are specified using the <> take precedence over settings that you specify in `elasticsearch.yml` for individual nodes. -In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are -arbitrary cluster aliases representing the connection to each cluster. 
These +In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are +arbitrary cluster aliases representing the connection to each cluster. These names are subsequently used to distinguish between local and remote indices. [source,yaml,subs=attributes+] diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index bba8c7ffb3491..ec61c4c59fc74 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -28,9 +28,20 @@ mode are described separately. Per cluster boolean setting that allows to skip specific clusters when no nodes belonging to them are available and they are the target of a remote - cluster request. Default is `false`, meaning that all clusters are mandatory - by default, but they can selectively be made optional by setting this setting - to `true`. + cluster request. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. Once you upgrade the local ("querying") +cluster search coordinator node (the node you send CCS requests to) to 8.15 or later, +any remote clusters that do not have an explicit setting for `skip_unavailable` will +immediately change over to using the new default of true. This is true regardless of +whether you have upgraded the remote clusters to 8.15, as the `skip_unavailable` +search behavior is entirely determined by the setting on the local cluster where +you configure the remotes. + `cluster.remote..transport.ping_schedule`:: diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 2573722b6d2e7..5f9e92c575793 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -1178,7 +1178,13 @@ gathered from all 3 clusters and the total shard count on each cluster is listed By default, a {ccs} fails if a remote cluster in the request is unavailable or returns an error where the search on all shards failed. Use the `skip_unavailable` cluster setting to mark a specific remote cluster as -optional for {ccs}. +either optional or required for {ccs}. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. If `skip_unavailable` is `true`, a {ccs}: @@ -1196,25 +1202,33 @@ parameter and the related `search.default_allow_partial_results` cluster setting when searching the remote cluster. This means searches on the remote cluster may return partial results. -The following <> -API request changes `skip_unavailable` setting to `true` for `cluster_two`. 
- -[source,console] --------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.remote.cluster_two.skip_unavailable": true - } -} --------------------------------- -// TEST[continued] - -If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't -include matching documents from that cluster in the final results. If at -least one shard provides results, those results will be used and the -search will return partial data. (If doing {ccs} using async search, -the `is_partial` field will be set to `true` to indicate partial results.) +You can modify the `skip_unavailable` setting by editing the `cluster.remote.` +settings in the elasticsearch.yml config file. For example: + +``` +cluster: + remote: + cluster_one: + seeds: 35.238.149.1:9300 + skip_unavailable: false + cluster_two: + seeds: 35.238.149.2:9300 + skip_unavailable: true +``` + +Or you can set the cluster.remote settings via the +<> API as shown +<>. + +When a remote cluster configured with `skip_unavailable: true` (such as +`cluster_two` above) is disconnected or unavailable during a {ccs}, {es} won't +include matching documents from that cluster in the final results and the +search will be considered successful (HTTP status 200 OK). + +If at least one shard from a cluster provides search results, those results will +be used and the search will return partial data. This is true regardless of +the `skip_unavailable` setting of the remote cluster. (If doing {ccs} using async +search, the `is_partial` field will be set to `true` to indicate partial results.) [discrete] [[ccs-network-delays]] diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index a4f939fbe3af8..e0396039029c5 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -19,6 +19,7 @@ import java.util.Collection; import java.util.List; +import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -38,6 +39,11 @@ protected Collection remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } + @Override + protected Map skipUnavailableForRemoteClusters() { + return Map.of(REMOTE_CLUSTER, false); + } + @Override protected Collection> nodePlugins(String clusterAlias) { return List.of(ReindexPlugin.class); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index cc613671c860c..a8cff14ff6220 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -101,6 +101,7 @@ public class CcsCommonYamlTestSuiteIT extends ESClientYamlSuiteTestCase { .setting("node.roles", "[data,ingest,master,remote_cluster_client]") .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"") .setting("cluster.remote.connections_per_cluster", "1") + .setting("cluster.remote.remote_cluster.skip_unavailable", "false") .apply(commonClusterConfig) 
.build(); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 5a58f3629df14..e3639ffabf664 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -246,6 +246,7 @@ public void initSearchClient() throws IOException { private static void configureRemoteCluster() throws IOException { final Settings.Builder builder = Settings.builder(); + builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".skip_unavailable", "false"); if (randomBoolean()) { builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".mode", "proxy") .put("cluster.remote." + REMOTE_CLUSTER_NAME + ".proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 23c46c5804a6e..d0cbc208f4d8e 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -48,6 +48,7 @@ BuildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, ba setting 'cluster.remote.connections_per_cluster', '1' setting 'cluster.remote.my_remote_cluster.seeds', { "\"${remoteCluster.get().getAllTransportPortURI().get(0)}\"" } + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' } tasks.register("${baseName}#remote-cluster", RestIntegTestTask) { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 8bbbc7435ff5a..da1245268a0a2 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -249,7 +249,7 @@ persistent: cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {persistent: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {persistent.cluster\.remote\.test_remote_cluster\.seeds: $remote_ip} - do: search: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 144990163583b..da4c91869e53d 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -113,33 +113,33 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: true + cluster.remote.remote1.skip_unavailable: false - - is_true: persistent.cluster.remote.remote1.skip_unavailable + - is_false: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_true: remote1.skip_unavailable + - is_false: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: false + cluster.remote.remote1.skip_unavailable: true - - is_false: persistent.cluster.remote.remote1.skip_unavailable + - is_true: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_false: 
remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: @@ -152,7 +152,7 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 6060e1fed1397..06fb23ba14749 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -87,7 +87,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", "skip_unavailable", - (ns, key) -> boolSetting(key, false, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) + (ns, key) -> boolSetting(key, true, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) ); public static final Setting.AffixSetting<TimeValue> REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting( diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index fea391e8205f5..a35dac8157517 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -469,7 +469,8 @@ private MockTransportService[] startTransport( int numClusters, DiscoveryNode[] nodes, Map<String, OriginalIndices> remoteIndices, - Settings.Builder settingsBuilder + Settings.Builder settingsBuilder, + boolean skipUnavailable ) { MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; for (int i = 0; i < numClusters; i++) { @@ -486,6 +487,7 @@ private MockTransportService[] startTransport( knownNodes.add(remoteSeedNode); nodes[i] = remoteSeedNode; settingsBuilder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + settingsBuilder.put("cluster.remote.remote" + i + ".skip_unavailable", Boolean.toString(skipUnavailable)); remoteIndices.put("remote" + i, new OriginalIndices(new String[] { "index" }, IndicesOptions.lenientExpandOpen())); } return mockTransportServices; @@ -496,7 +498,8 @@ public void testCCSRemoteReduceMergeFails() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ?
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -566,7 +569,8 @@ public void testCCSRemoteReduce() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -709,7 +713,8 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -734,10 +739,13 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { final CountDownLatch latch = new CountDownLatch(1); SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>(); AtomicReference<Exception> failure = new AtomicReference<>(); - LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>( - ActionListener.wrap(r -> fail("no response expected"), failure::set), - latch - ); + LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(r -> { + if (skipUnavailable) { + assertThat(r.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(numClusters)); + } else { + fail("no response expected"); // failure should be returned, not SearchResponse + } + }, failure::set), latch); TaskId parentTaskId = new TaskId("n", 1); SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); @@ -763,10 +771,14 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { resolveWithEmptySearchResponse(tuple); } awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); - assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + if (skipUnavailable) { + assertNull(failure.get()); + } else { + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } } } finally { @@ -781,7 +793,7 @@ public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[]
mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -1035,7 +1047,7 @@ public void testCollectSearchShards() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index bfd626dd3d153..bb18420276190 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -145,6 +145,7 @@ public void testEnsureWeReconnect() throws Exception { Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) + .put("cluster.remote.test.skip_unavailable", "false") // ensureConnected is only true for skip_unavailable=false .build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 29a5d5a34e37f..9f70ab879cb25 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -1282,7 +1282,7 @@ public void testSkipUnavailable() { service.start(); service.acceptIncomingRequests(); - assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1")); if (randomBoolean()) { updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", false); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java index c61dc93f962c6..be474b4a5d530 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -68,7 +68,7 @@ public void testRemoveRemoteClusterClientRole() { public void testSkipUnavailableDefault() { final String alias = randomAlphaOfLength(8); - assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); + assertTrue(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); } public void testSeedsDefault() { diff --git
a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index 3d644103dfb6f..2f3ece56b3281 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -320,14 +320,16 @@ public void testCrossClusterSearchWithApiKey() throws Exception { ) ); - // Check that authentication fails if we use a non-existent cross cluster access API key + // Check that authentication fails if we use a non-existent cross cluster access API key (when skip_unavailable=false) updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 3a7bc49340333..931d3b94669fb 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -499,12 +499,18 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { configureRemoteCluster(); populateData(); + final boolean skipUnavailable = randomBoolean(); + // avoids getting 404 errors updateClusterSettings( randomBoolean() - ? Settings.builder().put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)).build() + ? 
Settings.builder() + .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) + .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); @@ -520,8 +526,14 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { var q2 = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; performRequestWithRemoteSearchUser(esqlRequest(q2)); }); - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(error.getMessage(), containsString("unable to find apikey")); + + if (skipUnavailable == false) { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(error.getMessage(), containsString("unable to find apikey")); + } else { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(500)); + assertThat(error.getMessage(), containsString("Unable to connect to [invalid_remote]")); + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index a5ffeacf28112..793313e238651 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; @@ -82,6 +83,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class RemoteClusterSecurityFcActionAuthorizationIT extends ESRestTestCase { @@ -176,7 +178,9 @@ public void testIndicesPrivilegesAreEnforcedForCcrRestoreSessionActions() throws } // Simulate QC behaviours by directly connecting to the FC using a transport service - try (MockTransportService service = startTransport("node", threadPool, (String) crossClusterApiKeyMap.get("encoded"))) { + final String apiKey = (String) crossClusterApiKeyMap.get("encoded"); + final boolean skipUnavailable = randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final List<RemoteConnectionInfo> remoteConnectionInfos = remoteClusterService.getRemoteConnectionInfos().toList(); assertThat(remoteConnectionInfos, hasSize(1)); @@ -328,28 +332,35 @@ public void
testRestApiKeyIsNotAllowedOnRemoteClusterPort() throws IOException { final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest); assertOK(createApiKeyResponse); final Map<String, Object> apiKeyMap = responseAsMap(createApiKeyResponse); - try (MockTransportService service = startTransport("node", threadPool, (String) apiKeyMap.get("encoded"))) { + final String apiKey = (String) apiKeyMap.get("encoded"); + final boolean skipUnavailable = randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( "my_remote_cluster", EsExecutors.DIRECT_EXECUTOR_SERVICE, RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE ); - - final ElasticsearchSecurityException e = expectThrows( - ElasticsearchSecurityException.class, + final Exception e = expectThrows( + Exception.class, () -> executeRemote( remoteClusterClient, RemoteClusterNodesAction.REMOTE_TYPE, RemoteClusterNodesAction.Request.REMOTE_CLUSTER_SERVER_NODES ) ); - assertThat( - e.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" - ) - ); + if (skipUnavailable) { + assertThat(e, instanceOf(ConnectTransportException.class)); + assertThat(e.getMessage(), containsString("Unable to connect to [my_remote_cluster]")); + } else { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat( + e.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" + ) + ); + } } } @@ -392,12 +403,14 @@ public void testUpdateCrossClusterApiKey() throws Exception { final FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().indices("index").fields("name"); // Perform cross-cluster requests + boolean skipUnavailable = randomBoolean(); try ( MockTransportService service = startTransport( "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo) + Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo), + skipUnavailable ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -508,7 +521,8 @@ public void testMalformedShardLevelActionIsRejected() throws Exception { "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)) + Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)), + randomBoolean() ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -552,15 +566,21 @@ private static CrossClusterAccessSubjectInfo buildCrossClusterAccessSubjectInfo( ); } - private static MockTransportService startTransport(final String nodeName, final ThreadPool threadPool, String encodedApiKey) { - return startTransport(nodeName, threadPool, encodedApiKey, Map.of()); + private static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + String encodedApiKey, + boolean skipUnavailable + ) { + return startTransport(nodeName, threadPool, encodedApiKey, Map.of(), skipUnavailable); } private static MockTransportService startTransport( final String nodeName, final
ThreadPool threadPool, String encodedApiKey, - Map<String, CrossClusterAccessSubjectInfo> subjectInfoLookup + Map<String, CrossClusterAccessSubjectInfo> subjectInfoLookup, + boolean skipUnavailable ) { final String remoteClusterServerEndpoint = testCluster.getRemoteClusterServerEndpoint(0); @@ -573,9 +593,11 @@ private static MockTransportService startTransport( builder.setSecureSettings(secureSettings); if (randomBoolean()) { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.seeds", remoteClusterServerEndpoint); } else { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.proxy_address", remoteClusterServerEndpoint); } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java index 29afda08500ca..c791752e76de0 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java @@ -105,9 +105,11 @@ protected void configureRemoteCluster(boolean isProxyMode) throws Exception { final Settings.Builder builder = Settings.builder(); if (isProxyMode) { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .put("cluster.remote.my_remote_cluster.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } else { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .putList("cluster.remote.my_remote_cluster.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } updateClusterSettings(builder.build()); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java index d1e78d4f3ad39..c6bb6e10f0537 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.util.EntityUtils; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -331,66 +332,108 @@ public void testCrossClusterSearch() throws Exception { ) ); - // Check that authentication fails if we use a non-existent API key + // Check that authentication fails if we use a non-existent API key (when skip_unavailable=false) + boolean skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ?
Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception4 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) - ); - assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception4.getMessage(), containsString("unable to find apikey")); + if (skipUnavailable) { + /* + when skip_unavailable=true, response should be something like: + {"took":1,"timed_out":false,"num_reduce_phases":0,"_shards":{"total":0,"successful":0,"skipped":0,"failed":0}, + "_clusters":{"total":1,"successful":0,"skipped":1,"running":0,"partial":0,"failed":0, + "details":{"invalid_remote":{"status":"skipped","indices":"index1","timed_out":false, + "failures":[{"shard":-1,"index":null,"reason":{"type":"connect_transport_exception", + "reason":"Unable to connect to [invalid_remote]"}}]}}}, + "hits":{"total":{"value":0,"relation":"eq"},"max_score":null,"hits":[]}} + */ + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception4 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) + ); + assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception4.getMessage(), containsString("unable to find apikey")); + } - // check that REST API key is not supported by cross cluster access + // check that REST API key is not supported by cross cluster access (when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? 
Settings.builder() .put("cluster.remote.wrong_api_key_type.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.wrong_api_key_type.mode", "proxy") + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.wrong_api_key_type.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception5 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) - ); - assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat( - exception5.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" - + REST_API_KEY_MAP_REF.get().get("id") - + "] has type [rest]" - ) - ); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception5 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) + ); + assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat( + exception5.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + + REST_API_KEY_MAP_REF.get().get("id") + + "] has type [rest]" + ) + ); + } - // Check invalid cross-cluster API key length is rejected + // Check invalid cross-cluster API key length is rejected (and gets security error when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? 
Settings.builder() .put("cluster.remote.invalid_secret_length.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_secret_length.mode", "proxy") + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_secret_length.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception6 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) - ); - assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception6 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) + ); + assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + } } } diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index 87db264356484..ca44d7fe6a85c 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ -45,6 +45,7 @@ def queryingCluster = testClusters.register('querying-cluster') { setting 'cluster.remote.connections_per_cluster', "1" user username: "test_user", password: "x-pack-test-password" + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' if (proxyMode) { setting 'cluster.remote.my_remote_cluster.mode', 'proxy' setting 'cluster.remote.my_remote_cluster.proxy_address', { From a4cfd02baba77e892adc18d5b5fdda42eba95d64 Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 16:57:56 -0600 Subject: [PATCH 15/19] Mute HashLookupOperatorTests testSimpleToString (#108047) See https://github.com/elastic/elasticsearch/issues/108045 --- .../compute/operator/AnyOperatorTestCase.java | 2 +- .../compute/operator/HashLookupOperatorTests.java | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index 25d79d0808741..9e0a6470e14c6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -68,7 +68,7 @@ public final void testSimpleDescription() { /** * Makes sure the description of {@link #simple} 
matches the {@link #expectedDescriptionOfSimple}. */ - public final void testSimpleToString() { + public void testSimpleToString() { try (Operator operator = simple().get(driverContext())) { assertThat(operator.toString(), equalTo(expectedToStringOfSimple())); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index 711800197aa03..31d3764ac67fc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -88,4 +88,11 @@ protected String expectedDescriptionOfSimple() { protected String expectedToStringOfSimple() { return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=4, size=544b}, mapping=[0]]"; } + + @Override + // when you remove this AwaitsFix, also make this method in the superclass final again + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108045") + public void testSimpleToString() { + super.testSimpleToString(); + } } From 10139a30ed1a4caa86b543f14b8417594306daf5 Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 17:53:20 -0600 Subject: [PATCH 16/19] Refactor IDP plugin to use template registry (#107882) This started as removing references to `Version`, but the simplest way to do that turned out to be to just convert it to use template registries instead of this custom management. --- .../xpack/core/template/TemplateUtils.java | 100 ------------------ .../idp/saml-service-provider-template.json | 4 +- .../sp/SamlServiceProviderIndexTests.java | 59 ++++------- .../xpack/idp/IdentityProviderPlugin.java | 12 ++- .../idp/saml/sp/SamlServiceProviderIndex.java | 72 ++----------- ...lServiceProviderIndexTemplateRegistry.java | 56 ++++++++++ 6 files changed, 96 insertions(+), 207 deletions(-) create mode 100644 x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index bae2d530a21a4..ba9639d3d5156 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -6,28 +6,16 @@ */ package org.elasticsearch.xpack.core.template; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.template.resources.TemplateResources; -import java.io.IOException; import java.util.Collections; 
import java.util.Map; -import java.util.function.Predicate; - -import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; /** * Handling versioned templates for time-based indices in x-pack @@ -36,28 +24,6 @@ public class TemplateUtils { private TemplateUtils() {} - /** - * Loads a JSON template as a resource and puts it into the provided map - */ - public static void loadLegacyTemplateIntoMap( - String resource, - Map<String, IndexTemplateMetadata> map, - String templateName, - String version, - String versionProperty, - Logger logger - ) { - final String template = loadTemplate(resource, version, versionProperty); - try ( - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, template) - ) { - map.put(templateName, IndexTemplateMetadata.Builder.fromXContent(parser, templateName)); - } catch (IOException e) { - // TODO: should we handle this with a thrown exception? - logger.error("Error loading template [{}] as part of metadata upgrading", templateName); - } - } - /** * Loads a built-in template and returns its source. */ @@ -126,70 +92,4 @@ public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String te return templateMetadata.version() != null && templateMetadata.version() >= currentVersion; } - - /** - * Checks if a versioned template exists, and if it exists checks if it is up-to-date with current version. - * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - */ - public static boolean checkTemplateExistsAndIsUpToDate(String templateName, String versionKey, ClusterState state, Logger logger) { - - return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, Version.CURRENT::equals); - } - - /** - * Checks if template with given name exists and if it matches the version predicate given - * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - * @param predicate Predicate to execute on version check - */ - public static boolean checkTemplateExistsAndVersionMatches( - String templateName, - String versionKey, - ClusterState state, - Logger logger, - Predicate<Version> predicate - ) { - - IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); - if (templateMeta == null) { - return false; - } - CompressedXContent mappings = templateMeta.getMappings(); - - // check all mappings contain correct version in _meta - // we have to parse the source here which is annoying - if (mappings != null) { - try { - Map<String, Object> typeMappingMap = convertToMap(mappings.uncompressed(), false, XContentType.JSON).v2(); - // should always contain one entry with key = typename - assert (typeMappingMap.size() == 1); - String key = typeMappingMap.keySet().iterator().next(); - // get the actual mapping entries - @SuppressWarnings("unchecked") - Map<String, Object> mappingMap = (Map<String, Object>) typeMappingMap.get(key); - if (containsCorrectVersion(versionKey, mappingMap, predicate) == false) { - return false; - } - } catch (ElasticsearchParseException e) { - logger.error(() -> "Cannot parse the template [" + templateName + "]", e); - throw new IllegalStateException("Cannot parse the template " + templateName, e); - } - } - return true; - } - - private static boolean containsCorrectVersion(String versionKey, Map<String, Object> typeMappingMap, Predicate<Version> predicate) {
@SuppressWarnings("unchecked") - Map<String, Object> meta = (Map<String, Object>) typeMappingMap.get("_meta"); - if (meta == null) { - // pre 5.0, cannot be up to date - return false; - } - return predicate.test(Version.fromString((String) meta.get(versionKey))); - } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json index dd69b9cecefc5..0e82cc0f2a6df 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json @@ -13,10 +13,12 @@ "index.priority": 10, "index.format": 1 }, + "version": ${idp.template.version}, "mappings": { "_doc": { "_meta": { - "idp-version": "${idp.template.version}" + "idp-version": "${idp.template.version_deprecated}", + "idp-template-version": "${idp.template.version}" }, "dynamic": "strict", "properties": { diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java index 10d636c0cf851..39b04d8915b89 100644 --- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java +++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -30,7 +29,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -53,7 +51,7 @@ public class SamlServiceProviderIndexTests extends ESSingleNodeTestCase { @Override protected Collection<Class<? extends Plugin>> getPlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class); + return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class, IndexTemplateRegistryPlugin.class); } @Override @@ -82,11 +80,6 @@ public void testWriteAndFindServiceProvidersFromIndex() { final int count = randomIntBetween(3, 5); List<SamlServiceProviderDocument> documents = new ArrayList<>(count); - // Install the template - assertTrue("Template should have been installed", installTemplate()); - // No need to install it again - assertFalse("Template should not have been installed a second time", installTemplate()); - // Index should not exist yet assertThat(clusterService.state().metadata().index(SamlServiceProviderIndex.INDEX_NAME), nullValue()); @@ -128,7 +121,6 @@ public void testWriteAndFindServiceProvidersFromIndex() { } public void testWritesViaAliasIfItExists() { - assertTrue(installTemplate()); // Create an index that will trigger the template, but isn't the standard index name final String customIndexName = SamlServiceProviderIndex.INDEX_NAME + "-test"; @@ -155,38 +147,6 @@ public void testWritesViaAliasIfItExists() {
assertThat(readDocument(document.docId), equalTo(document)); } - public void testInstallTemplateAutomaticallyOnClusterChange() throws Exception { - // Create an index that will trigger a cluster state change - final String indexName = randomAlphaOfLength(7).toLowerCase(Locale.ROOT); - indicesAdmin().create(new CreateIndexRequest(indexName)).actionGet(); - - ensureGreen(indexName); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - - assertBusy(() -> assertThat("template should have been installed", templateMeta, notNullValue())); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - public void testInstallTemplateAutomaticallyOnDocumentWrite() { - final SamlServiceProviderDocument doc = randomDocument(1); - writeDocument(doc); - - assertThat(readDocument(doc.docId), equalTo(doc)); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - assertThat("template should have been installed", templateMeta, notNullValue()); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - private boolean installTemplate() { - final PlainActionFuture<Boolean> installTemplate = new PlainActionFuture<>(); - serviceProviderIndex.installIndexTemplate(assertListenerIsOnlyCalledOnce(installTemplate)); - return installTemplate.actionGet(); - } - private Set<SamlServiceProviderDocument> getAllDocs() { final PlainActionFuture<Set<SamlServiceProviderDocument>> future = new PlainActionFuture<>(); serviceProviderIndex.findAll( @@ -264,4 +224,21 @@ private static ActionListener assertListenerIsOnlyCalledOnce(ActionListen }); } + // Since we just want to test the template handling in this test suite, we don't need to go through + // all the hassle of the setup required to *actually* enable the plugin (we do that elsewhere), we + // just need to make sure the template registry is here.
+ public static class IndexTemplateRegistryPlugin extends Plugin { + @Override + public Collection<?> createComponents(PluginServices services) { + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + return List.of(indexTemplateRegistry); + } + } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java index e493c8e61ca58..5e6bc5f703879 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.idp.saml.rest.action.RestSamlValidateAuthenticationRequestAction; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderFactory; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex; +import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndexTemplateRegistry; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderResolver; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderCacheSettings; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderDefaults; @@ -80,6 +81,15 @@ public Collection<?> createComponents(PluginServices services) { return List.of(); } + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + SamlInit.initialize(); final SamlServiceProviderIndex index = new SamlServiceProviderIndex(services.client(), services.clusterService()); final SecurityContext securityContext = new SecurityContext(settings, services.threadPool().getThreadContext()); @@ -111,7 +121,7 @@ public Collection<?> createComponents(PluginServices services) { final SamlFactory factory = new SamlFactory(); - return List.of(index, idp, factory, userPrivilegeResolver); + return List.of(index, idp, factory, userPrivilegeResolver, indexTemplateRegistry); } @Override diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index bd425487b9ad0..1eb6c5586a48b 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -9,12 +9,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; @@ -35,7 +33,6 @@ import
org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilder; @@ -46,7 +43,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.template.TemplateUtils; import java.io.ByteArrayOutputStream; import java.io.Closeable; @@ -70,15 +66,19 @@ public class SamlServiceProviderIndex implements Closeable { private final ClusterService clusterService; private final ClusterStateListener clusterStateListener; private volatile boolean aliasExists; - private volatile boolean templateInstalled; public static final String ALIAS_NAME = "saml-service-provider"; public static final String INDEX_NAME = "saml-service-provider-v1"; static final String TEMPLATE_NAME = ALIAS_NAME; - private static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; - private static final String TEMPLATE_META_VERSION_KEY = "idp-version"; - private static final String TEMPLATE_VERSION_SUBSTITUTE = "idp.template.version"; + static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; + static final String TEMPLATE_VERSION_VARIABLE = "idp.template.version"; + + // This field is only populated with an old-school version string for BWC purposes + static final String TEMPLATE_VERSION_STRING_DEPRECATED = "idp.template.version_deprecated"; + static final String FINAL_TEMPLATE_VERSION_STRING_DEPRECATED = "8.14.0"; + + static final int CURRENT_TEMPLATE_VERSION = 1; public static final class DocumentVersion { public final String id; @@ -140,34 +140,9 @@ public SamlServiceProviderIndex(Client client, ClusterService clusterService) { private void clusterChanged(ClusterChangedEvent clusterChangedEvent) { final ClusterState state = clusterChangedEvent.state(); - installTemplateIfRequired(state); checkForAliasStateChange(state); } - private void installTemplateIfRequired(ClusterState state) { - if (templateInstalled) { - return; - } - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - if (isTemplateUpToDate(state)) { - templateInstalled = true; - return; - } - if (state.nodes().isLocalNodeElectedMaster() == false) { - return; - } - installIndexTemplate(ActionListener.wrap(installed -> { - templateInstalled = true; - if (installed) { - logger.debug("Template [{}] has been updated", TEMPLATE_NAME); - } else { - logger.debug("Template [{}] appears to be up to date", TEMPLATE_NAME); - } - }, e -> logger.warn(() -> "Failed to install template [" + TEMPLATE_NAME + "]", e))); - } - private void checkForAliasStateChange(ClusterState state) { final IndexAbstraction aliasInfo = state.getMetadata().getIndicesLookup().get(ALIAS_NAME); final boolean previousState = aliasExists; @@ -199,24 +174,6 @@ private void logChangedAliasState(IndexAbstraction aliasInfo) { } } - public void installIndexTemplate(ActionListener<Boolean> listener) { - final ClusterState state = clusterService.state(); - if (isTemplateUpToDate(state)) { - listener.onResponse(false); - return; - } - final String template = TemplateUtils.loadTemplate(TEMPLATE_RESOURCE, Version.CURRENT.toString(), TEMPLATE_VERSION_SUBSTITUTE); - final PutIndexTemplateRequest request = new
PutIndexTemplateRequest(TEMPLATE_NAME).source(template, XContentType.JSON); - client.admin().indices().putTemplate(request, listener.delegateFailureAndWrap((l, response) -> { - logger.info("Installed template [{}]", TEMPLATE_NAME); - l.onResponse(true); - })); - } - - private boolean isTemplateUpToDate(ClusterState state) { - return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger); - } - public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, ActionListener<DeleteResponse> listener) { final DeleteRequest request = new DeleteRequest(aliasExists ? ALIAS_NAME : INDEX_NAME).id(version.id) .setIfSeqNo(version.seqNo) @@ -240,19 +197,6 @@ public void writeDocument( return; } - if (templateInstalled) { - _writeDocument(document, opType, refreshPolicy, listener); - } else { - installIndexTemplate(listener.delegateFailureAndWrap((l, installed) -> _writeDocument(document, opType, refreshPolicy, l))); - } - } - - private void _writeDocument( - SamlServiceProviderDocument document, - DocWriteRequest.OpType opType, - WriteRequest.RefreshPolicy refreshPolicy, - ActionListener<DocWriteResponse> listener - ) { try ( ByteArrayOutputStream out = new ByteArrayOutputStream(); XContentBuilder xContentBuilder = new XContentBuilder(XContentType.JSON.xContent(), out) diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java new file mode 100644 index 0000000000000..bd6bdbabbd4f2 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.idp.saml.sp; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.template.IndexTemplateConfig; +import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.CURRENT_TEMPLATE_VERSION; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.FINAL_TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_NAME; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_RESOURCE; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_VARIABLE; + +public class SamlServiceProviderIndexTemplateRegistry extends IndexTemplateRegistry { + public SamlServiceProviderIndexTemplateRegistry( + Settings nodeSettings, + ClusterService clusterService, + ThreadPool threadPool, + Client client, + NamedXContentRegistry xContentRegistry + ) { + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + } + + @Override + protected String getOrigin() { + return "idp"; + } + + @Override + protected List<IndexTemplateConfig> getLegacyTemplateConfigs() { + return List.of( + new IndexTemplateConfig( + TEMPLATE_NAME, + TEMPLATE_RESOURCE, + CURRENT_TEMPLATE_VERSION, + TEMPLATE_VERSION_VARIABLE, + Map.of(TEMPLATE_VERSION_STRING_DEPRECATED, FINAL_TEMPLATE_VERSION_STRING_DEPRECATED) + ) + ); + } +} From 1c6119fa91327fb0db791f39015d6b89b132fa10 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Tue, 30 Apr 2024 08:29:34 +0200 Subject: [PATCH 17/19] Introduce skipping on known_issues and awaits_fix for YAML tests (#107836) See README on YAML REST tests for detailed instructions.
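For illustration only, a rough sketch of the two new skip predicates as they could appear in a YAML test. The field names (awaits_fix, known_issues, cluster_feature, fixed_by) come from the parser changes below, but the exact layout and the issue URL are placeholders; treat the README as the authoritative reference:

    - skip:
        awaits_fix: "https://github.com/elastic/elasticsearch/issues/12345"

    - skip:
        known_issues:
          - cluster_feature: "feature.that.introduced.the.issue"
            fixed_by: "feature.that.fixed.the.issue"
        reason: "skipped while the known issue is present and unfixed"

An awaits_fix skip applies unconditionally until the referenced bug is fixed, while known_issues (whose accompanying reason is mandatory) is meant to skip only clusters that carry the offending cluster feature without the fixing one.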
--- .../yaml/section/PrerequisiteSection.java | 302 ++++++++++-------- .../test/rest/yaml/section/Prerequisites.java | 5 + .../section/ClientYamlTestSectionTests.java | 2 +- .../section/ClientYamlTestSuiteTests.java | 30 ++ .../section/PrerequisiteSectionTests.java | 90 +++++- 5 files changed, 295 insertions(+), 134 deletions(-) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java index f4c9aaa619911..1ee447da1f111 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; import org.elasticsearch.xcontent.XContentLocation; @@ -20,9 +21,13 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.Predicate; +import static java.util.Collections.emptyList; + /** * Represents a section where prerequisites to run a specific test section or suite are specified. It is possible to specify preconditions * as a set of `skip` criteria (the test or suite will be skipped if the specified conditions are met) or `requires` criteria (the test or @@ -34,13 +39,18 @@ * - an operating system (full name, including specific Linux distributions) - some OS might show a certain behavior */ public class PrerequisiteSection { + record KnownIssue(String clusterFeature, String fixedBy) { + private static final Set FIELD_NAMES = Set.of("cluster_feature", "fixed_by"); + } + static class PrerequisiteSectionBuilder { String skipVersionRange = null; String skipReason = null; String requiresReason = null; List requiredYamlRunnerFeatures = new ArrayList<>(); List skipOperatingSystems = new ArrayList<>(); - + List skipKnownIssues = new ArrayList<>(); + String skipAwaitsFix = null; Set skipClusterFeatures = new HashSet<>(); Set requiredClusterFeatures = new HashSet<>(); @@ -53,6 +63,11 @@ enum XPackRequired { XPackRequired xpackRequired = XPackRequired.NOT_SPECIFIED; + public PrerequisiteSectionBuilder skipIfAwaitsFix(String bugUrl) { + this.skipAwaitsFix = bugUrl; + return this; + } + public PrerequisiteSectionBuilder skipIfVersion(String skipVersionRange) { this.skipVersionRange = skipVersionRange; return this; @@ -96,6 +111,11 @@ public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { return this; } + public PrerequisiteSectionBuilder skipKnownIssue(KnownIssue knownIssue) { + skipKnownIssues.add(knownIssue); + return this; + } + public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { requiredClusterFeatures.add(featureName); return this; @@ -107,29 +127,30 @@ public PrerequisiteSectionBuilder skipIfOs(String osName) { } void validate(XContentLocation contentLocation) { - if ((Strings.hasLength(skipVersionRange) == false) + if ((Strings.isEmpty(skipVersionRange)) && requiredYamlRunnerFeatures.isEmpty() && skipOperatingSystems.isEmpty() && xpackRequired == XPackRequired.NOT_SPECIFIED && 
requiredClusterFeatures.isEmpty() - && skipClusterFeatures.isEmpty()) { - throw new ParsingException( - contentLocation, - "at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section" - ); + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty() + && Strings.isEmpty(skipAwaitsFix)) { + // TODO separate the validation for requires / skip when dropping parsing of legacy fields, e.g. features in skip + throw new ParsingException(contentLocation, "at least one predicate is mandatory within a skip or requires section"); } - if (Strings.hasLength(skipVersionRange) && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); - } - if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); - } - if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + + if (Strings.isEmpty(skipReason) + && (Strings.isEmpty(skipVersionRange) + && skipOperatingSystems.isEmpty() + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty()) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within this skip section"); } - if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + + if (Strings.isEmpty(requiresReason) && (requiredClusterFeatures.isEmpty() == false)) { + throw new ParsingException(contentLocation, "reason is mandatory within this requires section"); } + // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os if (skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); @@ -143,33 +164,49 @@ void validate(XContentLocation contentLocation) { } public PrerequisiteSection build() { - final List> skipCriteriaList = new ArrayList<>(); - final List> requiresCriteriaList; - - // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this - // section. 
             if (Features.areAllSupported(requiredYamlRunnerFeatures) == false) {
-                requiresCriteriaList = List.of(Prerequisites.FALSE);
-            } else {
-                requiresCriteriaList = new ArrayList<>();
-                if (xpackRequired == XPackRequired.YES) {
-                    requiresCriteriaList.add(Prerequisites.hasXPack());
-                }
-                if (xpackRequired == XPackRequired.NO) {
-                    skipCriteriaList.add(Prerequisites.hasXPack());
-                }
-                if (Strings.hasLength(skipVersionRange)) {
-                    skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange));
-                }
-                if (skipOperatingSystems.isEmpty() == false) {
-                    skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems));
-                }
-                if (requiredClusterFeatures.isEmpty() == false) {
-                    requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures));
-                }
-                if (skipClusterFeatures.isEmpty() == false) {
-                    skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures));
-                }
+                // always skip this section due to missing required test runner features (see {@link Features})
+                return new PrerequisiteSection(
+                    emptyList(),
+                    skipReason,
+                    List.of(Prerequisites.FALSE),
+                    requiresReason,
+                    requiredYamlRunnerFeatures
+                );
+            }
+            if (Strings.hasLength(skipAwaitsFix)) {
+                // always skip this section due to a pending fix
+                return new PrerequisiteSection(
+                    List.of(Prerequisites.TRUE),
+                    skipReason,
+                    emptyList(),
+                    requiresReason,
+                    requiredYamlRunnerFeatures
+                );
+            }
+
+            final List<Predicate<ClientYamlTestExecutionContext>> skipCriteriaList = new ArrayList<>();
+            final List<Predicate<ClientYamlTestExecutionContext>> requiresCriteriaList = new ArrayList<>();
+            if (xpackRequired == XPackRequired.YES) {
+                requiresCriteriaList.add(Prerequisites.hasXPack());
+            }
+            if (xpackRequired == XPackRequired.NO) {
+                skipCriteriaList.add(Prerequisites.hasXPack());
+            }
+            if (Strings.hasLength(skipVersionRange)) {
+                skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange));
+            }
+            if (skipOperatingSystems.isEmpty() == false) {
+                skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems));
+            }
+            if (requiredClusterFeatures.isEmpty() == false) {
+                requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures));
+            }
+            if (skipClusterFeatures.isEmpty() == false) {
+                skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures));
+            }
+            if (skipKnownIssues.isEmpty() == false) {
+                skipCriteriaList.add(Prerequisites.skipOnKnownIssue(skipKnownIssues));
             }
             return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures);
         }
@@ -228,97 +265,106 @@ private static void parseFeatureField(String feature, PrerequisiteSectionBuilder
 
     // package private for tests
     static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException {
-        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
-            throw new IllegalArgumentException(
-                "Expected ["
-                    + XContentParser.Token.START_OBJECT
-                    + ", found ["
-                    + parser.currentToken()
-                    + "], the skip section is not properly indented"
-            );
-        }
-        String currentFieldName = null;
-        XContentParser.Token token;
-
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if ("version".equals(currentFieldName)) {
-                    builder.skipIfVersion(parser.text());
-                } else if ("reason".equals(currentFieldName)) {
-                    builder.setSkipReason(parser.text());
-                } else if ("features".equals(currentFieldName)) {
-                    parseFeatureField(parser.text(), builder);
("os".equals(currentFieldName)) { - builder.skipIfOs(parser.text()); - } else if ("cluster_features".equals(currentFieldName)) { - builder.skipIfClusterFeature(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within skip section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeatureField(parser.text(), builder); - } - } else if ("os".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfOs(parser.text()); - } - } else if ("cluster_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfClusterFeature(parser.text()); - } - } + requireStartObject("skip", parser.nextToken()); + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue; + + boolean valid = false; + if (parser.currentToken().isValue()) { + valid = switch (parser.currentName()) { + case "version" -> parseString(parser, builder::skipIfVersion); + case "reason" -> parseString(parser, builder::setSkipReason); + case "features" -> parseString(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseString(parser, builder::skipIfOs); + case "cluster_features" -> parseString(parser, builder::skipIfClusterFeature); + case "awaits_fix" -> parseString(parser, builder::skipIfAwaitsFix); + default -> false; + }; + } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + valid = switch (parser.currentName()) { + case "features" -> parseStrings(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseStrings(parser, builder::skipIfOs); + case "cluster_features" -> parseStrings(parser, builder::skipIfClusterFeature); + case "known_issues" -> parseArray(parser, PrerequisiteSection::parseKnownIssue, builder::skipKnownIssue); + default -> false; + }; } + if (valid == false) throwUnexpectedField("skip", parser); } parser.nextToken(); } - static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + private static void throwUnexpectedField(String section, XContentParser parser) throws IOException { + throw new ParsingException( + parser.getTokenLocation(), + Strings.format("field [%s] of type [%s] not supported within %s section", parser.currentName(), parser.currentToken(), section) + ); + } + + private static void requireStartObject(String section, XContentParser.Token token) throws IOException { + if (token != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the requires section is not properly indented" + Strings.format( + "Expected [%s], found [%s], the %s section is not properly indented", + XContentParser.Token.START_OBJECT, + token, + section + ) + ); + } + } + + private static boolean parseString(XContentParser parser, Consumer consumer) throws IOException { + consumer.accept(parser.text()); + return true; + } + + private static boolean parseStrings(XContentParser parser, Consumer consumer) throws IOException { + return parseArray(parser, XContentParser::text, consumer); + } + + private static boolean parseArray(XContentParser parser, CheckedFunction 
+    private static <T> boolean parseArray(XContentParser parser, CheckedFunction<XContentParser, T, IOException> item, Consumer<T> consumer)
+        throws IOException {
+        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+            consumer.accept(item.apply(parser));
+        }
+        return true;
+    }
+
+    private static KnownIssue parseKnownIssue(XContentParser parser) throws IOException {
+        Map<String, String> fields = parser.mapStrings();
+        if (fields.keySet().equals(KnownIssue.FIELD_NAMES) == false) {
+            throw new ParsingException(
+                parser.getTokenLocation(),
+                Strings.format("Expected fields %s, but got %s", KnownIssue.FIELD_NAMES, fields.keySet())
             );
         }
-        String currentFieldName = null;
-        XContentParser.Token token;
-
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if ("reason".equals(currentFieldName)) {
-                    builder.setRequiresReason(parser.text());
-                } else if ("test_runner_features".equals(currentFieldName)) {
-                    parseFeatureField(parser.text(), builder);
-                } else if ("cluster_features".equals(currentFieldName)) {
-                    builder.requireClusterFeature(parser.text());
-                } else {
-                    throw new ParsingException(
-                        parser.getTokenLocation(),
-                        "field " + currentFieldName + " not supported within requires section"
-                    );
-                }
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                if ("test_runner_features".equals(currentFieldName)) {
-                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                        parseFeatureField(parser.text(), builder);
-                    }
-                } else if ("cluster_features".equals(currentFieldName)) {
-                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                        builder.requireClusterFeature(parser.text());
-                    }
-                }
+        return new KnownIssue(fields.get("cluster_feature"), fields.get("fixed_by"));
+    }
+
+    static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException {
+        requireStartObject("requires", parser.nextToken());
+
+        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+            if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue;
+
+            boolean valid = false;
+            if (parser.currentToken().isValue()) {
+                valid = switch (parser.currentName()) {
+                    case "reason" -> parseString(parser, builder::setRequiresReason);
+                    case "test_runner_features" -> parseString(parser, f -> parseFeatureField(f, builder));
+                    case "cluster_features" -> parseString(parser, builder::requireClusterFeature);
+                    default -> false;
+                };
+            } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
+                valid = switch (parser.currentName()) {
+                    case "test_runner_features" -> parseStrings(parser, f -> parseFeatureField(f, builder));
+                    case "cluster_features" -> parseStrings(parser, builder::requireClusterFeature);
+                    default -> false;
+                };
             }
+            if (valid == false) throwUnexpectedField("requires", parser);
         }
         parser.nextToken();
     }
@@ -332,9 +378,9 @@ static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuild
     final String requireReason;
 
     private PrerequisiteSection() {
-        this.skipCriteriaList = new ArrayList<>();
-        this.requiresCriteriaList = new ArrayList<>();
-        this.yamlRunnerFeatures = new ArrayList<>();
+        this.skipCriteriaList = emptyList();
+        this.requiresCriteriaList = emptyList();
+        this.yamlRunnerFeatures = emptyList();
        this.skipReason = null;
         this.requireReason = null;
     }
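[Illustration, not part of the patch] The rewritten parser above accepts skip sections that combine the new `awaits_fix` and `known_issues` predicates with the existing fields. A minimal sketch, written in the style of the tests later in this series (`createParser` and `parseInternal` are exercised the same way there; "bugurl" is the same placeholder the tests use):

    public void testParseSkipSectionCombined() throws Exception {
        parser = createParser(YamlXContent.yamlXContent, """
            skip:
              awaits_fix: "bugurl"
              known_issues:
                - cluster_feature: buggy_feature
                  fixed_by: buggy_feature_fix
              reason: "skipped while the known issue is unfixed"
            """);

        var skipSectionBuilder = PrerequisiteSection.parseInternal(parser);
        assertThat(skipSectionBuilder.skipAwaitsFix, is("bugurl"));
        assertThat(skipSectionBuilder.skipKnownIssues, contains(new KnownIssue("buggy_feature", "buggy_feature_fix")));
    }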
diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java
index 8049c227b199e..ca10101a4612c 100644
--- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java
+++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java
@@ -44,4 +44,9 @@ static Predicate<ClientYamlTestExecutionContext> requireClusterFeatures(Set<String>
     static Predicate<ClientYamlTestExecutionContext> skipOnClusterFeatures(Set<String> clusterFeatures) {
         return context -> clusterFeatures.stream().anyMatch(context::clusterHasFeature);
     }
+
+    static Predicate<ClientYamlTestExecutionContext> skipOnKnownIssue(List<KnownIssue> knownIssues) {
+        return context -> knownIssues.stream()
+            .anyMatch(i -> context.clusterHasFeature(i.clusterFeature()) && context.clusterHasFeature(i.fixedBy()) == false);
+    }
 }
diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
index 2c6e7e30e0d46..108a85b978af3 100644
--- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
+++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java
@@ -33,7 +33,7 @@ public void testWrongIndentation() throws Exception {
         assertEquals("Error parsing test named [First test section]", e.getMessage());
         assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
         assertEquals(
-            "Expected [START_OBJECT, found [VALUE_NULL], the skip section is not properly indented",
+            "Expected [START_OBJECT], found [VALUE_NULL], the skip section is not properly indented",
             e.getCause().getMessage()
         );
     }
diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
index 1f5bdc71dde37..f8927f76c07ec 100644
--- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
+++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
@@ -468,11 +468,41 @@ public void testParseSkipOs() throws Exception {
         assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true));
     }
 
+    public void testMuteUsingAwaitsFix() throws Exception {
+        parser = createParser(YamlXContent.yamlXContent, """
+            "Mute":
+
+              - skip:
+                  awaits_fix: bugurl
+
+              - do:
+                  indices.get_mapping:
+                    index: test_index
+                    type: test_type
+
+              - match: {test_type.properties.text.type: string}
+              - match: {test_type.properties.text.analyzer: whitespace}
+            """);
+
+        ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser);
+
+        assertThat(restTestSuite, notNullValue());
+        assertThat(restTestSuite.getName(), equalTo(getTestName()));
+        assertThat(restTestSuite.getFile().isPresent(), equalTo(false));
+        assertThat(restTestSuite.getTestSections().size(), equalTo(1));
+
+        assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Mute"));
+        assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false));
+    }
+
     public void testParseSkipAndRequireClusterFeatures() throws Exception {
         parser = createParser(YamlXContent.yamlXContent, """
             "Broken on some os":
 
               - skip:
+                  known_issues:
+                    - cluster_feature: buggy_feature
+                      fixed_by: buggy_feature_fix
                   cluster_features: [unsupported-feature1, unsupported-feature2]
                   reason: "unsupported-features are not supported"
               - requires:
diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java
index 181ec34fefb7e..a77b2cc5b40f1 100644
--- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java
+++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext;
+import org.elasticsearch.test.rest.yaml.section.PrerequisiteSection.KnownIssue;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.yaml.YamlXContent;
 import org.junit.AssumptionViolatedException;
@@ -34,6 +35,7 @@
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.oneOf;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -151,6 +153,11 @@ public void testSkipTestFeaturesOverridesAnySkipCriteria() {
         assertFalse(section.requiresCriteriaMet(mockContext));
     }
 
+    public void testSkipAwaitsFix() {
+        PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfAwaitsFix("bugurl").build();
+        assertTrue(section.skipCriteriaMet(mock(ClientYamlTestExecutionContext.class)));
+    }
+
     public void testSkipOs() {
         PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfOs("windows95")
             .skipIfOs("debian-5")
@@ -306,6 +313,57 @@ public void testParseSkipSectionBothFeatureAndVersion() throws Exception {
         assertThat(skipSectionBuilder.skipReason, equalTo("Delete ignores the parent param"));
     }
 
+    public void testParseSkipSectionAwaitsFix() throws Exception {
+        parser = createParser(YamlXContent.yamlXContent, """
+            skip:
+              awaits_fix: "bugurl"
+            """);
+
+        var skipSectionBuilder = PrerequisiteSection.parseInternal(parser);
+        assertThat(skipSectionBuilder.skipAwaitsFix, is("bugurl"));
+    }
+
+    public void testParseSkipSectionKnownIssues() throws Exception {
+        parser = createParser(YamlXContent.yamlXContent, """
+            skip:
+              reason: "skip known bugs"
+              known_issues:
+                - cluster_feature: feature1
+                  fixed_by: featureFix1
+                - cluster_feature: feature2
+                  fixed_by: featureFix2""");
+
+        var skipSectionBuilder = PrerequisiteSection.parseInternal(parser);
+        assertThat(skipSectionBuilder.skipReason, is("skip known bugs"));
+        assertThat(
+            skipSectionBuilder.skipKnownIssues,
+            contains(
+                new KnownIssue("feature1", "featureFix1"), //
+                new KnownIssue("feature2", "featureFix2")
+            )
+        );
+    }
+
+    public void testParseSkipSectionIncompleteKnownIssues() throws Exception {
+        parser = createParser(YamlXContent.yamlXContent, """
+            skip:
+              reason: "skip known bugs"
+              known_issues:
+                - cluster_feature: feature1""");
+
+        Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser));
+        parser = null; // parser is not fully consumed, prevent validation
+        assertThat(
+            e.getMessage(),
+            is(
+                oneOf(
+                    ("Expected fields [cluster_feature, fixed_by], but got [cluster_feature]"),
+                    ("Expected fields [fixed_by, cluster_feature], but got [cluster_feature]")
+                )
+            )
+        );
+    }
+
     public void testParseSkipSectionNoReason() throws Exception {
         parser = createParser(YamlXContent.yamlXContent, """
             skip:
@@ -313,7 +371,7 @@ public void testParseSkipSectionNoReason() throws Exception {
             """);
 
         Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser));
-        assertThat(e.getMessage(), is("reason is mandatory within skip version section"));
+        assertThat(e.getMessage(), is("reason is mandatory within this skip section"));
     }
 
     public void testParseSkipSectionNoVersionNorFeature() throws Exception {
@@ -323,10 +381,7 @@ public void testParseSkipSectionNoVersionNorFeature() throws Exception {
             """);
 
         Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser));
-        assertThat(
-            e.getMessage(),
-            is("at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section")
-        );
+        assertThat(e.getMessage(), is("at least one predicate is mandatory within a skip or requires section"));
     }
 
     public void testParseSkipSectionOsNoVersion() throws Exception {
@@ -579,6 +634,31 @@ public void testSkipClusterFeaturesAllRequiredNoneToSkipMatch() {
         assertTrue(section.requiresCriteriaMet(mockContext));
     }
 
+    public void testSkipKnownIssue() {
+        PrerequisiteSection section = new PrerequisiteSection(
+            List.of(Prerequisites.skipOnKnownIssue(List.of(new KnownIssue("bug1", "fix1"), new KnownIssue("bug2", "fix2")))),
+            "foobar",
+            emptyList(),
+            "foobar",
+            emptyList()
+        );
+
+        var mockContext = mock(ClientYamlTestExecutionContext.class);
+        assertFalse(section.skipCriteriaMet(mockContext));
+
+        when(mockContext.clusterHasFeature("bug1")).thenReturn(true);
+        assertTrue(section.skipCriteriaMet(mockContext));
+
+        when(mockContext.clusterHasFeature("fix1")).thenReturn(true);
+        assertFalse(section.skipCriteriaMet(mockContext));
+
+        when(mockContext.clusterHasFeature("bug2")).thenReturn(true);
+        assertTrue(section.skipCriteriaMet(mockContext));
+
+        when(mockContext.clusterHasFeature("fix2")).thenReturn(true);
+        assertFalse(section.skipCriteriaMet(mockContext));
+    }
+
     public void evaluateEmpty() {
         var section = new PrerequisiteSection(List.of(), "unsupported", List.of(), "required", List.of());
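[Illustration, not part of the patch] The known-issue semantics introduced above can be summarized standalone: a test is skipped only while the bug's cluster feature is present and its fix feature is absent. A self-contained sketch of that rule (all names here are illustrative, not the project's API):

    import java.util.List;
    import java.util.Set;
    import java.util.function.Predicate;

    class KnownIssueSemantics {
        record Issue(String clusterFeature, String fixedBy) {}

        // Mirrors Prerequisites.skipOnKnownIssue: skip when any bug feature is
        // present on the cluster and its corresponding fix feature is absent.
        static Predicate<Set<String>> skipOnKnownIssue(List<Issue> issues) {
            return features -> issues.stream()
                .anyMatch(i -> features.contains(i.clusterFeature()) && features.contains(i.fixedBy()) == false);
        }

        public static void main(String[] args) {
            var skip = skipOnKnownIssue(List.of(new Issue("bug1", "fix1")));
            System.out.println(skip.test(Set.of("bug1")));         // true: bug present, fix absent -> skip
            System.out.println(skip.test(Set.of("bug1", "fix1"))); // false: fix present -> run
            System.out.println(skip.test(Set.of()));               // false: bug absent -> run
        }
    }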
From 61a3415ad6afe44c3540a2946dfdf6767742502c Mon Sep 17 00:00:00 2001
From: Ievgen Degtiarenko
Date: Tue, 30 Apr 2024 08:35:48 +0200
Subject: [PATCH 18/19] Add Range*Handler java docs (#105843)

---
 .../shared/SharedBlobCacheService.java        | 24 +++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
index bd67e71eac041..be93bcf9945eb 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
@@ -1149,13 +1149,33 @@ public CacheFile getCacheFile(KeyType cacheKey, long length) {
 
     @FunctionalInterface
     public interface RangeAvailableHandler {
-        // caller that wants to read from x should instead do a positional read from x + relativePos
-        // caller should also only read up to length, further bytes will be offered by another call to this method
+        /**
+         * Callback method used to read data from the cache. The target is typically captured by the callback implementation.
+         *
+         * A caller should only read up to length, further bytes will be offered by another call to this method
+         *
+         * @param channel is the cache region to read from
+         * @param channelPos a position in the channel (cache file) to read from
+         * @param relativePos a position in the target buffer to store bytes and pass to the caller
+         * @param length of the blob that can be read (must not be exceeded)
+         * @return number of bytes read
+         * @throws IOException on failure
+         */
         int onRangeAvailable(SharedBytes.IO channel, int channelPos, int relativePos, int length) throws IOException;
     }
 
     @FunctionalInterface
     public interface RangeMissingHandler {
+        /**
+         * Callback method used to fetch data (usually from a remote storage) and write it in the cache.
+         *
+         * @param channel is the cache region to write to
+         * @param channelPos a position in the channel (cache file) to write to
+         * @param relativePos the relative position in the remote storage to read from
+         * @param length of data to fetch
+         * @param progressUpdater consumer to invoke with the number of copied bytes as they are written in cache.
+         *                        This is used to notify waiting readers that data become available in cache.
+         */
         void fillCacheRange(SharedBytes.IO channel, int channelPos, int relativePos, int length, IntConsumer progressUpdater)
             throws IOException;
     }

From a2d9cc6473b7c433243391b35a634aca6a7c62af Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 30 Apr 2024 07:39:39 +0100
Subject: [PATCH 19/19] Encapsulate `MasterNodeRequest#masterNodeTimeout` (#107999)

There's no good reason for this field to have `protected` visibility, and
we definitely don't want subclasses to be able to set it to `null`. This
commit makes it `private`.

Relates #107984
---
 .../cluster/reroute/ClusterRerouteRequest.java        |  4 ++--
 .../snapshots/create/CreateSnapshotRequest.java       |  4 ++--
 .../admin/cluster/state/ClusterStateRequest.java      |  2 +-
 .../indices/settings/put/UpdateSettingsRequest.java   | 12 ++++++++++--
 .../action/support/master/MasterNodeRequest.java      | 13 +++++++++++--
 .../UpdateIndexShardSnapshotStatusRequest.java        |  3 +--
 .../xpack/core/ccr/action/CcrStatsAction.java         |  6 +++---
 .../MountSearchableSnapshotRequest.java               |  4 ++--
 8 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
index 5aeef6b19298e..b355d3c50400e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java
@@ -131,12 +131,12 @@ public boolean equals(Object obj) {
             && Objects.equals(explain, other.explain)
             && Objects.equals(ackTimeout(), other.ackTimeout())
             && Objects.equals(retryFailed, other.retryFailed)
-            && Objects.equals(masterNodeTimeout, other.masterNodeTimeout);
+            && Objects.equals(masterNodeTimeout(), other.masterNodeTimeout());
     }
 
     @Override
     public int hashCode() {
         // Override equals and hashCode for testing
-        return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout);
+        return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout());
     }
 }
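[Illustration, not part of the patch] The encapsulation pattern above in miniature: once the timeout field is private, subclasses and `equals`/`hashCode` must go through the accessor, and the single setter can reject `null`. A hedged sketch with illustrative names (the real field is a `TimeValue`, not a `Duration`):

    import java.time.Duration;
    import java.util.Objects;

    abstract class TimedRequest {
        private Duration masterNodeTimeout = Duration.ofSeconds(30); // private: subclasses cannot touch it directly

        final TimedRequest masterNodeTimeout(Duration timeout) {
            this.masterNodeTimeout = Objects.requireNonNull(timeout); // null rejected at the only write site
            return this;
        }

        final Duration masterNodeTimeout() {
            return masterNodeTimeout;
        }
    }

    final class ExampleRequest extends TimedRequest {
        @Override
        public boolean equals(Object o) {
            // the field is private in the parent, so equality uses the accessor
            return o instanceof ExampleRequest that && masterNodeTimeout().equals(that.masterNodeTimeout());
        }

        @Override
        public int hashCode() {
            return Objects.hash(masterNodeTimeout());
        }
    }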
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
index 2e8a28d412e26..9127092bdb13a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java
@@ -461,7 +461,7 @@ public boolean equals(Object o) {
             && Arrays.equals(indices, that.indices)
             && Objects.equals(indicesOptions, that.indicesOptions)
             && Arrays.equals(featureStates, that.featureStates)
-            && Objects.equals(masterNodeTimeout, that.masterNodeTimeout)
+            && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout())
             && Objects.equals(userMetadata, that.userMetadata);
     }
 
@@ -495,7 +495,7 @@ public String toString() {
             + ", waitForCompletion="
             + waitForCompletion
             + ", masterNodeTimeout="
-            + masterNodeTimeout
+            + masterNodeTimeout()
             + ", metadata="
             + userMetadata
             + '}';
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java
index e9de49dcbf5b4..d29996711d722 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java
@@ -222,7 +222,7 @@ public String getDescription() {
         if (indices.length > 0) {
             stringBuilder.append("indices ").append(Arrays.toString(indices)).append(", ");
         }
-        stringBuilder.append("master timeout [").append(masterNodeTimeout).append("]]");
+        stringBuilder.append("master timeout [").append(masterNodeTimeout()).append("]]");
         return stringBuilder.toString();
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
index 666419edc1bf0..7fa2e11317a43 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
@@ -254,7 +254,7 @@ public boolean equals(Object o) {
             return false;
         }
         UpdateSettingsRequest that = (UpdateSettingsRequest) o;
-        return masterNodeTimeout.equals(that.masterNodeTimeout)
+        return masterNodeTimeout().equals(that.masterNodeTimeout())
             && ackTimeout().equals(that.ackTimeout())
             && Objects.equals(settings, that.settings)
             && Objects.equals(indicesOptions, that.indicesOptions)
@@ -265,7 +265,15 @@ && ackTimeout().equals(that.ackTimeout())
 
     @Override
     public int hashCode() {
-        return Objects.hash(masterNodeTimeout, ackTimeout(), settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices));
+        return Objects.hash(
+            masterNodeTimeout(),
+            ackTimeout(),
+            settings,
+            indicesOptions,
+            preserveExisting,
+            reopen,
+            Arrays.hashCode(indices)
+        );
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
index 6459f6c1b458a..063dbb0397de8 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.core.TimeValue;
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
  * A based request for master based operation.
  */
 public abstract class MasterNodeRequest